コード例 #1
0
def _get_variant_testcase_for_job(testcase, job_type):
    """Return a testcase entity suitable for a variant task run.

    For engine fuzzer testcases, clones the testcase and swaps in the fuzz
    target parameters appropriate for |job_type|'s fuzzing engine. Blackbox
    fuzzer testcases (and same-job requests) are returned unchanged.
    """
    if testcase.job_type == job_type:
        # Same job: the variant task operates directly on this testcase.
        return testcase

    if not environment.is_engine_fuzzer_job(testcase.job_type):
        # Blackbox fuzzer testcases need no fuzzer change.
        return testcase

    engine = environment.get_engine_for_job(job_type)
    project_name = data_handler.get_project_name(job_type)
    binary = testcase.get_metadata("fuzzer_binary_name")
    qualified_name = data_types.fuzz_target_fully_qualified_name(
        engine, project_name, binary)

    variant = data_types.clone_entity(testcase)
    variant.key = testcase.key
    variant.fuzzer_name = engine
    variant.overridden_fuzzer_name = qualified_name
    variant.job_type = job_type

    # Neutralize put() so the clone can never be persisted. DO NOT REMOVE THIS.
    variant.put = lambda: None

    return variant
コード例 #2
0
def save_crash_info_if_needed(testcase_id, crash_revision, job_type,
                              crash_type, crash_address, crash_frames):
    """Saves crash report for chromium project, skip otherwise.

    Returns the populated CrashReportInfo, or None when the project is not
    chromium or the stack could not be symbolized.
    """
    if data_handler.get_project_name(job_type) != 'chromium':
        return None

    stack_bytes = get_symbolized_stack_bytes(crash_type, crash_address,
                                             crash_frames)
    if not stack_bytes:
        return None

    crash_info = CrashReportInfo(serialized_crash_stack_frames=stack_bytes)

    # Product (from platform) and version (from revision) are required fields.
    crash_info.product = PRODUCT_MAP[environment.platform()]
    crash_info.version = revisions.get_real_revision(
        crash_revision, job_type, display=True)

    # Attach bot identity and the owning testcase id.
    crash_info.bot_id = environment.get_value('BOT_NAME')
    crash_info.testcase_id = int(testcase_id)

    # Persist the CrashInfo metadata entity.
    report_metadata = crash_info.to_report_metadata()
    report_metadata.job_type = job_type
    report_metadata.crash_revision = crash_revision
    report_metadata.put()

    logs.log('Created crash report entry for testcase %s.' % testcase_id)
    return crash_info
コード例 #3
0
def create_variant_tasks_if_needed(testcase):
    """Schedule variant tasks for |testcase| on other jobs in its project."""
    if testcase.duplicate_of:
        # A duplicate shares its params with another testcase, so there is no
        # need to spend cycles computing variants again.
        return

    testcase_id = testcase.key.id()
    project = data_handler.get_project_name(testcase.job_type)
    for job in data_types.Job.query(data_types.Job.project == project):
        job_type = job.name

        # Variants only make sense on a job type different from our own.
        if job_type == testcase.job_type:
            continue

        # Engine and blackbox fuzzer testcases are not interchangeable;
        # only pair jobs of the same kind.
        same_kind = (environment.is_engine_fuzzer_job(testcase.job_type) ==
                     environment.is_engine_fuzzer_job(job_type))
        if not same_kind:
            continue

        # Skip experimental jobs.
        if utils.string_is_true(job.get_environment().get('EXPERIMENTAL')):
            continue

        tasks.add_task('variant', testcase_id, job_type,
                       tasks.queue_for_platform(job.platform))

        variant = data_handler.get_testcase_variant(testcase_id, job_type)
        variant.status = data_types.TestcaseVariantStatus.PENDING
        variant.put()
コード例 #4
0
def get_components_list(component_revisions_dict, job_type):
    """Return a prioritized order of components based on job type.

    For non-chromium projects, moves the project's own component to the
    front of the sorted list: an exact '/src/<project>' path match wins;
    otherwise the last fuzzy basename match ends up first.
    """
    components = sorted(component_revisions_dict.keys())

    if utils.is_chromium():
        # Prioritization only applies to non-chromium projects.
        return components

    project_name = data_handler.get_project_name(job_type)
    if not project_name:
        # Without a project name there is nothing to prioritize against.
        return components

    project_src = '/src/' + project_name
    # Iterate over a snapshot since we reorder |components| as we go.
    for candidate in list(components):
        if candidate == project_src:
            # Exact source-path match: promote it and stop looking.
            components.remove(candidate)
            components.insert(0, candidate)
            break

        if project_name.lower() in os.path.basename(candidate).lower():
            # Fuzzy name match: promote it, but keep scanning in case an
            # exact match appears later.
            components.remove(candidate)
            components.insert(0, candidate)

    return components
コード例 #5
0
def create_variant_tasks_if_needed(testcase):
    """Creates a variant task if needed."""
    testcase_id = testcase.key.id()
    for job in ndb_utils.get_all_from_model(data_types.Job):
        job_type = job.name

        # The variant must be tested in a different job type than our own.
        if job_type == testcase.job_type:
            continue

        # Engine fuzzer testcases cannot be reproduced with blackbox fuzzers
        # and vice versa.
        if (environment.is_engine_fuzzer_job(job_type) !=
                environment.is_engine_fuzzer_job(testcase.job_type)):
            continue

        # Skip experimental jobs.
        env = job.get_environment()
        if utils.string_is_true(env.get('EXPERIMENTAL')):
            continue

        # Stay within the testcase's own project.
        if data_handler.get_project_name(job_type) != testcase.project_name:
            continue

        queue = tasks.queue_for_platform(job.platform)
        tasks.add_task('variant', testcase_id, job_type, queue)

        variant = data_handler.get_testcase_variant(testcase_id, job_type)
        variant.status = data_types.TestcaseVariantStatus.PENDING
        variant.put()
コード例 #6
0
 def test_get_from_job(self):
     """Project name is read from the job's environment string."""
     job = data_types.Job(
         name='job',
         environment_string=('PROJECT_NAME = from_internal_project\n'
                             'HELP_URL = help_url\n'))
     job.put()
     self.assertEqual('from_internal_project',
                      data_handler.get_project_name('job'))
コード例 #7
0
def find_fuzz_target(engine, target_name, job_name):
    """Resolve a fuzz target given the engine, target name (which may or may
  not be prefixed with project), and job.

  Returns (fully_qualified_name, binary), or (None, None) when no such
  target exists."""
    project = data_handler.get_project_name(job_name)
    qualified = data_types.fuzz_target_fully_qualified_name(
        engine, project, target_name)

    target = data_handler.get_fuzz_target(qualified)
    if not target:
        return None, None

    return target.fully_qualified_name(), target.binary
コード例 #8
0
def find_fuzz_target(engine, target_name, job_name):
    """Return fuzz target values given the engine, target name (which may or
  may not be prefixed with project), and job.

  Raises an EarlyExitException (HTTP 400) when the target does not exist."""
    project = data_handler.get_project_name(job_name)
    qualified_name = data_types.fuzz_target_fully_qualified_name(
        engine, project, target_name)

    target = data_handler.get_fuzz_target(qualified_name)
    if target:
        return target.fully_qualified_name(), target.binary

    raise helpers.EarlyExitException('Fuzz target does not exist.', 400)
コード例 #9
0
    def get_coverage_info(self, fuzzer, date=None):
        """Return coverage info of child fuzzers."""
        if fuzzer in data_types.BUILTIN_FUZZERS:
            # Built-in fuzzer: coverage is tracked per project, resolved via
            # the (single) job this query is scoped to.
            job = self.single_job_or_none()
            return get_coverage_info(data_handler.get_project_name(job), date)

        target = data_handler.get_fuzz_target(fuzzer)
        lookup_name = target.project_qualified_name() if target else fuzzer
        return get_coverage_info(lookup_name, date)
コード例 #10
0
    def get(self):
        """Handle a GET request: list projects, fuzzers and jobs visible to
        the current user, optionally filtered by a |project| parameter."""
        project = request.get('project')

        if access.has_access():
            # Internal ClusterFuzz user (eg: ClusterFuzz developer): show
            # every project so the user can switch to another one as needed.
            projects_list = data_handler.get_all_project_names()

            # Fuzzers and jobs are narrowed to |project| when one is given.
            fuzzers_list = (
                data_handler.get_all_fuzzer_names_including_children(
                    include_parents=True, project=project))
            jobs_list = data_handler.get_all_job_type_names(project=project)
        else:
            # External user (eg: non-Chrome dev who submitted a fuzzer or
            # someone with a project in OSS-Fuzz): restrict results to what
            # their email grants access to.
            user_email = helpers.get_user_email()

            # TODO(aarya): Filter fuzzer and job if |project| is provided.
            fuzzers_list = sorted(
                external_users.allowed_fuzzers_for_user(user_email,
                                                        include_from_jobs=True,
                                                        include_parents=True))
            if not fuzzers_list:
                # User doesn't actually have access to any fuzzers.
                raise helpers.AccessDeniedException(
                    "You don't have access to any fuzzers.")

            jobs_list = sorted(
                external_users.allowed_jobs_for_user(user_email))
            projects_list = sorted(
                {data_handler.get_project_name(job) for job in jobs_list})

        return self.render_json({
            'projects': projects_list,
            'fuzzers': fuzzers_list,
            'jobs': jobs_list,
        })
コード例 #11
0
def _get_project_report_url(job, date):
  """Return url for the report requested.

  Args:
    job: Job type name, used to resolve the project.
    date: Either the string 'latest' or a 'YYYY-MM-DD' date string.

  Returns:
    The coverage report's html url, or None if the project or its coverage
    info cannot be found.

  Raises:
    helpers.EarlyExitException: if |date| is not 'latest' and cannot be
        parsed as YYYY-MM-DD.
  """
  project = data_handler.get_project_name(job)
  if not project:
    return None

  if date == 'latest':
    date = None
  else:
    try:
      date = datetime.datetime.strptime(date, '%Y-%m-%d').date()
    except (ValueError, TypeError):
      # Narrowed from a bare `except:`, which would also swallow SystemExit
      # and KeyboardInterrupt. strptime raises ValueError on a malformed
      # date string and TypeError on non-string input.
      raise helpers.EarlyExitException('Invalid date.', 400)

  info = fuzzer_stats.get_coverage_info(project, date)
  if not info:
    return None

  return info.html_report_url
コード例 #12
0
ファイル: revisions.py プロジェクト: satoshi-n/clusterfuzz
def get_components_list(component_revisions_dict, job_type):
    """Return a prioritized order of components based on job type.

    For non-chromium projects, the component exactly matching the project's
    '/src/<project>' path (if any) is moved to the front of the sorted list.
    """
    components = sorted(component_revisions_dict.keys())

    if utils.is_chromium():
        # Prioritization only applies to non-chromium projects.
        return components

    project_name = data_handler.get_project_name(job_type)
    if not project_name:
        # Without a project name there is nothing to prioritize against.
        return components

    # Promote the project's own source component, when present.
    project_src = '/src/%s' % project_name
    if project_src in components:
        components.remove(project_src)
        components.insert(0, project_src)

    return components
コード例 #13
0
def _process_corpus_crashes(context, result):
    """Process crashes found in the corpus.

    For each crash in |result.crashes| without an existing testcase of the
    same project/type/state/security signature: uploads the crashing unit to
    blob storage, stores a new Testcase entity, copies issue metadata onto
    it, and schedules follow-up tasks (starting with minimization).
    """
    # Default Testcase entity values.
    crash_revision = result.revision
    job_type = environment.get_value("JOB_NAME")
    minimized_arguments = "%TESTCASE% " + context.fuzz_target.binary
    project_name = data_handler.get_project_name(job_type)

    comment = "Fuzzer %s generated corpus testcase crashed (r%s)" % (
        context.fuzz_target.project_qualified_name(),
        crash_revision,
    )

    # Generate crash reports.
    for crash in result.crashes:
        # Deduplicate: skip crashes that already have a matching testcase.
        existing_testcase = data_handler.find_testcase(project_name,
                                                       crash.crash_type,
                                                       crash.crash_state,
                                                       crash.security_flag)
        if existing_testcase:
            continue

        # Upload/store testcase.
        if environment.is_trusted_host():
            # Local import: only available/needed on trusted hosts.
            from bot.untrusted_runner import file_host

            unit_path = os.path.join(context.bad_units_path,
                                     os.path.basename(crash.unit_path))
            # Prevent the worker from escaping out of |context.bad_units_path|.
            if not file_host.is_directory_parent(unit_path,
                                                 context.bad_units_path):
                raise CorpusPruningException("Invalid units path from worker.")

            file_host.copy_file_from_worker(crash.unit_path, unit_path)
        else:
            unit_path = crash.unit_path

        # Persist the crashing unit to blob storage for later reproduction.
        with open(unit_path, "rb") as f:
            key = blobs.write_blob(f)

        # Set the absolute_path property of the Testcase to a file in FUZZ_INPUTS
        # instead of the local quarantine directory.
        absolute_testcase_path = os.path.join(
            environment.get_value("FUZZ_INPUTS"), "testcase")

        testcase_id = data_handler.store_testcase(
            crash=crash,
            fuzzed_keys=key,
            minimized_keys="",
            regression="",
            fixed="",
            one_time_crasher_flag=False,
            crash_revision=crash_revision,
            comment=comment,
            absolute_path=absolute_testcase_path,
            fuzzer_name=context.fuzz_target.engine,
            fully_qualified_fuzzer_name=context.fuzz_target.
            fully_qualified_name(),
            job_type=job_type,
            archived=False,
            archive_filename="",
            binary_flag=True,
            http_flag=False,
            gestures=None,
            redzone=DEFAULT_REDZONE,
            disable_ubsan=False,
            minidump_keys=None,
            window_argument=None,
            timeout_multiplier=1.0,
            minimized_arguments=minimized_arguments,
        )

        # Set fuzzer_binary_name in testcase metadata.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        testcase.set_metadata("fuzzer_binary_name", result.fuzzer_binary_name)

        # Copy any per-target issue metadata onto the testcase, batching the
        # writes into a single put() at the end.
        issue_metadata = engine_common.get_all_issue_metadata_for_testcase(
            testcase)
        if issue_metadata:
            for key, value in issue_metadata.items():
                testcase.set_metadata(key, value, update_testcase=False)

            testcase.put()

        # Create additional tasks for testcase (starting with minimization).
        # Re-fetch to pick up any changes made by set_metadata above.
        testcase = data_handler.get_testcase_by_id(testcase_id)
        task_creation.create_tasks(testcase)
コード例 #14
0
ファイル: analyze_task.py プロジェクト: wdgreen/clusterfuzz
def execute_task(testcase_id, job_type):
  """Run analyze task.

  Analyzes an uploaded testcase: sets up the build, tries to reproduce the
  crash (retrying with http enabled for non-engine jobs), records crash
  details, deduplicates against existing testcases, and schedules follow-up
  tasks (minimize, regression, impact, progression, stack).

  Args:
    testcase_id: Id of the uploaded testcase to analyze.
    job_type: Name of the job this task runs under.
  """
  # Reset redzones.
  environment.reset_current_memory_tool_options(redzone_size=128)

  # Unset window location size and position properties so as to use default.
  environment.set_value('WINDOW_ARG', '')

  # Locate the testcase associated with the id.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  if not testcase:
    return

  data_handler.update_testcase_comment(testcase, data_types.TaskState.STARTED)

  # An uploaded testcase must have upload metadata; without it the entity is
  # orphaned and is deleted.
  metadata = data_types.TestcaseUploadMetadata.query(
      data_types.TestcaseUploadMetadata.testcase_id == int(testcase_id)).get()
  if not metadata:
    logs.log_error(
        'Testcase %s has no associated upload metadata.' % testcase_id)
    testcase.key.delete()
    return

  is_lsan_enabled = environment.get_value('LSAN')
  if is_lsan_enabled:
    # Creates empty local blacklist so all leaks will be visible to uploader.
    leak_blacklist.create_empty_local_blacklist()

  # Store the bot name and timestamp in upload metadata.
  bot_name = environment.get_value('BOT_NAME')
  metadata.bot_name = bot_name
  metadata.timestamp = datetime.datetime.utcnow()
  metadata.put()

  # Adjust the test timeout, if user has provided one.
  if metadata.timeout:
    environment.set_value('TEST_TIMEOUT', metadata.timeout)

  # Adjust the number of retries, if user has provided one.
  if metadata.retries is not None:
    environment.set_value('CRASH_RETRIES', metadata.retries)

  # Setup testcase and get absolute testcase path.
  file_list, _, testcase_file_path = setup.setup_testcase(testcase)
  if not file_list:
    return

  # Set up a custom or regular build based on revision.
  build_manager.setup_build(testcase.crash_revision)

  # Check if we have an application path. If not, our build failed
  # to setup correctly.
  app_path = environment.get_value('APP_PATH')
  if not app_path:
    data_handler.update_testcase_comment(testcase, data_types.TaskState.ERROR,
                                         'Build setup failed')

    # Retry the whole task once (after a wait) before declaring the testcase
    # invalid.
    if data_handler.is_first_retry_for_task(testcase):
      build_fail_wait = environment.get_value('FAIL_WAIT')
      tasks.add_task(
          'analyze', testcase_id, job_type, wait_time=build_fail_wait)
    else:
      close_invalid_testcase_and_update_status(testcase, metadata,
                                               'Build setup failed')
    return

  # Update initial testcase information.
  testcase.absolute_path = testcase_file_path
  testcase.job_type = job_type
  testcase.binary_flag = utils.is_binary_file(testcase_file_path)
  testcase.queue = tasks.default_queue()
  testcase.crash_state = ''

  # Set initial testcase metadata fields (e.g. build url, etc).
  data_handler.set_initial_testcase_metadata(testcase)

  # Update minimized arguments and use ones provided during user upload.
  if not testcase.minimized_arguments:
    minimized_arguments = environment.get_value('APP_ARGS') or ''
    additional_command_line_flags = testcase.get_metadata(
        'uploaded_additional_args')
    if additional_command_line_flags:
      minimized_arguments += ' %s' % additional_command_line_flags
    environment.set_value('APP_ARGS', minimized_arguments)
    testcase.minimized_arguments = minimized_arguments

  # Update other fields not set at upload time.
  testcase.crash_revision = environment.get_value('APP_REVISION')
  data_handler.set_initial_testcase_metadata(testcase)
  testcase.put()

  # Initialize some variables.
  gestures = testcase.gestures
  http_flag = testcase.http_flag
  test_timeout = environment.get_value('TEST_TIMEOUT')

  # Get the crash output.
  result = testcase_manager.test_for_crash_with_retries(
      testcase,
      testcase_file_path,
      test_timeout,
      http_flag=http_flag,
      compare_crash=False)

  # If we don't get a crash, try enabling http to see if we can get a crash.
  # Skip engine fuzzer jobs (e.g. libFuzzer, AFL) for which http testcase paths
  # are not applicable.
  if (not result.is_crash() and not http_flag and
      not environment.is_engine_fuzzer_job()):
    result_with_http = testcase_manager.test_for_crash_with_retries(
        testcase,
        testcase_file_path,
        test_timeout,
        http_flag=True,
        compare_crash=False)
    if result_with_http.is_crash():
      logs.log('Testcase needs http flag for crash.')
      http_flag = True
      result = result_with_http

  # Refresh our object, since the testcase may have changed while we were
  # reproducing.
  testcase = data_handler.get_testcase_by_id(testcase_id)
  if not testcase:
    return

  # Set application command line with the correct http flag.
  application_command_line = (
      testcase_manager.get_command_line_for_application(
          testcase_file_path, needs_http=http_flag))

  # Get the crash data.
  crashed = result.is_crash()
  crash_time = result.get_crash_time()
  state = result.get_symbolized_data()
  unsymbolized_crash_stacktrace = result.get_stacktrace(symbolized=False)

  # Get crash info object with minidump info. Also, re-generate unsymbolized
  # stacktrace if needed.
  crash_info, _ = (
      crash_uploader.get_crash_info_and_stacktrace(
          application_command_line, state.crash_stacktrace, gestures))
  if crash_info:
    testcase.minidump_keys = crash_info.store_minidump()

  if not crashed:
    # Could not reproduce the crash.
    log_message = (
        'Testcase didn\'t crash in %d seconds (with retries)' % test_timeout)
    data_handler.update_testcase_comment(
        testcase, data_types.TaskState.FINISHED, log_message)

    # For an unreproducible testcase, retry once on another bot to confirm
    # our results and in case this bot is in a bad state which we didn't catch
    # through our usual means.
    if data_handler.is_first_retry_for_task(testcase):
      testcase.status = 'Unreproducible, retrying'
      testcase.put()

      tasks.add_task('analyze', testcase_id, job_type)
      return

    # In the general case, we will not attempt to symbolize if we do not detect
    # a crash. For user uploads, we should symbolize anyway to provide more
    # information about what might be happening.
    crash_stacktrace_output = utils.get_crash_stacktrace_output(
        application_command_line, state.crash_stacktrace,
        unsymbolized_crash_stacktrace)
    testcase.crash_stacktrace = data_handler.filter_stacktrace(
        crash_stacktrace_output)
    close_invalid_testcase_and_update_status(testcase, metadata,
                                             'Unreproducible')

    # A non-reproducing testcase might still impact production branches.
    # Add the impact task to get that information.
    task_creation.create_impact_task_if_needed(testcase)
    return

  # Update http flag and re-run testcase to store dependencies (for bundled
  # archives only).
  testcase.http_flag = http_flag
  if not store_testcase_dependencies_from_bundled_testcase_archive(
      metadata, testcase, testcase_file_path):
    return

  # Update testcase crash parameters.
  testcase.crash_type = state.crash_type
  testcase.crash_address = state.crash_address
  testcase.crash_state = state.crash_state

  # Try to guess if the bug is security or not.
  security_flag = crash_analyzer.is_security_issue(
      state.crash_stacktrace, state.crash_type, state.crash_address)
  testcase.security_flag = security_flag

  # If it is, guess the severity.
  if security_flag:
    testcase.security_severity = severity_analyzer.get_security_severity(
        state.crash_type, state.crash_stacktrace, job_type, bool(gestures))

  log_message = ('Testcase crashed in %d seconds (r%d)' %
                 (crash_time, testcase.crash_revision))
  data_handler.update_testcase_comment(testcase, data_types.TaskState.FINISHED,
                                       log_message)

  # See if we have to ignore this crash.
  # NOTE(review): 'Irrelavant' below is misspelled; it is a runtime status
  # string, so confirm nothing matches on it before fixing the spelling.
  if crash_analyzer.ignore_stacktrace(state.crash_stacktrace):
    close_invalid_testcase_and_update_status(testcase, metadata, 'Irrelavant')
    return

  # Test for reproducibility.
  one_time_crasher_flag = not testcase_manager.test_for_reproducibility(
      testcase_file_path, state.crash_state, security_flag, test_timeout,
      http_flag, gestures)
  testcase.one_time_crasher_flag = one_time_crasher_flag

  # Check to see if this is a duplicate.
  project_name = data_handler.get_project_name(job_type)
  existing_testcase = data_handler.find_testcase(
      project_name, state.crash_type, state.crash_state, security_flag)
  if existing_testcase:
    # If the existing test case is unreproducible and we are, replace the
    # existing test case with this one.
    if existing_testcase.one_time_crasher_flag and not one_time_crasher_flag:
      duplicate_testcase = existing_testcase
      original_testcase = testcase
    else:
      duplicate_testcase = testcase
      original_testcase = existing_testcase
      metadata.status = 'Duplicate'
      metadata.duplicate_of = existing_testcase.key.id()

    duplicate_testcase.status = 'Duplicate'
    duplicate_testcase.duplicate_of = original_testcase.key.id()
    duplicate_testcase.put()

  # Set testcase and metadata status if not set already.
  if testcase.status != 'Duplicate':
    testcase.status = 'Processed'
    metadata.status = 'Confirmed'

    # Add new leaks to global blacklist to avoid detecting duplicates.
    # Only add if testcase has a direct leak crash and if it's reproducible.
    if is_lsan_enabled:
      leak_blacklist.add_crash_to_global_blacklist_if_needed(testcase)

  # Add application specific information in the trace.
  crash_stacktrace_output = utils.get_crash_stacktrace_output(
      application_command_line, state.crash_stacktrace,
      unsymbolized_crash_stacktrace)
  testcase.crash_stacktrace = data_handler.filter_stacktrace(
      crash_stacktrace_output)

  # Update the testcase values.
  testcase.put()

  # Update the upload metadata.
  metadata.security_flag = security_flag
  metadata.put()

  # Create tasks to
  # 1. Minimize testcase (minimize).
  # 2. Find regression range (regression).
  # 3. Find testcase impact on production branches (impact).
  # 4. Check whether testcase is fixed (progression).
  # 5. Get second stacktrace from another job in case of
  #    one-time crashers (stack).
  task_creation.create_tasks(testcase)
コード例 #15
0
 def test_get_from_default(self):
     """Project name falls back to the local config default."""
     project = data_handler.get_project_name('job')
     self.assertEqual('test-project', project)
コード例 #16
0
ファイル: revisions.py プロジェクト: satoshi-n/clusterfuzz
def get_component_revisions_dict(revision, job_type):
    """Retrieve revision vars dict.

    Builds a mapping of component path -> revision info for |revision| under
    |job_type|, fetched from the job's configured revision vars url. The url
    content is interpreted as DEPS, Clank DEPS, source-map, or plain yaml
    depending on the url.

    Returns {} for a zero/None start revision, and None on any fetch or
    parse failure (or when no revision vars url is configured for the job).
    """
    if revision == 0 or revision == '0' or revision is None:
        # Return empty dict for zero start revision.
        return {}

    config = db_config.get()
    revision_info_url_format = db_config.get_value_for_job(
        config.revision_vars_url, job_type)
    if not revision_info_url_format:
        return None

    project_name = data_handler.get_project_name(job_type)
    revisions_dict = {}

    if utils.is_chromium():
        # For chromium, resolve the commit position to a git hash and seed
        # the dict with an explicit '/src' entry for the main repository.
        component = data_handler.get_component_name(job_type)
        repository = data_handler.get_repository_for_component(component)
        if repository and not _is_clank(revision_info_url_format):
            revision_hash = _git_commit_position_to_git_hash_for_chromium(
                revision, repository)
            if revision_hash is None:
                return None

            # FIXME: While we check for this explicitly appended component in all
            # applicable cases that we know of within this codebase, if the dict
            # is shared with an external service (e.g. Predator) we may need to clean
            # this up beforehand.
            revisions_dict['/src'] = {
                'name': _get_component_display_name(component, project_name),
                'url': _git_url_for_chromium_repository(repository),
                'rev': revision_hash,
                'commit_pos': revision
            }

            # Use revision hash for info url later.
            revision = revision_hash

    revision_info_url = revision_info_url_format % revision
    url_content = _get_url_content(revision_info_url)
    if not url_content:
        logs.log_error('Failed to get component revisions from %s.' %
                       revision_info_url)
        return None

    # Parse as per DEPS format.
    if _is_deps(revision_info_url):
        deps_revisions_dict = deps_to_revisions_dict(url_content)
        if not deps_revisions_dict:
            return None

        revisions_dict.update(deps_revisions_dict)
        return revisions_dict

    # Parse as per Clank DEPS format.
    if _is_clank(revision_info_url):
        return _clank_revision_file_to_revisions_dict(url_content)

    # Default case: parse content as yaml.
    revisions_dict = _to_dict(url_content)
    if not revisions_dict:
        logs.log_error('Failed to parse component revisions from %s.' %
                       revision_info_url)
        return None

    # Parse as per source map format.
    if revision_info_url.endswith(SOURCE_MAP_EXTENSION):
        revisions_dict = _src_map_to_revisions_dict(revisions_dict,
                                                    project_name)

    return revisions_dict