Example #1
def _make_bisection_request(pubsub_topic, testcase, target, bisect_type):
    """Make a bisection request to the external bisection service. Returns whether
  or not a request was actually made."""
    if bisect_type == 'fixed':
        old_commit, new_commit = _get_commits(testcase.fixed,
                                              testcase.job_type)
    elif bisect_type == 'regressed':
        old_commit, new_commit = _get_commits(testcase.regression,
                                              testcase.job_type)
    else:
        raise ValueError('Invalid bisection type: ' + bisect_type)

    if not new_commit:
        # old_commit can be empty (i.e. '0' case), but new_commit should never be.
        return False

    old_commit, new_commit = _check_commits(testcase, bisect_type, old_commit,
                                            new_commit)

    reproducer = blobs.read_key(testcase.minimized_keys
                                or testcase.fuzzed_keys)
    pubsub_client = pubsub.PubSubClient()
    pubsub_client.publish(pubsub_topic, [
        pubsub.Message(
            reproducer, {
                'type': bisect_type,
                'project_name': target.project,
                'sanitizer': environment.SANITIZER_NAME_MAP[
                    environment.get_memory_tool_name(testcase.job_type)],
                'fuzz_target': target.binary,
                'old_commit': old_commit,
                'new_commit': new_commit,
                'testcase_id': str(testcase.key.id()),
                'issue_id': testcase.bug_information,
                'crash_type': testcase.crash_type,
                'crash_state': testcase.crash_state,
                'security': str(testcase.security_flag),
                'severity': severity_analyzer.severity_to_string(
                    testcase.security_severity),
                'timestamp': testcase.timestamp.isoformat(),
            })
    ])
    return True
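
Every example on this page turns a job name into a memory tool name via environment.get_memory_tool_name and, in several cases, maps that through environment.SANITIZER_NAME_MAP. As a rough sketch only (the token list and ASAN default below are assumptions, not ClusterFuzz's actual implementation), the helper can be pictured as scanning the job name for a known tool token:

# Illustrative stand-in for environment.get_memory_tool_name; the real
# ClusterFuzz helper may differ. Token list and default are assumptions.
MEMORY_TOOL_TOKENS = ('asan', 'msan', 'tsan', 'ubsan', 'cfi')


def get_memory_tool_name_sketch(job_name):
    """Guess the memory tool from a job name like 'libfuzzer_msan_chrome'."""
    for token in job_name.lower().split('_'):
        if token in MEMORY_TOOL_TOKENS:
            return token.upper()
    return 'ASAN'  # Assumed default when no token matches.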
Example #2
def do_fork():
    """Return whether or not to do fork mode."""
    # TODO(crbug.com/920355): Reenable this when fork mode works with ChromeOS's
    # MSAN.
    job_name = environment.get_value('JOB_NAME')
    memory_tool = environment.get_memory_tool_name(job_name)
    if memory_tool == 'MSAN' and environment.is_chromeos_system_job():
        return False

    return engine_common.decide_with_probability(
        engine_common.get_strategy_probability(strategy.FORK_STRATEGY,
                                               default=FORK_PROBABILITY))
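
The last line of do_fork delegates the random choice to engine_common.decide_with_probability. A minimal stand-in, assuming the helper simply compares a uniform draw against the given probability (the real implementation may differ):

import random


def decide_with_probability_sketch(probability):
    # True with the given probability, e.g. 0.1 succeeds on ~10% of calls.
    return random.random() < probability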
Example #3
def update_job_weight(job_name, multiplier):
    """Update a job weight."""
    tool_name = environment.get_memory_tool_name(job_name)
    multiplier *= SANITIZER_WEIGHTS.get(tool_name, DEFAULT_SANITIZER_WEIGHT)

    query = data_types.FuzzerJob.query(data_types.FuzzerJob.job == job_name)
    changed_weights = []
    for fuzzer_job in query:
        if fuzzer_job.multiplier != multiplier:
            fuzzer_job.multiplier = multiplier
            changed_weights.append(fuzzer_job)

    if changed_weights:
        ndb.put_multi(changed_weights)
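
update_job_weight scales a job's selection weight by a per-sanitizer factor looked up in SANITIZER_WEIGHTS. The table's shape might resemble the following; the names come from the example above, but the values are purely hypothetical:

# Hypothetical values; the real SANITIZER_WEIGHTS and
# DEFAULT_SANITIZER_WEIGHT in ClusterFuzz may differ.
DEFAULT_SANITIZER_WEIGHT = 0.1
SANITIZER_WEIGHTS = {
    'ASAN': 1.0,
    'MSAN': 0.2,
    'UBSAN': 0.1,
}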
Example #4
def request_bisection(testcase, bisect_type):
    """Request precise bisection."""
    pubsub_topic = local_config.ProjectConfig().get(
        'bisect_service.pubsub_topic')
    if not pubsub_topic:
        return

    target = testcase.get_fuzz_target()
    if not target:
        return

    if bisect_type == 'fixed':
        old_commit, new_commit = _get_commits(testcase.fixed,
                                              testcase.job_type)
    elif bisect_type == 'regressed':
        old_commit, new_commit = _get_commits(testcase.regression,
                                              testcase.job_type)
    else:
        raise ValueError('Invalid bisection type: ' + bisect_type)

    reproducer = blobs.read_key(testcase.minimized_keys
                                or testcase.fuzzed_keys)
    pubsub_client = pubsub.PubSubClient()
    pubsub_client.publish(pubsub_topic, [
        pubsub.Message(
            reproducer, {
                'type': bisect_type,
                'project_name': target.project,
                'sanitizer': environment.SANITIZER_NAME_MAP[
                    environment.get_memory_tool_name(testcase.job_type)],
                'fuzz_target': target.binary,
                'old_commit': old_commit,
                'new_commit': new_commit,
                'testcase_id': str(testcase.key.id()),
                'issue_id': testcase.bug_information,
                'crash_type': testcase.crash_type,
                'security': str(testcase.security_flag),
            })
    ])
Example #5
def do_fork():
    """Return whether or not to do fork mode."""
    # TODO(metzman): Find a workaround for Windows command line limit before
    # re-enabling this.
    if environment.platform() == 'WINDOWS':
        return False

    # TODO(crbug.com/920355): Reenable this when fork mode works with ChromeOS's
    # MSAN.
    job_name = environment.get_value('JOB_NAME')
    memory_tool = environment.get_memory_tool_name(job_name)
    if memory_tool == 'MSAN' and environment.is_chromeos_system_job():
        return False

    return engine_common.decide_with_probability(
        engine_common.get_strategy_probability(strategy.FORK_STRATEGY,
                                               default=FORK_PROBABILITY))
Example #6
def _make_bisection_request(pubsub_topic, testcase, target, bisect_type):
    """Make a bisection request to the external bisection service."""
    if bisect_type == 'fixed':
        old_commit, new_commit = _get_commits(testcase.fixed,
                                              testcase.job_type)
    elif bisect_type == 'regressed':
        old_commit, new_commit = _get_commits(testcase.regression,
                                              testcase.job_type)
    else:
        raise ValueError('Invalid bisection type: ' + bisect_type)

    if not old_commit or not new_commit:
        return

    reproducer = blobs.read_key(testcase.minimized_keys
                                or testcase.fuzzed_keys)
    pubsub_client = pubsub.PubSubClient()
    pubsub_client.publish(pubsub_topic, [
        pubsub.Message(
            reproducer, {
                'type': bisect_type,
                'project_name': target.project,
                'sanitizer': environment.SANITIZER_NAME_MAP[
                    environment.get_memory_tool_name(testcase.job_type)],
                'fuzz_target': target.binary,
                'old_commit': old_commit,
                'new_commit': new_commit,
                'testcase_id': str(testcase.key.id()),
                'issue_id': testcase.bug_information,
                'crash_type': testcase.crash_type,
                'security': str(testcase.security_flag),
            })
    ])
Example #7
def add_external_task(command, testcase_id, job):
    """Add external task."""
    if command != 'progression':
        # Only progression is supported.
        return

    pubsub_client = pubsub.PubSubClient()
    topic_name = job.external_reproduction_topic
    assert topic_name is not None

    testcase = data_handler.get_testcase_by_id(testcase_id)
    fuzz_target = testcase.get_fuzz_target()

    memory_tool_name = environment.get_memory_tool_name(job.name)
    sanitizer = environment.SANITIZER_NAME_MAP.get(memory_tool_name)
    job_environment = job.get_environment()
    if job_environment.get('CUSTOM_BINARY'):
        raise RuntimeError('External jobs should never have custom binaries.')

    build_path = (job_environment.get('RELEASE_BUILD_BUCKET_PATH')
                  or job_environment.get('FUZZ_TARGET_BUILD_BUCKET_PATH'))
    if build_path is None:
        raise RuntimeError(f'{job.name} has no build path defined.')

    min_revision = (testcase.get_metadata('last_tested_revision')
                    or testcase.crash_revision)

    logs.log(f'Publishing external reproduction task for {testcase_id}.')
    attributes = {
        'project': job.project,
        'target': fuzz_target.binary,
        'fuzzer': testcase.fuzzer_name,
        'sanitizer': sanitizer,
        'job': job.name,
        'testcaseId': str(testcase_id),
        'buildPath': build_path,
        'minRevisionAbove': str(min_revision),
    }

    reproducer = blobs.read_key(testcase.minimized_keys
                                or testcase.fuzzed_keys)
    message = pubsub.Message(data=reproducer, attributes=attributes)
    pubsub_client.publish(topic_name, [message])
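
Note that every value in the attributes dict above is a string (hence the str() wrapping of numeric fields); Pub/Sub message attributes must be string-valued. A hypothetical invocation, assuming job is an entity whose external_reproduction_topic names a valid topic:

# Hypothetical call; testcase id 12345 and the job entity are assumptions.
add_external_task('progression', 12345, job)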
Example #8
    def _setup_device_and_fuzzer(self):
        """Build a Device and Fuzzer object based on QEMU's settings."""
        # These environment variables are set when start_qemu is run.
        # We need them in order to ssh / otherwise communicate with the VM.
        fuchsia_pkey_path = environment.get_value('FUCHSIA_PKEY_PATH')
        fuchsia_portnum = environment.get_value('FUCHSIA_PORTNUM')
        fuchsia_resources_dir = environment.get_value('FUCHSIA_RESOURCES_DIR')
        if (not fuchsia_pkey_path or not fuchsia_portnum
                or not fuchsia_resources_dir):
            raise fuchsia.errors.FuchsiaConfigError((
                'FUCHSIA_PKEY_PATH, FUCHSIA_PORTNUM, or FUCHSIA_RESOURCES_DIR was '
                'not set'))

        # Fuzzer objects communicate with the VM via a Device object,
        # which we set up here.
        fuchsia_resources_dir_plus_build = os.path.join(
            fuchsia_resources_dir, self.FUCHSIA_BUILD_REL_PATH)
        self.host = Host.from_dir(fuchsia_resources_dir_plus_build)
        self.device = Device(self.host, 'localhost', fuchsia_portnum)
        self.device.set_ssh_option('StrictHostKeyChecking no')
        self.device.set_ssh_option('UserKnownHostsFile=/dev/null')
        self.device.set_ssh_identity(fuchsia_pkey_path)

        # Fuchsia fuzzer names have the format {package_name}/{binary_name}.
        package, target = self.executable_path.split('/')
        test_data_dir = os.path.join(fuchsia_resources_dir_plus_build,
                                     self.FUZZER_TEST_DATA_REL_PATH, package,
                                     target)

        # Finally, we set up the Fuzzer object itself, which will run our fuzzer!
        sanitizer = environment.get_memory_tool_name(
            environment.get_value('JOB_NAME')).lower()
        self.fuzzer = Fuzzer(self.device,
                             package,
                             target,
                             output=test_data_dir,
                             foreground=True,
                             sanitizer=sanitizer)
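
The split on '/' earlier in this example follows the {package_name}/{binary_name} convention from the comment; with a hypothetical fuzzer name:

# Hypothetical fuzzer name illustrating the {package}/{binary} split.
package, target = 'example_fuzzers/crash_fuzzer'.split('/')
# package == 'example_fuzzers', target == 'crash_fuzzer'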
Example #9
def _prepare_predator_message(testcase):
    """Prepare the json sent to the Predator service for the given test case."""
    result, error_message = _is_predator_testcase(testcase)
    if not result:
        _set_predator_result_with_error(testcase, error_message)
        return None

    crash_revisions_dict, crash_revision_hash = _prepare_component_revisions_dict(
        testcase.crash_revision, testcase.job_type)
    # Do a None check since we can return {} for revision = 0.
    if crash_revisions_dict is None:
        _set_predator_result_with_error(
            testcase, 'Failed to fetch component revisions for revision %s.' %
            testcase.crash_revision)
        return None

    dependency_rolls = []
    start_revision_hash = end_revision_hash = None
    if ':' in testcase.regression:
        regression_parts = testcase.regression.split(':', 1)
        start_revision = int(regression_parts[0])
        end_revision = int(regression_parts[1])

        start_revisions_dict, start_revision_hash = (
            _prepare_component_revisions_dict(start_revision,
                                              testcase.job_type))
        # Do a None check since we can return {} for revision = 0.
        if start_revisions_dict is None:
            _set_predator_result_with_error(
                testcase,
                'Failed to fetch component revisions for revision %s.' %
                start_revision)
            return None

        end_revisions_dict, end_revision_hash = (
            _prepare_component_revisions_dict(end_revision, testcase.job_type))
        # Do a None check since we can return {} for revision = 0.
        if end_revisions_dict is None:
            _set_predator_result_with_error(
                testcase,
                'Failed to fetch component revisions for revision %s.' %
                end_revision)
            return None

        if start_revision != 0:
            dependency_rolls = _compute_rolls(start_revisions_dict,
                                              end_revisions_dict)

    # Put the current revisions dictionary in the format predator expects.
    crash_revision_component_revisions_list = (
        _format_component_revisions_for_predator(crash_revisions_dict))

    # In addition to the start and end revisions, Predator expects the regression
    # range to include the dependency path and repository URL in the same way that
    # they would be included in the dependency rolls. Note that we do not take
    # this from the rolls dict directly as it may not be available.
    src_entry = [
        entry for entry in crash_revision_component_revisions_list
        if entry['dep_path'] == 'src'
    ][0]

    # TODO(mbarbella): This is a hack since ClusterFuzz relies on "src" as a
    # special-cased path, but this is only going to be the correct repository
    # root path some of the time. For certain cases, we must update it.
    repo_url = src_entry['repo_url']
    real_dep_path = SRC_COMPONENT_OVERRIDES.get(repo_url, 'src')
    if real_dep_path != 'src':
        for dependency_list in [
                dependency_rolls, crash_revision_component_revisions_list
        ]:
            for entry in dependency_list:
                if entry['dep_path'] == 'src':
                    entry['dep_path'] = real_dep_path
                    break

    regression_range = {
        'dep_path': real_dep_path,
        'repo_url': repo_url,
        'old_revision': start_revision_hash,
        'new_revision': end_revision_hash,
    }

    crash_stacktrace = _filter_stacktrace(
        data_handler.get_stacktrace(testcase))

    return pubsub.Message(data=json.dumps({
        'stack_trace': crash_stacktrace,
        'crash_revision': crash_revision_hash,
        'customized_data': {
            'regression_range': regression_range,
            'dependency_rolls': dependency_rolls,
            'dependencies': crash_revision_component_revisions_list,
            'crash_type': testcase.crash_type,
            'crash_address': testcase.crash_address,
            'sanitizer': environment.get_memory_tool_name(testcase.job_type),
            'security_flag': testcase.security_flag,
            'job_type': testcase.job_type,
            'testcase_id': testcase.key.id()
        },
        'platform': testcase.platform,
        'client_id': 'clusterfuzz',
        'signature': testcase.crash_state,
    }).encode('utf-8'))
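
_prepare_predator_message returns a pubsub.Message (or None on failure) that the caller is expected to publish. A hypothetical follow-up, with the topic path invented for illustration:

# Hypothetical publish step; the topic path is an assumption.
message = _prepare_predator_message(testcase)
if message is not None:
    pubsub.PubSubClient().publish(
        'projects/example-project/topics/predator', [message])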