Example #1
def test_run_cov_new_units(mocked_execute, fs, environ):
    """Tests that run_cov_new_units does a coverage run as we expect."""
    os.environ = {'WORK': '/work'}
    mocked_execute.return_value = new_process.ProcessResult(0, '', False)
    snapshot_measurer = measurer.SnapshotMeasurer(FUZZER, BENCHMARK, TRIAL_NUM,
                                                  SNAPSHOT_LOGGER)
    snapshot_measurer.initialize_measurement_dirs()
    shared_units = ['shared1', 'shared2']
    for unit in shared_units:
        fs.create_file(os.path.join(snapshot_measurer.prev_corpus_dir, unit))
        fs.create_file(os.path.join(snapshot_measurer.corpus_dir, unit))
    new_units = ['new1', 'new2']
    for unit in new_units:
        fs.create_file(os.path.join(snapshot_measurer.corpus_dir, unit))
    fuzz_target_path = '/work/coverage-binaries/benchmark-a/fuzz-target'
    fs.create_file(fuzz_target_path)

    snapshot_measurer.run_cov_new_units()
    assert len(mocked_execute.call_args_list) == 1  # Called once
    args = mocked_execute.call_args_list[0]
    command_arg = args[0][0]
    assert command_arg[0] == fuzz_target_path
    expected = {
        'cwd': '/work/coverage-binaries/benchmark-a',
        'env': {
            'UBSAN_OPTIONS': ('coverage_dir='
                              '/work/measurement-folders/benchmark-a/fuzzer-a'
                              '/trial-12/sancovs'),
            'WORK': '/work',
        },
        'expect_zero': False,
    }
    args = args[1]
    for arg, value in expected.items():
        assert args[arg] == value
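
The examples in this collection all stub common.new_process.execute and hand
back a new_process.ProcessResult. A minimal stand-in, assuming ProcessResult
is a plain (retcode, output, timed_out) record as its construction sites
suggest (the real definition lives in common/new_process.py):

import collections

# Positional fields match the calls above, e.g. ProcessResult(0, '', False).
ProcessResult = collections.namedtuple('ProcessResult',
                                       ['retcode', 'output', 'timed_out'])

# retcode 0 with empty output models a mocked command that succeeded and did
# not time out; retcode 1 models a failure, as in the gcloud examples below.
success = ProcessResult(0, '', False)
failure = ProcessResult(1, 'Error', False)
assert success.retcode == 0 and not failure.timed_out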
Example #2
def test_generate_summary(mocked_get_coverage_binary, mocked_execute,
                          experiment, fs):
    """Tests that generate_summary can run the correct command."""
    mocked_execute.return_value = new_process.ProcessResult(0, '', False)
    coverage_binary_path = '/work/coverage-binaries/benchmark-a/fuzz-target'
    mocked_get_coverage_binary.return_value = coverage_binary_path

    snapshot_measurer = measurer.SnapshotMeasurer(FUZZER, BENCHMARK, TRIAL_NUM,
                                                  SNAPSHOT_LOGGER)
    snapshot_measurer.cov_summary_file = "/reports/cov_summary.txt"
    snapshot_measurer.profdata_file = "/reports/data.profdata"
    fs.create_dir('/reports')
    fs.create_file(snapshot_measurer.profdata_file, contents='fake_contents')
    snapshot_measurer.generate_summary(CYCLE)

    expected = [
        'llvm-cov', 'export', '-format=text',
        '/work/coverage-binaries/benchmark-a/fuzz-target',
        '-instr-profile=/reports/data.profdata', '-summary-only'
    ]

    assert (len(mocked_execute.call_args_list)) == 1
    args = mocked_execute.call_args_list[0]
    assert args[0][0] == expected
    assert args[1]['output_file'].name == "/reports/cov_summary.txt"
Example #3
def test_measure_all_trials_not_ready(mocked_rsync, mocked_ls, experiment):
    """Test running measure_all_trials before it is ready works as intended."""
    mocked_ls.return_value = new_process.ProcessResult(1, '', False)
    assert measurer.measure_all_trials(experiment_utils.get_experiment_name(),
                                       MAX_TOTAL_TIME, test_utils.MockPool(),
                                       queue.Queue())
    assert not mocked_rsync.called
Example #4
def test_do_sync_changed(mocked_execute, mocked_is_corpus_dir_same, fs,
                         trial_runner, fuzzer_module):
    """Test that do_sync archives and saves a corpus if it changed from the
    previous one."""
    mocked_execute.return_value = new_process.ProcessResult(0, '', False)
    corpus_file_name = 'corpus-file'
    fs.create_file(os.path.join(trial_runner.corpus_dir, corpus_file_name))
    trial_runner.cycle = 1337
    mocked_is_corpus_dir_same.return_value = False
    trial_runner.do_sync()
    assert mocked_execute.call_args_list == [
        mock.call([
            'gsutil', 'cp', 'corpus-archives/corpus-archive-1337.tar.gz',
            ('gs://bucket/experiment-name/experiment-folders/'
             'benchmark-1-fuzzer_a/trial-1/corpus/'
             'corpus-archive-1337.tar.gz')
        ],
                  expect_zero=True),
        mock.call([
            'gsutil', 'rsync', '-d', '-r', 'results-copy',
            ('gs://bucket/experiment-name/experiment-folders/'
             'benchmark-1-fuzzer_a/trial-1/results')
        ],
                  expect_zero=True)
    ]
    unchanged_cycles_path = os.path.join(trial_runner.results_dir,
                                         'unchanged-cycles')
    assert not os.path.exists(unchanged_cycles_path)

    # Archives should get deleted after syncing.
    archives = os.listdir(trial_runner.corpus_archives_dir)
    assert len(archives) == 0
Example #5
def test_schedule(mocked_datetime_now, mocked_get_by_variant_name,
                  mocked_execute, pending_trials, experiment_config):
    """Tests that schedule() ends expired trials and starts new ones as
    needed."""
    mocked_execute.return_value = new_process.ProcessResult(0, '', False)
    mocked_get_by_variant_name.return_value = {'fuzzer': 'test_fuzzer'}
    experiment = experiment_config['experiment']
    datetimes_first_experiments_started = [
        trial.time_started for trial in db_utils.query(models.Trial).filter(
            models.Trial.experiment == experiment).filter(
                models.Trial.time_started.isnot(None))
    ]

    mocked_datetime_now.return_value = (
        max(datetimes_first_experiments_started) +
        datetime.timedelta(seconds=(experiment_config['max_total_time'] +
                                    scheduler.GRACE_TIME_SECONDS * 2)))

    with ThreadPool() as pool:
        scheduler.schedule(experiment_config, pool)
    assert db_utils.query(models.Trial).filter(
        models.Trial.time_started.in_(
            datetimes_first_experiments_started)).all() == (db_utils.query(
                models.Trial).filter(models.Trial.time_ended.isnot(None)).all())

    assert pending_trials.filter(
        models.Trial.time_started.isnot(None)).all() == pending_trials.all()
Example #6
def test_robust_begin_gcloud_ssh_pass(mocked_ssh, _):
    """Tests robust_begin_gcloud_ssh works as intended on google cloud."""
    mocked_ssh.return_value = new_process.ProcessResult(0, None, False)
    gcloud.robust_begin_gcloud_ssh(INSTANCE_NAME, ZONE)
    mocked_ssh.assert_called_with('instance-a',
                                  command='echo ping',
                                  expect_zero=False,
                                  zone='zone-a')
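
The expect_zero=False assertion suggests that robust_begin_gcloud_ssh
tolerates failed pings and keeps retrying until the instance answers. A
hedged sketch of that retry loop; the ssh callable, retry count and delay are
assumptions for illustration:

import time
from typing import Callable


def robust_begin_ssh_sketch(ssh: Callable[..., object], instance: str,
                            zone: str, retries: int = 10,
                            delay: float = 5.0) -> None:
    """Ping |instance| over SSH until it responds or |retries| run out."""
    for _ in range(retries):
        # expect_zero=False: a failed ping is expected while the VM boots.
        result = ssh(instance, command='echo ping', expect_zero=False,
                     zone=zone)
        if result.retcode == 0:
            return
        time.sleep(delay)
    raise Exception('Could not SSH into instance: %s' % instance)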
Example #7
def test_is_cycle_unchanged_no_file(mocked_cp, fs, experiment):
    """Test that is_cycle_unchanged returns False when there is no
    unchanged-cycles file."""
    # Make sure we log if there is no unchanged-cycles file.
    snapshot_measurer = measurer.SnapshotMeasurer(FUZZER, BENCHMARK, TRIAL_NUM,
                                                  SNAPSHOT_LOGGER)
    mocked_cp.return_value = new_process.ProcessResult(1, '', False)
    assert not snapshot_measurer.is_cycle_unchanged(0)
Example #8
def test_create_instance_failed_create(mocked_execute):
    """Tests create_instance creates an instance if it doesn't already
    exist."""
    mocked_execute.return_value = new_process.ProcessResult(1, '', False)
    # We shouldn't raise an exception here.
    assert not gcloud.create_instance(INSTANCE_NAME,
                                      gcloud.InstanceType.DISPATCHER, CONFIG)
    # Check that the first call is to create the instance.
    assert 'create' in mocked_execute.call_args_list[0][0][0]
Example #9
def test_measure_all_trials_no_more(mocked_directories_have_same_files,
                                    mocked_execute):
    """Test measure_all_trials does what is intended when the experiment is
    done."""
    mocked_directories_have_same_files.return_value = True
    mocked_execute.return_value = new_process.ProcessResult(0, '', False)
    mock_pool = test_utils.MockPool()
    assert not measurer.measure_all_trials(
        experiment_utils.get_experiment_name(), MAX_TOTAL_TIME, mock_pool,
        queue.Queue())
Example #10
def test_delete_instances_fail(mocked_execute):
    """Test that delete_instances returns False when instance deletion fails."""
    instances = ['instance-%d' % i for i in range(5)]
    mocked_execute.return_value = new_process.ProcessResult(1, 'Error', False)
    zone = 'us-central1-a'
    expected_command = (['gcloud', 'compute', 'instances', 'delete', '-q'] +
                        instances + ['--zone', zone])
    result = gcloud.delete_instances(instances, zone)
    assert not result
    mocked_execute.assert_called_with(expected_command, expect_zero=False)
Example #11
def test_run_cov_new_units(_, mocked_execute, fs, environ):
    """Tests that run_cov_new_units does a coverage run as we expect."""
    os.environ = {
        'WORK': '/work',
        'EXPERIMENT_FILESTORE': 'gs://bucket',
        'EXPERIMENT': 'experiment',
    }
    mocked_execute.return_value = new_process.ProcessResult(0, '', False)
    snapshot_measurer = measure_manager.SnapshotMeasurer(
        FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER)
    snapshot_measurer.initialize_measurement_dirs()
    shared_units = ['shared1', 'shared2']
    fs.create_file(snapshot_measurer.measured_files_path,
                   contents='\n'.join(shared_units))
    for unit in shared_units:
        fs.create_file(os.path.join(snapshot_measurer.corpus_dir, unit))

    new_units = ['new1', 'new2']
    for unit in new_units:
        fs.create_file(os.path.join(snapshot_measurer.corpus_dir, unit))
    fuzz_target_path = '/work/coverage-binaries/benchmark-a/fuzz-target'
    fs.create_file(fuzz_target_path)
    profraw_file_path = os.path.join(snapshot_measurer.coverage_dir,
                                     'data.profraw')
    fs.create_file(profraw_file_path)

    snapshot_measurer.run_cov_new_units()
    assert len(mocked_execute.call_args_list) == 1  # Called once
    args = mocked_execute.call_args_list[0]
    command_arg = args[0][0]
    assert command_arg[0] == fuzz_target_path
    expected = {
        'cwd': '/work/coverage-binaries/benchmark-a',
        'env': {
            'ASAN_OPTIONS':
            ('handle_abort=2:handle_segv=2:handle_sigbus=2:handle_sigfpe=2:'
             'handle_sigill=2:symbolize=1:symbolize_inline_frames=0'),
            'UBSAN_OPTIONS':
            ('handle_abort=2:handle_segv=2:handle_sigbus=2:handle_sigfpe=2:'
             'handle_sigill=2:symbolize=1:symbolize_inline_frames=0'),
            'LLVM_PROFILE_FILE':
            ('/work/measurement-folders/'
             'benchmark-a-fuzzer-a/trial-12/coverage/data-%m.profraw'),
            'WORK':
            '/work',
            'EXPERIMENT_FILESTORE':
            'gs://bucket',
            'EXPERIMENT':
            'experiment',
        },
        'expect_zero': False,
    }
    args = args[1]
    for arg, value in expected.items():
        assert args[arg] == value
Example #12
def test_delete_instances_less_than_batch_size(mocked_execute):
    """Test that delete_instances works as intended when instance count is less
    than batch size."""
    instances = ['instance-%d' % i for i in range(5)]
    mocked_execute.return_value = new_process.ProcessResult(0, '', False)
    zone = 'us-central1-a'
    expected_command = (['gcloud', 'compute', 'instances', 'delete', '-q'] +
                        instances + ['--zone', zone])
    result = gcloud.delete_instances(instances, zone)
    assert result
    mocked_execute.assert_called_with(expected_command, expect_zero=False)
Example #13
    def test_measure_snapshot_coverage(  # pylint: disable=too-many-locals
            self, mocked_is_cycle_unchanged, db, experiment, tmp_path):
        """Integration test for measure_snapshot_coverage."""
        # WORK is set by experiment to a directory that only makes sense in a
        # fakefs. A directory containing necessary llvm tools is also added to
        # PATH.
        llvm_tools_path = get_test_data_path('llvm_tools')
        os.environ["PATH"] += os.pathsep + llvm_tools_path
        os.environ['WORK'] = str(tmp_path)
        mocked_is_cycle_unchanged.return_value = False
        # Set up the coverage binary.
        benchmark = 'freetype2-2017'
        coverage_binary_src = get_test_data_path(
            'test_measure_snapshot_coverage', benchmark + '-coverage')
        benchmark_cov_binary_dir = os.path.join(
            build_utils.get_coverage_binaries_dir(), benchmark)

        os.makedirs(benchmark_cov_binary_dir)
        coverage_binary_dst_dir = os.path.join(benchmark_cov_binary_dir,
                                               'ftfuzzer')

        shutil.copy(coverage_binary_src, coverage_binary_dst_dir)

        # Set up entities in database so that the snapshot can be created.
        experiment = models.Experiment(name=os.environ['EXPERIMENT'])
        db_utils.add_all([experiment])
        trial = models.Trial(fuzzer=FUZZER,
                             benchmark=benchmark,
                             experiment=os.environ['EXPERIMENT'])
        db_utils.add_all([trial])

        snapshot_measurer = measurer.SnapshotMeasurer(trial.fuzzer,
                                                      trial.benchmark,
                                                      trial.id,
                                                      SNAPSHOT_LOGGER)

        # Set up the snapshot archive.
        cycle = 1
        archive = get_test_data_path('test_measure_snapshot_coverage',
                                     'corpus-archive-%04d.tar.gz' % cycle)
        corpus_dir = os.path.join(snapshot_measurer.trial_dir, 'corpus')
        os.makedirs(corpus_dir)
        shutil.copy(archive, corpus_dir)

        with mock.patch('common.filestore_utils.cp') as mocked_cp:
            mocked_cp.return_value = new_process.ProcessResult(0, '', False)
            # TODO(metzman): Create a system for using actual buckets in
            # integration tests.
            snapshot = measurer.measure_snapshot_coverage(
                snapshot_measurer.fuzzer, snapshot_measurer.benchmark,
                snapshot_measurer.trial_num, cycle)
        assert snapshot
        assert snapshot.time == cycle * experiment_utils.get_snapshot_seconds()
        assert snapshot.edges_covered == 13178
Example #14
def test_is_cycle_unchanged_first_copy(mocked_read, mocked_cp, experiment):
    """Test that is_cycle_unchanged can properly determine if a cycle is
    unchanged or not when it needs to copy the file for the first time."""
    snapshot_measurer = measurer.SnapshotMeasurer(FUZZER, BENCHMARK, TRIAL_NUM,
                                                  SNAPSHOT_LOGGER)
    this_cycle = 100
    unchanged_cycles_file_contents = (
        '\n'.join([str(num) for num in range(10)] + [str(this_cycle)]))
    mocked_read.return_value = unchanged_cycles_file_contents
    mocked_cp.return_value = new_process.ProcessResult(0, '', False)

    assert snapshot_measurer.is_cycle_unchanged(this_cycle)
    assert not snapshot_measurer.is_cycle_unchanged(this_cycle + 1)
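
Taken together, the is_cycle_unchanged examples imply that the
unchanged-cycles file lists one cycle number per line and that a cycle counts
as unchanged exactly when its number appears there (fetching the file from
the filestore is mocked out above). A sketch of just that membership check,
with the function name assumed:

def is_cycle_unchanged_sketch(unchanged_cycles_file_contents: str,
                              cycle: int) -> bool:
    """Return True iff |cycle| is listed in the unchanged-cycles contents."""
    unchanged_cycles = [
        int(line) for line in unchanged_cycles_file_contents.splitlines()
        if line.strip()
    ]
    return cycle in unchanged_cycles


# Mirrors the assertions above: cycle 100 is listed, cycle 101 is not.
contents = '\n'.join([str(num) for num in range(10)] + ['100'])
assert is_cycle_unchanged_sketch(contents, 100)
assert not is_cycle_unchanged_sketch(contents, 101)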
Example #15
    def test_measure_snapshot_coverage(  # pylint: disable=too-many-locals
            self, mocked_is_cycle_unchanged, create_measurer, db, experiment):
        """Integration test for measure_snapshot_coverage."""
        mocked_is_cycle_unchanged.return_value = False
        # Set up the coverage binary.
        benchmark = 'freetype2-2017'
        coverage_binary_src = get_test_data_path(
            'test_measure_snapshot_coverage', benchmark + '-coverage')
        benchmark_cov_binary_dir = os.path.join(
            build_utils.get_coverage_binaries_dir(), benchmark)

        os.makedirs(benchmark_cov_binary_dir)
        coverage_binary_dst_dir = os.path.join(benchmark_cov_binary_dir,
                                               'fuzz-target')

        shutil.copy(coverage_binary_src, coverage_binary_dst_dir)

        # Set up entities in database so that the snapshot can be created.
        experiment = models.Experiment(name=os.environ['EXPERIMENT'])
        db_utils.add_all([experiment])
        trial = models.Trial(fuzzer=FUZZER,
                             benchmark=benchmark,
                             experiment=os.environ['EXPERIMENT'])
        db_utils.add_all([trial])

        snapshot_measurer = create_measurer(trial.fuzzer, trial.benchmark,
                                            trial.id)

        # Set up the snapshot archive.
        cycle = 1
        archive = get_test_data_path('test_measure_snapshot_coverage',
                                     'corpus-archive-%04d.tar.gz' % cycle)
        corpus_dir = os.path.join(snapshot_measurer.trial_dir, 'corpus')
        os.makedirs(corpus_dir)
        shutil.copy(archive, corpus_dir)

        with mock.patch('common.gsutil.cp') as mocked_cp:
            mocked_cp.return_value = new_process.ProcessResult(0, '', False)
            # TODO(metzman): Create a system for using actual buckets in
            # integration tests.
            snapshot = measurer.measure_snapshot_coverage(
                snapshot_measurer.fuzzer, snapshot_measurer.benchmark,
                snapshot_measurer.trial_num, cycle)
        assert snapshot
        assert snapshot.time == cycle * experiment_utils.get_snapshot_seconds()
        assert snapshot.edges_covered == 3798
Example #16
def test_delete_instances_greater_than_batch_size(mocked_execute):
    """Test that delete_instances works as intended when instance count is more
  than batch size."""
    instances = ['instance-%d' % i for i in range(103)]
    mocked_execute.return_value = new_process.ProcessResult(0, '', False)
    zone = 'us-central1-a'
    result = gcloud.delete_instances(instances, zone)
    assert result
    expected_command_1 = (['gcloud', 'compute', 'instances', 'delete', '-q'] +
                          ['instance-%d' % i
                           for i in range(100)] + ['--zone', zone])
    expected_command_2 = (['gcloud', 'compute', 'instances', 'delete', '-q'] +
                          ['instance-%d' % i
                           for i in range(100, 103)] + ['--zone', zone])
    mocked_execute.assert_has_calls([
        mock.call(expected_command_1, expect_zero=False),
        mock.call(expected_command_2, expect_zero=False)
    ])
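
The two expected invocations above suggest that delete_instances splits the
instance list into batches (apparently 100 instances per gcloud command) and
reports success only if every batch deletion succeeds. A hedged sketch of
that chunking; the execute callable and batch size are assumptions:

from typing import Callable, List


def delete_instances_sketch(instances: List[str], zone: str,
                            execute: Callable[..., object],
                            batch_size: int = 100) -> bool:
    """Delete |instances| in batches; True only if every batch succeeds."""
    success = True
    for start in range(0, len(instances), batch_size):
        batch = instances[start:start + batch_size]
        command = (['gcloud', 'compute', 'instances', 'delete', '-q'] + batch +
                   ['--zone', zone])
        result = execute(command, expect_zero=False)
        success = success and result.retcode == 0
    return success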
Example #17
def test_generate_profdata_create(mocked_execute, experiment, fs):
    """Tests that generate_profdata can run the correct command."""
    mocked_execute.return_value = new_process.ProcessResult(0, '', False)
    snapshot_measurer = measurer.SnapshotMeasurer(FUZZER, BENCHMARK, TRIAL_NUM,
                                                  SNAPSHOT_LOGGER)
    snapshot_measurer.profdata_file = '/work/reports/data.profdata'
    snapshot_measurer.profraw_file = '/work/reports/data.profraw'
    fs.create_file(snapshot_measurer.profraw_file, contents='fake_contents')
    snapshot_measurer.generate_profdata(CYCLE)

    expected = [
        'llvm-profdata', 'merge', '-sparse', '/work/reports/data.profraw',
        '-o', '/work/reports/data.profdata'
    ]

    assert (len(mocked_execute.call_args_list)) == 1
    args = mocked_execute.call_args_list[0]
    assert args[0][0] == expected
Example #18
def test_keyword_args(use_gsutil):  # pylint: disable=unused-argument
    """Tests that keyword args, and in particular 'parallel' are handled
    correctly."""

    with mock.patch('common.new_process.execute') as mocked_execute:
        filestore_utils.rm(GCS_DIR_2, recursive=True, parallel=True)
        mocked_execute.assert_called_with(
            ['gsutil', '-m', 'rm', '-r', GCS_DIR_2], expect_zero=True)

    with mock.patch('common.new_process.execute') as mocked_execute:
        mocked_execute.return_value = new_process.ProcessResult(0, '', False)
        filestore_utils.ls(GCS_DIR_2)
        mocked_execute.assert_called_with(['gsutil', 'ls', GCS_DIR_2],
                                          expect_zero=True)

    with mock.patch('common.new_process.execute') as mocked_execute:
        filestore_utils.cp(GCS_DIR, GCS_DIR_2, parallel=True)
        mocked_execute.assert_called_with(
            ['gsutil', '-m', 'cp', GCS_DIR, GCS_DIR_2], expect_zero=True)
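
The assertions in this example imply a thin translation layer: parallel=True
becomes gsutil's top-level -m flag, recursive=True becomes -r on the
subcommand, and the assembled command goes to new_process.execute with
expect_zero=True. A hedged sketch of that mapping; the wrapper names are
assumptions, and subprocess.run stands in for new_process.execute so the
sketch is self-contained:

import subprocess
from typing import List


def gsutil_command_sketch(arguments: List[str],
                          parallel: bool = False) -> List[str]:
    """Prefix |arguments| with 'gsutil', adding -m when parallel is set."""
    return ['gsutil'] + (['-m'] if parallel else []) + arguments


def rm_sketch(path: str, recursive: bool = False, parallel: bool = False):
    """Build and run the 'gsutil rm' command the test above expects."""
    arguments = ['rm'] + (['-r'] if recursive else []) + [path]
    return subprocess.run(gsutil_command_sketch(arguments, parallel=parallel),
                          check=True)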
Example #19
def test_is_cycle_unchanged_update(fs, experiment):
    """Test that is_cycle_unchanged can properly determine that a
    cycle has changed when it has the file but needs to update it."""
    snapshot_measurer = measurer.SnapshotMeasurer(FUZZER, BENCHMARK, TRIAL_NUM,
                                                  SNAPSHOT_LOGGER)

    this_cycle = 100
    initial_unchanged_cycles_file_contents = (
        '\n'.join([str(num) for num in range(10)] + [str(this_cycle)]))
    fs.create_file(snapshot_measurer.unchanged_cycles_path,
                   contents=initial_unchanged_cycles_file_contents)

    next_cycle = this_cycle + 1
    unchanged_cycles_file_contents = (initial_unchanged_cycles_file_contents +
                                      '\n' + str(next_cycle))
    assert snapshot_measurer.is_cycle_unchanged(this_cycle)
    with mock.patch('common.filestore_utils.cp') as mocked_cp:
        with mock.patch('common.filesystem.read') as mocked_read:
            mocked_cp.return_value = new_process.ProcessResult(0, '', False)
            mocked_read.return_value = unchanged_cycles_file_contents
            assert snapshot_measurer.is_cycle_unchanged(next_cycle)
Example #20
def test_measure_all_trials(_, __, mocked_execute, db, fs):
    """Tests that measure_all_trials does what is intended under normal
    conditions."""
    mocked_execute.return_value = new_process.ProcessResult(0, '', False)

    dispatcher._initialize_experiment_in_db(
        experiment_utils.get_experiment_name(), BENCHMARKS, FUZZERS, NUM_TRIALS)
    trials = scheduler.get_pending_trials(
        experiment_utils.get_experiment_name()).all()
    for trial in trials:
        trial.time_started = datetime.datetime.utcnow()
    db_utils.add_all(trials)

    fs.create_file(measurer.get_experiment_folders_dir() / NEW_UNIT)
    mock_pool = test_utils.MockPool()

    assert measurer.measure_all_trials(experiment_utils.get_experiment_name(),
                                       MAX_TOTAL_TIME, mock_pool, queue.Queue())

    actual_ids = [call[2] for call in mock_pool.func_calls]
    # 4 (trials) * 2 (fuzzers) * 2 (benchmarks)
    assert sorted(actual_ids) == list(range(1, 17))
Example #21
"""Tests for builder.py."""

import subprocess
from unittest import mock

import pytest

from common import new_process
from experiment.build import gcb_build

# pylint: disable=protected-access

FAIL_RESULT = new_process.ProcessResult(1, '', False)


@mock.patch('common.new_process.execute', return_value=FAIL_RESULT)
@mock.patch('experiment.build.build_utils.store_build_logs')
def test_build_error(mocked_store_build_logs, _):
    """Tests that on error, _build raises subprocess.CalledProcessError and
    calls store_build_logs."""
    config_name = 'config'
    with pytest.raises(subprocess.CalledProcessError):
        gcb_build._build({}, config_name)
    mocked_store_build_logs.assert_called_with(config_name, FAIL_RESULT)


SUCCESS_RESULT = new_process.ProcessResult(0, '', False)
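
The test above pins down an error-handling contract: the build command's
result is handed to store_build_logs, and a non-zero exit is surfaced as
subprocess.CalledProcessError. A hedged sketch of that contract, with the
execute and store_build_logs collaborators passed in rather than imported:

import subprocess
from typing import Callable, List


def build_sketch(execute: Callable[..., object],
                 store_build_logs: Callable[[str, object], None],
                 command: List[str], config_name: str):
    """Run |command|, store its logs, and raise if it exited non-zero."""
    result = execute(command, expect_zero=False)
    store_build_logs(config_name, result)
    if result.retcode != 0:
        raise subprocess.CalledProcessError(result.retcode, command)
    return result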
Example #22
def test_run_cov_new_units(_, mocked_execute, fs, environ):
    """Tests that run_cov_new_units does a coverage run as we expect."""
    os.environ = {
        'WORK': '/work',
        'EXPERIMENT_FILESTORE': 'gs://bucket',
        'EXPERIMENT': 'experiment',
    }
    mocked_execute.return_value = new_process.ProcessResult(0, '', False)
    snapshot_measurer = measure_manager.SnapshotMeasurer(
        FUZZER, BENCHMARK, TRIAL_NUM, SNAPSHOT_LOGGER)
    snapshot_measurer.initialize_measurement_dirs()
    shared_units = ['shared1', 'shared2']
    fs.create_file(snapshot_measurer.measured_files_path,
                   contents='\n'.join(shared_units))
    for unit in shared_units:
        fs.create_file(os.path.join(snapshot_measurer.corpus_dir, unit))

    new_units = ['new1', 'new2']
    for unit in new_units:
        fs.create_file(os.path.join(snapshot_measurer.corpus_dir, unit))
    fuzz_target_path = '/work/coverage-binaries/benchmark-a/fuzz-target'
    fs.create_file(fuzz_target_path)
    profraw_file_path = os.path.join(snapshot_measurer.coverage_dir,
                                     'data.profraw')
    fs.create_file(profraw_file_path)

    snapshot_measurer.run_cov_new_units()
    assert len(mocked_execute.call_args_list) == 1  # Called once
    args = mocked_execute.call_args_list[0]
    command_arg = args[0][0]
    assert command_arg[0] == fuzz_target_path
    expected = {
        'cwd': '/work/coverage-binaries/benchmark-a',
        'env': {
            'ASAN_OPTIONS':
            ('alloc_dealloc_mismatch=0:allocator_may_return_null=1:'
             'allocator_release_to_os_interval_ms=500:'
             'allow_user_segv_handler=0:check_malloc_usable_size=0:'
             'detect_leaks=1:detect_odr_violation=0:'
             'detect_stack_use_after_return=1:fast_unwind_on_fatal=0:'
             'handle_abort=2:handle_segv=2:handle_sigbus=2:handle_sigfpe=2:'
             'handle_sigill=2:max_uar_stack_size_log=16:'
             'quarantine_size_mb=64:strict_memcmp=1:symbolize=1:'
             'symbolize_inline_frames=0'),
            'UBSAN_OPTIONS':
            ('allocator_release_to_os_interval_ms=500:handle_abort=2:'
             'handle_segv=2:handle_sigbus=2:handle_sigfpe=2:'
             'handle_sigill=2:print_stacktrace=1:'
             'symbolize=1:symbolize_inline_frames=0'),
            'LLVM_PROFILE_FILE':
            ('/work/measurement-folders/'
             'benchmark-a-fuzzer-a/trial-12/coverage/data-%m.profraw'),
            'WORK':
            '/work',
            'EXPERIMENT_FILESTORE':
            'gs://bucket',
            'EXPERIMENT':
            'experiment',
        },
        'expect_zero': False,
    }
    args = args[1]
    for arg, value in expected.items():
        assert args[arg] == value
Example #23
def run_local_instance(
        startup_script: str = None) -> new_process.ProcessResult:
    """Does the equivalent of "create_instance" for local experiments, running
    |startup_script| in the background."""
    command = ['/bin/bash', startup_script]
    subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    return new_process.ProcessResult(0, '', False)
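
A hypothetical invocation, assuming a startup script exists at the path shown;
the returned ProcessResult mirrors the success value the mocked commands use
throughout these examples:

result = run_local_instance('/path/to/startup.sh')  # hypothetical path
assert result.retcode == 0  # The result reports success unconditionally.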