コード例 #1
0
def _setup_x():
    """Launch Xvfb and the blackbox window manager for UI-based test targets.

    Returns:
      A list of process handles the caller must terminate at exit; empty when
      no virtual display is required on this platform/job.
    """
    if environment.platform() != 'LINUX':
        return []

    # Engine fuzzer jobs (AFL, libFuzzer, ...) run headless targets, so a
    # virtual display is unnecessary.
    if environment.is_engine_fuzzer_job():
        return []

    environment.set_value('DISPLAY', DISPLAY)

    print('Creating virtual display...')
    display_runner = new_process.ProcessRunner('/usr/bin/Xvfb')
    display_process = display_runner.run(additional_args=[
        DISPLAY, '-screen', '0', '1280x1024x24', '-ac', '-nolisten', 'tcp'
    ])
    time.sleep(PROCESS_START_WAIT_SECONDS)

    window_manager_runner = new_process.ProcessRunner('/usr/bin/blackbox')
    window_manager_process = window_manager_runner.run()
    time.sleep(PROCESS_START_WAIT_SECONDS)

    # Hand back every process we spawned so they can be terminated at exit.
    return [display_process, window_manager_process]
コード例 #2
0
def run_and_wait(request, _):
  """Implementation of RunAndWait."""
  process_runner = new_process.ProcessRunner(request.executable_path,
                                             request.default_args)

  args = {}
  # Copy optional Popen settings that were explicitly set on the request.
  for popen_field in ('bufsize', 'executable', 'shell', 'cwd'):
    protobuf_utils.get_protobuf_field(args, request.popen_args, popen_field)

  # An unset env means "inherit the parent environment" downstream.
  if request.popen_args.env_is_set:
    args['env'] = request.popen_args.env
  else:
    args['env'] = None

  args['additional_args'] = request.additional_args
  # Copy optional run parameters set directly on the request.
  for run_field in ('timeout', 'terminate_before_kill', 'terminate_wait_time',
                    'input_data', 'max_stdout_len'):
    protobuf_utils.get_protobuf_field(args, request, run_field)

  logs.log('Running command: %s' % process_runner.get_command())

  return untrusted_runner_pb2.RunAndWaitResponse(
      result=process_result_to_proto(process_runner.run_and_wait(**args)))
コード例 #3
0
def run(input_directory,
        output_directory,
        model_path,
        generation_timeout,
        generation_count=None,
        hidden_state_size=None,
        hidden_layer_size=None):
    """Generate inputs with specified model parameters.

    Args:
      input_directory: Corpus directory. Required argument for the generation
          script.
      output_directory: New inputs directory. Required argument for the
          generation script.
      model_path: Model path. Required argument for the generation script.
      generation_timeout: Timeout for running the generation process.
      generation_count: Number of inputs to generate; falls back to
          GENERATION_MAX_COUNT when not given.
      hidden_state_size: Optional hidden state size of the LSTM cell.
      hidden_layer_size: Optional hidden layer size of the LSTM model.

    Returns:
      Result of running the generation process. Format is defined by
      ProcessRunner.run_and_wait().
    """
    # Path of the generation script.
    script_path = os.path.join(ML_RNN_SCRIPT_DIR,
                               constants.GENERATION_SCRIPT_NAME)

    # Required script arguments.
    args_list = [
        script_path,
        constants.INPUT_DIR_ARGUMENT_PREFIX + input_directory,
        constants.OUTPUT_DIR_ARGUMENT_PREFIX + output_directory,
        constants.MODEL_PATH_ARGUMENT_PREFIX + model_path,
    ]

    # Use the maximum count when no explicit count was requested.
    count = generation_count or GENERATION_MAX_COUNT
    args_list.append(constants.GENERATION_COUNT_ARGUMENT_PREFIX + str(count))

    # Optional model-shape arguments, appended only when provided.
    for prefix, value in ((constants.HIDDEN_STATE_ARGUMENT_PREFIX,
                           hidden_state_size),
                          (constants.HIDDEN_LAYER_ARGUMENT_PREFIX,
                           hidden_layer_size)):
        if value:
            args_list.append(prefix + str(value))

    script_environment = os.environ.copy()

    # Run the generation script in the RNN script directory under the current
    # Python interpreter.
    rnn_runner = new_process.ProcessRunner(sys.executable)
    return rnn_runner.run_and_wait(args_list,
                                   cwd=ML_RNN_SCRIPT_DIR,
                                   env=script_environment,
                                   timeout=generation_timeout)
コード例 #4
0
 def test_additional_args(self):
     """Default arguments should precede any additional arguments."""
     runner = new_process.ProcessRunner('/test/path',
                                        default_args=['-arg1', '-arg2'])
     command = runner.get_command(additional_args=['-arg3', '-arg4'])
     self.assertEqual(command,
                      ['/test/path', '-arg1', '-arg2', '-arg3', '-arg4'])
コード例 #5
0
 def test_over_limit(self):
     """Stdout beyond max_stdout_len is truncated in the middle."""
     runner = new_process.ProcessRunner('python')
     result = runner.run_and_wait(
         ['-c', 'print("A" + "B"*499 + "C"*499 + "D")'], max_stdout_len=64)
     expected = (b'A' + b'B' * 31 + b'\n...truncated 937 bytes...\n' +
                 b'C' * 30 + b'D' + b'\n')
     self.assertEqual(expected, result.output)
コード例 #6
0
    def test_no_timeout(self):
        """A process that exits before the timeout receives no signals."""
        with mock.patch('subprocess.Popen',
                        mock_popen_factory(0.5, '', 0.0)) as mock_popen:
            runner = new_process.ProcessRunner('/test/path')
            runner.run_and_wait(timeout=5.0)

            # The process finished on its own, so nothing should be signalled.
            self.assertEqual(0, len(mock_popen.received_signals))
コード例 #7
0
def train_rnn(input_directory,
              model_directory,
              log_directory,
              batch_size=None,
              hidden_state_size=None,
              hidden_layer_size=None):
    """Train the ML RNN model.

    Args:
      input_directory: Corpus directory. Required argument for the training
          script.
      model_directory: The directory to save models. Required argument for
          the training script.
      log_directory: The directory to keep logs. Required argument for the
          training script.
      batch_size: Optional batch size used in each training loop.
      hidden_state_size: Optional hidden state size of the LSTM cell.
      hidden_layer_size: Optional hidden layer size of the LSTM model.

    Returns:
      Training result. An object of class `new_process.ProcessResult`.
    """
    # Path of the training script.
    script_path = get_model_script_path()

    # Required arguments for the training script.
    args_list = [
        script_path,
        constants.INPUT_DIR_ARGUMENT_PREFIX + input_directory,
        constants.MODEL_DIR_ARGUMENT_PREFIX + model_directory,
        constants.LOG_DIR_ARGUMENT_PREFIX + log_directory,
    ]

    # Optional arguments, appended only when a value was provided.
    optional_arguments = (
        (constants.BATCH_SIZE_ARGUMENT_PREFIX, batch_size),
        (constants.HIDDEN_STATE_ARGUMENT_PREFIX, hidden_state_size),
        (constants.HIDDEN_LAYER_ARGUMENT_PREFIX, hidden_layer_size),
    )
    for prefix, value in optional_arguments:
        if value:
            args_list.append(prefix + str(value))

    script_environment = os.environ.copy()

    logs.log('Launching the training with the following arguments: "%s".' %
             str(args_list))

    # Run the training script in the RNN script directory under the current
    # Python interpreter.
    rnn_trainer = new_process.ProcessRunner(sys.executable)

    return rnn_trainer.run_and_wait(args_list,
                                    cwd=ML_RNN_SCRIPT_DIR,
                                    env=script_environment,
                                    timeout=TRAINING_TIMEOUT)
コード例 #8
0
    def test_timeout(self):
        """A process exceeding the timeout is sent exactly one SIGKILL."""
        with mock.patch('subprocess.Popen',
                        mock_popen_factory(1.0, '', 0.0)) as mock_popen:
            runner = new_process.ProcessRunner('/test/path')
            runner.run_and_wait(timeout=0.5)

            # One signal (SIGKILL) should arrive at roughly 0.5 seconds.
            signals = mock_popen.received_signals
            self.assertEqual(1, len(signals))
            self.assertLess(abs(signals[0][1] - 0.5), self.TIME_ERROR)
            self.assertEqual(Signal.KILL, signals[0][0])
コード例 #9
0
 def test_results_timeout(self):
     """A timed-out run reports partial output and no return code."""
     with mock.patch('subprocess.Popen',
                     mock_popen_factory(1.0, 'output', 0.0, 0)):
         runner = new_process.ProcessRunner('/test/path',
                                            default_args=['-arg1', '-arg2'])
         results = runner.run_and_wait(timeout=0.5)
         self.assertEqual(['/test/path', '-arg1', '-arg2'], results.command)
         self.assertIsNone(results.return_code)
         self.assertEqual('output', results.output)
         self.assertLess(abs(results.time_executed - 0.5), self.TIME_ERROR)
         self.assertTrue(results.timed_out)
コード例 #10
0
    def test_terminate_before_kill_timeout(self):
        """A SIGTERM handler slower than terminate_wait_time leads to a kill."""
        with mock.patch('subprocess.Popen',
                        mock_popen_factory(1.0, '', 1.0)) as mock_popen:
            runner = new_process.ProcessRunner('/test/path')
            runner.run_and_wait(timeout=0.5,
                                terminate_before_kill=True,
                                terminate_wait_time=0.5)

            # One signal (SIGKILL) should arrive at roughly 0.5 seconds.
            signals = mock_popen.received_signals
            self.assertEqual(1, len(signals))
            self.assertLess(abs(signals[0][1] - 0.5), self.TIME_ERROR)
            self.assertEqual(Signal.KILL, signals[0][0])
コード例 #11
0
    def test_terminate_before_kill_no_sigterm_timeout(self):
        """If the SIGTERM handler finishes within terminate_wait_time, no kill follows."""
        with mock.patch('subprocess.Popen',
                        mock_popen_factory(1.0, '', 0.5)) as mock_popen:
            runner = new_process.ProcessRunner('/test/path')
            runner.run_and_wait(timeout=0.5,
                                terminate_before_kill=True,
                                terminate_wait_time=1.0)

            # A single SIGTERM at roughly 0.5 seconds, and nothing else.
            signals = mock_popen.received_signals
            self.assertEqual(1, len(signals))
            self.assertLess(abs(signals[0][1] - 0.5), self.TIME_ERROR)
            self.assertEqual(Signal.TERM, signals[0][0])
コード例 #12
0
def generate_new_testcase_mutations_using_radamsa(
        corpus_directory, new_testcase_mutations_directory,
        generation_timeout):
    """Generate new testcase mutations based on Radamsa."""
    radamsa_path = get_radamsa_path()
    if not radamsa_path:
        # Radamsa is unsupported on the current platform; nothing to do.
        return

    radamsa_runner = new_process.ProcessRunner(radamsa_path)
    candidate_files = [
        path for path in shell.get_files_list(corpus_directory)
        if os.path.getsize(path) <= CORPUS_INPUT_SIZE_LIMIT
    ]
    if not candidate_files:
        # Empty corpus, or every file is too large to mutate.
        return

    old_corpus_size = shell.get_directory_file_count(
        new_testcase_mutations_directory)
    deadline = time.time() + generation_timeout

    for index in range(RADAMSA_MUTATIONS):
        seed_path = random_choice(candidate_files)
        seed_name = os.path.basename(seed_path)
        output_path = os.path.join(
            new_testcase_mutations_directory,
            get_radamsa_output_filename(seed_name, index))

        result = radamsa_runner.run_and_wait(
            ['-o', output_path, seed_path], timeout=RADAMSA_TIMEOUT)

        output_too_large = (
            os.path.exists(output_path)
            and os.path.getsize(output_path) > CORPUS_INPUT_SIZE_LIMIT)
        if output_too_large:
            # Discard oversized outputs: they would seed further large
            # mutations and hurt fuzzing efficiency.
            shell.remove_file(output_path)
        elif result.return_code or result.timed_out:
            logs.log_warn('Radamsa failed to mutate or timed out.',
                          output=result.output)

        # Stop mutating once the overall generation budget is exhausted.
        if time.time() > deadline:
            break

    new_corpus_size = shell.get_directory_file_count(
        new_testcase_mutations_directory)
    logs.log('Added %d tests using Radamsa mutations.' %
             (new_corpus_size - old_corpus_size))
コード例 #13
0
def start_emulator():
  """Start the Android emulator and return its running process handle."""
  root_dir = environment.get_value('ROOT_DIR')

  emulator_path = os.path.join(root_dir, EMULATOR_RELATIVE_PATH)
  runner = new_process.ProcessRunner(
      emulator_path,
      ['-avd', 'TestImage', '-writable-system', '-partition-size', '2048'])
  emulator_process = runner.run()

  # Running adb commands too soon after emulator startup is flaky and can
  # produce errors, so wait a short while for it to come up.
  # TODO(mbarbella): This is slow and flaky, but wait-for-device isn't usable if
  # another device is connected (as we don't know the serial yet). Find a better
  # solution.
  time.sleep(30)

  return emulator_process
コード例 #14
0
ファイル: device.py プロジェクト: stjordanis/clusterfuzz
def add_keys_to_zbi(fuchsia_resources_dir, initrd_path, fuchsia_zbi):
  """Adds keys to the ZBI so we can SSH into it. See:
  fuchsia.googlesource.com/fuchsia/+/refs/heads/master/sdk/docs/ssh.md"""
  zbi_tool = os.path.join(fuchsia_resources_dir, 'build', 'out',
                          'default.zircon', 'tools', 'zbi')
  # Make sure the bundled tool is executable by the current user.
  os.chmod(zbi_tool, 0o500)
  authorized_keys_path = os.path.join(fuchsia_resources_dir, '.ssh',
                                      'authorized_keys')
  zbi_runner = new_process.ProcessRunner(zbi_tool, [
      '-o', initrd_path, fuchsia_zbi, '-e',
      'data/ssh/authorized_keys=' + authorized_keys_path
  ])
  result = zbi_runner.run_and_wait()
  if result.return_code or result.timed_out:
    raise errors.FuchsiaSdkError('Failed to add keys to Fuchsia ZBI: ' +
                                 result.output)
  # Make the patched image world-readable.
  os.chmod(initrd_path, 0o644)
コード例 #15
0
def setup_asan_if_needed():
    """Set up asan on device."""
    if not environment.get_value('ASAN_DEVICE_SETUP'):
        # Explicit opt-in via the job type only. Presence of libraries in the
        # application directory is not a reliable signal, since they can go
        # missing in a bad build — and we want to catch that case.
        return

    if settings.get_sanitizer_tool_name():
        # Sanitizer builds are incompatible with device-level ASan setup.
        return

    app_directory = environment.get_value('APP_DIR')
    if not app_directory:
        # No app directory means no ASan runtime library; nothing to do.
        return

    android_directory = environment.get_platform_resources_directory()
    device_id = environment.get_value('ANDROID_SERIAL')

    # Run the ASan device setup script shipped with our platform resources.
    logs.log('Executing ASan device setup script.')
    setup_script_path = os.path.join(android_directory, 'third_party',
                                     'asan_device_setup.sh')
    extra_options = 'include_if_exists=' + get_options_file_path('asan')
    setup_script_args = [
        '--lib', app_directory, '--device', device_id, '--extra-options',
        extra_options
    ]

    runner = new_process.ProcessRunner(setup_script_path, setup_script_args)
    result = runner.run_and_wait()
    if result.return_code:
        logs.log_error('Failed to setup ASan on device.', output=result.output)
        return

    logs.log(
        'ASan device setup script successfully finished, waiting for boot.',
        output=result.output)

    # A shell restart followed by a quick reboot can corrupt /data/data, so
    # wait until the device is fully booted.
    adb.wait_until_fully_booted()
コード例 #16
0
ファイル: device.py プロジェクト: stjordanis/clusterfuzz
def extend_fvm(fuchsia_resources_dir, orig_drive_path, drive_path):
  """The FVM is minimally sized to begin with; make an extended copy
  of it to make room for ephemeral packages etc."""
  fvm_tool_path = os.path.join(fuchsia_resources_dir, 'build', 'out',
                               'default.zircon', 'tools', 'fvm')
  # Make sure the bundled tool is executable by the current user.
  os.chmod(fvm_tool_path, 0o500)

  # The fvm tool modifies the image in place, so work on a copy to keep the
  # build unmutated (required for running undercoat on a cached build
  # previously affected by this legacy codepath).
  shutil.copy(orig_drive_path, drive_path)

  fvm_runner = new_process.ProcessRunner(
      fvm_tool_path, [drive_path, 'extend', '--length', '3G'])
  result = fvm_runner.run_and_wait()
  if result.return_code or result.timed_out:
    raise errors.FuchsiaSdkError('Failed to extend FVM: ' + result.output)

  # Make the extended image readable.
  os.chmod(drive_path, 0o644)
コード例 #17
0
def get_devices():
  """Get a list of all connected Android devices."""
  adb_runner = new_process.ProcessRunner(adb.get_adb_path())
  result = adb_runner.run_and_wait(additional_args=['devices'])

  if result.return_code:
    raise errors.ReproduceToolUnrecoverableError('Unable to run adb.')

  devices = []
  in_device_list = False
  for line in result.output.splitlines():
    # Everything before the "List of devices attached" separator is noise.
    if line == ADB_DEVICES_SEPARATOR_STRING:
      in_device_list = True
      continue
    if in_device_list and line:
      # The serial is the first whitespace-separated token on each line.
      devices.append(line.split()[0])

  return devices
コード例 #18
0
ファイル: undercoat.py プロジェクト: google/clusterfuzz
def undercoat_api_command(*args):
  """Make an API call to the undercoat binary."""
  logs.log(f'Running undercoat command {args}')
  bundle_dir = environment.get_value('FUCHSIA_RESOURCES_DIR')
  undercoat_path = os.path.join(bundle_dir, 'undercoat', 'undercoat')
  runner = new_process.ProcessRunner(undercoat_path, args)

  # Undercoat writes its log to stderr; capture it in a tempfile so it can be
  # dumped on failure.
  with tempfile.TemporaryFile() as undercoat_log:
    result = runner.run_and_wait(
        stderr=undercoat_log, extra_env={'TMPDIR': get_temp_dir()})
    result.output = utils.decode_to_unicode(result.output)

    if result.return_code != 0:
      # Dump the undercoat log to assist in debugging.
      log_data = utils.read_from_handle_truncated(undercoat_log, 1024 * 1024)
      logs.log_warn('Log output from undercoat: ' +
                    utils.decode_to_unicode(log_data))

      # The API error message is returned on stdout.
      raise UndercoatError(
          'Error running undercoat command %s: %s' % (args, result.output))

  return result
コード例 #19
0
ファイル: device.py プロジェクト: stjordanis/clusterfuzz
  def create(self):
    """Configures a QEMU process which can subsequently be `run`.

    Assumes that initial_qemu_setup was already called exactly once.
    """
    qemu_vars = _fetch_qemu_vars()

    # Get a free port for the VM, so we can SSH in later.
    # NOTE(review): the port is released by close() before QEMU binds it, so
    # another process could in principle claim it in between — presumably an
    # accepted race here; confirm.
    tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    tcp.bind(('localhost', 0))
    _, port = tcp.getsockname()
    tcp.close()
    # Fuzzing jobs that SSH into the QEMU VM need access to this env var.
    environment.set_value('FUCHSIA_PORTNUM', port)
    environment.set_value('FUCHSIA_RESOURCES_DIR',
                          qemu_vars['fuchsia_resources_dir'])

    # QEMU invocation: 3 GB RAM, 4 CPUs, headless, raw FVM drive as a
    # virtio-blk device, and user-mode networking with the chosen host port
    # forwarded to the guest's SSH port (22).
    # yapf: disable
    qemu_args = [
        '-m', '3072',
        '-nographic',
        '-kernel', qemu_vars['kernel_path'],
        '-initrd', qemu_vars['initrd_path'],
        '-smp', '4',
        '-drive',
        ('file=' + qemu_vars['drive_path'] + ',format=raw,if=none,'
         'id=blobstore'),
        '-device', 'virtio-blk-pci,drive=blobstore',
        '-monitor', 'none',
        '-append', 'kernel.serial=legacy TERM=dumb',
        '-machine', 'q35',
        '-display', 'none',
        '-netdev',
        ('user,id=net0,net=192.168.3.0/24,dhcpstart=192.168.3.9,'
         'host=192.168.3.2,hostfwd=tcp::') + str(port) + '-:22',
        '-device', 'e1000,netdev=net0,mac=52:54:00:63:5e:7b',
        '-L', qemu_vars['sharefiles_path']
    ]
    # yapf: enable

    # Detecting KVM is tricky, so use an environment variable to determine
    # whether to turn it on or not.
    if environment.get_value('FUCHSIA_USE_KVM'):
      # In builds before fxrev.dev/375343, a bug prevents booting with newer
      # versions of KVM. On some of these older builds,
      # `kernel.x86.disable-spec-mitigations` also doesn't work as
      # expected, so we work around this by selecting a CPU type where the
      # speculation mitigation will not be applied.
      if environment.get_value('APP_REVISION') < 20200414210423:
        qemu_args.extend(['-cpu', 'Opteron_G5,+invtsc'])
      else:
        qemu_args.extend(['-cpu', 'host,migratable=no,+invtsc'])
      qemu_args.append('-enable-kvm')
    else:
      # Can't use host CPU since we don't necessarily have KVM on the machine.
      # Emulate a Haswell CPU with a few feature toggles. This mirrors the most
      # common configuration for Fuchsia VMs when using in-tree tools.
      qemu_args.extend(['-cpu', 'Haswell,+smap,-check,-fsgsbase'])

    # Get the list of fuzzers for ClusterFuzz to choose from.
    host = Host.from_dir(
        os.path.join(qemu_vars['fuchsia_resources_dir'], 'build', 'out',
                     'default'))
    # NOTE(review): Device(...) and Fuzzer.filter(...) appear to be called for
    # their side effects on `host`; the Device instance is discarded — confirm
    # this is intentional.
    Device(host, 'localhost', str(port))
    Fuzzer.filter(host.fuzzers, '')

    # Fuzzing jobs that SSH into the QEMU VM need access to this env var.
    environment.set_value('FUCHSIA_PKEY_PATH', qemu_vars['pkey_path'])
    logs.log('Ready to run QEMU. Command: ' + qemu_vars['qemu_path'] + ' ' +
             ' '.join(shlex.quote(arg) for arg in qemu_args))
    # Store the configured runner; `run` actually launches QEMU later.
    self.process_runner = new_process.ProcessRunner(qemu_vars['qemu_path'],
                                                    qemu_args)
コード例 #20
0
ファイル: emulator.py プロジェクト: vanhauser-thc/clusterfuzz
 def create(self, work_dir):
     """Configures a emulator process which can subsequently be `run`."""
     emulator_script = os.path.join(work_dir, '../emulator/run')
     self.process_runner = new_process.ProcessRunner(emulator_script)
コード例 #21
0
 def test_basic(self):
     """A runner with no default args yields just the executable path."""
     runner = new_process.ProcessRunner('/test/path')
     command = runner.get_command()
     self.assertEqual(['/test/path'], command)
コード例 #22
0
 def test_under_limit(self):
     """Stdout below max_stdout_len is returned untouched."""
     runner = new_process.ProcessRunner('python')
     result = runner.run_and_wait(['-c', 'print("A"*62)'],
                                  max_stdout_len=64)
     self.assertEqual(b'A' * 62 + b'\n', result.output)