Example 1
def _setup_x():
    """Start Xvfb and blackbox before running the test application."""
    if environment.platform() != 'LINUX':
        return []

    if environment.is_engine_fuzzer_job():
        # For engine fuzzer jobs like AFL and libFuzzer, Xvfb is not needed as
        # those fuzz targets do not need a UI.
        return []

    environment.set_value('DISPLAY', DISPLAY)

    print('Starting Xvfb...')
    xvfb_runner = new_process.ProcessRunner('/usr/bin/Xvfb')
    xvfb_process = xvfb_runner.run(additional_args=[
        DISPLAY, '-screen', '0', '1280x1024x24', '-ac', '-nolisten', 'tcp'
    ])
    time.sleep(PROCESS_START_WAIT_SECONDS)

    print('Starting blackbox...')
    blackbox_runner = new_process.ProcessRunner('/usr/bin/blackbox')
    blackbox_process = blackbox_runner.run()
    time.sleep(PROCESS_START_WAIT_SECONDS)

    # Return all handles we create so they can be terminated properly at exit.
    return [xvfb_process, blackbox_process]
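
A matching cleanup is the natural companion to this setup. The sketch below assumes run() returns subprocess.Popen-style handles (poll() and terminate() are standard Popen methods); the helper name _stop_x is hypothetical, not from the source.

def _stop_x(processes):
    """Terminate the display processes returned by _setup_x()."""
    for process in processes:
        if process.poll() is None:  # Still running.
            process.terminate()
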
Example 2
def _setup_x():
    """Start Xvfb and blackbox before running the test application."""
    if environment.platform() != "LINUX":
        return []

    if environment.is_engine_fuzzer_job():
        # For engine fuzzer jobs like AFL and libFuzzer, Xvfb is not needed as
        # those fuzz targets do not need a UI.
        return []

    environment.set_value("DISPLAY", DISPLAY)

    print("Creating virtual display...")
    xvfb_runner = new_process.ProcessRunner("/usr/bin/Xvfb")
    xvfb_process = xvfb_runner.run(additional_args=[
        DISPLAY,
        "-screen",
        "0",
        "1280x1024x24",
        "-ac",
        "-nolisten",
        "tcp",
    ])
    time.sleep(PROCESS_START_WAIT_SECONDS)

    blackbox_runner = new_process.ProcessRunner("/usr/bin/blackbox")
    blackbox_process = blackbox_runner.run()
    time.sleep(PROCESS_START_WAIT_SECONDS)

    # Return all handles we create so they can be terminated properly at exit.
    return [xvfb_process, blackbox_process]
Example 3
    def create(self, work_dir):
        """Configures a emulator process which can subsequently be `run`."""
        # Download emulator image.
        if not environment.get_value('ANDROID_EMULATOR_BUCKET_PATH'):
            logs.log_error('ANDROID_EMULATOR_BUCKET_PATH is not set.')
            return
        archive_src_path = environment.get_value(
            'ANDROID_EMULATOR_BUCKET_PATH')
        archive_dst_path = os.path.join(work_dir, 'emulator_bundle.zip')
        storage.copy_file_from(archive_src_path, archive_dst_path)

        # Extract emulator image.
        self.emulator_path = os.path.join(work_dir, 'emulator')
        shell.remove_directory(self.emulator_path)
        archive.unpack(archive_dst_path, self.emulator_path)
        shell.remove_file(archive_dst_path)

        # Stop any stale emulator instances.
        stop_script_path = os.path.join(self.emulator_path, 'stop')
        stop_proc = new_process.ProcessRunner(stop_script_path)
        stop_proc.run_and_wait()

        # Run emulator.
        run_script_path = os.path.join(self.emulator_path, 'run')
        self.process_runner = new_process.ProcessRunner(run_script_path)
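
A hypothetical caller of create(); the Emulator class name and the work directory are assumptions for illustration, not from the source:

emulator = Emulator()
emulator.create(work_dir='/tmp/emulator_work')
if getattr(emulator, 'process_runner', None):  # create() may bail out early.
    emulator_process = emulator.process_runner.run()
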
Example 4
def run(input_directory,
        output_directory,
        model_path,
        generation_timeout,
        generation_count=None,
        hidden_state_size=None,
        hidden_layer_size=None):
  """Generate inputs with specified model paramters.

  Args:
    input_directory: Corpus directory. Required argument for generation script.
    output_directory: New inputs directory. Required argument for generation
        script.
    model_path: Model path. Required argument for generation script.
    generation_timeout: Timeout for running generation process.
    generation_count: Number of inputs to generate. Required argument for
        generation script.
    hidden_state_size: Hidden state size of LSTM cell.
    hidden_layer_size: Hidden layer size of LSTM model.

  Returns:
    Result of running generation process. Format is defined by
    ProcessRunner.run_and_wait().
  """
  # Get generation script path.
  script_path = os.path.join(ML_RNN_SCRIPT_DIR,
                             constants.GENERATION_SCRIPT_NAME)

  # Wrap command arguments.
  args_list = [
      script_path,
      constants.INPUT_DIR_ARGUMENT_PREFIX + input_directory,
      constants.OUTPUT_DIR_ARGUMENT_PREFIX + output_directory,
      constants.MODEL_PATH_ARGUMENT_PREFIX + model_path,
  ]

  if generation_count:
    args_list.append(constants.GENERATION_COUNT_ARGUMENT_PREFIX +
                     str(generation_count))
  else:
    args_list.append(constants.GENERATION_COUNT_ARGUMENT_PREFIX +
                     str(GENERATION_MAX_COUNT))

  # Optional arguments.
  if hidden_state_size:
    args_list.append(constants.HIDDEN_STATE_ARGUMENT_PREFIX +
                     str(hidden_state_size))
  if hidden_layer_size:
    args_list.append(constants.HIDDEN_LAYER_ARGUMENT_PREFIX +
                     str(hidden_layer_size))

  script_environment = os.environ.copy()

  # Run process in script directory.
  rnn_runner = new_process.ProcessRunner('python')
  return rnn_runner.run_and_wait(
      args_list,
      cwd=ML_RNN_SCRIPT_DIR,
      env=script_environment,
      timeout=generation_timeout)
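
A hypothetical invocation of run() above; the paths and the 30-minute timeout are illustrative only:

result = run(
    input_directory='/corpus/input',
    output_directory='/corpus/generated',
    model_path='/models/rnn_model',
    generation_timeout=30 * 60)
if result.return_code:
    logs.log_error('Input generation failed.', output=result.output)
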
Example 5
def gen_inputs_labels(corpus_directory, fuzzer_binary_path):
  """
  Generates inputs and labels from raw input corpus.

  Args:
    corpus_directory (str): Path to raw inputs.
    fuzzer_binary_path (str): Path to compiled fuzz target binary.

  Returns:
    (new_process.ProcessResult): Result of `run_and_wait()`.
    (str): Dataset name (results stored under
        GRADIENTFUZZ_SCRIPTS_DIR/data/[dataset_name]).
  """
  script_path = get_script_path(run_constants.GENERATE_DATA_SCRIPT)
  dataset_name = os.path.basename(corpus_directory)
  args_list = [
      script_path,
      run_constants.FUZZ_TARGET_BINARY_FLAG,
      fuzzer_binary_path,
      run_constants.INPUT_DIR_FLAG,
      corpus_directory,
      run_constants.DATASET_NAME_FLAG,
      dataset_name,
      run_constants.MEDIAN_MULT_FLAG,
      run_constants.DEFAULT_MEDIAN_MULT_CUTOFF,
  ]

  logs.log(f'Launching input gen with args: "{args_list}".')

  # Run process in GradientFuzz directory.
  data_gen_proc = new_process.ProcessRunner(sys.executable)
  return data_gen_proc.run_and_wait(
      additional_args=args_list,
      cwd=GRADIENTFUZZ_SCRIPTS_DIR,
      timeout=run_constants.DATA_GEN_TIMEOUT), dataset_name
Example 6
def run_and_wait(request, _):
    """Implementation of RunAndWait."""
    process_runner = new_process.ProcessRunner(request.executable_path,
                                               request.default_args)
    args = {}
    protobuf_utils.get_protobuf_field(args, request.popen_args, 'bufsize')
    protobuf_utils.get_protobuf_field(args, request.popen_args, 'executable')
    protobuf_utils.get_protobuf_field(args, request.popen_args, 'shell')
    protobuf_utils.get_protobuf_field(args, request.popen_args, 'cwd')

    if request.popen_args.env_is_set:
        args['env'] = request.popen_args.env
    else:
        args['env'] = None

    args['additional_args'] = request.additional_args
    protobuf_utils.get_protobuf_field(args, request, 'timeout')
    protobuf_utils.get_protobuf_field(args, request, 'terminate_before_kill')
    protobuf_utils.get_protobuf_field(args, request, 'terminate_wait_time')
    protobuf_utils.get_protobuf_field(args, request, 'input_data')
    protobuf_utils.get_protobuf_field(args, request, 'max_stdout_len')

    logs.log('Running command: %s' % process_runner.get_command())

    return untrusted_runner_pb2.RunAndWaitResponse(
        result=process_result_to_proto(process_runner.run_and_wait(**args)))
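
protobuf_utils.get_protobuf_field is not shown in this listing. From its use above it presumably copies a field into the dict only when the field is set on the message; a minimal sketch under that assumption (HasField semantics differ between proto2 and proto3 scalars, so treat this as illustrative only):

def get_protobuf_field(args, message, name):
    """Copy `name` from `message` into `args` if the field is present."""
    if message.HasField(name):  # Assumption about the real helper's check.
        args[name] = getattr(message, name)
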
Example 7
def _get_runner():
  """Get the honggfuzz runner."""
  honggfuzz_path = os.path.join(environment.get_value("BUILD_DIR"), "honggfuzz")
  if not os.path.exists(honggfuzz_path):
    raise HonggfuzzError("honggfuzz not found in build")

  os.chmod(honggfuzz_path, 0o755)
  return new_process.ProcessRunner(honggfuzz_path)
Example 8
  def test_over_limit(self):
    """Test stdout over limit."""
    runner = new_process.ProcessRunner('python')
    result = runner.run_and_wait(
        ['-c', 'print("A" + "B"*499 + "C"*499 + "D")'], max_stdout_len=64)
    self.assertEqual(
        'A' + 'B' * 31 + '\n...truncated 937 bytes...\n' + 'C' * 30 + 'D' +
        '\n', result.output)
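
The expected value implies head-and-tail truncation: the command prints 1000 characters plus a newline (1001 bytes), and with max_stdout_len=64 the first and last 32 bytes are kept around a truncation marker (1001 - 64 = 937 bytes dropped). A sketch of that presumed logic:

def _truncate(output, max_len):
    """Keep the head and tail halves of `output`, marking what was dropped."""
    if len(output) <= max_len:
        return output
    half = max_len // 2
    return (output[:half] +
            '\n...truncated %d bytes...\n' % (len(output) - max_len) +
            output[-half:])
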
Example 9
def _get_runner():
    """Get the honggfuzz runner."""
    honggfuzz_path = os.path.join(environment.get_value('BUILD_DIR'),
                                  'honggfuzz')
    if not os.path.exists(honggfuzz_path):
        raise HonggfuzzError('honggfuzz not found in build')

    return new_process.ProcessRunner(honggfuzz_path)
Example 10
  def test_additional_args(self):
    """Tests basic command line with default arguments and additional
    arguments."""
    runner = new_process.ProcessRunner(
        '/test/path', default_args=['-arg1', '-arg2'])
    self.assertEqual(
        runner.get_command(additional_args=['-arg3', '-arg4']),
        ['/test/path', '-arg1', '-arg2', '-arg3', '-arg4'])
Example 11
  def test_no_timeout(self):
    """Tests process exiting before timeout."""
    with mock.patch('subprocess.Popen', mock_popen_factory(0.5, '',
                                                           0.0)) as mock_popen:
      runner = new_process.ProcessRunner('/test/path')
      runner.run_and_wait(timeout=5.0)

      # No signals should be sent.
      self.assertEqual(len(mock_popen.received_signals), 0)
Example 12
  def _test_qemu_ssh(self):
    """Tests that a VM is up and can be successfully SSH'd into.
    Raises an exception if no success after MAX_SSH_RETRIES."""
    ssh_test_process = new_process.ProcessRunner('ssh', self.ssh_args + ['ls'])
    result = ssh_test_process.run_and_wait()
    if result.return_code or result.timed_out:
      raise fuchsia.errors.FuchsiaConnectionError(
          'Failed to establish initial SSH connection: ' +
          str(result.return_code))
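
The retry implied by the docstring presumably lives in the caller; a sketch of that loop, where MAX_SSH_RETRIES and the wait constant are assumptions:

for attempt in range(MAX_SSH_RETRIES):
    try:
        self._test_qemu_ssh()
        break
    except fuchsia.errors.FuchsiaConnectionError:
        if attempt == MAX_SSH_RETRIES - 1:
            raise
        time.sleep(SSH_RETRY_WAIT_SECONDS)  # Hypothetical constant.
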
Example 13
def extend_fvm(fuchsia_resources_dir, drive_path):
    """The FVM is minimally sized to begin with; extend it to make room for
    ephemeral packages etc."""
    fvm_tool_path = os.path.join(fuchsia_resources_dir, 'tools', 'fvm')
    os.chmod(fvm_tool_path, 0o500)
    process = new_process.ProcessRunner(
        fvm_tool_path, [drive_path, 'extend', '--length', '1G'])
    result = process.run_and_wait()
    if result.return_code or result.timed_out:
        raise errors.FuchsiaSdkError('Failed to extend FVM: ' + result.output)
Example 14
def train_rnn(input_directory,
              model_directory,
              log_directory,
              batch_size=None,
              hidden_state_size=None,
              hidden_layer_size=None):
    """Train ML RNN model.

    Args:
      input_directory: Corpus directory. Required argument for training script.
      model_directory: The directory to save models. Required argument for
          training script.
      log_directory: The directory to keep logs. Required argument for training
          script.
      batch_size: Batch size in each loop.
      hidden_state_size: Hidden state size of LSTM cell.
      hidden_layer_size: Hidden layer size of LSTM model.

    Returns:
      Training result. An object of class `new_process.ProcessResult`.
    """
    # Get the script path to run the model.
    script_path = get_model_script_path()

    # Wrap command and arguments to run training script.
    args_list = [
        script_path,
        constants.INPUT_DIR_ARGUMENT_PREFIX + input_directory,
        constants.MODEL_DIR_ARGUMENT_PREFIX + model_directory,
        constants.LOG_DIR_ARGUMENT_PREFIX + log_directory,
    ]

    # Optional argument.
    if batch_size:
        args_list.append(constants.BATCH_SIZE_ARGUMENT_PREFIX +
                         str(batch_size))
    if hidden_state_size:
        args_list.append(constants.HIDDEN_STATE_ARGUMENT_PREFIX +
                         str(hidden_state_size))
    if hidden_layer_size:
        args_list.append(constants.HIDDEN_LAYER_ARGUMENT_PREFIX +
                         str(hidden_layer_size))

    script_environment = os.environ.copy()

    logs.log('Launching the training with the following arguments: "%s".' %
             str(args_list))

    # Run process in rnn directory.
    rnn_trainer = new_process.ProcessRunner(sys.executable)

    return rnn_trainer.run_and_wait(args_list,
                                    cwd=ML_RNN_SCRIPT_DIR,
                                    env=script_environment,
                                    timeout=TRAINING_TIMEOUT)
Example 15
def extend_fvm(fuchsia_resources_dir, drive_path):
    """The FVM is minimally sized to begin with; extend it to make room for
    ephemeral packages etc."""
    fvm_tool_path = os.path.join(fuchsia_resources_dir, "build", "out",
                                 "default.zircon", "tools", "fvm")
    os.chmod(fvm_tool_path, 0o500)
    process = new_process.ProcessRunner(
        fvm_tool_path, [drive_path, "extend", "--length", "3G"])
    result = process.run_and_wait()
    if result.return_code or result.timed_out:
        raise errors.FuchsiaSdkError("Failed to extend FVM: " + result.output)
Example 16
  def test_timeout(self):
    """Tests timeout signals."""
    with mock.patch('subprocess.Popen', mock_popen_factory(1.0, '',
                                                           0.0)) as mock_popen:
      runner = new_process.ProcessRunner('/test/path')
      runner.run_and_wait(timeout=0.5)

      # Single signal (SIGKILL) should arrive in 0.5 seconds.
      self.assertEqual(len(mock_popen.received_signals), 1)
      self.assertLess(
          abs(mock_popen.received_signals[0][1] - 0.5), self.TIME_ERROR)
      self.assertEqual(mock_popen.received_signals[0][0], Signal.KILL)
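
In standard-library terms, the kill-on-timeout behavior exercised here resembles the following sketch (not the actual ProcessRunner implementation):

import subprocess

def run_with_timeout(command, timeout):
    proc = subprocess.Popen(command)
    try:
        proc.wait(timeout=timeout)
    except subprocess.TimeoutExpired:
        proc.kill()  # Single SIGKILL once the timeout expires.
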
Example 17
def _setup_x():
    """Start Xvfb and blackbox before running the test application."""
    if environment.platform() != 'LINUX':
        return []

    environment.set_value('DISPLAY', DISPLAY)

    print('Starting Xvfb...')
    xvfb_runner = new_process.ProcessRunner('/usr/bin/Xvfb')
    xvfb_process = xvfb_runner.run(additional_args=[
        DISPLAY, '-screen', '0', '1280x1024x24', '-ac', '-nolisten', 'tcp'
    ])
    time.sleep(5)  # Allow some time for Xvfb to start.

    print('Starting blackbox...')
    blackbox_runner = new_process.ProcessRunner('/usr/bin/blackbox')
    blackbox_process = blackbox_runner.run()
    time.sleep(5)  # Allow some time for blackbox to start.

    # Return all handles we create so they can be terminated properly at exit.
    return [xvfb_process, blackbox_process]
Example 18
  def test_results_timeout(self):
    """Test process execution results with timeout."""
    with mock.patch('subprocess.Popen', mock_popen_factory(
        1.0, 'output', 0.0, 0)):
      runner = new_process.ProcessRunner(
          '/test/path', default_args=['-arg1', '-arg2'])
      results = runner.run_and_wait(timeout=0.5)
      self.assertEqual(results.command, ['/test/path', '-arg1', '-arg2'])
      self.assertEqual(results.return_code, None)
      self.assertEqual(results.output, 'output')
      self.assertLess(abs(results.time_executed - 0.5), self.TIME_ERROR)
      self.assertTrue(results.timed_out)
Example 19
    def _test_qemu_ssh(self):
        """Tests that a VM is up and can be successfully SSH'd into.
        Raises an exception if no success after MAX_SSH_RETRIES."""
        print('Attempting SSH. Command: ssh ' + str(self.ssh_args))
        ssh_test_process = new_process.ProcessRunner(
            'ssh', self.ssh_args + ['echo running on fuchsia!'])
        result = ssh_test_process.run_and_wait()
        if result.return_code or result.timed_out:
            raise fuchsia.errors.FuchsiaConnectionError(
                'Failed to establish initial SSH connection: ' +
                str(result.return_code) + ' , ' + str(result.command) + ' , ' +
                str(result.output))
        return result
Example 20
  def test_terminate_before_kill_timeout(self):
    """Tests process kill handler called on timeout."""
    with mock.patch('subprocess.Popen', mock_popen_factory(1.0, '',
                                                           1.0)) as mock_popen:
      runner = new_process.ProcessRunner('/test/path')
      runner.run_and_wait(
          timeout=0.5, terminate_before_kill=True, terminate_wait_time=0.5)

      # Single signal (SIGKILL) should arrive in 0.5 seconds.
      self.assertEqual(len(mock_popen.received_signals), 1)
      self.assertLess(
          abs(mock_popen.received_signals[0][1] - 0.5), self.TIME_ERROR)
      self.assertEqual(mock_popen.received_signals[0][0], Signal.KILL)
Example 21
  def test_terminate_before_kill_no_sigterm_timeout(self):
    """Tests process sigterm handler completing before terminate_wait_time."""
    with mock.patch('subprocess.Popen', mock_popen_factory(1.0, '',
                                                           0.5)) as mock_popen:
      runner = new_process.ProcessRunner('/test/path')
      runner.run_and_wait(
          timeout=0.5, terminate_before_kill=True, terminate_wait_time=1.0)

      # Single signal (SIGTERM) in 0.5 seconds.
      self.assertEqual(len(mock_popen.received_signals), 1)
      self.assertLess(
          abs(mock_popen.received_signals[0][1] - 0.5), self.TIME_ERROR)
      self.assertEqual(mock_popen.received_signals[0][0], Signal.TERM)
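
Together, the last two tests describe terminate_before_kill semantics: SIGTERM first, then SIGKILL only if the handler takes longer than terminate_wait_time. A standard-library sketch of that escalation (again, not the real implementation):

import subprocess

def run_with_graceful_timeout(command, timeout, terminate_wait_time):
    proc = subprocess.Popen(command)
    try:
        proc.wait(timeout=timeout)
    except subprocess.TimeoutExpired:
        proc.terminate()  # SIGTERM; may be handled in time (Example 21).
        try:
            proc.wait(timeout=terminate_wait_time)
        except subprocess.TimeoutExpired:
            proc.kill()  # Escalate to SIGKILL (Example 20).
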
Example 22
def setup_asan_if_needed():
    """Set up asan on device."""
    if not environment.get_value("ASAN_DEVICE_SETUP"):
        # Only do this step if explicitly enabled in the job type. This cannot be
        # determined from libraries in application directory since they can go
        # missing in a bad build, so we want to catch that.
        return

    if settings.get_sanitizer_tool_name():
        # If this is a sanitizer build, no need to setup ASAN (incompatible).
        return

    app_directory = environment.get_value("APP_DIR")
    if not app_directory:
        # No app directory -> No ASAN runtime library. No work to do, bail out.
        return

    # Initialize variables.
    android_directory = environment.get_platform_resources_directory()
    device_id = environment.get_value("ANDROID_SERIAL")

    # Execute the script.
    logs.log("Executing ASan device setup script.")
    asan_device_setup_script_path = os.path.join(android_directory,
                                                 "third_party",
                                                 "asan_device_setup.sh")
    extra_options_arg = "include_if_exists=" + get_options_file_path("asan")
    asan_device_setup_script_args = [
        "--lib",
        app_directory,
        "--device",
        device_id,
        "--extra-options",
        extra_options_arg,
    ]

    process = new_process.ProcessRunner(asan_device_setup_script_path,
                                        asan_device_setup_script_args)
    result = process.run_and_wait()
    if result.return_code:
        logs.log_error("Failed to setup ASan on device.", output=result.output)
        return

    logs.log(
        "ASan device setup script successfully finished, waiting for boot.",
        output=result.output,
    )

    # Wait until fully booted as otherwise shell restart followed by a quick
    # reboot can trigger data corruption in /data/data.
    adb.wait_until_fully_booted()
Example 23
    def _test_qemu_ssh(self):
        """Tests that a VM is up and can be successfully SSH'd into.
        Raises an exception if no success after MAX_SSH_RETRIES."""
        ssh_test_process = new_process.ProcessRunner(
            'ssh',
            self.device.get_ssh_cmd(
                ['ssh', 'localhost', 'echo running on fuchsia!'])[1:])
        result = ssh_test_process.run_and_wait()
        if result.return_code or result.timed_out:
            raise fuchsia.errors.FuchsiaConnectionError(
                'Failed to establish initial SSH connection: ' +
                str(result.return_code) + ' , ' + str(result.command) + ' , ' +
                str(result.output))
        return result
Example 24
def generate_new_testcase_mutations_using_radamsa(
        corpus_directory, new_testcase_mutations_directory,
        generation_timeout):
    """Generate new testcase mutations based on Radamsa."""
    radamsa_path = get_radamsa_path()
    if not radamsa_path:
        # Mutations using radamsa are not supported on current platform, bail out.
        return

    radamsa_runner = new_process.ProcessRunner(radamsa_path)
    files_list = shell.get_files_list(corpus_directory)
    filtered_files_list = [
        f for f in files_list if os.path.getsize(f) <= CORPUS_INPUT_SIZE_LIMIT
    ]
    if not filtered_files_list:
        # No mutations to do on an empty corpus or one with very large files.
        return

    old_corpus_size = shell.get_directory_file_count(
        new_testcase_mutations_directory)
    expected_completion_time = time.time() + generation_timeout

    for i in range(RADAMSA_MUTATIONS):
        original_file_path = random_choice(filtered_files_list)
        original_filename = os.path.basename(original_file_path)
        output_path = os.path.join(
            new_testcase_mutations_directory,
            get_radamsa_output_filename(original_filename, i))

        result = radamsa_runner.run_and_wait(
            ['-o', output_path, original_file_path], timeout=RADAMSA_TIMEOUT)

        if (os.path.exists(output_path)
                and os.path.getsize(output_path) > CORPUS_INPUT_SIZE_LIMIT):
            # Skip large files to avoid further mutations and their impact on
            # fuzzing efficiency.
            shell.remove_file(output_path)
        elif result.return_code or result.timed_out:
            logs.log_warn('Radamsa failed to mutate or timed out.',
                          output=result.output)

        # Check if we exceeded our timeout. If yes, do no more mutations and break.
        if time.time() > expected_completion_time:
            break

    new_corpus_size = shell.get_directory_file_count(
        new_testcase_mutations_directory)
    logs.log('Added %d tests using Radamsa mutations.' %
             (new_corpus_size - old_corpus_size))
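
Each loop iteration is roughly equivalent to this standard-library call (a sketch; the real runner adds output capture and kill-on-timeout handling):

import subprocess

subprocess.run(
    [radamsa_path, '-o', output_path, original_file_path],
    timeout=RADAMSA_TIMEOUT)
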
Example 25
def add_keys_to_zbi(fuchsia_resources_dir, initrd_path, fuchsia_zbi):
    """Adds keys to the ZBI so we can SSH into it. See:
    fuchsia.googlesource.com/fuchsia/+/refs/heads/master/sdk/docs/ssh.md"""
    zbi_tool = os.path.join(fuchsia_resources_dir, 'tools', 'zbi')
    os.chmod(zbi_tool, 0o500)
    authorized_keys_path = os.path.join(fuchsia_resources_dir, '.ssh',
                                        'authorized_keys')
    process = new_process.ProcessRunner(zbi_tool, [
        '-o', initrd_path, fuchsia_zbi, '-e',
        'data/ssh/authorized_keys=' + authorized_keys_path
    ])
    result = process.run_and_wait()
    if result.return_code or result.timed_out:
        raise errors.FuchsiaSdkError('Failed to add keys to Fuchsia ZBI: ' +
                                     result.output)
    os.chmod(initrd_path, 0o644)
Example 26
def start_emulator():
    """Return a ProcessRunner configured to start the Android emulator."""
    root_dir = environment.get_value('ROOT_DIR')

    runner = new_process.ProcessRunner(
        os.path.join(root_dir, EMULATOR_RELATIVE_PATH),
        ['-avd', 'TestImage', '-writable-system', '-partition-size', '2048'])
    emulator_process = runner.run()

    # If we run adb commands too soon after the emulator starts, we may see
    # flake or errors. Delay a short while to account for this.
    # TODO(mbarbella): This is slow and flaky, but wait-for-device isn't usable if
    # another device is connected (as we don't know the serial yet). Find a better
    # solution.
    time.sleep(30)

    return emulator_process
Example 27
    def reproduce(self, target_path, input_path, arguments, max_time):
        """Reproduce a crash given an input.

        Args:
          target_path: Path to the target.
          input_path: Path to the reproducer input.
          arguments: Additional arguments needed for reproduction.
          max_time: Maximum allowed time for the reproduction.

        Returns:
          A ReproduceResult.
        """
        runner = new_process.ProcessRunner(target_path)
        with open(input_path) as f:
            result = runner.run_and_wait(timeout=max_time, stdin=f)

        return engine.ReproduceResult(result.command, result.return_code,
                                      result.time_executed, result.output)
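
The reproduction run is roughly this standard-library pattern (a sketch; ProcessRunner additionally tracks execution time and truncates output):

import subprocess

with open(input_path, 'rb') as f:
    completed = subprocess.run(
        [target_path], stdin=f, stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT, timeout=max_time)
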
Example 28
def train_gradientfuzz(fuzzer_name, dataset_name, num_inputs):
  """Train GradientFuzz model.

  Args:
    fuzzer_name (str): Prefix to --run-name flag.
    dataset_name (str): Inputs/labels stored under
        GRADIENTFUZZ_SCRIPTS_DIR/data/[dataset_name].
    num_inputs (int): Number of input files (for val split/batch size).

  Returns:
    (new_process.ProcessResult): Result of `run_and_wait()`.
    (str): Run name (results stored under
        GRADIENTFUZZ_SCRIPTS_DIR/models/[architecture]/[run_name]).
  """
  if num_inputs < run_constants.MIN_NUM_INPUTS:
    return new_process.ProcessResult(
        return_code=run_constants.ExitCode.CORPUS_TOO_SMALL), None

  batch_size = os.environ.get(
      'GRADIENTFUZZ_BATCH_SIZE', default=min(32, int(num_inputs * 0.4)))
  val_batch_size = os.environ.get(
      'GRADIENTFUZZ_VAL_BATCH_SIZE', default=min(32, int(num_inputs * 0.1)))
  num_epochs = os.environ.get(
      'GRADIENTFUZZ_NUM_EPOCHS', default=run_constants.NUM_EPOCHS)

  script_path = get_script_path(run_constants.TRAIN_MODEL_SCRIPT)
  run_name = fuzzer_name + run_constants.RUN_NAME_SUFFIX
  args_list = [
      script_path, run_constants.RUN_NAME_FLAG, run_name,
      run_constants.DATASET_NAME_FLAG, dataset_name, run_constants.EPOCHS_FLAG,
      str(num_epochs), run_constants.BATCH_SIZE_FLAG,
      str(batch_size), run_constants.VAL_BATCH_SIZE_FLAG,
      str(val_batch_size), run_constants.ARCHITECTURE_FLAG,
      constants.NEUZZ_ONE_HIDDEN_LAYER_MODEL
  ]

  logs.log(f'Launching training with the following arguments: "{args_list}".')

  # Run process in gradientfuzz directory.
  gradientfuzz_trainer = new_process.ProcessRunner(sys.executable)
  return gradientfuzz_trainer.run_and_wait(
      args_list,
      cwd=GRADIENTFUZZ_SCRIPTS_DIR,
      timeout=run_constants.TRAIN_TIMEOUT), run_name
Example 29
def extend_fvm(fuchsia_resources_dir, orig_drive_path, drive_path):
    """The FVM is minimally sized to begin with; make an extended copy
    of it to make room for ephemeral packages etc."""
    fvm_tool_path = os.path.join(fuchsia_resources_dir, 'build', 'out',
                                 'default.zircon', 'tools', 'fvm')
    os.chmod(fvm_tool_path, 0o500)

    # Since the fvm tool modifies the image in place, make a copy so the build
    # isn't mutated (required for running undercoat on a cached build previously
    # affected by this legacy codepath)
    shutil.copy(orig_drive_path, drive_path)

    process = new_process.ProcessRunner(
        fvm_tool_path, [drive_path, 'extend', '--length', '3G'])
    result = process.run_and_wait()
    if result.return_code or result.timed_out:
        raise errors.FuchsiaSdkError('Failed to extend FVM: ' + result.output)

    os.chmod(drive_path, 0o644)
Example 30
    def create(self):
        """Configures a emulator process which can subsequently be `run`."""
        # Download emulator image.
        if not environment.get_value('ANDROID_EMULATOR_BUCKET_PATH'):
            logs.log_error('ANDROID_EMULATOR_BUCKET_PATH is not set.')
            return
        temp_directory = environment.get_value('BOT_TMPDIR')
        archive_src_path = environment.get_value(
            'ANDROID_EMULATOR_BUCKET_PATH')
        archive_dst_path = os.path.join(temp_directory, 'emulator_bundle.zip')
        storage.copy_file_from(archive_src_path, archive_dst_path)

        # Extract emulator image.
        self.emulator_path = os.path.join(temp_directory, 'emulator')
        archive.unpack(archive_dst_path, self.emulator_path)
        shell.remove_file(archive_dst_path)

        # Run emulator.
        script_path = os.path.join(self.emulator_path, 'run')
        self.process_runner = new_process.ProcessRunner(script_path)