Example #1
def warm_up():
  """Loads and warms up the tensor features model."""
  global DESCRIPTION_HANDLER, TENSOR_MODEL, TENSOR_CONFIG, WARMED_UP
  if WARMED_UP:
    return
  WARMED_UP = True

  # Use the default choices for the description handler and tensor model. The
  # Colab interface will not allow users to change these.

  DESCRIPTION_HANDLER = description_handler_factory.create_handler(
      DEFAULT_SETTINGS.description_handler_name)
  if (not DEFAULT_SETTINGS.tensor_model.config_path or
      not DEFAULT_SETTINGS.tensor_model.checkpoint_path):
    return
  try:
    TENSOR_CONFIG = tensor_features_model.load_config(
        DEFAULT_SETTINGS.tensor_model.config_path)
    TENSOR_MODEL = tensor_features_model.get_model(TENSOR_CONFIG)
    tensor_checkpoint = tensor_features_model.create_checkpoint(TENSOR_MODEL)
    tensor_checkpoint.restore(
        DEFAULT_SETTINGS.tensor_model.checkpoint_path).expect_partial()

    # Warm up. Running the model for the first time takes an extra ~10 seconds.
    value_search.operation_multipliers_from_tensor_model(
        all_benchmarks.find_benchmark_with_name('simple_cast'),
        TENSOR_MODEL, TENSOR_CONFIG, DEFAULT_SETTINGS)
  except Exception:  # pylint: disable=broad-except
    # No matter what goes wrong with loading the tensor features model, we
    # should fall back to value search without the model.
    print('Could not load the tensor features model. ' + CONTACT_MESSAGE)
    TENSOR_MODEL = None
    TENSOR_CONFIG = None
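The function above relies on module-level state (DESCRIPTION_HANDLER, TENSOR_MODEL, TENSOR_CONFIG, WARMED_UP) plus DEFAULT_SETTINGS and CONTACT_MESSAGE defined elsewhere in the module. Below is a minimal sketch of that scaffolding, assuming TF-Coder's package layout; the import paths and initial values are assumptions inferred from the names used in warm_up(), not part of the snippet.

# Sketch only: import paths and initial values are assumptions inferred from
# the names used in warm_up().
from tf_coder.benchmarks import all_benchmarks
from tf_coder.models import tensor_features_model
from tf_coder.natural_language import description_handler_factory
from tf_coder.value_search import value_search
from tf_coder.value_search import value_search_settings as settings_module

DEFAULT_SETTINGS = settings_module.from_dict({})  # Assumed: defaults, no overrides.
CONTACT_MESSAGE = 'Please report this issue.'  # Placeholder message.

DESCRIPTION_HANDLER = None
TENSOR_MODEL = None
TENSOR_CONFIG = None
WARMED_UP = False

# warm_up() can then be called once up front so the first real query is fast.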
Example #2
    def test_value_search_can_load_data(self, mock_stdout):
        benchmark = all_benchmarks.find_benchmark_with_name('simple_cast')
        handler = description_handler_factory.create_handler('naive_bayes')

        settings = settings_module.from_dict({
            'timeout': 5,
            'printing.prioritized_operations': True,
            'printing.deprioritized_operations': True,
        })

        tensor_config = tensor_features_model.load_config(
            settings.tensor_model.config_path)
        tensor_model = tensor_features_model.get_model(tensor_config)
        tensor_checkpoint = tensor_features_model.create_checkpoint(
            tensor_model)
        tensor_checkpoint.restore(
            settings.tensor_model.checkpoint_path).expect_partial()

        results = value_search.run_value_search(benchmark=benchmark,
                                                description_handler=handler,
                                                settings=settings,
                                                tensor_model=tensor_model,
                                                tensor_config=tensor_config)

        self.assertLen(results.solutions, 1)
        self.assertIn('BOW handler prioritized tf.cast(x, dtype)',
                      mock_stdout.getvalue())
        self.assertIn('Tensor features model prioritized tf.cast(x, dtype)',
                      mock_stdout.getvalue())
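The test above reads mock_stdout.getvalue() and uses assertLen, so it assumes stdout is patched with an io.StringIO and an absltest-style test case. A minimal sketch of that scaffolding follows; the class name and patch target are assumptions.

# Sketch only: imports, class name, and the stdout patch are assumptions
# consistent with how mock_stdout is used above.
import io
from unittest import mock

from absl.testing import absltest


class ValueSearchTest(absltest.TestCase):

    @mock.patch('sys.stdout', new_callable=io.StringIO)
    def test_value_search_can_load_data(self, mock_stdout):
        ...  # Body as shown above.


if __name__ == '__main__':
    absltest.main()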
def run_on_all_benchmarks():
  """Runs value search on all benchmarks, printing results to stdout."""

  benchmark_count = 0
  benchmark_success = 0
  unsolved_benchmarks = []
  solution_times = []  # Only including successful tasks.

  settings = settings_module.from_list(FLAGS.settings)

  description_handler = description_handler_factory.create_handler(
      settings.description_handler_name)
  print('Description handler: {!r}\n'.format(description_handler))

  results_json = {
      'benchmark_name': FLAGS.benchmark_name,
      'settings': settings.as_dict(),
      'notes': FLAGS.notes,
      'results': [],
  }

  if (settings.tensor_model.config_path and
      settings.tensor_model.checkpoint_path):
    tensor_config = tensor_features_model.load_config(
        settings.tensor_model.config_path)
    tensor_model = tensor_features_model.get_model(tensor_config)
    checkpoint = tensor_features_model.create_checkpoint(tensor_model)
    checkpoint.restore(settings.tensor_model.checkpoint_path).expect_partial()

    # Warm up. Running the model for the first time takes an extra ~10 seconds.
    print('Warming up the tensor features model...')
    value_search.operation_multipliers_from_tensor_model(
        all_benchmarks.find_benchmark_with_name('simple_cast'),
        tensor_model, tensor_config, settings)
    print('Finished warming up.')
  else:
    tensor_config = None
    tensor_model = None

  print('=' * 80)
  modules = [google_benchmarks, stackoverflow_benchmarks]
  for benchmark in all_benchmarks.get_chosen_benchmarks(
      FLAGS.benchmark_name, modules=modules):
    gc.collect()

    print('Performing value search for benchmark {}.\n'
          .format(benchmark.name))
    benchmark_count += 1

    result = value_search.run_value_search(
        benchmark=benchmark,
        settings=settings,
        description_handler=description_handler,
        tensor_model=tensor_model,
        tensor_config=tensor_config)

    if settings.printing.statistics:
      print('\nOperation statistics:\n{}'.format(
          result.statistics.statistics_as_string(
              num_unique_values=len(result.value_set),
              elapsed_time=result.total_time,
              sort_by_time=settings.printing.statistics_sort_by_time)))

    solutions = result.solutions
    if solutions:
      first_solution = solutions[0]
      print('\nBest solution of weight {} found in {:.2f} sec:\n{}'.format(
          first_solution.weight, first_solution.time,
          first_solution.expression))
      benchmark_success += 1
      solution_times.append(first_solution.time)
    else:
      unsolved_benchmarks.append(benchmark)
    print('=' * 80)
    sys.stdout.flush()

    results_json['results'].append({
        'name': benchmark.name,
        'solved': bool(solutions),
        'solution': solutions[0].expression if solutions else None,
        'solution_weight': solutions[0].weight if solutions else None,
        'time': solutions[0].time if solutions else None,
    })

  solve_time_total = sum(solution_times)
  solve_time_mean = np.mean(solution_times)
  solve_time_geometric_mean = mstats.gmean(solution_times)

  results_json['num_benchmarks'] = benchmark_count
  results_json['num_solved'] = benchmark_success
  results_json['solve_time_total'] = solve_time_total
  results_json['solve_time_mean'] = solve_time_mean
  results_json['solve_time_geometric_mean'] = solve_time_geometric_mean

  print('Solved {} out of {} benchmarks in {:.2f} sec.'.format(
      benchmark_success, benchmark_count, solve_time_total))
  print('\n'
        'Arithmetic mean of solve times: {:.2f} sec\n'
        'Geometric mean of solve times: {:.2f} sec\n'.format(
            solve_time_mean, solve_time_geometric_mean))

  print('Unsolved benchmarks:')
  for unsolved in unsolved_benchmarks:
    print('Name: {}, target program: {}'.format(
        unsolved.name, unsolved.target_program))
  print()

  if FLAGS.json_output and FLAGS.benchmark_name == 'ALL':
    with open(FLAGS.json_output, 'w') as json_file:
      json.dump(results_json, json_file,
                indent=4, sort_keys=True, separators=(',', ': '))
      json_file.write('\n')
    print('Wrote JSON results to {}.'.format(FLAGS.json_output))
  else:
    print('Did not write JSON results file.')
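run_on_all_benchmarks() reads FLAGS.settings, FLAGS.benchmark_name, FLAGS.notes, and FLAGS.json_output, so it is intended to run as an absl app. A minimal sketch of the flag definitions and entry point follows; the defaults and help strings are assumptions, and only the flag names come from the function above.

# Sketch only: flag defaults and help text are assumptions.
from absl import app
from absl import flags

FLAGS = flags.FLAGS

flags.DEFINE_string('benchmark_name', 'ALL',
                    'Name of a single benchmark to run, or "ALL".')
flags.DEFINE_list('settings', [],
                  'Settings overrides, each of the form "name=value".')
flags.DEFINE_string('notes', '', 'Free-form notes recorded in the JSON output.')
flags.DEFINE_string('json_output', '', 'Path for the JSON results file.')


def main(unused_argv):
  run_on_all_benchmarks()


if __name__ == '__main__':
  app.run(main)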