def RunCrosperf(argv):
    parser = argparse.ArgumentParser()

    parser.add_argument('--noschedv2',
                        dest='noschedv2',
                        default=False,
                        action='store_true',
                        help=('Do not use the new scheduler. '
                              'Use the original scheduler instead.'))
    parser.add_argument(
        '-l',
        '--log_dir',
        dest='log_dir',
        default='',
        help='The log directory; by default it is under <crosperf_logs>/logs')

    SetupParserOptions(parser)
    options, args = parser.parse_known_args(argv)

    # Convert the relevant options that are passed in into a settings
    # object which will override settings in the experiment file.
    option_settings = ConvertOptionsToSettings(options)
    log_dir = os.path.abspath(os.path.expanduser(options.log_dir))
    logger.GetLogger(log_dir)

    if len(args) == 2:
        experiment_filename = args[1]
    else:
        parser.error('Invalid number of arguments.')

    working_directory = os.getcwd()
    if options.dry_run:
        test_flag.SetTestMode(True)

    experiment_file = ExperimentFile(open(experiment_filename, 'rb'),
                                     option_settings)
    if not experiment_file.GetGlobalSettings().GetField('name'):
        experiment_name = os.path.basename(experiment_filename)
        experiment_file.GetGlobalSettings().SetField('name', experiment_name)
    experiment = ExperimentFactory().GetExperiment(experiment_file,
                                                   working_directory, log_dir)

    json_report = experiment_file.GetGlobalSettings().GetField('json_report')

    signal.signal(signal.SIGTERM, CallExitHandler)
    atexit.register(Cleanup, experiment)

    if options.dry_run:
        runner = MockExperimentRunner(experiment, json_report)
    else:
        runner = ExperimentRunner(experiment,
                                  json_report,
                                  using_schedv2=(not options.noschedv2))

    runner.Run()
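
A minimal sketch of how RunCrosperf might be wired into an entry point, assuming the module is executed as a script; the Main wrapper name is hypothetical and not part of the original example:

import sys

def Main(argv):
    # argv[0] is the program name and argv[1] the experiment file,
    # matching the len(args) == 2 check inside RunCrosperf above.
    RunCrosperf(argv)

if __name__ == '__main__':
    Main(sys.argv)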
Example #2
    def testOverrideSetting(self):
        input_file = StringIO.StringIO(EXPERIMENT_FILE_2)
        experiment_file = ExperimentFile(input_file)
        global_settings = experiment_file.GetGlobalSettings()
        self.assertEqual(global_settings.GetField('remote'),
                         ['chromeos-alex3'])

        benchmark_settings = experiment_file.GetSettings('benchmark')
        self.assertEqual(len(benchmark_settings), 2)
        self.assertEqual(benchmark_settings[0].name, 'PageCycler')
        self.assertEqual(benchmark_settings[0].GetField('iterations'), 3)
        self.assertEqual(benchmark_settings[1].name, 'AndroidBench')
        self.assertEqual(benchmark_settings[1].GetField('iterations'), 2)
Example #3
  def make_fake_experiment(self):
    test_flag.SetTestMode(True)
    experiment_file = ExperimentFile(StringIO.StringIO(EXPERIMENT_FILE_1))
    experiment = ExperimentFactory().GetExperiment(experiment_file,
                                                   working_directory='',
                                                   log_dir='')
    return experiment
Example #4
def MakeMockExperiment(compiler='gcc'):
  """Mocks an experiment using the given compiler."""
  mock_experiment_file = StringIO("""
      board: x86-alex
      remote: 127.0.0.1
      perf_args: record -a -e cycles
      benchmark: PageCycler {
        iterations: 3
      }

      image1 {
        chromeos_image: %s
      }

      image2 {
        remote: 127.0.0.2
        chromeos_image: %s
      }
      """ % (FakePath('cros_image1.bin'), FakePath('cros_image2.bin')))
  efile = ExperimentFile(mock_experiment_file)
  experiment = ExperimentFactory().GetExperiment(efile,
                                                 FakePath('working_directory'),
                                                 FakePath('log_dir'))
  for label in experiment.labels:
    label.compiler = compiler
  return experiment
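
A usage sketch for MakeMockExperiment, assuming the surrounding unittest module; the test class name and the 'llvm' compiler value are illustrative, not taken from the original:

import unittest

class MakeMockExperimentTest(unittest.TestCase):

  def test_compiler_override(self):
    # Every label in the mocked experiment should carry the compiler
    # value that MakeMockExperiment stamped onto it.
    experiment = MakeMockExperiment(compiler='llvm')
    for label in experiment.labels:
      self.assertEqual(label.compiler, 'llvm')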
Example #5
    def _make_fake_experiment(self, expstr):
        """Create fake experiment from string.

        Note - we mock out BenchmarkRun in this step.
        """
        experiment_file = ExperimentFile(StringIO.StringIO(expstr))
        experiment = ExperimentFactory().GetExperiment(experiment_file,
                                                       working_directory='',
                                                       log_dir='')
        return experiment
Example #6
    def testLoadExperimentFile1(self):
        input_file = StringIO.StringIO(EXPERIMENT_FILE_1)
        experiment_file = ExperimentFile(input_file)
        global_settings = experiment_file.GetGlobalSettings()
        self.assertEqual(global_settings.GetField('remote'),
                         ['chromeos-alex3'])
        self.assertEqual(global_settings.GetField('perf_args'),
                         'record -a -e cycles')
        benchmark_settings = experiment_file.GetSettings('benchmark')
        self.assertEqual(len(benchmark_settings), 1)
        self.assertEqual(benchmark_settings[0].name, 'PageCycler')
        self.assertEqual(benchmark_settings[0].GetField('iterations'), 3)

        label_settings = experiment_file.GetSettings('label')
        self.assertEqual(len(label_settings), 2)
        self.assertEqual(label_settings[0].name, 'image1')
        self.assertEqual(label_settings[0].GetField('chromeos_image'),
                         '/usr/local/google/cros_image1.bin')
        self.assertEqual(label_settings[1].GetField('remote'),
                         ['chromeos-lumpy1'])
        self.assertEqual(label_settings[0].GetField('remote'),
                         ['chromeos-alex3'])
Example #7
class MeasurementsPlayer(object):

    def __init__(self, data_file, callback):
        self.data_file = data_file
        self.callback = callback
        self.experiment_file = ExperimentFile(self.data_file)

    def play(self):
        playback_start = int(time.time())
        measurement_start = None

        logger.info('Start replaying file')
        nb_messages = 0
        for measurement in self.experiment_file.open_for_reading():
            if not measurement_start:
                measurement_start = measurement['created_at']

            logger.info('Sending a message %s' % measurement['sensor_id'])
            nb_messages += 1
            playback_delta = int(time.time()) - playback_start
            measurement_delta = measurement['created_at'] - measurement_start

            if playback_delta < measurement_delta:
                # Sleep in order to wait for the correct time to play
                # back this measurement.
                logger.info('sleeping %d' % (measurement_delta - playback_delta))
                time.sleep((measurement_delta - playback_delta) / 1000000.0)
            """
                else:
                # Skip this measurement because we're behind
                continue
            """

            self.callback(json.dumps(measurement))

        logger.info('Done replaying file with %s messages', nb_messages)

        self.experiment_file.close()
Example #8
class MeasurementsPlayer(object):

    def __init__(self, data_file, callback):
        self.data_file = data_file
        self.callback = callback
        self.experiment_file = ExperimentFile(self.data_file)

    def play(self):
        playback_start = int(time.time())
        measurement_start = None

        logger.info('Start replaying file')
        nb_messages = 0
        for measurement in self.experiment_file.open_for_reading():
            if not measurement_start:
                measurement_start = measurement['created_at']

            logger.info('Sending a message %s' % measurement['sensor_id'])
            nb_messages += 1
            playback_delta = int(time.time()) - playback_start
            measurement_delta = (measurement['created_at'] - measurement_start) / 1000.0

            if playback_delta < measurement_delta:
                # Sleep in order to wait for the correct time to play
                # back this measurement.
                time_to_sleep = measurement_delta - playback_delta
                logger.info('sleeping %.2f seconds' % time_to_sleep)
                time.sleep(time_to_sleep)

            # Stamp the current time into the created_at field so the
            # measurement appears to have been produced right now.
            measurement['created_at'] = int(time.time() * 1000.0)
            self.callback(json.dumps(measurement))

        logger.info('Done replaying file with %s messages', nb_messages)

        self.experiment_file.close()
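
A minimal sketch of driving MeasurementsPlayer, assuming ExperimentFile exposes the open_for_reading() iterator used above and that each measurement dict carries 'created_at' and 'sensor_id' fields; the file name and print callback are illustrative:

def print_measurement(payload):
    # payload is the JSON string produced by json.dumps() in play().
    print(payload)

player = MeasurementsPlayer(data_file='measures.json',
                            callback=print_measurement)
player.play()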
Example #9
  def testLoadExperimentFile1(self):
    experiment_file = ExperimentFile(StringIO.StringIO(EXPERIMENT_FILE_1))
    exp = ExperimentFactory().GetExperiment(
        experiment_file, working_directory='', log_dir='')
    self.assertEqual(exp.remote, ['chromeos-alex3'])

    self.assertEqual(len(exp.benchmarks), 1)
    self.assertEqual(exp.benchmarks[0].name, 'PageCycler')
    self.assertEqual(exp.benchmarks[0].test_name, 'PageCycler')
    self.assertEqual(exp.benchmarks[0].iterations, 3)

    self.assertEqual(len(exp.labels), 2)
    self.assertEqual(exp.labels[0].chromeos_image,
                     '/usr/local/google/cros_image1.bin')
    self.assertEqual(exp.labels[0].board, 'x86-alex')
Example #10

    def test_get_experiment(self, mock_socket):
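        # Note: the mock_socket argument implies a @mock.patch decorator on
        # this test (trimmed from this excerpt); mock_socket.return_value is
        # configured below to stand in for a hostname lookup.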

        test_flag.SetTestMode(False)
        self.append_benchmark_call_args = []

        def FakeAppendBenchmarkSet(bench_list, set_list, args, iters, rm_ch,
                                   perf_args, suite, show_all):
            'Helper function for test_get_experiment'
            arg_list = [
                bench_list, set_list, args, iters, rm_ch, perf_args, suite,
                show_all
            ]
            self.append_benchmark_call_args.append(arg_list)

        def FakeGetDefaultRemotes(board):
            if not board:
                return []
            return [
                'fake_chromeos_machine1.cros', 'fake_chromeos_machine2.cros'
            ]

        def FakeGetXbuddyPath(build, autotest_dir, board, chroot, log_level):
            autotest_path = autotest_dir
            if not autotest_path:
                autotest_path = 'fake_autotest_path'
            if not build or not board or not chroot or not log_level:
                return '', autotest_path
            return 'fake_image_path', autotest_path

        ef = ExperimentFactory()
        ef.AppendBenchmarkSet = FakeAppendBenchmarkSet
        ef.GetDefaultRemotes = FakeGetDefaultRemotes

        label_settings = settings_factory.LabelSettings('image_label')
        benchmark_settings = settings_factory.BenchmarkSettings('bench_test')
        global_settings = settings_factory.GlobalSettings('test_name')

        label_settings.GetXbuddyPath = FakeGetXbuddyPath

        mock_experiment_file = ExperimentFile(StringIO.StringIO(''))
        mock_experiment_file.all_settings = []

        test_flag.SetTestMode(True)
        # Basic test.
        global_settings.SetField('name', 'unittest_test')
        global_settings.SetField('board', 'lumpy')
        global_settings.SetField('remote', '123.45.67.89 123.45.76.80')
        benchmark_settings.SetField('test_name', 'kraken')
        benchmark_settings.SetField('suite', 'telemetry_Crosperf')
        benchmark_settings.SetField('iterations', 1)
        label_settings.SetField(
            'chromeos_image',
            'chromeos/src/build/images/lumpy/latest/chromiumos_test_image.bin')
        label_settings.SetField('chrome_src',
                                '/usr/local/google/home/chrome-top')
        label_settings.SetField('autotest_path', '/tmp/autotest')

        mock_experiment_file.global_settings = global_settings
        mock_experiment_file.all_settings.append(label_settings)
        mock_experiment_file.all_settings.append(benchmark_settings)
        mock_experiment_file.all_settings.append(global_settings)

        mock_socket.return_value = ''

        # First test. General test.
        exp = ef.GetExperiment(mock_experiment_file, '', '')
        self.assertEqual(exp.remote, ['123.45.67.89', '123.45.76.80'])
        self.assertEqual(exp.cache_conditions, [0, 2, 1])
        self.assertEqual(exp.log_level, 'average')

        self.assertEqual(len(exp.benchmarks), 1)
        self.assertEqual(exp.benchmarks[0].name, 'kraken')
        self.assertEqual(exp.benchmarks[0].test_name, 'kraken')
        self.assertEqual(exp.benchmarks[0].iterations, 1)
        self.assertEqual(exp.benchmarks[0].suite, 'telemetry_Crosperf')
        self.assertFalse(exp.benchmarks[0].show_all_results)

        self.assertEqual(len(exp.labels), 1)
        self.assertEqual(
            exp.labels[0].chromeos_image,
            'chromeos/src/build/images/lumpy/latest/'
            'chromiumos_test_image.bin')
        self.assertEqual(exp.labels[0].autotest_path, '/tmp/autotest')
        self.assertEqual(exp.labels[0].board, 'lumpy')

        # Second test: Remotes listed in labels.
        test_flag.SetTestMode(True)
        label_settings.SetField('remote', 'chromeos1.cros chromeos2.cros')
        exp = ef.GetExperiment(mock_experiment_file, '', '')
        self.assertEqual(exp.remote, [
            'chromeos1.cros', 'chromeos2.cros', '123.45.67.89', '123.45.76.80'
        ])

        # Third test: Automatic fixing of a bad logging_level param.
        global_settings.SetField('logging_level', 'really loud!')
        exp = ef.GetExperiment(mock_experiment_file, '', '')
        self.assertEqual(exp.log_level, 'verbose')

        # Fourth test: Setting cache conditions; only 1 remote with "same_machine"
        global_settings.SetField('rerun_if_failed', 'true')
        global_settings.SetField('rerun', 'true')
        global_settings.SetField('same_machine', 'true')
        global_settings.SetField('same_specs', 'true')

        self.assertRaises(Exception, ef.GetExperiment, mock_experiment_file,
                          '', '')
        label_settings.SetField('remote', '')
        global_settings.SetField('remote', '123.45.67.89')
        exp = ef.GetExperiment(mock_experiment_file, '', '')
        self.assertEqual(exp.cache_conditions, [0, 2, 3, 4, 6, 1])

        # Fifth test: Adding a second label; calling GetXbuddyPath; omitting
        # all remotes (calls GetDefaultRemotes).
        mock_socket.return_value = 'test.corp.google.com'
        global_settings.SetField('remote', '')
        global_settings.SetField('same_machine', 'false')

        label_settings_2 = settings_factory.LabelSettings(
            'official_image_label')
        label_settings_2.SetField('chromeos_root', 'chromeos')
        label_settings_2.SetField('build', 'official-dev')
        label_settings_2.SetField('autotest_path', '')
        label_settings_2.GetXbuddyPath = FakeGetXbuddyPath

        mock_experiment_file.all_settings.append(label_settings_2)
        exp = ef.GetExperiment(mock_experiment_file, '', '')
        self.assertEqual(len(exp.labels), 2)
        self.assertEqual(exp.labels[1].chromeos_image, 'fake_image_path')
        self.assertEqual(exp.labels[1].autotest_path, 'fake_autotest_path')
        self.assertEqual(
            exp.remote,
            ['fake_chromeos_machine1.cros', 'fake_chromeos_machine2.cros'])
Example #11
    def __init__(self, data_file, callback):
        self.data_file = data_file
        self.callback = callback
        self.experiment_file = ExperimentFile(self.data_file)
Example #12
    def testCanonicalize(self):
        input_file = StringIO.StringIO(EXPERIMENT_FILE_1)
        experiment_file = ExperimentFile(input_file)
        res = experiment_file.Canonicalize()
        self.assertEqual(res, OUTPUT_FILE)