def setUp(self):
  self._output = StringIO.StringIO()
  self._page_set = _MakePageSet()
  self._benchmark_metadata = benchmark.BenchmarkMetadata(
      'benchmark_name', 'benchmark_description')
  self._formatter = chart_json_output_formatter.ChartJsonOutputFormatter(
      self._output, self._benchmark_metadata)
def GenerateProfiles(profile_creator_class, profile_creator_name, options):
  """Generate a profile"""
  expectations = test_expectations.TestExpectations()
  test = profile_creator_class()

  temp_output_directory = tempfile.mkdtemp()
  options.output_profile_path = temp_output_directory

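  # Run the profile creator against its page set; output lands in the
  # temporary directory assigned to output_profile_path above.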
  results = results_options.CreateResults(
      benchmark.BenchmarkMetadata(test.__class__.__name__), options)
  page_runner.Run(test, test.page_set, expectations, options, results)

  if results.failures:
    logging.warning('Some pages failed.')
    logging.warning('Failed pages:\n%s',
                    '\n'.join(map(str, results.pages_that_failed)))
    return 1

  # Everything is a-ok, move results to final destination.
  generated_profiles_dir = os.path.abspath(options.output_dir)
  if not os.path.exists(generated_profiles_dir):
    os.makedirs(generated_profiles_dir)
  out_path = os.path.join(generated_profiles_dir, profile_creator_name)
  if os.path.exists(out_path):
    shutil.rmtree(out_path)

  shutil.copytree(temp_output_directory, out_path, ignore=_IsPseudoFile)
  shutil.rmtree(temp_output_directory)
  sys.stderr.write("SUCCESS: Generated profile copied to: '%s'.\n" % out_path)

  return 0
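
# A minimal usage sketch for GenerateProfiles. ProfileCreatorFoo and
# _ParseCommandLineOptions are hypothetical stand-ins; real callers build
# `options` through Telemetry's command-line machinery:
#
#   options = _ParseCommandLineOptions()  # hypothetical helper
#   sys.exit(GenerateProfiles(ProfileCreatorFoo, 'foo_profile', options))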
def setUp(self):
  self._output = StringIO.StringIO()
  self._story_set = _MakeStorySet()
  self._results = page_test_results.PageTestResults(
      benchmark_metadata=benchmark.BenchmarkMetadata('benchmark'))
  self._formatter = None
  self.MakeFormatter()
    def testPopulateHistogramSet_UsesHistogramSetData(self):
        original_diagnostic = histogram_module.GenericSet(['benchmark_name'])

        results = page_test_results.PageTestResults()
        results.telemetry_info.benchmark_start_epoch = 1501773200
        results.WillRunPage(self.pages[0])
        results.histograms.AddHistogram(
            histogram_module.Histogram('foo', 'count'))
        results.histograms.AddSharedDiagnostic(reserved_infos.BENCHMARKS.name,
                                               original_diagnostic)
        results.DidRunPage(self.pages[0])
        results.CleanUp()

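        # PopulateHistogramSet merges the benchmark metadata into the
        # histogram set built up above, so both the histogram and the shared
        # diagnostic show up in the exported dicts.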
        benchmark_metadata = benchmark.BenchmarkMetadata(
            'benchmark_name', 'benchmark_description')
        results.PopulateHistogramSet(benchmark_metadata)

        histogram_dicts = results.AsHistogramDicts()
        self.assertEquals(2, len(histogram_dicts))

        hs = histogram_set.HistogramSet()
        hs.ImportDicts(histogram_dicts)

        diag = hs.LookupDiagnostic(original_diagnostic.guid)
        self.assertIsInstance(diag, histogram_module.GenericSet)
def setUp(self):
  self._output = StringIO.StringIO()
  self._benchmark_metadata = benchmark.BenchmarkMetadata(
      'benchmark_name', 'benchmark_description')
  self._formatter = (
      histogram_set_json_output_formatter.HistogramSetJsonOutputFormatter(
          self._output, self._benchmark_metadata, False))
    def CreateResults(self):
        if self._benchmark is not None:
            benchmark_metadata = self._benchmark.GetMetadata()
        else:
            benchmark_metadata = benchmark.BenchmarkMetadata('record_wpr')

        return results_options.CreateResults(benchmark_metadata, self._options)
  def testIntegrationCreateJsonTestResultsWithDisabledBenchmark(self):
    benchmark_metadata = benchmark.BenchmarkMetadata('test_benchmark')
    options = options_for_unittests.GetCopy()
    options.output_formats = ['json-test-results']
    options.upload_results = False
    tempfile_dir = 'unittest_results'
    options.output_dir = tempfile_dir
    options.suppress_gtest_report = False
    options.results_label = None
    parser = options.CreateParser()
    results_options.ProcessCommandLineArgs(parser, options)
    results = results_options.CreateResults(
        benchmark_metadata, options, benchmark_enabled=False)
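    # With benchmark_enabled=False the formatter should still write a valid,
    # but empty, JSON test-results file; the asserts below check exactly that.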
    results.PrintSummary()
    results.CloseOutputFormatters()

    tempfile_name = os.path.join(tempfile_dir, 'test-results.json')
    with open(tempfile_name) as f:
      json_test_results = json.load(f)
    shutil.rmtree(tempfile_dir)

    self.assertEquals(json_test_results['interrupted'], False)
    self.assertEquals(json_test_results['num_failures_by_type'], {})
    self.assertEquals(json_test_results['path_delimiter'], '/')
    self.assertAlmostEqual(json_test_results['seconds_since_epoch'],
                           time.time(), 1)
    self.assertEquals(json_test_results['tests'], {})
    self.assertEquals(json_test_results['version'], 3)
    def Run(self, options):
        # Installing extensions requires that the profile directory exist before
        # the browser is launched.
        if not options.browser_options.profile_dir:
            options.browser_options.profile_dir = tempfile.mkdtemp()
        options.browser_options.disable_default_apps = False

        self._PrepareExtensionInstallFiles(options.browser_options.profile_dir)

        expectations = test_expectations.TestExpectations()
        results = results_options.CreateResults(
            benchmark.BenchmarkMetadata(profile_creator.__class__.__name__),
            options)
        extension_page_test = _ExtensionPageTest()
        extension_page_test._expected_extension_count = len(
            self._extensions_to_install)
        user_story_runner.Run(extension_page_test,
                              extension_page_test._page_set, expectations,
                              options, results)

        self._CleanupExtensionInstallFiles()

        # Check that files on this list exist and have content.
        expected_files = [os.path.join('Default', 'Network Action Predictor')]
        for filename in expected_files:
            filename = os.path.join(options.output_profile_path, filename)
            if not os.path.getsize(filename) > 0:
                raise Exception("Profile not complete: %s is zero length." %
                                filename)

        if results.failures:
            logging.warning('Some pages failed.')
            logging.warning('Failed pages:\n%s',
                            '\n'.join(map(str, results.pages_that_failed)))
            raise Exception('ExtensionsProfileCreator failed.')
    def testAsChartDictNoDescription(self):
        page_specific_values = []
        summary_values = []

        d = chart_json_output_formatter._ResultsAsChartDict(  # pylint: disable=W0212
            benchmark.BenchmarkMetadata('benchmark_name', ''),
            page_specific_values, summary_values)

        self.assertEquals('', d['benchmark_description'])
def setUp(self):
  self._output = StringIO.StringIO()
  self._story_set = _MakeStorySet()
  self._results = page_test_results.PageTestResults(
      benchmark_metadata=benchmark.BenchmarkMetadata('benchmark'))
  self._results.telemetry_info.benchmark_name = 'benchmark'
  self._results.telemetry_info.benchmark_start_epoch = 15e8
  self._results.telemetry_info.benchmark_descriptions = 'foo'
  self._formatter = None
  self.MakeFormatter()
    def testIntegrationCreateJsonTestResults(self, time_module):
        time_module.time.side_effect = [1.0, 6.0123]
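        # time.time() is mocked: the two values stamp the story's start and
        # end, so the recorded duration is 6.0123 - 1.0 = 5.0123 seconds
        # (asserted at the bottom of this test).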

        benchmark_metadata = benchmark.BenchmarkMetadata('test_benchmark')
        options = options_for_unittests.GetCopy()
        options.output_formats = ['json-test-results']
        options.upload_results = False
        tempfile_dir = tempfile.mkdtemp(prefix='unittest_results')
        try:
            options.output_dir = tempfile_dir
            options.suppress_gtest_report = False
            options.results_label = None
            results_options.ProcessCommandLineArgs(options)
            results = results_options.CreateResults(benchmark_metadata,
                                                    options)

            story_set = story.StorySet(base_dir=os.path.dirname(__file__))
            test_page = page_module.Page('http://www.foo.com/',
                                         story_set,
                                         story_set.base_dir,
                                         name='Foo')
            results.WillRunPage(test_page)
            v0 = scalar.ScalarValue(
                results.current_page,
                'foo',
                'seconds',
                3,
                improvement_direction=improvement_direction.DOWN)
            results.AddValue(v0)
            results.DidRunPage(test_page)
            results.PrintSummary()
            results.CloseOutputFormatters()

            tempfile_name = os.path.join(tempfile_dir, 'test-results.json')
            with open(tempfile_name) as f:
                json_test_results = json.load(f)
        finally:
            shutil.rmtree(tempfile_dir)

        self.assertEquals(json_test_results['interrupted'], False)
        self.assertEquals(json_test_results['num_failures_by_type'],
                          {'PASS': 1})
        self.assertEquals(json_test_results['path_delimiter'], '/')
        self.assertAlmostEqual(json_test_results['seconds_since_epoch'],
                               time.time(),
                               delta=1)
        testBenchmarkFoo = json_test_results['tests']['test_benchmark']['Foo']
        self.assertEquals(testBenchmarkFoo['actual'], 'PASS')
        self.assertEquals(testBenchmarkFoo['expected'], 'PASS')
        self.assertFalse(testBenchmarkFoo['is_unexpected'])
        self.assertEquals(testBenchmarkFoo['time'], 5.0123)
        self.assertEquals(testBenchmarkFoo['times'][0], 5.0123)
        self.assertEquals(json_test_results['version'], 3)
    def Run(self, options):
        expectations = test_expectations.TestExpectations()
        results = results_options.CreateResults(
            benchmark.BenchmarkMetadata(profile_creator.__class__.__name__),
            options)
        user_story_runner.Run(self._page_test, self._page_test._page_set,
                              expectations, options, results)

        if results.failures:
            logging.warning('Some pages failed to load.')
            logging.warning('Failed pages:\n%s',
                            '\n'.join(map(str, results.pages_that_failed)))
            raise Exception('SmallProfileCreator failed.')
def testPrintSummaryDisabledResults(self):
  output_stream = stream.TestOutputStream()
  output_formatters = []
  benchmark_metadata = benchmark.BenchmarkMetadata(
      'benchmark_name', 'benchmark_description')
  output_formatters.append(
      chart_json_output_formatter.ChartJsonOutputFormatter(
          output_stream, benchmark_metadata))
  output_formatters.append(json_output_formatter.JsonOutputFormatter(
      output_stream, benchmark_metadata))
  results = page_test_results.PageTestResults(
      output_formatters=output_formatters, benchmark_enabled=False)
  results.PrintSummary()
  self.assertEquals(
      output_stream.output_data,
      '{\n  "enabled": false,\n  "benchmark_name": "benchmark_name"\n}\n')
  def testPopulateHistogramSet_UsesScalarValueData(self):
    results = page_test_results.PageTestResults()
    results.telemetry_info.benchmark_start_epoch = 1501773200
    results.WillRunPage(self.pages[0])
    results.AddValue(scalar.ScalarValue(
        self.pages[0], 'a', 'seconds', 3,
        improvement_direction=improvement_direction.UP))
    results.DidRunPage(self.pages[0])
    results.CleanUp()

    benchmark_metadata = benchmark.BenchmarkMetadata(
        'benchmark_name', 'benchmark_description')
    results.PopulateHistogramSet(benchmark_metadata)

    self.assertEquals(1, len(results.histograms))
    self.assertEquals('a', list(results.histograms)[0].name)
def testImportHistogramDicts_DelayedImport(self):
  hs = histogram_set.HistogramSet()
  hs.AddHistogram(histogram_module.Histogram('foo', 'count'))
  hs.AddSharedDiagnosticToAllHistograms(
      'bar', generic_set.GenericSet(['baz']))
  histogram_dicts = hs.AsDicts()
  benchmark_metadata = benchmark.BenchmarkMetadata(
      'benchmark_name', 'benchmark_description')
  results = self.getPageTestResults(
      benchmark_metadata=benchmark_metadata, start=1501773200)
  results.WillRunPage(self.pages[0])
  results.ImportHistogramDicts(histogram_dicts, import_immediately=False)
  results.DidRunPage(self.pages[0])
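  # With import_immediately=False the dicts are merged only when
  # PopulateHistogramSet() runs, which the two asserts below verify.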
  self.assertEqual(len(results.AsHistogramDicts()), 0)
  results.PopulateHistogramSet()
  self.assertEqual(results.AsHistogramDicts(), histogram_dicts)
def testPrintSummaryDisabledResults(self):
  output_stream = StringIO.StringIO()
  output_formatters = []
  benchmark_metadata = benchmark.BenchmarkMetadata(
      'benchmark_name', 'benchmark_description')
  output_formatters.append(
      chart_json_output_formatter.ChartJsonOutputFormatter(
          output_stream, benchmark_metadata))
  output_formatters.append(html_output_formatter.HtmlOutputFormatter(
      output_stream, benchmark_metadata, True))
  results = self.getPageTestResults(
      output_formatters=output_formatters, benchmark_enabled=False)
  results.PrintSummary()
  self.assertEquals(
      output_stream.getvalue(),
      '{\n  "enabled": false,\n  "benchmark_name": "benchmark_name"\n}\n')
  def testAddSharedDiagnostic(self):
    results = page_test_results.PageTestResults()
    results.WillRunPage(self.pages[0])
    results.DidRunPage(self.pages[0])
    results.CleanUp()
    results.histograms.AddSharedDiagnostic(
        histogram_module.TelemetryInfo.NAME, histogram_module.TelemetryInfo())

    benchmark_metadata = benchmark.BenchmarkMetadata(
        'benchmark_name', 'benchmark_description')
    results.PopulateHistogramSet(benchmark_metadata)

    histogram_dicts = results.AsHistogramDicts()
    self.assertEquals(1, len(histogram_dicts))

    diagnostic = histogram_module.Diagnostic.FromDict(histogram_dicts[0])
    self.assertIsInstance(diagnostic, histogram_module.TelemetryInfo)
    def testAddSharedDiagnostic(self):
        results = page_test_results.PageTestResults()
        results.telemetry_info.benchmark_start_epoch = 1501773200
        results.WillRunPage(self.pages[0])
        results.DidRunPage(self.pages[0])
        results.CleanUp()
        results.AddSharedDiagnostic(reserved_infos.BENCHMARKS.name,
                                    generic_set.GenericSet(['benchmark_name']))

        benchmark_metadata = benchmark.BenchmarkMetadata(
            'benchmark_name', 'benchmark_description')
        results.PopulateHistogramSet(benchmark_metadata)

        histogram_dicts = results.AsHistogramDicts()
        self.assertEquals(1, len(histogram_dicts))

        diag = diagnostic.Diagnostic.FromDict(histogram_dicts[0])
        self.assertIsInstance(diag, generic_set.GenericSet)
  def testPopulateHistogramSet_UsesScalarValueData(self):
    results = page_test_results.PageTestResults()
    results.WillRunPage(self.pages[0])
    results.AddValue(scalar.ScalarValue(
        self.pages[0], 'a', 'seconds', 3,
        improvement_direction=improvement_direction.UP))
    results.DidRunPage(self.pages[0])
    results.CleanUp()

    benchmark_metadata = benchmark.BenchmarkMetadata(
        'benchmark_name', 'benchmark_description')
    results.PopulateHistogramSet(benchmark_metadata)

    histogram_dicts = results.AsHistogramDicts()
    self.assertEquals(1, len(histogram_dicts))

    h = histogram_module.Histogram.FromDict(histogram_dicts[0])
    self.assertEquals('a', h.name)
  def testPopulateHistogramSet_UsesScalarValueData(self):
    benchmark_metadata = benchmark.BenchmarkMetadata(
        'benchmark_name', 'benchmark_description')
    results = self.getPageTestResults(
        benchmark_metadata=benchmark_metadata, start=1501773200)
    results.WillRunPage(self.pages[0])
    results.AddValue(scalar.ScalarValue(
        self.pages[0], 'a', 'seconds', 3,
        improvement_direction=improvement_direction.UP))
    results.DidRunPage(self.pages[0])
    results.CleanUp()

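    # This variant of PopulateHistogramSet() takes no metadata argument; the
    # benchmark metadata was already passed to getPageTestResults above.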
    results.PopulateHistogramSet()

    hs = histogram_set.HistogramSet()
    hs.ImportDicts(results.AsHistogramDicts())
    self.assertEquals(1, len(hs))
    self.assertEquals('a', hs.GetFirstHistogram().name)
  def testAddSharedDiagnosticToAllHistograms(self):
    benchmark_metadata = benchmark.BenchmarkMetadata(
        'benchmark_name', 'benchmark_description')
    results = self.getPageTestResults(
        benchmark_metadata=benchmark_metadata, start=1501773200)
    results.WillRunPage(self.pages[0])
    results.DidRunPage(self.pages[0])
    results.CleanUp()
    results.AddSharedDiagnosticToAllHistograms(
        reserved_infos.BENCHMARKS.name,
        generic_set.GenericSet(['benchmark_name']))

    results.PopulateHistogramSet()

    histogram_dicts = results.AsHistogramDicts()
    self.assertEquals(1, len(histogram_dicts))

    diag = diagnostic.Diagnostic.FromDict(histogram_dicts[0])
    self.assertIsInstance(diag, generic_set.GenericSet)
  def testPopulateHistogramSet_UsesHistogramSetData(self):
    original_diagnostic = histogram_module.TelemetryInfo()

    results = page_test_results.PageTestResults()
    results.WillRunPage(self.pages[0])
    results.histograms.AddHistogram(histogram_module.Histogram('foo', 'count'))
    results.histograms.AddSharedDiagnostic(
        histogram_module.TelemetryInfo.NAME, original_diagnostic)
    results.DidRunPage(self.pages[0])
    results.CleanUp()

    benchmark_metadata = benchmark.BenchmarkMetadata(
        'benchmark_name', 'benchmark_description')
    results.PopulateHistogramSet(benchmark_metadata)

    histogram_dicts = results.AsHistogramDicts()
    self.assertEquals(2, len(histogram_dicts))

    hs = histogram_set.HistogramSet()
    hs.ImportDicts(histogram_dicts)

    diagnostic = hs.LookupDiagnostic(original_diagnostic.guid)
    self.assertIsInstance(diagnostic, histogram_module.TelemetryInfo)
  def testPopulateHistogramSet_UsesHistogramSetData(self):
    original_diagnostic = generic_set.GenericSet(['benchmark_name'])

    benchmark_metadata = benchmark.BenchmarkMetadata(
        'benchmark_name', 'benchmark_description')
    results = self.getPageTestResults(
        benchmark_metadata=benchmark_metadata)
    results.WillRunPage(self.pages[0])
    results.AddHistogram(histogram_module.Histogram('foo', 'count'))
    results.AddSharedDiagnosticToAllHistograms(
        reserved_infos.BENCHMARKS.name, original_diagnostic)
    results.DidRunPage(self.pages[0])
    results.CleanUp()

    results.PopulateHistogramSet()

    histogram_dicts = results.AsHistogramDicts()
    self.assertEquals(8, len(histogram_dicts))

    hs = histogram_set.HistogramSet()
    hs.ImportDicts(histogram_dicts)

    diag = hs.LookupDiagnostic(original_diagnostic.guid)
    self.assertIsInstance(diag, generic_set.GenericSet)
def setUp(self):
  self._output = StringIO.StringIO()
  self._page_set = _MakePageSet()
  self._formatter = json_output_formatter.JsonOutputFormatter(
      self._output, benchmark.BenchmarkMetadata('benchmark_name'))
def Format(self):
  self._results.telemetry_info.benchmark_start_epoch = 15e8
  self._results.PopulateHistogramSet(
      benchmark.BenchmarkMetadata('benchmark'))
  self._formatter.Format(self._results)
  return self._output.getvalue()
    def testAsChartDictNoDescription(self):
        d = chart_json_output_formatter.ResultsAsChartDict(
            benchmark.BenchmarkMetadata('benchmark_name', ''),
            _MakePageTestResults())

        self.assertEquals('', d['benchmark_metadata']['description'])
def setUp(self):
  self._output = StringIO.StringIO()
  self._story_set = _MakeStorySet()
  self._benchmark_metadata = benchmark.BenchmarkMetadata(
      'benchmark_name', 'benchmark_description')
def _CreateBenchmarkMetadata(self):
  if self._benchmark is not None:
    benchmark_metadata = self._benchmark.GetMetadata()
  else:
    benchmark_metadata = benchmark.BenchmarkMetadata('record_wpr')
  return benchmark_metadata