Code Example #1
def Config(benchmark_subdirs):
    return chromium_config.ChromiumConfig(top_level_dir=_top_level_dir,
                                          benchmark_dirs=[
                                              os.path.join(
                                                  _top_level_dir, subdir)
                                              for subdir in benchmark_subdirs
                                          ])
Code Example #2
 def _RunGpuIntegrationTests(self, test_name, extra_args=None):
     extra_args = extra_args or []
     temp_file = tempfile.NamedTemporaryFile(delete=False)
     temp_file.close()
     test_argv = [test_name,
                  '--write-full-results-to=%s' % temp_file.name
                  ] + extra_args
     unittest_config = chromium_config.ChromiumConfig(
         top_level_dir=path_util.GetGpuTestDir(),
         benchmark_dirs=[
             os.path.join(path_util.GetGpuTestDir(), 'unittest_data')
         ])
     old_manager = binary_manager._binary_manager
     with mock.patch.object(gpu_project_config, 'CONFIG', unittest_config):
         processed_args = run_gpu_integration_test.ProcessArgs(test_argv)
         telemetry_args = browser_test_runner.ProcessConfig(
             unittest_config, processed_args)
         try:
             binary_manager._binary_manager = None
             run_browser_tests.RunTests(telemetry_args)
             with open(temp_file.name) as f:
                 self._test_result = json.load(f)
         finally:
             binary_manager._binary_manager = old_manager
             temp_file.close()
Code Example #3
def main(args, output):
    parser = argparse.ArgumentParser(
        description='Fetch the dependencies of perf benchmark(s).')
    parser.add_argument('benchmark_name', type=str, nargs='?')
    parser.add_argument('--force',
                        '-f',
                        help=('Force fetching all the benchmarks when '
                              'benchmark_name is not specified'),
                        action='store_true',
                        default=False)

    options = parser.parse_args(args)

    if options.benchmark_name:
        config = chromium_config.ChromiumConfig(
            top_level_dir=path_util.GetPerfDir(),
            benchmark_dirs=[
                os.path.join(path_util.GetPerfDir(), 'benchmarks')
            ])
        benchmark = benchmark_runner.GetBenchmarkByName(
            options.benchmark_name, config)
        if not benchmark:
            raise ValueError('No such benchmark: %s' % options.benchmark_name)
        FetchDepsForBenchmark(benchmark, output)
    else:
        if not options.force:
            raw_input(
                'No benchmark name is specified. Fetching all benchmark deps. '
                'Press enter to continue...')
        for b in benchmark_finders.GetAllPerfBenchmarks():
            print >> output, ('Fetch dependencies for benchmark %s' % b.Name())
            FetchDepsForBenchmark(b, output)
Code Example #4
 def _RunIntegrationTest(self, test_name, failures, successes, skips,
                         additional_args):
     config = chromium_config.ChromiumConfig(
         top_level_dir=path_util.GetGpuTestDir(),
         benchmark_dirs=[
             os.path.join(path_util.GetGpuTestDir(), 'unittest_data')
         ])
     temp_dir = tempfile.mkdtemp()
     test_results_path = os.path.join(temp_dir, 'test_results.json')
     test_state_path = os.path.join(temp_dir, 'test_state.json')
     try:
         browser_test_runner.Run(config, [
             test_name,
             '--write-full-results-to=%s' % test_results_path,
             '--test-state-json-path=%s' % test_state_path
         ] + additional_args)
         with open(test_results_path) as f:
             self._test_result = json.load(f)
         with open(test_state_path) as f:
             self._test_state = json.load(f)
         actual_successes, actual_failures, actual_skips = (
             _ExtractTestResults(self._test_result))
         self.assertEquals(set(actual_failures), set(failures))
         self.assertEquals(set(actual_successes), set(successes))
         self.assertEquals(set(actual_skips), set(skips))
     finally:
         shutil.rmtree(temp_dir)
Code Example #5
 def _RunGpuIntegrationTests(self, test_name, extra_args=None):
   extra_args = extra_args or []
   unittest_config = chromium_config.ChromiumConfig(
       top_level_dir=path_util.GetGpuTestDir(),
       benchmark_dirs=[
           os.path.join(path_util.GetGpuTestDir(), 'unittest_data')
       ])
   with binary_manager.TemporarilyReplaceBinaryManager(None), \
        mock.patch.object(gpu_project_config, 'CONFIG', unittest_config):
     # TODO(crbug.com/1103792): Using NamedTemporaryFile() as a generator is
     # causing windows bots to fail. When the issue is fixed with
     # tempfile_ext.NamedTemporaryFile(), put it in the list of generators
     # starting this with block. Also remove the try finally statement
     # below.
     temp_file = tempfile.NamedTemporaryFile(delete=False)
     temp_file.close()
     try:
       test_argv = [
           test_name, '--write-full-results-to=%s' % temp_file.name
       ] + extra_args
       processed_args = run_gpu_integration_test.ProcessArgs(test_argv)
       telemetry_args = browser_test_runner.ProcessConfig(
           unittest_config, processed_args)
       run_browser_tests.RunTests(telemetry_args)
       with open(temp_file.name) as f:
         self._test_result = json.load(f)
     finally:
       temp_file.close()
Code Example #6
  def _RunIntegrationTest(self, test_args):
    """Runs an integration and asserts fail/success/skip expectations.

    Args:
      test_args: A _IntegrationTestArgs instance to use.
    """
    config = chromium_config.ChromiumConfig(
        top_level_dir=path_util.GetGpuTestDir(),
        benchmark_dirs=[
            os.path.join(path_util.GetGpuTestDir(), 'unittest_data')
        ])

    with binary_manager.TemporarilyReplaceBinaryManager(None), \
         tempfile_ext.NamedTemporaryDirectory() as temp_dir:
      test_results_path = os.path.join(temp_dir, 'test_results.json')
      test_state_path = os.path.join(temp_dir, 'test_state.json')
      # We process the ChromiumConfig instance to get the argument list, then
      # pass it directly to run_browser_tests.RunTests. If we called
      # browser_test_runner.Run instead, it would spawn another subprocess,
      # which is less efficient.
      args = browser_test_runner.ProcessConfig(config, [
          test_args.test_name,
          '--write-full-results-to=%s' % test_results_path,
          '--test-state-json-path=%s' % test_state_path
      ] + test_args.additional_args)
      run_browser_tests.RunTests(args)
      with open(test_results_path) as f:
        self._test_result = json.load(f)
      with open(test_state_path) as f:
        self._test_state = json.load(f)
      actual_successes, actual_failures, actual_skips = (_ExtractTestResults(
          self._test_result))
      self.assertEquals(set(actual_failures), set(test_args.failures))
      self.assertEquals(set(actual_successes), set(test_args.successes))
      self.assertEquals(set(actual_skips), set(test_args.skips))
Code Example #7
def main(args):
    parser = argparse.ArgumentParser(
        description='Fetch the dependencies of perf benchmark(s).')
    parser.add_argument('benchmark_name', type=str, nargs='?')
    parser.add_argument('--force',
                        '-f',
                        help=('Force fetching all the benchmarks when '
                              'benchmark_name is not specified'),
                        action='store_true',
                        default=False)
    # Flag --output-deps: output the dependencies to a JSON file. CrOS autotest
    # telemetry_runner parses the output to upload the dependencies to the DUT.
    # Example output, fetch_benchmark_deps.py --output-deps=deps octane:
    # {'octane': ['tools/perf/page_sets/data/octane_002.wprgo']}
    parser.add_argument('--output-deps',
                        help=('Output dependencies to a json file'))
    parser.add_argument('-v',
                        '--verbose',
                        action='count',
                        dest='verbosity',
                        help='Increase verbosity level (repeat as needed)')

    options = parser.parse_args(args)

    if options.verbosity >= 2:
        logging.getLogger().setLevel(logging.DEBUG)
    elif options.verbosity:
        logging.getLogger().setLevel(logging.INFO)
    else:
        logging.getLogger().setLevel(logging.WARNING)

    deps = {}
    if options.benchmark_name:
        perf_dir = path_util.GetPerfDir()
        benchmark_dirs = [
            os.path.join(perf_dir, 'benchmarks'),
            os.path.join(perf_dir, 'contrib')
        ]
        config = chromium_config.ChromiumConfig(
            top_level_dir=path_util.GetPerfDir(),
            benchmark_dirs=benchmark_dirs)
        benchmark = benchmark_runner.GetBenchmarkByName(
            options.benchmark_name, config)
        if not benchmark:
            raise ValueError('No such benchmark: %s' % options.benchmark_name)
        deps[benchmark.Name()] = _FetchDepsForBenchmark(benchmark)
    else:
        if not options.force:
            raw_input(
                'No benchmark name is specified. Fetching all benchmark deps. '
                'Press enter to continue...')
        for b in benchmark_finders.GetOfficialBenchmarks():
            deps[b.Name()] = _FetchDepsForBenchmark(b)

    if options.output_deps:
        with open(options.output_deps, 'w') as outfile:
            json.dump(deps, outfile)
Code Example #8
def main():
    config = chromium_config.ChromiumConfig(
        benchmark_dirs=[
            path_util.GetOfficialBenchmarksDir(),
            path_util.GetContribDir()
        ],
        top_level_dir=path_util.GetPerfDir(),
        expectations_files=[path_util.GetExpectationsPath()])
    return benchmark_runner.main(config)
Code Example #9
File: run.py  Project: byobrowser/byob
def main():
    parser = optparse.OptionParser()
    parser.add_option('--output-format',
                      default='html',
                      help='The output format of the results file.')
    parser.add_option(
        '--output-dir',
        default=None,
        help='The directory for the output file. Default value is '
        'the base directory of this script.')
    options, _ = parser.parse_args()
    constants.SetBuildType(perf_test_utils.BUILD_TYPE)
    # Install APK
    device = GetDevice()
    device.EnableRoot()
    device.Install(perf_test_utils.APP_APK)
    # Start USB reverse tethering.
    android_rndis_forwarder.AndroidRndisForwarder(
        device, perf_test_utils.GetAndroidRndisConfig(device))
    # Start HTTP server.
    http_server_doc_root = perf_test_utils.GenerateHttpTestResources()
    config_file = tempfile.NamedTemporaryFile()
    http_server = lighttpd_server.LighttpdServer(
        http_server_doc_root,
        port=perf_test_utils.HTTP_PORT,
        base_config_path=config_file.name)
    perf_test_utils.GenerateLighttpdConfig(config_file, http_server_doc_root,
                                           http_server)
    assert http_server.StartupHttpServer()
    config_file.close()
    # Start QUIC server.
    quic_server_doc_root = perf_test_utils.GenerateQuicTestResources(device)
    quic_server = perf_test_utils.QuicServer(quic_server_doc_root)
    quic_server.StartupQuicServer(device)
    # Launch Telemetry's benchmark_runner on CronetPerfTestBenchmark.
    # By specifying this file's directory as the benchmark directory, it will
    # allow benchmark_runner to in turn open this file up and find the
    # CronetPerfTestBenchmark class to run the benchmark.
    top_level_dir = os.path.dirname(os.path.realpath(__file__))
    expectations_file = os.path.join(top_level_dir, 'expectations.config')
    runner_config = chromium_config.ChromiumConfig(
        top_level_dir=top_level_dir,
        benchmark_dirs=[top_level_dir],
        expectations_file=expectations_file)
    sys.argv.insert(1, 'run')
    sys.argv.insert(2, 'run.CronetPerfTestBenchmark')
    sys.argv.insert(3, '--browser=android-system-chrome')
    sys.argv.insert(4, '--output-format=' + options.output_format)
    if options.output_dir:
        sys.argv.insert(5, '--output-dir=' + options.output_dir)
    benchmark_runner.main(runner_config)
    # Shutdown.
    quic_server.ShutdownQuicServer()
    shutil.rmtree(quic_server_doc_root)
    http_server.ShutdownHttpServer()
    shutil.rmtree(http_server_doc_root)
Code Example #10
def main(args):
    parser = argparse.ArgumentParser(
        description='Fetch the dependencies of perf benchmark(s).')
    parser.add_argument('benchmark_name', type=str, nargs='?')
    parser.add_argument('--force',
                        '-f',
                        help=('Force fetching all the benchmarks when '
                              'benchmark_name is not specified'),
                        action='store_true',
                        default=False)
    parser.add_argument('-v',
                        '--verbose',
                        action='count',
                        dest='verbosity',
                        help='Increase verbosity level (repeat as needed)')

    options = parser.parse_args(args)

    if options.verbosity >= 2:
        logging.getLogger().setLevel(logging.DEBUG)
    elif options.verbosity:
        logging.getLogger().setLevel(logging.INFO)
    else:
        logging.getLogger().setLevel(logging.WARNING)

    if options.benchmark_name:
        perf_dir = path_util.GetPerfDir()
        benchmark_dirs = [
            os.path.join(perf_dir, 'benchmarks'),
            os.path.join(perf_dir, 'contrib')
        ]
        config = chromium_config.ChromiumConfig(
            top_level_dir=path_util.GetPerfDir(),
            benchmark_dirs=benchmark_dirs)
        benchmark = benchmark_runner.GetBenchmarkByName(
            options.benchmark_name, config)
        if not benchmark:
            raise ValueError('No such benchmark: %s' % options.benchmark_name)
        FetchDepsForBenchmark(benchmark)
    else:
        if not options.force:
            raw_input(
                'No benchmark name is specified. Fetching all benchmark deps. '
                'Press enter to continue...')
        for b in benchmark_finders.GetAllPerfBenchmarks():
            logging.info('Fetch dependencies for benchmark %s', b.Name())
            FetchDepsForBenchmark(b)
Code Example #11
 def _RunGpuIntegrationTests(self, test_name, extra_args=None):
   extra_args = extra_args or []
   temp_file = tempfile.NamedTemporaryFile(delete=False)
   temp_file.close()
   try:
     sys.argv = [
         run_gpu_integration_test.__file__,
         test_name,
         '--write-full-results-to=%s' % temp_file.name,
         ] + extra_args
     gpu_project_config.CONFIG = chromium_config.ChromiumConfig(
         top_level_dir=path_util.GetGpuTestDir(),
         benchmark_dirs=[
             os.path.join(path_util.GetGpuTestDir(), 'unittest_data')])
     run_gpu_integration_test.main()
     with open(temp_file.name) as f:
       self._test_result = json.load(f)
   finally:
     temp_file.close()
Code Example #12
File: fetch_benchmark_deps.py  Project: junhuac/MQUIC
def main(output=sys.stdout):
    config = chromium_config.ChromiumConfig(
        top_level_dir=path_util.GetPerfDir(),
        benchmark_dirs=[os.path.join(path_util.GetPerfDir(), 'benchmarks')])

    name = sys.argv[1]
    benchmark = benchmark_runner.GetBenchmarkByName(name, config)
    if not benchmark:
        raise ValueError('No such benchmark: %s' % name)

    # Download files according to specified benchmark.
    story_set = benchmark().CreateStorySet(None)

    _FetchDependenciesIfNeeded(story_set)

    # Print files downloaded.
    deps = _EnumerateDependencies(story_set)
    for dep in deps:
        print >> output, dep
Code Example #13
 def testTestNamePrefixGenerationInRunGpuIntegrationTests(self):
     temp_file = tempfile.NamedTemporaryFile(delete=False)
     temp_file.close()
     try:
         sys.argv = [
             run_gpu_integration_test.__file__,
             'simple_integration_unittest',
             '--write-full-results-to=%s' % temp_file.name
         ]
         gpu_project_config.CONFIG = chromium_config.ChromiumConfig(
             top_level_dir=path_util.GetGpuTestDir(),
             benchmark_dirs=[
                 os.path.join(path_util.GetGpuTestDir(), 'unittest_data')
             ])
         run_gpu_integration_test.main()
         with open(temp_file.name) as f:
             results = json.load(f)
         self.assertIn('expected_failure', results['tests'])
         self.assertEqual(results['test_name_prefix'],
                          'unittest_data.integration_tests.SimpleTest.')
     finally:
         temp_file.close()
Code Example #14
 def _RunIntegrationTest(self, test_name, failures, successes, skips,
                         additional_args):
     # pylint: disable=too-many-locals
     config = chromium_config.ChromiumConfig(
         top_level_dir=path_util.GetGpuTestDir(),
         benchmark_dirs=[
             os.path.join(path_util.GetGpuTestDir(), 'unittest_data')
         ])
     temp_dir = tempfile.mkdtemp()
     test_results_path = os.path.join(temp_dir, 'test_results.json')
     test_state_path = os.path.join(temp_dir, 'test_state.json')
     old_manager = binary_manager._binary_manager
     try:
         # TODO(crbug.com/1099856): Fix telemetry binary_manager API so that
         # we don't need to access its private global variable
         binary_manager._binary_manager = None
         # We process the ChromiumConfig instance to get the argument list,
         # then pass it directly to run_browser_tests.RunTests. If we called
         # browser_test_runner.Run instead, it would spawn another subprocess,
         # which is less efficient.
         args = browser_test_runner.ProcessConfig(config, [
             test_name,
             '--write-full-results-to=%s' % test_results_path,
             '--test-state-json-path=%s' % test_state_path
         ] + additional_args)
         run_browser_tests.RunTests(args)
         with open(test_results_path) as f:
             self._test_result = json.load(f)
         with open(test_state_path) as f:
             self._test_state = json.load(f)
         actual_successes, actual_failures, actual_skips = (
             _ExtractTestResults(self._test_result))
         self.assertEquals(set(actual_failures), set(failures))
         self.assertEquals(set(actual_successes), set(successes))
         self.assertEquals(set(actual_skips), set(skips))
     finally:
         binary_manager._binary_manager = old_manager
         shutil.rmtree(temp_dir)
Code Example #15
def main(args):
    parser = argparse.ArgumentParser(description='Extra argument parser',
                                     add_help=False)
    parser.add_argument(
        '--output-directory',
        action='store',
        default=None,
        help='Sets the CHROMIUM_OUTPUT_DIR environment variable')
    known_options, rest_args = parser.parse_known_args(args)

    constants.SetOutputDirectory(
        os.path.realpath(known_options.output_directory or os.getcwd()))

    config = chromium_config.ChromiumConfig(
        top_level_dir=os.path.dirname(__file__),
        benchmark_dirs=[os.path.dirname(__file__)])

    ret_val = browser_test_runner.Run(config, rest_args)
    if '--help' in rest_args or '-h' in rest_args:
        print('\n\nCommand line arguments used in '
              'run_webview_component_smoketest.py')
        parser.print_help()

    return ret_val
Code Example #16
File: perf.py  Project: kleopatra999/erlnmyr
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License

import telemetry.core
import sys
from telemetry.internal.browser import browser_options
from telemetry.internal.browser import browser_finder

# Initialize the dependency manager
from telemetry.internal.util import binary_manager
from chrome_telemetry_build import chromium_config
binary_manager.InitDependencyManager(
    chromium_config.ChromiumConfig().client_config)

from telemetry.timeline import tracing_config

from json import dumps

options = browser_options.BrowserFinderOptions()
parser = options.CreateParser()
(_, args) = parser.parse_args()

browserFactory = browser_finder.FindBrowser(options)

with browserFactory.Create(options) as browser:
    tab = browser.tabs.New()
    tab.Activate()
    for i in browser.tabs:
Code Example #17
def main(args):
    config = chromium_config.ChromiumConfig(
        top_level_dir=os.path.dirname(__file__),
        benchmark_dirs=[os.path.dirname(__file__)])
    return browser_test_runner.Run(config, args)
Code Example #18
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import os

from gpu_tests import path_util

path_util.AddDirToPathIfNeeded(path_util.GetChromiumSrcDir(), 'tools', 'perf')

from chrome_telemetry_build import chromium_config

CONFIG = chromium_config.ChromiumConfig(
    top_level_dir=path_util.GetGpuTestDir(),
    benchmark_dirs=[os.path.join(path_util.GetGpuTestDir(), 'gpu_tests')])
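
A module-level CONFIG like the one above is normally consumed by a thin runner script. The sketch below is not from the original sources: it assumes gpu_project_config is the module defining this CONFIG (it is referenced by that name in Code Examples #2, #5, #11 and #13), and it mirrors the browser_test_runner.Run(config, args) usage shown in Code Examples #15 and #17.

import sys

# Assumption for illustration only: gpu_project_config is taken to be the
# module that defines the CONFIG above.
import gpu_project_config
from telemetry.testing import browser_test_runner


def main(args):
    # Hand the shared ChromiumConfig plus the remaining command-line arguments
    # to Telemetry's browser test runner, as in Code Examples #15 and #17.
    return browser_test_runner.Run(gpu_project_config.CONFIG, args)


if __name__ == '__main__':
    sys.exit(main(sys.argv[1:]))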
Code Example #19
# Copyright 2022 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

import os
import sys

CHROMIUM_SRC_DIR = os.path.realpath(
    os.path.join(os.path.dirname(__file__), '..', '..'))

sys.path.append(os.path.join(CHROMIUM_SRC_DIR, 'tools', 'perf'))
from chrome_telemetry_build import chromium_config

CAST_TEST_DIR = os.path.join(CHROMIUM_SRC_DIR, 'chromecast', 'test')
CONFIG = chromium_config.ChromiumConfig(top_level_dir=CAST_TEST_DIR,
                                        benchmark_dirs=[CAST_TEST_DIR])
Code Example #20
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License

import telemetry.core
import sys
from telemetry.internal.browser import browser_options
from telemetry.internal.browser import browser_finder

# Initialize the dependency manager
from telemetry.internal.util import binary_manager
from chrome_telemetry_build import chromium_config
binary_manager.InitDependencyManager(chromium_config.ChromiumConfig().client_config)

from telemetry.timeline import tracing_category_filter
from telemetry.timeline import tracing_options

from json import dumps

options = browser_options.BrowserFinderOptions()
parser = options.CreateParser()
(_, args) = parser.parse_args()

browserFactory = browser_finder.FindBrowser(options)

with browserFactory.Create(options) as browser:
  tab = browser.tabs.New()
  tab.Activate()