def main_run(args):
    filter_tests = []
    if args.filter_file:
        filter_tests = json.load(args.filter_file)

    test_args = ['--retry-limit', '3']
    if 'android' == args.properties.get('target_platform'):
        test_args += ['--browser', 'android-chromium', '--device', 'android']
    else:
        test_args += ['--browser', args.build_config_fs.lower()]

    with common.temporary_file() as tempfile_path:
        test_args += ['--write-full-results-to', tempfile_path]
        rc = common.run_runtest(args, [
            '--test-type', 'telemetry_perf_unittests', '--run-python-script',
            os.path.join(common.SRC_DIR, 'tools', 'perf', 'run_tests')
        ] + test_args + filter_tests)

        with open(tempfile_path) as f:
            results = json.load(f)

    parsed_results = common.parse_common_test_results(results,
                                                      test_separator='.')
    failures = parsed_results['unexpected_failures']

    json.dump({
        'valid': bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                      ((rc == 0) or failures)),
        'failures': failures.keys(),
    }, args.output)

    return rc
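
The 'valid' predicate in the dump above is easy to misread: results are treated as usable only when the exit code stays within the range the runner reserves for ordinary test failures, and a non-zero code must be matched by at least one recorded unexpected failure (a non-zero exit with nothing in failures usually means the run crashed before producing results). A minimal standalone sketch of the same predicate, assuming the conventional value of 101 for common.MAX_FAILURES_EXIT_STATUS:

MAX_FAILURES_EXIT_STATUS = 101  # assumed value, standing in for common.MAX_FAILURES_EXIT_STATUS

def results_valid(rc, failures):
    # Mirrors the 'valid' expression used in the examples above.
    return bool(rc <= MAX_FAILURES_EXIT_STATUS and ((rc == 0) or failures))

# A clean run is valid, a failing run is valid only if failures were parsed,
# and an exit code above the failure range (e.g. a crash) is never valid.
assert results_valid(0, {})
assert results_valid(1, {'SomeSuite.SomeTest': {}})
assert not results_valid(1, {})
assert not results_valid(130, {'SomeSuite.SomeTest': {}})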
Example #2
def main_run(args):
  filter_tests = []
  if args.filter_file:
    filter_tests = json.load(args.filter_file)

  with common.temporary_file() as tempfile_path:
    rc = common.run_runtest(args, [
        '--annotate', 'gtest',
        '--test-type', 'telemetry_unittests',
        '--run-python-script',
        os.path.join(common.SRC_DIR, 'tools', 'telemetry', 'run_tests'),
        '--browser', args.build_config_fs.lower(),
        '--retry-limit', '3',
        '--write-full-results-to', tempfile_path,
    ] + filter_tests)

    with open(tempfile_path) as f:
      results = json.load(f)

  parsed_results = common.parse_common_test_results(results, test_separator='.')
  failures = parsed_results['unexpected_failures']

  json.dump({
      'valid': bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                   ((rc == 0) or failures)),
      'failures': failures.keys(),
  }, args.output)

  return rc
Example #3
def main_run(script_args):
  parser = create_argparser()
  args, sizes_args = parser.parse_known_args(script_args.args)

  runtest_args = [
      '--test-type',
      'sizes',
      '--run-python-script',
  ]
  if args.perf_id:
    runtest_args.extend([
        '--perf-id',
        args.perf_id,
        '--results-url=%s' % args.results_url,
        '--perf-dashboard-id=sizes',
        '--annotate=graphing',
    ])
  sizes_cmd = [
      os.path.join(common.SRC_DIR, 'infra', 'scripts', 'legacy', 'scripts',
                   'slave', 'chromium', 'sizes.py')
  ]
  sizes_cmd.extend(sizes_args)
  rc = common.run_runtest(script_args, runtest_args + sizes_cmd)

  json.dump({
      'valid': rc == 0,
      'failures': [],
  }, script_args.output)

  return rc
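
create_argparser() is not shown in these snippets; judging from the attributes the sizes examples read (perf_id, results_url, platform), it presumably builds a parser along the following lines. This is a reconstruction for reading purposes, not the actual helper:

import argparse

def create_argparser():
    # Reconstructed from usage in the sizes examples; option names, defaults
    # and help strings are assumptions.
    parser = argparse.ArgumentParser()
    parser.add_argument('--perf-id',
                        help='Perf dashboard bot identifier.')
    parser.add_argument('--results-url', default='',
                        help='URL of the perf dashboard to report to.')
    parser.add_argument('--platform',
                        help='Platform name forwarded to sizes.py.')
    return parser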
Example #4
def main_run(args):
    filter_tests = []
    if args.filter_file:
        filter_tests = json.load(args.filter_file)

    perf_id = args.properties.get('perf-id')
    script_args = args.args
    test_suite = script_args[0]
    if IsWindows():
        script_args[0] += '.exe'

    with common.temporary_file() as tempfile_path:
        gtest_args = [
            '--target',
            args.build_config_fs,
            '--annotate',
            'graphing',
            '--perf-id',
            perf_id,
            '--perf-dashboard-id',
            test_suite,
            '--results-url',
            args.properties.get('results-url'),
            '--slave-name',
            args.properties.get('slavename'),
            '--builder-name',
            args.properties.get('buildername'),
            '--build-number',
            str(args.properties.get('buildnumber')),
            '--log-processor-output-file',
            tempfile_path,
            '--test-type',
            test_suite,
        ]

        if 'android' == args.properties.get('target_platform'):
            gtest_args.extend([
                '--no-xvfb',
                '--run-python-script',
                os.path.join(args.paths['checkout'], 'out',
                             args.build_config_fs, 'bin',
                             'run_%s' % test_suite),
                '--verbose',
            ])
            gtest_args.extend(script_args[1:])
        else:
            gtest_args.extend(['--xvfb'])
            gtest_args.extend(script_args)

        rc = common.run_runtest(args, gtest_args + filter_tests)

        with open(tempfile_path) as f:
            results = json.load(f)

    json.dump({
        'valid': bool(rc == 0),
        'failures': results['failed'],
    }, args.output)

    return rc
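
IsWindows() is referenced but not defined in these examples; a minimal sketch of the check it presumably performs, assuming it keys off sys.platform:

import sys

def IsWindows():
    # Assumed implementation; the examples only use this to decide whether the
    # test binary name needs an '.exe' suffix.
    return sys.platform == 'win32'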
Example #5
def main_run(args):
  filter_tests = []
  if args.filter_file:
    filter_tests = json.load(args.filter_file)

  test_args = ['--retry-limit', '3']
  if 'android' == args.properties.get('target_platform'):
    test_args += ['--browser', 'android-chrome-shell', '--device', 'android']
  else:
    test_args += ['--browser', args.build_config_fs.lower()]

  with common.temporary_file() as tempfile_path:
    test_args += ['--write-full-results-to', tempfile_path]
    rc = common.run_runtest(args, [
        '--test-type', 'telemetry_perf_unittests',
        '--run-python-script',
        os.path.join(common.SRC_DIR, 'tools', 'perf', 'run_tests')
    ] + test_args + filter_tests)

    with open(tempfile_path) as f:
      results = json.load(f)

  parsed_results = common.parse_common_test_results(results, test_separator='.')
  failures = parsed_results['unexpected_failures']

  json.dump({
      'valid': bool(rc <= common.MAX_FAILURES_EXIT_STATUS and
                   ((rc == 0) or failures)),
      'failures': failures.keys(),
  }, args.output)

  return rc
Example #6
def main_run(args):
    filter_tests = []
    if args.filter_file:
        filter_tests = json.load(args.filter_file)

    perf_id = args.properties.get("perf-id")
    script_args = args.args
    if IsWindows():
        script_args[0] += ".exe"
    test_suite = script_args[0]

    with common.temporary_file() as tempfile_path:
        gtest_args = [
            "--target",
            args.build_config_fs,
            "--annotate",
            "graphing",
            "--perf-id",
            perf_id,
            "--perf-dashboard-id",
            test_suite,
            "--results-url",
            args.properties.get("results-url"),
            "--slave-name",
            args.properties.get("slavename"),
            "--builder-name",
            args.properties.get("buildername"),
            "--build-number",
            str(args.properties.get("buildnumber")),
            "--log-processor-output-file",
            tempfile_path,
            "--test-type",
            test_suite,
        ]

        if "android" == args.properties.get("target_platform"):
            gtest_args.extend(
                [
                    "--no-xvfb",
                    "--run-python-script",
                    os.path.join(args.paths["checkout"], "build", "android", "test_runner.py"),
                    "gtest",
                    "--release",
                    "--suite",
                    test_suite,
                    "--verbose",
                ]
            )
        else:
            gtest_args.extend(["--xvfb"])
            gtest_args.extend(script_args)

        rc = common.run_runtest(args, gtest_args + filter_tests)

        with open(tempfile_path) as f:
            results = json.load(f)

    json.dump({"valid": bool(rc == 0), "failures": results["failed"]}, args.output)

    return rc
Example #7
def main_run(args):
    with common.temporary_file() as tempfile_path:
        rc = common.run_runtest(args, [
            '--test-type', 'sizes', '--run-python-script',
            os.path.join(common.SRC_DIR, 'infra', 'scripts', 'legacy',
                         'scripts', 'slave', 'chromium', 'sizes.py'), '--json',
            tempfile_path
        ])
        with open(tempfile_path) as f:
            results = json.load(f)

    with open(
            os.path.join(common.SRC_DIR, 'tools', 'perf_expectations',
                         'perf_expectations.json')) as f:
        perf_expectations = json.load(f)

    prefix = args.args[0]

    valid = (rc == 0)
    failures = []

    for name, result in results.iteritems():
        fqtn = '%s/%s/%s' % (prefix, name, result['identifier'])
        if fqtn not in perf_expectations:
            continue

        if perf_expectations[fqtn]['type'] != 'absolute':
            print 'ERROR: perf expectation %r is not yet supported' % fqtn
            valid = False
            continue

        actual = result['value']
        expected = perf_expectations[fqtn]['regress']
        better = perf_expectations[fqtn]['better']
        check_result = ((actual <= expected) if better == 'lower' else
                        (actual >= expected))

        if not check_result:
            failures.append(fqtn)
            print 'FAILED %s: actual %s, expected %s, better %s' % (
                fqtn, actual, expected, better)

    json.dump({
        'valid': valid,
        'failures': failures,
    }, args.output)

    # sizes.py itself doesn't fail on regressions.
    if failures and rc == 0:
        rc = 1

    return rc
Example #8
def main_run(args):
  with common.temporary_file() as tempfile_path:
    rc = common.run_runtest(args, [
        '--test-type', 'sizes',
        '--run-python-script',
        os.path.join(
            common.SRC_DIR, 'infra', 'scripts', 'legacy', 'scripts', 'slave',
            'chromium', 'sizes.py'),
        '--json', tempfile_path])
    with open(tempfile_path) as f:
      results = json.load(f)

  with open(os.path.join(common.SRC_DIR, 'tools', 'perf_expectations',
                         'perf_expectations.json')) as f:
    perf_expectations = json.load(f)

  prefix = args.args[0]

  valid = (rc == 0)
  failures = []

  for name, result in results.iteritems():
    fqtn = '%s/%s/%s' % (prefix, name, result['identifier'])
    if fqtn not in perf_expectations:
      continue

    if perf_expectations[fqtn]['type'] != 'absolute':
      print 'ERROR: perf expectation %r is not yet supported' % fqtn
      valid = False
      continue

    actual = result['value']
    expected = perf_expectations[fqtn]['regress']
    better = perf_expectations[fqtn]['better']
    check_result = ((actual <= expected) if better == 'lower'
                    else (actual >= expected))

    if not check_result:
      failures.append(fqtn)
      print 'FAILED %s: actual %s, expected %s, better %s' % (
          fqtn, actual, expected, better)

  json.dump({
      'valid': valid,
      'failures': failures,
  }, args.output)

  # sizes.py itself doesn't fail on regressions.
  if failures and rc == 0:
    rc = 1

  return rc
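
The expectation file consulted above maps fully-qualified names of the form prefix/name/identifier to threshold entries. Based on the fields the loop reads, an entry presumably looks like the dict below; the concrete key and numbers are invented for illustration:

# Hypothetical perf_expectations.json fragment, shaped after the fields the
# check reads: only 'absolute' expectations are supported, 'regress' is the
# threshold, and 'better' says which direction counts as an improvement.
perf_expectations = {
    'linux-release/sizes/chrome-si/initial_size': {
        'type': 'absolute',
        'regress': 123456789,   # fail when the actual value crosses this
        'better': 'lower',      # 'lower' means actual must stay <= regress
    },
}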
Example #9
def main_run(args):
  rc = common.run_runtest(args, [
      os.path.join(common.SRC_DIR, 'tools', 'valgrind', 'chrome_tests.sh'),
      '--tool', 'memcheck',
      '--build-dir', os.path.join(common.SRC_DIR, 'out', args.build_config_fs),
    ] + args.args)

  json.dump({
      'valid': True,
      'failures': ['failed'] if rc else []
  }, args.output)

  return rc
Example #10
def main_run(args):
    rc = common.run_runtest(args, [
        '--test-type', 'sizes', '--run-python-script',
        os.path.join(common.SRC_DIR, 'infra', 'scripts', 'legacy', 'scripts',
                     'slave', 'chromium', 'sizes.py')
    ])

    # TODO(phajdan.jr): Implement more granular failures.
    json.dump({
        'valid': True,
        'failures': ['sizes_failed'] if rc else [],
    }, args.output)

    return rc
Example #11
def main_run(args):
  filter_tests = []
  if args.filter_file:
    filter_tests = json.load(args.filter_file)

  perf_id = args.properties.get('perf-id')
  script_args = args.args
  test_suite = script_args[0]
  if IsWindows():
    script_args[0] += '.exe'

  with common.temporary_file() as tempfile_path:
    gtest_args = [
          '--target', args.build_config_fs,
          '--annotate', 'graphing',
          '--perf-id', perf_id,
          '--perf-dashboard-id', test_suite,
          '--results-url', args.properties.get('results-url'),
          '--slave-name', args.properties.get('slavename'),
          '--builder-name', args.properties.get('buildername'),
          '--build-number', str(args.properties.get('buildnumber')),
          '--log-processor-output-file', tempfile_path,
          '--test-type', test_suite,
    ]

    if 'android' == args.properties.get('target_platform'):
      gtest_args.extend([
          '--no-xvfb',
          '--run-python-script', os.path.join(
              args.paths['checkout'], 'out', args.build_config_fs, 'bin',
              'run_%s' % test_suite),
          '--verbose',
      ])
      gtest_args.extend(script_args[1:])
    else:
      gtest_args.extend(['--xvfb'])
      gtest_args.extend(script_args)

    rc = common.run_runtest(args, gtest_args + filter_tests)

    with open(tempfile_path) as f:
      results = json.load(f)

  json.dump({
      'valid': bool(rc == 0),
      'failures': results['failed'],
  }, args.output)

  return rc
Example #12
def main_run(args):
    rc = common.run_runtest(args, [
        os.path.join(common.SRC_DIR, 'tools', 'valgrind', 'chrome_tests.sh'),
        '--tool',
        'memcheck',
        '--build-dir',
        os.path.join(common.SRC_DIR, 'out', args.build_config_fs),
    ] + args.args)

    json.dump({
        'valid': True,
        'failures': ['failed'] if rc else []
    }, args.output)

    return rc
Example #13
def main_run(args):
  rc = common.run_runtest(args, [
      '--test-type', 'sizes',
      '--run-python-script',
      os.path.join(
          common.SRC_DIR, 'infra', 'scripts', 'legacy', 'scripts', 'slave',
          'chromium', 'sizes.py')])

  # TODO(phajdan.jr): Implement more granular failures.
  json.dump({
      'valid': True,
      'failures': ['sizes_failed'] if rc else [],
  }, args.output)

  return rc
Example #14
def main_run(script_args):
    parser = create_argparser()
    parser.add_argument('prefix')
    args = parser.parse_args(script_args.args)

    with common.temporary_file() as tempfile_path:
        runtest_args = [
            '--test-type',
            'sizes',
            '--run-python-script',
        ]
        if args.perf_id:
            runtest_args.extend([
                '--perf-id',
                args.perf_id,
                '--results-url=%s' % args.results_url,
                '--perf-dashboard-id=sizes',
                '--annotate=graphing',
            ])
        sizes_cmd = [
            os.path.join(common.SRC_DIR, 'infra', 'scripts', 'legacy',
                         'scripts', 'slave', 'chromium', 'sizes.py'),
            '--failures', tempfile_path
        ]
        if args.platform:
            sizes_cmd.extend(['--platform', args.platform])
        rc = common.run_runtest(script_args, runtest_args + sizes_cmd)
        with open(tempfile_path) as f:
            failures = json.load(f)

    json.dump({
        'valid': (rc == 0 or rc == 125),
        'failures': failures,
    }, script_args.output)

    return rc
Example #15
def main_run(script_args):
    parser = create_argparser()
    parser.add_argument('prefix')
    args = parser.parse_args(script_args.args)

    with common.temporary_file() as tempfile_path:
        runtest_args = [
            '--test-type',
            'sizes',
            '--run-python-script',
        ]
        if args.perf_id:
            runtest_args.extend([
                '--perf-id',
                args.perf_id,
                '--results-url=%s' % args.results_url,
                '--perf-dashboard-id=sizes',
                '--annotate=graphing',
            ])
        sizes_cmd = [
            os.path.join(common.SRC_DIR, 'infra', 'scripts', 'legacy',
                         'scripts', 'slave', 'chromium', 'sizes.py'), '--json',
            tempfile_path
        ]
        if args.platform:
            sizes_cmd.extend(['--platform', args.platform])
        rc = common.run_runtest(script_args, runtest_args + sizes_cmd)
        with open(tempfile_path) as f:
            results = json.load(f)

    with open(
            os.path.join(common.SRC_DIR, 'tools', 'perf_expectations',
                         'perf_expectations.json')) as f:
        perf_expectations = json.load(f)

    valid = (rc == 0)
    failures = []

    for name, result in results.iteritems():
        fqtn = '%s/%s/%s' % (args.prefix, name, result['identifier'])
        if fqtn not in perf_expectations:
            continue

        if perf_expectations[fqtn]['type'] != 'absolute':
            print 'ERROR: perf expectation %r is not yet supported' % fqtn
            valid = False
            continue

        actual = result['value']
        expected = perf_expectations[fqtn]['regress']
        better = perf_expectations[fqtn]['better']
        check_result = ((actual <= expected) if better == 'lower' else
                        (actual >= expected))

        if not check_result:
            failures.append(fqtn)
            print 'FAILED %s: actual %s, expected %s, better %s' % (
                fqtn, actual, expected, better)

    json.dump({
        'valid': valid,
        'failures': failures,
    }, script_args.output)

    # sizes.py itself doesn't fail on regressions.
    if failures and rc == 0:
        rc = 1

    return rc
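
All of the examples above lean on the same small surface of the common helper module. The stub below summarizes that surface as it is used here, so the snippets can be read, or exercised against fakes, in isolation; every body is a placeholder and not the real implementation.

import contextlib
import os
import tempfile

SRC_DIR = os.path.abspath(os.curdir)  # the real helper points at the checkout root
MAX_FAILURES_EXIT_STATUS = 101        # assumed cap on "tests failed" exit codes

@contextlib.contextmanager
def temporary_file():
    # Yields a path to a temporary file and removes it afterwards.
    fd, path = tempfile.mkstemp()
    os.close(fd)
    try:
        yield path
    finally:
        os.remove(path)

def run_runtest(args, cmd):
    # Placeholder: the real helper wraps the legacy runtest.py launcher and
    # returns its exit code.
    raise NotImplementedError

def parse_common_test_results(results, test_separator='/'):
    # Placeholder: the real helper folds full-results JSON into a dict that
    # includes an 'unexpected_failures' mapping of test name to result.
    raise NotImplementedError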