Example 1
def record_local_script_results(name, output_fd, failures, valid):
    """Records to a local json file and to RDB the results of the script test.

  For legacy reasons, local script tests (ie: script tests that run
  locally and that don't conform to the isolated-test API) are expected to
  record their results using a specific format. This method encapsulates
  that format and also uploads those results to Result DB.

  Args:
    name: Name of the script test.
    output_fd: A .write()-supporting file descriptor to write results to.
    failures: List of strings representing test failures.
    valid: Whether the results are valid.
  """
    local_script_results = {'valid': valid, 'failures': failures}
    json.dump(local_script_results, output_fd)

    # result_sink may be None (e.g. set by a guarded import) when ResultDB
    # reporting is unavailable.
    if not result_sink:
        return
    result_sink_client = result_sink.TryInitClient()
    if not result_sink_client:
        return
    status = result_types.PASS
    if not valid:
        status = result_types.UNKNOWN
    elif failures:
        status = result_types.FAIL
    test_log = '\n'.join(failures)
    result_sink_client.Post(name, status, None, test_log, None)
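
A minimal usage sketch for the helper above. The test name is hypothetical, and the printed JSON assumes json.dump's default formatting:

import io

buf = io.StringIO()
# A valid, passing run: no failures recorded.
record_local_script_results('my_script_test', buf, failures=[], valid=True)
print(buf.getvalue())  # -> {"valid": true, "failures": []}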
Example 2
def main():
    """Parses arguments and runs high level flows."""
    argparser = argparse.ArgumentParser(
        description='Writes LaCrOS size metrics.')

    argparser.add_argument('--chromium-output-directory',
                           dest='out_dir',
                           required=True,
                           type=os.path.realpath,
                           help='Location of the build artifacts.')

    output_group = argparser.add_mutually_exclusive_group()

    output_group.add_argument('--output-dir',
                              default='.',
                              help='Directory to save chartjson to.')

    # Accepted to conform to the isolated script interface, but ignored.
    argparser.add_argument('--isolated-script-test-filter',
                           help=argparse.SUPPRESS)
    argparser.add_argument('--isolated-script-test-perf-output',
                           type=os.path.realpath,
                           help=argparse.SUPPRESS)

    output_group.add_argument(
        '--isolated-script-test-output',
        type=os.path.realpath,
        help='File to which results will be written in the simplified JSON '
        'output format.')

    args = argparser.parse_args()

    # Define test_name unconditionally: it is also needed for the result
    # sink Post() call below, not just for the isolated-script output path.
    test_name = 'lacros_resource_sizes'
    isolated_script_output = {'valid': False, 'failures': []}
    if args.isolated_script_test_output:
        args.output_dir = os.path.join(
            os.path.dirname(args.isolated_script_test_output), test_name)
        if not os.path.exists(args.output_dir):
            os.makedirs(args.output_dir)

    try:
        _run_resource_sizes(args)
        isolated_script_output = {'valid': True, 'failures': []}
    finally:
        if args.isolated_script_test_output:
            results_path = os.path.join(args.output_dir, 'test_results.json')
            with open(results_path, 'w') as output_file:
                json.dump(isolated_script_output, output_file)
            with open(args.isolated_script_test_output, 'w') as output_file:
                json.dump(isolated_script_output, output_file)
    result_sink_client = result_sink.TryInitClient()
    if result_sink_client:
        status = result_types.PASS
        if not isolated_script_output['valid']:
            status = result_types.UNKNOWN
        elif isolated_script_output['failures']:
            status = result_types.FAIL
        result_sink_client.Post(test_name, status, None, None, None)
Example 3
    def __init__(self, args, unknown_args):
        self._additional_args = unknown_args
        self._path_to_outdir = args.path_to_outdir
        self._test_launcher_summary_output = args.test_launcher_summary_output
        self._logs_dir = args.logs_dir
        self._use_vm = args.use_vm
        self._rdb_client = result_sink.TryInitClient()

        self._retries = 0
        self._timeout = None
        self._test_launcher_shard_index = args.test_launcher_shard_index
        self._test_launcher_total_shards = args.test_launcher_total_shards

        # The location on disk of a shell script that can be optionally used to
        # invoke the test on the device. If it's not set, we assume self._test_cmd
        # contains the test invocation.
        self._on_device_script = None

        self._test_cmd = [
            CROS_RUN_TEST_PATH,
            '--board',
            args.board,
            '--cache-dir',
            args.cros_cache,
        ]
        if args.use_vm:
            self._test_cmd += [
                '--start',
                # Don't persist any filesystem changes after the VM shuts down.
                '--copy-on-write',
            ]
        else:
            self._test_cmd += [
                '--device', args.device if args.device else LAB_DUT_HOSTNAME
            ]
        if args.logs_dir:
            for log in SYSTEM_LOG_LOCATIONS:
                self._test_cmd += ['--results-src', log]
            self._test_cmd += [
                '--results-dest-dir',
                os.path.join(args.logs_dir, 'system_logs')
            ]
        if args.flash:
            self._test_cmd += ['--flash']
            if args.public_image:
                self._test_cmd += ['--public-image']

        self._test_env = setup_env()
Example 4
  def testBasicClient(self):
    luci_context_json = {
        'result_sink': {
            'address': 'some-ip-address',
            'auth_token': 'some-auth-token',
        },
    }
    if six.PY2:
      open_builtin_path = '__builtin__.open'
    else:
      open_builtin_path = 'builtins.open'
    with mock.patch(open_builtin_path,
                    mock.mock_open(read_data=json.dumps(luci_context_json))):
      client = result_sink.TryInitClient()
    self.assertEqual(
        client.test_results_url,
        'http://some-ip-address/prpc/luci.resultsink.v1.Sink/ReportTestResults')
    self.assertEqual(client.headers['Authorization'],
                     'ResultSink some-auth-token')
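
Together with testEmptyClient in Example 6 below, this test pins down TryInitClient's contract: read the JSON file named by the LUCI_CONTEXT env var and build a client from the 'result_sink' section's address and auth_token, or return None when the configuration is absent. A minimal sketch consistent with the tests, not the actual Chromium implementation (_SinkClient is an invented stand-in for the real client class):

import json
import os


class _SinkClient(object):
  """Hypothetical stand-in for the real result-sink client."""

  def __init__(self, address, auth_token):
    self.test_results_url = (
        'http://%s/prpc/luci.resultsink.v1.Sink/ReportTestResults' % address)
    self.headers = {'Authorization': 'ResultSink %s' % auth_token}


def TryInitClient():
  # LUCI_CONTEXT names a JSON file whose 'result_sink' section carries the
  # sink's address and auth token; without both, there is no client.
  luci_context_path = os.environ.get('LUCI_CONTEXT')
  if not luci_context_path:
    return None
  with open(luci_context_path) as f:
    sink_ctx = json.load(f).get('result_sink')
  if not sink_ctx:
    return None
  return _SinkClient(sink_ctx['address'], sink_ctx['auth_token'])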
Example 5
def main():
    signal.signal(signal.SIGUSR1, DumpThreadStacks)

    parser = argparse.ArgumentParser()
    command_parsers = parser.add_subparsers(title='test types', dest='command')

    subp = command_parsers.add_parser('gtest',
                                      help='googletest-based C++ tests')
    AddCommonOptions(subp)
    AddDeviceOptions(subp)
    AddEmulatorOptions(subp)
    AddGTestOptions(subp)
    AddTracingOptions(subp)
    AddCommandLineOptions(subp)

    subp = command_parsers.add_parser(
        'instrumentation', help='InstrumentationTestCase-based Java tests')
    AddCommonOptions(subp)
    AddDeviceOptions(subp)
    AddEmulatorOptions(subp)
    AddInstrumentationTestOptions(subp)
    AddSkiaGoldTestOptions(subp)
    AddTracingOptions(subp)
    AddCommandLineOptions(subp)

    subp = command_parsers.add_parser('junit', help='JUnit4-based Java tests')
    AddCommonOptions(subp)
    AddJUnitTestOptions(subp)

    subp = command_parsers.add_parser('linker', help='linker tests')
    AddCommonOptions(subp)
    AddDeviceOptions(subp)
    AddEmulatorOptions(subp)
    AddLinkerTestOptions(subp)

    subp = command_parsers.add_parser(
        'monkey', help="tests based on Android's monkey command")
    AddCommonOptions(subp)
    AddDeviceOptions(subp)
    AddEmulatorOptions(subp)
    AddMonkeyTestOptions(subp)

    subp = command_parsers.add_parser(
        'python', help='python tests based on unittest.TestCase')
    AddCommonOptions(subp)
    AddPythonTestOptions(subp)

    args, unknown_args = parser.parse_known_args()
    if unknown_args:
        if hasattr(args, 'allow_unknown') and args.allow_unknown:
            args.command_line_flags = unknown_args
        else:
            parser.error('unrecognized arguments: %s' % ' '.join(unknown_args))

    # --replace-system-package/--remove-system-package has the potential to cause
    # issues if --enable-concurrent-adb is set, so disallow that combination.
    concurrent_adb_enabled = (hasattr(args, 'enable_concurrent_adb')
                              and args.enable_concurrent_adb)
    replacing_system_packages = (hasattr(args, 'replace_system_package')
                                 and args.replace_system_package)
    removing_system_packages = (hasattr(args, 'system_packages_to_remove')
                                and args.system_packages_to_remove)
    if (concurrent_adb_enabled
            and (replacing_system_packages or removing_system_packages)):
        parser.error('--enable-concurrent-adb cannot be used with either '
                     '--replace-system-package or --remove-system-package')

    # --use-webview-provider has the potential to cause issues if
    # --enable-concurrent-adb is set, so disallow that combination.
    if (hasattr(args, 'use_webview_provider')
            and hasattr(args, 'enable_concurrent_adb')
            and args.use_webview_provider and args.enable_concurrent_adb):
        parser.error(
            '--use-webview-provider and --enable-concurrent-adb cannot '
            'be used together')

    if (getattr(args, 'coverage_on_the_fly', False)
            and not getattr(args, 'coverage_dir', '')):
        parser.error('--coverage-on-the-fly requires --coverage-dir')

    # Don't retry tests while a debugger is attached. Check the values, not
    # just hasattr(): argparse sets the attribute even when the flag is unset.
    if (getattr(args, 'debug_socket', None)
            or getattr(args, 'wait_for_java_debugger', False)):
        args.num_retries = 0

    # The result sink config may be absent from the environment if rdb stream
    # is not enabled, in which case TryInitClient() returns None.
    result_sink_client = result_sink.TryInitClient()

    try:
        return RunTestsCommand(args, result_sink_client)
    except base_error.BaseError as e:
        logging.exception('Error occurred.')
        if e.is_infra_error:
            return constants.INFRA_EXIT_CODE
        return constants.ERROR_EXIT_CODE
    except:  # pylint: disable=W0702
        logging.exception('Unrecognized error occurred.')
        return constants.ERROR_EXIT_CODE
Example 6
  def testEmptyClient(self):
    # Without a LUCI_CONTEXT env var, no client should be created.
    client = result_sink.TryInitClient()
    self.assertIsNone(client)
Example 7
  def __init__(self, args, unknown_args):
    self._additional_args = unknown_args
    self._path_to_outdir = args.path_to_outdir
    self._test_launcher_summary_output = args.test_launcher_summary_output
    self._logs_dir = args.logs_dir
    self._use_vm = args.use_vm
    self._rdb_client = result_sink.TryInitClient()

    self._retries = 0
    self._timeout = None
    self._test_launcher_shard_index = args.test_launcher_shard_index
    self._test_launcher_total_shards = args.test_launcher_total_shards

    # The location on disk of a shell script that can be optionally used to
    # invoke the test on the device. If it's not set, we assume self._test_cmd
    # contains the test invocation.
    self._on_device_script = None

    self._test_cmd = [
        CROS_RUN_TEST_PATH,
        '--board',
        args.board,
        '--cache-dir',
        args.cros_cache,
    ]
    if args.use_vm:
      self._test_cmd += [
          '--start',
          # Don't persist any filesystem changes after the VM shuts down.
          '--copy-on-write',
      ]
    else:
      self._test_cmd += [
          '--device', args.device if args.device else LAB_DUT_HOSTNAME
      ]
    if args.logs_dir:
      for log in SYSTEM_LOG_LOCATIONS:
        self._test_cmd += ['--results-src', log]
      self._test_cmd += [
          '--results-dest-dir',
          os.path.join(args.logs_dir, 'system_logs')
      ]
    if args.flash:
      self._test_cmd += ['--flash']
      if args.public_image:
        self._test_cmd += ['--public-image']

    # This environment variable is set for tests that have been instrumented
    # for code coverage. Its incoming value is expected to be a location
    # inside a subdirectory of result_dir above. This is converted to an
    # absolute path that the vm is able to write to, and passed in the
    # --results-src flag to cros_run_vm_test for copying out of the vm before
    # its termination.
    self._llvm_profile_var = None
    if os.environ.get('LLVM_PROFILE_FILE'):
      _, llvm_profile_file = os.path.split(os.environ['LLVM_PROFILE_FILE'])
      self._llvm_profile_var = '/tmp/profraw/%s' % llvm_profile_file

      # This should make the VM test runner copy the profiling data out of
      # the VM before it terminates.
      self._test_cmd += ['--results-src', '/tmp/profraw']

    self._test_env = setup_env()
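
To illustrate the rewrite in the coverage block above (the incoming path is invented; only its basename survives, re-rooted in the VM-writable /tmp/profraw):

import os

# Hypothetical value, as a coverage-instrumented launcher might set it.
os.environ['LLVM_PROFILE_FILE'] = '/out/Release/result_dir/profraw/default.profraw'

_, llvm_profile_file = os.path.split(os.environ['LLVM_PROFILE_FILE'])
print('/tmp/profraw/%s' % llvm_profile_file)  # -> /tmp/profraw/default.profraw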
Example 8
def main():
    if sys.platform in ('win32', 'cygwin'):
        default_platform = 'win'
    elif sys.platform.startswith('darwin'):
        default_platform = 'mac'
    # sys.platform is 'linux2' on Python 2 but 'linux' on Python 3.
    elif sys.platform.startswith('linux'):
        default_platform = 'linux'
    else:
        default_platform = None

    main_map = {
        'android': main_android,
        'android-cronet': main_android_cronet,
        'linux': main_linux,
        'mac': main_mac,
        'win': main_win,
    }
    platforms = sorted(main_map.keys())

    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--output-directory',
        type=os.path.realpath,
        help='Chromium output directory, e.g. /path/to/src/out/Debug')
    parser.add_argument('--platform',
                        default=default_platform,
                        help='specify platform (%s) [default: %%(default)s]' %
                        ', '.join(platforms))
    parser.add_argument('--size-path',
                        default=None,
                        help='Path to size binary')

    # Accepted to conform to the isolated script interface, but ignored.
    parser.add_argument('--isolated-script-test-filter',
                        help=argparse.SUPPRESS)
    parser.add_argument('--isolated-script-test-perf-output',
                        help=argparse.SUPPRESS)

    parser.add_argument(
        '--isolated-script-test-output',
        type=os.path.realpath,
        help='File to which simplified JSON results will be written.')

    args = parser.parse_args()

    real_main = main_map.get(args.platform)
    if not real_main:
        if args.platform is None:
            sys.stderr.write('Unsupported sys.platform %s.\n' %
                             repr(sys.platform))
        else:
            sys.stderr.write('Unknown platform %s.\n' % repr(args.platform))
        msg = 'Use the --platform= option to specify a supported platform:\n'
        sys.stderr.write(msg + '    ' + ' '.join(platforms) + '\n')
        return 2

    isolated_script_output = {
        'valid': False,
        'failures': [],
        'version': 'simplified'
    }
    test_name = 'sizes'

    results_directory = None
    if args.isolated_script_test_output:
        results_directory = os.path.join(
            os.path.dirname(args.isolated_script_test_output), test_name)
        if not os.path.exists(results_directory):
            os.makedirs(results_directory)

    results_collector = ResultsCollector()
    result_sink_client = result_sink.TryInitClient()
    # Default to failure so that an exception from real_main() doesn't leave
    # rc unbound when the finally block below reads it.
    rc = 1
    try:
        rc = real_main(args.output_directory, results_collector,
                       args.size_path)
        isolated_script_output = {
            'valid': True,
            'failures': [test_name] if rc else [],
            'version': 'simplified',
        }
    finally:
        if results_directory:
            results_path = os.path.join(results_directory, 'test_results.json')
            with open(results_path, 'w') as output_file:
                json.dump(isolated_script_output, output_file)

            histogram_path = os.path.join(results_directory,
                                          'perf_results.json')
            # The converter rejects the raw results as malformed, so add the
            # fields it expects and rearrange the data before converting.
            updated_results = format_for_histograms_conversion(
                results_collector.results)
            with open(histogram_path, 'w') as f:
                json.dump(updated_results, f)
            histogram_result = convert_chart_json.ConvertChartJson(
                histogram_path)
            if histogram_result.returncode != 0:
                sys.stderr.write('chartjson conversion failed: %s\n' %
                                 histogram_result.stdout)
                rc = rc or histogram_result.returncode
            else:
                with open(histogram_path, 'w') as f:
                    f.write(histogram_result.stdout)
        if result_sink_client:
            status = result_types.PASS
            if not isolated_script_output['valid']:
                status = result_types.UNKNOWN
            elif isolated_script_output['failures']:
                status = result_types.FAIL
            result_sink_client.Post(test_name, status, None, None, None)

    return rc
Example 9
def main():
  build_utils.InitLogging('RESOURCE_SIZES_DEBUG')
  argparser = argparse.ArgumentParser(description='Print APK size metrics.')
  argparser.add_argument(
      '--min-pak-resource-size',
      type=int,
      default=20 * 1024,
      help='Minimum byte size of displayed pak resources.')
  argparser.add_argument(
      '--chromium-output-directory',
      dest='out_dir',
      type=os.path.realpath,
      help='Location of the build artifacts.')
  argparser.add_argument(
      '--chartjson',
      action='store_true',
      help='DEPRECATED. Use --output-format=chartjson '
      'instead.')
  argparser.add_argument(
      '--output-format',
      choices=['chartjson', 'histograms'],
      help='Output the results to a file in the given '
      'format instead of printing the results.')
  argparser.add_argument('--loadable_module', help='Obsolete (ignored).')

  # Accepted to conform to the isolated script interface, but ignored.
  argparser.add_argument(
      '--isolated-script-test-filter', help=argparse.SUPPRESS)
  argparser.add_argument(
      '--isolated-script-test-perf-output',
      type=os.path.realpath,
      help=argparse.SUPPRESS)

  output_group = argparser.add_mutually_exclusive_group()

  output_group.add_argument(
      '--output-dir', default='.', help='Directory to save chartjson to.')
  output_group.add_argument(
      '--output-file',
      help='Path to output .json (replaces --output-dir). Works only for '
      '--output-format=chartjson')
  output_group.add_argument(
      '--isolated-script-test-output',
      type=os.path.realpath,
      help='File to which results will be written in the '
      'simplified JSON output format.')

  argparser.add_argument('input', help='Path to .apk or .apks file to measure.')
  trichrome_group = argparser.add_argument_group(
      'Trichrome inputs',
      description='When specified, |input| is used only as the test suite name.')
  trichrome_group.add_argument(
      '--trichrome-chrome', help='Path to Trichrome Chrome .apks')
  trichrome_group.add_argument(
      '--trichrome-webview', help='Path to Trichrome WebView .apk(s)')
  trichrome_group.add_argument(
      '--trichrome-library', help='Path to Trichrome Library .apk')
  args = argparser.parse_args()

  args.out_dir = _ConfigOutDir(args.out_dir)
  devil_chromium.Initialize(output_directory=args.out_dir)

  # TODO(bsheedy): Remove this once uses of --chartjson have been removed.
  if args.chartjson:
    args.output_format = 'chartjson'

  result_sink_client = result_sink.TryInitClient()
  isolated_script_output = {'valid': False, 'failures': []}

  test_name = 'resource_sizes (%s)' % os.path.basename(args.input)

  if args.isolated_script_test_output:
    args.output_dir = os.path.join(
        os.path.dirname(args.isolated_script_test_output), test_name)
    if not os.path.exists(args.output_dir):
      os.makedirs(args.output_dir)

  try:
    _ResourceSizes(args)
    isolated_script_output = {
        'valid': True,
        'failures': [],
    }
  finally:
    if args.isolated_script_test_output:
      results_path = os.path.join(args.output_dir, 'test_results.json')
      with open(results_path, 'w') as output_file:
        json.dump(isolated_script_output, output_file)
      with open(args.isolated_script_test_output, 'w') as output_file:
        json.dump(isolated_script_output, output_file)
    if result_sink_client:
      status = result_types.PASS
      if not isolated_script_output['valid']:
        status = result_types.UNKNOWN
      elif isolated_script_output['failures']:
        status = result_types.FAIL
      result_sink_client.Post(test_name, status, None, None, None)
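
Examples 1, 2, 8, and 9 all repeat the same three-way mapping from the simplified JSON results to a ResultDB status. A small consolidation sketch, assuming the same result_types module used above (the helper name _status_from_results is invented):

def _status_from_results(valid, failures):
  # Invalid results map to UNKNOWN, any recorded failure to FAIL, and a
  # clean, valid run to PASS.
  if not valid:
    return result_types.UNKNOWN
  if failures:
    return result_types.FAIL
  return result_types.PASS

For instance, the tail of Example 9's finally block could then become:

    if result_sink_client:
      status = _status_from_results(isolated_script_output['valid'],
                                    isolated_script_output['failures'])
      result_sink_client.Post(test_name, status, None, None, None)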