def run(self):
        """Does required setup before calling update_expectations().

        Do not override this function!
        """
        log_level = logging.DEBUG if self.options.verbose else logging.INFO
        configure_logging(logging_level=log_level, include_time=True)

        if not (self.options.android_product
                or self.options.update_android_expectations_only):
            assert not self.options.include_unexpected_pass, (
                'Command line argument --include-unexpected-pass is not '
                'supported in desktop mode.')
        self.patchset = self.options.patchset

        if (self.options.clean_up_test_expectations or
                self.options.clean_up_test_expectations_only):
            # Remove expectations for deleted tests and rename tests in
            # expectations for renamed tests.
            self.cleanup_test_expectations_files()

        if not self.options.clean_up_test_expectations_only:
            # Use try job results to update expectations and baselines
            self.update_expectations()

        return 0
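
A note on the helper that every example here calls: configure_logging comes from blinkpy (blinkpy/common/system/log_utils.py in the Chromium tree). For readers without a Chromium checkout, a minimal sketch of a compatible helper follows; the signature is inferred from the call sites in these examples, and the returned handler list is what example #18 relies on.

import logging
import sys


def configure_logging(logging_level=None, logger=None, stream=None,
                      include_time=True):
    # Sketch only: attach a single StreamHandler with an optional timestamp
    # prefix, and return the handlers so callers can detach them later.
    logger = logger or logging.getLogger()
    logger.setLevel(logging_level or logging.INFO)
    log_format = '%(message)s'
    if include_time:
        log_format = '%(asctime)s - ' + log_format
    handler = logging.StreamHandler(stream or sys.stderr)
    handler.setFormatter(logging.Formatter(log_format))
    logger.addHandler(handler)
    return [handler]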
Example #2
def main(argv, stderr, host=None):
    parser = optparse.OptionParser(option_list=platform_options(use_globs=True))
    parser.add_option('--json', help='Path to JSON output file')
    parser.add_option('--verbose', action='store_true', default=False,
                      help='log extra details that may be helpful when debugging')
    options, _ = parser.parse_args(argv)

    if not host:
        if options.platform and 'test' in options.platform:
            # It's a bit lame to import mocks into real code, but this allows the user
            # to run tests against the test platform interactively, which is useful for
            # debugging test failures.
            from blinkpy.common.host_mock import MockHost
            host = MockHost()
        else:
            host = Host()

    if options.verbose:
        configure_logging(logging_level=logging.DEBUG, stream=stderr)
        # Print full stdout/stderr when a command fails.
        host.executive.error_output_limit = None
    else:
        # PRESUBMIT.py relies on our output, so don't include timestamps.
        configure_logging(logging_level=logging.INFO, stream=stderr, include_time=False)

    try:
        exit_status = run_checks(host, options)
    except KeyboardInterrupt:
        exit_status = exit_codes.INTERRUPTED_EXIT_STATUS
    except Exception as error:  # pylint: disable=broad-except
        print('\n%s raised: %s' % (error.__class__.__name__, error),
              file=stderr)
        traceback.print_exc(file=stderr)
        exit_status = exit_codes.EXCEPTIONAL_EXIT_STATUS

    return exit_status
Example #3
    def run(self, args=None):
        """Main entry point to parse flags and execute the script."""
        parser = argparse.ArgumentParser(description=__doc__)
        parser.add_argument(
            "--metadata-output-dir",
            help="The directory to output the metadata files into.")
        parser.add_argument(
            "--checked-in-metadata-dir",
            help="Root directory of any checked-in WPT metadata files to use. "
            "If set, these files will take precedence over legacy expectations "
            "and baselines when both exist for a test.")
        parser.add_argument('-v',
                            '--verbose',
                            action='store_true',
                            help='More verbose logging.')
        args = parser.parse_args(args)

        log_level = logging.DEBUG if args.verbose else logging.INFO
        configure_logging(logging_level=log_level, include_time=True)

        self.metadata_output_dir = args.metadata_output_dir
        self.checked_in_metadata_dir = args.checked_in_metadata_dir
        self._build_metadata_and_write()

        return 0
Example #4
    def run(self, args=None):
        """Main entry point to parse flags and execute the script."""
        parser = argparse.ArgumentParser(description=__doc__)
        parser.add_argument(
            "--old-json-output-file-path",
            help="The JSON output file to be updated, generated by WPT.")
        parser.add_argument(
            "--new-json-output-dir",
            help="The directory to put the new JSON output file.")
        parser.add_argument("--new-json-output-filename",
                            help="The name of the new JSON output file.")
        parser.add_argument('-v',
                            '--verbose',
                            action='store_true',
                            help='More verbose logging.')
        args = parser.parse_args(args)

        log_level = logging.DEBUG if args.verbose else logging.INFO
        configure_logging(logging_level=log_level, include_time=True)

        self.old_json_output_file_path = args.old_json_output_file_path
        self.new_json_output_dir = args.new_json_output_dir
        self.new_json_output_filename = args.new_json_output_filename
        self._update_output_and_write()

        return 0
Example #5
    def main(self, argv=None):
        """Creates PRs for in-flight CLs and merges changes that land on master.

        Returns:
            A boolean: True if success, False if there were any patch failures.
        """
        options = self.parse_args(argv)

        self.dry_run = options.dry_run
        log_level = logging.DEBUG if options.verbose else logging.INFO
        configure_logging(logging_level=log_level, include_time=True)
        if options.verbose:
            # Print out the full output when executive.run_command fails.
            self.host.executive.error_output_limit = None

        credentials = read_credentials(self.host, options.credentials_json)
        if not (credentials.get('GH_USER') and credentials.get('GH_TOKEN')):
            _log.error('You must provide your GitHub credentials for this '
                       'script to work.')
            _log.error('See https://chromium.googlesource.com/chromium/src'
                       '/+/master/docs/testing/web_platform_tests.md'
                       '#GitHub-credentials for instructions on how to set '
                       'your credentials up.')
            return False

        self.wpt_github = self.wpt_github or WPTGitHub(
            self.host, credentials['GH_USER'], credentials['GH_TOKEN'])
        self.gerrit = self.gerrit or GerritAPI(
            self.host, credentials['GERRIT_USER'], credentials['GERRIT_TOKEN'])
        self.local_wpt = self.local_wpt or LocalWPT(self.host,
                                                    credentials['GH_TOKEN'])
        self.local_wpt.fetch()

        _log.info('Searching for exportable in-flight CLs.')
        # The Gerrit search API is slow and prone to failure, so we wrap it in
        # a try block so that landed commits can still be exported when the
        # query fails.
        try:
            open_gerrit_cls = self.gerrit.query_exportable_open_cls()
        except GerritError as e:
            _log.info(
                'In-flight CLs cannot be exported due to the following error:')
            _log.error(str(e))
            gerrit_error = True
        else:
            self.process_gerrit_cls(open_gerrit_cls)
            gerrit_error = False

        _log.info('Searching for exportable Chromium commits.')
        exportable_commits, git_errors = self.get_exportable_commits()
        self.process_chromium_commits(exportable_commits)
        if git_errors:
            _log.info(
                'Attention: The following errors have prevented some commits from being '
                'exported:')
            for error in git_errors:
                _log.error(error)

        return not (gerrit_error or git_errors)
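
The read_credentials call above is expected to yield four keys. A hypothetical credentials file for illustration, with the key names taken from the lookups in examples #5, #6, and #17 and placeholder values:

# Hypothetical contents of the file passed via --credentials-json.
EXAMPLE_CREDENTIALS = {
    'GH_USER': 'wpt-export-bot',
    'GH_TOKEN': '<github personal access token>',
    'GERRIT_USER': 'wpt-export-bot@chromium.org',
    'GERRIT_TOKEN': '<gerrit HTTP password>',
}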
Example #6
    def main(self, argv=None):
        """Closes all PRs that are abandoned in Gerrit."""
        options = self.parse_args(argv)
        log_level = logging.DEBUG if options.verbose else logging.INFO
        configure_logging(logging_level=log_level, include_time=True)
        credentials = read_credentials(self.host, options.credentials_json)
        gh_user = credentials.get('GH_USER')
        gh_token = credentials.get('GH_TOKEN')
        if not gh_user or not gh_token:
            _log.error('You have not set your GitHub credentials. This '
                       'script may fail with a network error when making '
                       'an API request to GitHub.')
            _log.error('See https://chromium.googlesource.com/chromium/src'
                       '/+/master/docs/testing/web_platform_tests.md'
                       '#GitHub-credentials for instructions on how to set '
                       'your credentials up.')
            return False

        gr_user = credentials['GERRIT_USER']
        gr_token = credentials['GERRIT_TOKEN']
        if not gr_user or not gr_token:
            _log.warning('You have not set your Gerrit credentials. This '
                         'script may fail with a network error when making '
                         'an API request to Gerrit.')

        self.wpt_github = self.wpt_github or WPTGitHub(self.host, gh_user,
                                                       gh_token)
        self.gerrit = self.gerrit or GerritAPI(self.host, gr_user, gr_token)
        pull_requests = self.retrieve_all_prs()
        for pull_request in pull_requests:
            if pull_request.state != 'open':
                continue
            change_id = self.wpt_github.extract_metadata(
                'Change-Id: ', pull_request.body)

            if not change_id:
                continue

            try:
                cl = self.gerrit.query_cl(change_id)
            except GerritError as e:
                _log.error('Could not query change_id %s: %s', change_id,
                           str(e))
                continue

            cl_status = cl.status
            if cl_status == 'ABANDONED':
                comment = 'Close this PR because the Chromium CL has been abandoned.'
                self.log_affected_pr_details(pull_request, comment)
                self.close_pr_and_delete_branch(pull_request.number, comment)
            elif cl_status == 'MERGED' and (not cl.is_exportable()):
                comment = 'Close this PR because the Chromium CL does not have exportable changes.'
                self.log_affected_pr_details(pull_request, comment)
                self.close_pr_and_delete_branch(pull_request.number, comment)

        return True
Example #7
def main(server_constructor,
         sleep_fn=None,
         argv=None,
         description=None,
         **kwargs):
    host = Host()
    sleep_fn = sleep_fn or (lambda: host.sleep(1))

    parser = optparse.OptionParser(description=description,
                                   formatter=RawTextHelpFormatter())
    parser.add_option('--output-dir',
                      type=str,
                      default=None,
                      help='output directory, for log files etc.')
    parser.add_option('-v',
                      '--verbose',
                      action='store_true',
                      help='print debug logs')
    for opt in configuration_options():
        parser.add_option(opt)
    options, _ = parser.parse_args(argv)

    configure_logging(
        logging_level=logging.DEBUG if options.verbose else logging.INFO,
        include_time=options.verbose)

    port_obj = host.port_factory.get(options=options)
    if not options.output_dir:
        options.output_dir = host.filesystem.join(
            port_obj.default_results_directory(), ARTIFACTS_SUB_DIR)

    # Create the output directory if it doesn't already exist.
    host.filesystem.maybe_make_directory(options.output_dir)

    def handler(signum, _):
        _log.debug('Received signal %d', signum)
        raise SystemExit

    signal.signal(signal.SIGINT, handler)
    signal.signal(signal.SIGTERM, handler)

    server = server_constructor(port_obj, options.output_dir, **kwargs)
    server.start()

    print('Press Ctrl-C or `kill {}` to stop the server'.format(os.getpid()))
    try:
        while True:
            sleep_fn()
            if not server.alive():
                raise ServerError('Server is no longer listening')
    except ServerError as e:
        _log.error(e)
    except (SystemExit, KeyboardInterrupt):
        _log.info('Exiting...')
    finally:
        server.stop()
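
The loop above only needs start(), stop(), and alive() from whatever server_constructor returns. A minimal stand-in satisfying that contract; everything other than those three methods is illustrative:

class DummyServer(object):
    """Minimal object accepted by main(); it just tracks liveness."""

    def __init__(self, port_obj, output_dir):
        self._alive = False

    def start(self):
        self._alive = True

    def alive(self):
        return self._alive

    def stop(self):
        self._alive = False

# main(DummyServer, description='Runs a do-nothing server.')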
Example #8
def main():
    # This is a hack to let us enable DEBUG logging as early as possible.
    # Note this can't be ternary as versioning.check_version()
    # hasn't run yet and this python might be older than 2.5.
    if set(["-v", "--verbose"]).intersection(set(sys.argv)):
        logging_level = logging.DEBUG
    else:
        logging_level = logging.INFO
    configure_logging(logging_level=logging_level)
    BlinkTool(os.path.abspath(__file__)).main()
Example #9
    def run(self, args=None):
        """Main entry point to parse flags and execute the script."""
        parser = argparse.ArgumentParser(description=__doc__)
        parser.add_argument(
            "--metadata-output-dir",
            help="The directory to output the metadata files into.")
        parser.add_argument(
            "--checked-in-metadata-dir",
            help="Root directory of any checked-in WPT metadata files to use. "
            "If set, these files will take precedence over legacy expectations "
            "and baselines when both exist for a test.")
        parser.add_argument('-v',
                            '--verbose',
                            action='store_true',
                            help='More verbose logging.')
        parser.add_argument(
            "--process-baselines",
            action="store_true",
            default=True,
            dest="process_baselines",
            help="Whether to translate baseline (-expected.txt) files into WPT "
            "metadata files. This translation is lossy and results in any "
            "subtest being accepted by wptrunner.")
        parser.add_argument("--no-process-baselines",
                            action="store_false",
                            dest="process_baselines")
        parser.add_argument(
            "--handle-annotations",
            action="store_true",
            default=True,
            dest="handle_annotations",
            help="Whether to handle annotations in expectations files. These "
            "are trailing comments that give additional details for how "
            "to translate an expectation into WPT metadata.")
        parser.add_argument("--no-handle-annotations",
                            action="store_false",
                            dest="handle_annotations")
        parser.add_argument(
            "--use-subtest-results",
            action="store_true",
            help="Treat subtest failures as test-level failures")
        args = parser.parse_args(args)

        log_level = logging.DEBUG if args.verbose else logging.INFO
        configure_logging(logging_level=log_level, include_time=True)

        self.metadata_output_dir = args.metadata_output_dir
        self.checked_in_metadata_dir = args.checked_in_metadata_dir
        self.process_baselines = args.process_baselines
        self.handle_annotations = args.handle_annotations
        self.use_subtest_results = args.use_subtest_results
        self._build_metadata_and_write()

        return 0
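
A side note on the paired flags above: --process-baselines/--no-process-baselines emulate a negatable boolean with two declarations. On Python 3.9+, argparse can express the same pair in one declaration; a sketch rather than a suggested change:

import argparse

parser = argparse.ArgumentParser()
# One declaration generates both --process-baselines and
# --no-process-baselines, defaulting to True as above.
parser.add_argument('--process-baselines',
                    action=argparse.BooleanOptionalAction,
                    default=True)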
Example #10
    def run(self):
        """Does required setup before calling update_expectations().
        Do not override this function!
        """
        log_level = logging.DEBUG if self.options.verbose else logging.INFO
        configure_logging(logging_level=log_level, include_time=True)

        self.patchset = self.options.patchset
        self.update_expectations()

        return 0
Example #11
    def run(self, args=None):
        parser = argparse.ArgumentParser(description=__doc__)
        parser.add_argument('--patchset', default=None,
                            help='Patchset number to fetch new baselines from.')
        parser.add_argument('-v', '--verbose', action='store_true', help='More verbose logging.')
        args = parser.parse_args(args)

        log_level = logging.DEBUG if args.verbose else logging.INFO
        configure_logging(logging_level=log_level, include_time=True)

        self.patchset = args.patchset
        self.update_expectations()

        return 0
Example #12
    def run(self, args=None):
        parser = argparse.ArgumentParser(description=__doc__)
        parser.add_argument('-v',
                            '--verbose',
                            action='store_true',
                            help='More verbose logging.')
        args = parser.parse_args(args)

        log_level = logging.DEBUG if args.verbose else logging.INFO
        configure_logging(logging_level=log_level, include_time=True)

        self.update_expectations()

        return 0
Example #13
    def run(self, args=None):
        """Main entry point to parse flags and execute the script."""
        parser = argparse.ArgumentParser(description=__doc__)
        parser.add_argument("--metadata-output-dir",
                            help="The directory to output the metadata files into.")
        parser.add_argument('-v', '--verbose', action='store_true', help='More verbose logging.')
        args = parser.parse_args(args)

        log_level = logging.DEBUG if args.verbose else logging.INFO
        configure_logging(logging_level=log_level, include_time=True)

        self.metadata_output_dir = args.metadata_output_dir
        self._build_metadata_and_write()

        return 0
Example #14
    def __init__(self, host, argv):
        self.host = host
        self.port = host.port_factory.get()
        self.filesystem = host.filesystem
        self.executive = host.executive

        self.options = self.parse_args(argv[1:])
        self._path = argv[0]

        if self.options.verbose >= 2:
            log_level = logging.DEBUG
        elif self.options.verbose == 1:
            log_level = logging.INFO
        else:
            log_level = logging.WARNING
        configure_logging(logging_level=log_level, include_time=False)
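
This example assumes parse_args produced an integer verbosity (0, 1, 2+) rather than a boolean. A sketch of a parser that yields such a count, using argparse's count action:

import argparse

parser = argparse.ArgumentParser()
# -v gives 1 (INFO), -vv gives 2 (DEBUG); the default of 0 keeps WARNING.
parser.add_argument('-v', '--verbose', action='count', default=0)
assert parser.parse_args(['-vv']).verbose == 2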
Example #15
    def run(self):
        """Does required setup before calling update_expectations().

        Do not override this function!
        """
        log_level = logging.DEBUG if self.options.verbose else logging.INFO
        configure_logging(logging_level=log_level, include_time=True)

        self.patchset = self.options.patchset

        # Remove expectations for deleted tests and rename tests in expectations
        # for renamed tests.
        self.cleanup_test_expectations_files()

        if not self.options.cleanup_test_expectations_only:
            # Use try job results to update expectations and baselines
            self.update_expectations()

        return 0
Example #16
def main(server_constructor,
         input_fn=None,
         argv=None,
         description=None,
         **kwargs):
    input_fn = input_fn or input

    parser = optparse.OptionParser(description=description,
                                   formatter=RawTextHelpFormatter())
    parser.add_option('--output-dir',
                      type=str,
                      default=None,
                      help='output directory, for log files etc.')
    parser.add_option('-v',
                      '--verbose',
                      action='store_true',
                      help='print more information, including port numbers')
    for opt in configuration_options():
        parser.add_option(opt)
    options, _ = parser.parse_args(argv)

    configure_logging(
        logging_level=logging.DEBUG if options.verbose else logging.INFO,
        include_time=options.verbose)

    host = Host()
    port_obj = host.port_factory.get(options=options)
    if not options.output_dir:
        options.output_dir = port_obj.host.filesystem.join(
            port_obj.default_results_directory(), ARTIFACTS_SUB_DIR)

    # Create the output directory if it doesn't already exist.
    port_obj.host.filesystem.maybe_make_directory(options.output_dir)

    server = server_constructor(port_obj, options.output_dir, **kwargs)
    server.start()
    try:
        _ = input_fn('Hit any key to stop the server and exit.')
    except (KeyboardInterrupt, EOFError):
        pass

    server.stop()
Example #17
    def main(self, argv=None):
        """Closes all PRs that are abandoned in Gerrit."""
        options = self.parse_args(argv)
        log_level = logging.DEBUG if options.verbose else logging.INFO
        configure_logging(logging_level=log_level, include_time=True)
        credentials = read_credentials(self.host, options.credentials_json)
        gh_user = credentials.get('GH_USER')
        gh_token = credentials.get('GH_TOKEN')
        if not gh_user or not gh_token:
            _log.error('You have not set your GitHub credentials. This '
                       'script may fail with a network error when making '
                       'an API request to GitHub.')
            _log.error('See https://chromium.googlesource.com/chromium/src'
                       '/+/master/docs/testing/web_platform_tests.md'
                       '#GitHub-credentials for instructions on how to set '
                       'your credentials up.')
            return False

        gr_user = credentials['GERRIT_USER']
        gr_token = credentials['GERRIT_TOKEN']
        if not gr_user or not gr_token:
            _log.warning('You have not set your Gerrit credentials. This '
                         'script may fail with a network error when making '
                         'an API request to Gerrit.')

        self.wpt_github = self.wpt_github or WPTGitHub(self.host, gh_user, gh_token)
        self.gerrit = self.gerrit or GerritAPI(self.host, gr_user, gr_token)
        pull_requests = self.retrieve_all_prs()
        for pull_request in pull_requests:
            if pull_request.state != 'open':
                continue
            change_id = self.wpt_github.extract_metadata('Change-Id: ', pull_request.body)
            if not change_id:
                continue
            cl_status = self.gerrit.query_cl(change_id).status
            if cl_status == 'ABANDONED':
                _log.info('https://github.com/web-platform-tests/wpt/pull/%s', pull_request.number)
                _log.info(self.wpt_github.extract_metadata('Reviewed-on: ', pull_request.body))
                self.close_abandoned_pr(pull_request)
        return True
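
extract_metadata pulls a trailer such as 'Change-Id: ' out of the PR body. The real helper belongs to WPTGitHub; the idea can be sketched in a few lines:

def extract_metadata(tag, commit_body):
    # Sketch: return the remainder of the first line starting with `tag`
    # (e.g. 'Change-Id: '), or None when no line matches.
    for line in commit_body.splitlines():
        if line.startswith(tag):
            return line[len(tag):].strip()
    return None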
Example #18
    def setUp(self):
        log_stream = TestLogStream(self)

        # Use a logger other than the root logger or one prefixed with
        # "blinkpy." so as not to conflict with run_blinkpy_tests.py logging.
        logger = logging.getLogger('unittest')

        # Configure the test logger not to pass messages along to the
        # root logger.  This prevents test messages from being
        # propagated to loggers used by run_blinkpy_tests.py logging (e.g.
        # the root logger).
        logger.propagate = False

        logging_level = self._logging_level()
        self._handlers = configure_logging(logging_level=logging_level,
                                           logger=logger,
                                           stream=log_stream,
                                           include_time=False)
        self._log = logger
        self._log_stream = log_stream
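
configure_logging returns the handlers it installed, which is why setUp keeps them. A matching tearDown sketch that detaches them so later tests start with a clean 'unittest' logger:

    def tearDown(self):
        # Remove the handlers installed in setUp so logging state does
        # not leak between tests.
        for handler in self._handlers:
            self._log.removeHandler(handler)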
Example #19
def main(argv):

    parser = argparse.ArgumentParser()
    parser.description = """\
Merges sharded web test results into a single output directory.
"""
    parser.epilog = """\

If a post merge script is given, it will be run on the resulting merged output
directory. The script will be given the arguments plus
'--results_dir <output_directory>'.
"""

    parser.add_argument('-v',
                        '--verbose',
                        action='store_true',
                        help='Output information about merging progress.')

    parser.add_argument(
        '--results-json-override-value',
        nargs=2,
        metavar=('KEY', 'VALUE'),
        default=[],
        action='append',
        help='Override the value of a key in the results JSON file '
        '(e.g. --results-json-override-value layout_test_dirs /tmp/output).')
    parser.add_argument(
        '--results-json-allow-unknown-if-matching',
        action='store_true',
        default=False,
        help='Allow unknown values in the result.json file as long as the '
        'values match across all shards.')

    parser.add_argument('--output-directory',
                        help='Directory to create the merged results in.')
    parser.add_argument(
        '--allow-existing-output-directory',
        action='store_true',
        default=False,
        help='Allow merging results into a directory which already exists.')
    parser.add_argument(
        '--remove-existing-layout-test-results',
        action='store_true',
        default=False,
        help='Remove existing layout test results from the output directory.')
    parser.add_argument('--input-directories',
                        nargs='+',
                        help='Directories to merge the results from.')

    # Swarming Isolated Merge Script API
    # script.py \
    #     --build-properties /s/build.json \
    #     --output-json /tmp/output.json \
    #     --task-output-dir /path/to/task/output/dir \
    #     shard0/output.json \
    #     shard1/output.json
    parser.add_argument(
        '-o',
        '--output-json',
        help='(Swarming Isolated Merge Script API) Output JSON file to create.'
    )
    parser.add_argument(
        '--build-properties',
        help=
        '(Swarming Isolated Merge Script API) Build property JSON file provided by recipes.'
    )
    parser.add_argument(
        '--task-output-dir',
        help=
        '(Swarming Isolated Merge Script API) Directory containing all swarming task results.'
    )
    parser.add_argument(
        '--results-json-override-with-build-property',
        nargs=2,
        metavar=('RESULT_JSON_KEY', 'BUILD_PROPERTY_KEY'),
        default=[],
        action='append',
        help='Override the value of a key in the results JSON file with the '
        'value of a build property (RESULT_JSON_KEY BUILD_PROPERTY_KEY).')
    parser.add_argument(
        '--summary-json',
        help=
        '(Swarming Isolated Merge Script API) Summary of shard state running '
        'on swarming (output of the swarming.py collect '
        '--task-summary-json=XXX command).')

    # Script to run after merging the directories together. Normally used with archive_layout_test_results.py
    # scripts/slave/chromium/archive_layout_test_results.py \
    #     --results-dir /b/rr/tmpIcChUS/w/layout-test-results \
    #     --build-dir /b/rr/tmpIcChUS/w/src/out \
    #     --build-number 3665 \
    #     --builder-name 'WebKit Linux - RandomOrder' \
    #     --gs-bucket gs://chromium-layout-test-archives \
    #     --staging-dir /b/c/chrome_staging \
    #     --slave-utils-gsutil-py-path /b/rr/tmpIcChUS/rw/scripts/slave/.recipe_deps/depot_tools/gsutil.py
    # in dir /b/rr/tmpIcChUS/w
    parser.add_argument(
        '--post-merge-script',
        nargs='*',
        help='Script to call after the results have been merged.')

    # The positional arguments depend on whether we are using the isolated
    # merge script API mode or not.
    parser.add_argument('positional',
                        nargs='*',
                        help='output.json from shards.')

    args = parser.parse_args(argv)
    if args.verbose:
        logging_level = logging.DEBUG
    else:
        logging_level = logging.INFO
    configure_logging(logging_level=logging_level)

    # Map the isolate arguments back to our output / input arguments.
    if args.output_json:
        logging.info('Running with isolated arguments')
        assert args.positional

        # TODO(tansell): Once removed everywhere, these lines can be removed.
        # For now we just check nobody is supply arguments we didn't expect.
        if args.results_json_override_with_build_property:
            for result_key, build_prop_key in args.results_json_override_with_build_property:
                assert (result_key, build_prop_key
                        ) in RESULTS_JSON_VALUE_OVERRIDE_WITH_BUILD_PROPERTY, (
                            "%s not in %s" %
                            (result_key,
                             RESULTS_JSON_VALUE_OVERRIDE_WITH_BUILD_PROPERTY))

        if not args.output_directory:
            args.output_directory = os.getcwd()
            args.allow_existing_output_directory = True
            args.remove_existing_layout_test_results = True

        assert not args.input_directories
        args.input_directories = [os.path.dirname(f) for f in args.positional]
        args.positional = []

    # Allow skipping the --input-directories bit, for example,
    #   merge_web_test_results.py -o outputdir shard0 shard1 shard2
    if args.positional and not args.input_directories:
        args.input_directories = args.positional

    if not args.output_directory:
        args.output_directory = tempfile.mkdtemp(
            suffix='_merged_web_test_results')
        args.allow_existing_output_directory = True

    assert args.output_directory
    assert args.input_directories

    results_json_value_overrides = {}
    if args.build_properties:
        build_properties = json.loads(args.build_properties)

        for result_key, build_prop_key in RESULTS_JSON_VALUE_OVERRIDE_WITH_BUILD_PROPERTY:
            if build_prop_key not in build_properties:
                logging.warning(
                    'Required build property key "%s" was not found!',
                    build_prop_key)
                continue
            results_json_value_overrides[result_key] = build_properties[
                build_prop_key]
        logging.debug('results_json_value_overrides: %r',
                      results_json_value_overrides)

    merger = WebTestDirMerger(
        results_json_value_overrides=results_json_value_overrides,
        results_json_allow_unknown_if_matching=args.
        results_json_allow_unknown_if_matching)

    ensure_empty_dir(FileSystem(),
                     args.output_directory,
                     allow_existing=args.allow_existing_output_directory,
                     remove_existing=args.remove_existing_layout_test_results)

    merger.merge(args.output_directory, args.input_directories)

    merged_output_json = os.path.join(args.output_directory, 'output.json')
    if os.path.exists(merged_output_json) and args.output_json:
        # process summary_json to mark missing shards.
        mark_missing_shards(args.summary_json, args.input_directories,
                            merged_output_json)
        logging.debug('Copying output.json from %s to %s', merged_output_json,
                      args.output_json)
        shutil.copyfile(merged_output_json, args.output_json)

    if args.post_merge_script:
        logging.debug('Changing directory to %s', args.output_directory)
        os.chdir(args.output_directory)

        post_script = list(args.post_merge_script)
        post_script.extend(['--results-dir', args.output_directory])

        logging.info('Running post merge script %r', post_script)
        os.execlp(post_script[0], *post_script)
Example #20
    def main(self, argv=None):
        # TODO(robertma): Test this method! Split it to make it easier to test
        # if necessary.

        options = self.parse_args(argv)

        self.verbose = options.verbose
        log_level = logging.DEBUG if self.verbose else logging.INFO
        configure_logging(logging_level=log_level, include_time=True)

        # Having the full output when executive.run_command fails is useful when
        # investigating a failed import, as all we have are logs.
        self.executive.error_output_limit = None

        if options.auto_update and options.auto_upload:
            _log.error(
                '--auto-upload and --auto-update cannot be used together.')
            return 1

        if not self.checkout_is_okay():
            return 1

        credentials = read_credentials(self.host, options.credentials_json)
        gh_user = credentials.get('GH_USER')
        gh_token = credentials.get('GH_TOKEN')
        if not gh_user or not gh_token:
            _log.warning('You have not set your GitHub credentials. This '
                         'script may fail with a network error when making '
                         'an API request to GitHub.')
            _log.warning('See https://chromium.googlesource.com/chromium/src'
                         '/+/master/docs/testing/web_platform_tests.md'
                         '#GitHub-credentials for instructions on how to set '
                         'your credentials up.')
        self.wpt_github = self.wpt_github or WPTGitHub(self.host, gh_user,
                                                       gh_token)
        self.git_cl = GitCL(
            self.host, auth_refresh_token_json=options.auth_refresh_token_json)

        _log.debug('Noting the current Chromium revision.')
        chromium_revision = self.chromium_git.latest_git_commit()

        # Instantiate Git after local_wpt.fetch() to make sure the path exists.
        local_wpt = LocalWPT(self.host, gh_token=gh_token)
        local_wpt.fetch()
        self.wpt_git = self.host.git(local_wpt.path)

        if options.revision is not None:
            _log.info('Checking out %s', options.revision)
            self.wpt_git.run(['checkout', options.revision])

        _log.debug('Noting the revision we are importing.')
        self.wpt_revision = self.wpt_git.latest_git_commit()
        self.last_wpt_revision = self._get_last_imported_wpt_revision()
        import_commit = 'wpt@%s' % self.wpt_revision

        _log.info('Importing %s to Chromium %s', import_commit,
                  chromium_revision)

        if options.ignore_exportable_commits:
            commit_message = self._commit_message(chromium_revision,
                                                  import_commit)
        else:
            commits = self.apply_exportable_commits_locally(local_wpt)
            if commits is None:
                _log.error('Could not apply some exportable commits cleanly.')
                _log.error('Aborting import to prevent clobbering commits.')
                return 1
            commit_message = self._commit_message(
                chromium_revision,
                import_commit,
                locally_applied_commits=commits)

        self._clear_out_dest_path()

        _log.info('Copying the tests from the temp repo to the destination.')
        test_copier = TestCopier(self.host, local_wpt.path)
        test_copier.do_import()

        # TODO(robertma): Implement `add --all` in Git (it is different from `commit --all`).
        self.chromium_git.run(['add', '--all', self.dest_path])

        # Remove expectations for tests that were deleted and rename tests
        # in expectations for renamed tests.
        self._expectations_updater.cleanup_test_expectations_files()

        self._generate_manifest()

        # TODO(crbug.com/800570 robertma): Re-enable it once we fix the bug.
        # self._delete_orphaned_baselines()

        if not self.chromium_git.has_working_directory_changes():
            _log.info('Done: no changes to import.')
            return 0

        if self._only_wpt_manifest_changed():
            _log.info('Only manifest was updated; skipping the import.')
            return 0

        self._commit_changes(commit_message)
        _log.info('Changes imported and committed.')

        if not options.auto_upload and not options.auto_update:
            return 0

        self._upload_cl()
        _log.info('Issue: %s', self.git_cl.run(['issue']).strip())

        if not self.update_expectations_for_cl():
            return 1

        if not options.auto_update:
            return 0

        if not self.run_commit_queue_for_cl():
            return 1

        if not self.send_notifications(local_wpt, options.auto_file_bugs,
                                       options.monorail_auth_json):
            return 1

        return 0
Example #22
        help='log extra details that may be helpful when debugging')

    options = parser.parse_args()
    env = os.environ

    total_shards = 1
    shard_index = 0
    if 'GTEST_TOTAL_SHARDS' in env:
        total_shards = int(env['GTEST_TOTAL_SHARDS'])
    if 'GTEST_SHARD_INDEX' in env:
        shard_index = int(env['GTEST_SHARD_INDEX'])
    test_shard = TestShard(total_shards, shard_index)

    test_results = []
    log_level = logging.DEBUG if options.verbose else logging.INFO
    configure_logging(logging_level=log_level, include_time=True)

    host = Host()
    port = host.port_factory.get()
    path_finder = PathFinder(host.filesystem)

    # Starts WPT Serve to serve the WPT WebDriver test content.
    port.start_wptserve()

    # WebDriverExpectations stores skipped and failed WebDriver tests.
    expectations = parse_webdriver_expectations(host, port)
    skipped_tests = preprocess_skipped_tests(test_results, expectations,
                                             path_finder)

    options.chromedriver = util.GetAbsolutePathOfUserPath(options.chromedriver)
    if (not os.path.exists(options.chromedriver)