Example #1
    def _print_counts(self, counters_by_email):
        counters = sorted(counters_by_email.items(),
                          key=lambda counter: counter[1]['count'])
        counters = sorted(counters,
                          key=lambda counter: counter[1]['latest_name'])

        for author_email, counter in counters:
            if author_email != counter['latest_email']:
                continue
            author_name = counter['latest_name']
            patch_count = counter['count']
            counter['names'] = counter['names'] - set([author_name])
            counter['emails'] = counter['emails'] - set([author_email])

            alias_list = []
            for alias in counter['names']:
                alias_list.append(alias)
            for alias in counter['emails']:
                alias_list.append(alias)
            if alias_list:
                print("CONTRIBUTOR: %s (%s) has %s %s" %
                      (author_name, author_email,
                       string_utils.pluralize(patch_count,
                                              'reviewed patch',
                                              plural='reviewed patches'),
                       "(aliases: " + ", ".join(alias_list) + ")"))
            else:
                print("CONTRIBUTOR: %s (%s) has %s" %
                      (author_name, author_email,
                       string_utils.pluralize(patch_count,
                                              'reviewed patch',
                                              plural='reviewed patches')))
        return
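For context, `_print_counts` reads a handful of keys from each value in `counters_by_email`. A hypothetical input illustrating that shape (the keys are inferred from the code above; the concrete names, addresses, and counts are invented for illustration):

# Hypothetical input for _print_counts, inferred from the fields the method
# reads ('count', 'latest_name', 'latest_email', 'names', 'emails');
# the values below are invented for illustration only.
counters_by_email = {
    'alice@example.com': {
        'count': 3,
        'latest_name': 'Alice Example',
        'latest_email': 'alice@example.com',
        'names': {'Alice Example', 'A. Example'},
        'emails': {'alice@example.com', 'alice@old.example.org'},
    },
}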
Example #2
    def _print_one_line_summary(self, total, expected, unexpected):
        incomplete = total - expected - unexpected
        incomplete_str = ''
        if incomplete:
            self._print_default("")
            incomplete_str = " (%d didn't run)" % incomplete

        if self._options.verbose or self._options.debug_rwt_logging or unexpected:
            self.writeln("")

        summary = ''
        if unexpected == 0:
            if expected == total:
                if expected > 1:
                    summary = "All %d tests ran as expected." % expected
                else:
                    summary = "The test ran as expected."
            else:
                summary = "%s ran as expected%s." % (pluralize(
                    expected, "test"), incomplete_str)
        else:
            summary = "%s ran as expected, %d didn't%s:" % (pluralize(
                expected, "test"), unexpected, incomplete_str)

        self._print_quiet(summary)
        self._print_quiet("")
Example #3
 def check_arguments_and_execute(self, options, args, tool=None):
     if len(args) < len(self.required_arguments):
         _log.error(
             "%s required, %s provided.  Provided: %s  Required: %s\nSee '%s help %s' for usage."
             % (pluralize(len(self.required_arguments), "argument"),
                pluralize(len(args), "argument"), "'%s'" % " ".join(args),
                " ".join(self.required_arguments), tool.name(), self.name))
         return 1
     return self.execute(options, args, tool) or 0
Example #4
 def print_found(self, num_all_test_files, num_to_run, repeat_each,
                 iterations):
     found_str = 'Found %s; running %d' % (pluralize(
         num_all_test_files, "test"), num_to_run)
     if repeat_each * iterations > 1:
         found_str += ' (%s each: --repeat-each=%d --iterations=%d)' % (
             pluralize(repeat_each * iterations,
                       "time"), repeat_each, iterations)
     found_str += ', skipping %d' % (num_all_test_files - num_to_run)
     self._print_default(found_str + '.')
Example #5
    def print_workers_and_shards(self, num_workers, num_shards):
        driver_name = self._port.driver_name()

        if num_workers == 1:
            self._print_default('Running 1 {}.'.format(driver_name))
            self._print_debug('({}).'.format(pluralize(num_shards, "shard")))
        else:
            self._print_default('Running {} in parallel.'.format(
                pluralize(num_workers, driver_name)))
            self._print_debug('({} shards).'.format(num_shards))
        self._print_default('')
Example #6
    def execute(self, options, args, tool):
        self._prepare_to_process(options, args, tool)
        patches = self._fetch_list_of_patches_to_process(options, args, tool)

        # It's nice to print out total statistics.
        bugs_to_patches = self._collect_patches_by_bug(patches)
        _log.info("Processing %s from %s." %
                  (pluralize(len(patches), 'patch', plural='patches'),
                   pluralize(len(bugs_to_patches), "bug")))

        for patch in patches:
            self._process_patch(patch, options, args, tool)
Example #7
 def _fetch_list_of_patches_to_process(self, options, args, tool):
     all_patches = []
     for bug_id in args:
         patches = tool.bugs.fetch_bug(bug_id).reviewed_patches()
         _log.info("%s found on bug %s." % (pluralize(
             len(patches), 'reviewed patch',
             plural='reviewed patches'), bug_id))
         all_patches += patches
     if not all_patches:
         _log.info(
             "No reviewed patches found, looking for unreviewed patches.")
         for bug_id in args:
             patches = tool.bugs.fetch_bug(bug_id).patches()
             _log.info("%s found on bug %s." % (pluralize(
                 len(patches), 'patch', plural='patches'), bug_id))
             all_patches += patches
     return all_patches
Example #8
 def print_result(self, run_time):
     write = self.meter.writeln
     write('Ran %s in %.3fs' %
           (pluralize(self.num_started, "test"), run_time))
     if self.num_failures or self.num_errors:
         write('FAILED (failures=%d, errors=%d)\n' %
               (self.num_failures, self.num_errors))
     else:
         write('\nOK\n')
Example #9
 def sleep(cls, seconds):
     difference = cls.difference()
     if difference is not None and seconds >= difference:
         log.error(
             'Request to sleep {} exceeded the current timeout threshold'.
             format(string_utils.pluralize(seconds, 'second')))
         current = cls.current()
         current.triggered = True
         cls.bind()
         current.handler(Timeout.SIGALRM, None)
     return ORIGINAL_SLEEP(seconds)
Example #10
 def run(self, state):
     if not self._options.obsolete_patches:
         return
     bug_id = state["bug_id"]
     patches = self._tool.bugs.fetch_bug(bug_id).patches()
     if not patches:
         return
     _log.info("Obsoleting %s on bug %s" % (pluralize(
         len(patches), 'old patch', plural='old patches'), bug_id))
     for patch in patches:
         self._tool.bugs.obsolete_attachment(patch.id())
Example #11
 def test_pluralize(self):
     self.assertEqual(string_utils.pluralize(0, 'second'), '0 seconds')
     self.assertEqual(string_utils.pluralize(1, 'second'), '1 second')
     self.assertEqual(string_utils.pluralize(2, 'second'), '2 seconds')
     self.assertEqual(string_utils.pluralize(0, 'mouse', 'mice'), '0 mice')
     self.assertEqual(string_utils.pluralize(1, 'mouse', 'mice'), '1 mouse')
     self.assertEqual(string_utils.pluralize(2, 'mouse', 'mice'), '2 mice')
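The tests above effectively pin down the contract of `string_utils.pluralize`: a count, a singular noun, and an optional irregular plural (also accepted as the `plural=` keyword in the other examples). A minimal sketch consistent with those assertions, not the actual WebKit implementation, could look like this:

# Minimal pluralize() sketch that satisfies the assertions in Example #11;
# the real string_utils.pluralize may differ in details.
def pluralize(count, singular, plural=None):
    if count == 1:
        return '1 %s' % singular
    return '%d %s' % (count, plural if plural is not None else singular + 's')

For instance, pluralize(2, 'patch', plural='patches') would yield '2 patches', matching the call sites in Examples #6, #7, and #10.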
Example #12
    def execute(self, options, args, tool):
        commit_ids = tool.scm().commit_ids_from_commitish_arguments(args)
        if len(commit_ids) > 10:  # We could lower this limit, 10 is too many for one bug as-is.
            _log.error(
                "webkit-patch does not support attaching %s at once.  Are you sure you passed the right commit range?"
                % (string_utils.pluralize(
                    len(commit_ids), 'patch', plural='patches')))
            sys.exit(1)

        have_obsoleted_patches = set()
        for commit_id in commit_ids:
            commit_message = tool.scm().commit_message_for_local_commit(
                commit_id)

            # Prefer --bug-id=, then a bug url in the commit message, then a bug url in the entire commit diff (i.e. ChangeLogs).
            bug_id = options.bug_id or parse_bug_id_from_changelog(
                commit_message.message()) or parse_bug_id_from_changelog(
                    tool.scm().create_patch(git_commit=commit_id))
            if not bug_id:
                _log.info(
                    "Skipping %s: No bug id found in commit or specified with --bug-id."
                    % commit_id)
                continue

            if options.obsolete_patches and bug_id not in have_obsoleted_patches:
                state = {"bug_id": bug_id}
                steps.ObsoletePatches(tool, options).run(state)
                have_obsoleted_patches.add(bug_id)

            diff = tool.scm().create_patch(git_commit=commit_id)
            description = options.description or commit_message.description(
                lstrip=True, strip_url=True)
            comment_text = self._comment_text_for_commit(
                options, commit_message, tool, commit_id)
            attachment_id = tool.bugs.add_patch_to_bug(
                bug_id,
                diff,
                description,
                comment_text,
                mark_for_review=options.review,
                mark_for_commit_queue=options.request_commit)

            # We only need to submit --no-review patches to EWS as patches posted for review are
            # automatically submitted to EWS by EWSFeeder.
            if not options.review and options.ews:
                state = {'attachment_ids': [attachment_id]}
                steps.SubmitToEWS(tool, options).run(state)
Example #13
    def _fetch_list_of_patches_to_process(self, options, args, tool):
        all_patches = []
        for url in args:
            bug_id = urls.parse_bug_id(url)
            if bug_id:
                patches = tool.bugs.fetch_bug(bug_id).patches()
                _log.info("%s found on bug %s." % (pluralize(
                    len(patches), 'patch', plural='patches'), bug_id))
                all_patches += patches

            attachment_id = urls.parse_attachment_id(url)
            if attachment_id:
                # fetch_attachment() returns a single attachment, so append it
                # rather than extending the list with it.
                all_patches.append(tool.bugs.fetch_attachment(attachment_id))

        return all_patches
Example #14
 def _builder_to_explain(self):
     builder_statuses = self._tool.buildbot.builder_statuses()
     red_statuses = [
         status for status in builder_statuses if not status["is_green"]
     ]
     print("%s failing" % (pluralize(len(red_statuses), "builder")))
     builder_choices = [status["name"] for status in red_statuses]
     # We could offer an "All" choice here.
     chosen_name = self._tool.user.prompt_with_list(
         "Which builder to diagnose:", builder_choices)
     # FIXME: prompt_with_list should really take a set of objects and a set of names and then return the object.
     for status in red_statuses:
         if status["name"] == chosen_name:
             return (self._tool.buildbot.builder_with_name(chosen_name),
                     status["built_revision"])
Example #15
 def execute(self, options, args, tool):
     builder_statuses = tool.buildbot.builder_statuses()
     longest_builder_name = max(
         map(len, map(lambda builder: builder["name"], builder_statuses)))
     failing_builders = 0
     for builder_status in builder_statuses:
         # If the builder is green, print OK, exit.
         if builder_status["is_green"]:
             continue
         self._print_blame_information_for_builder(
             builder_status, name_width=longest_builder_name)
         failing_builders += 1
     if failing_builders:
         print("%s of %s are failing" %
               (failing_builders, pluralize(len(builder_statuses),
                                            "builder")))
     else:
         print("All builders are passing!")
Example #16
 def _lookup_bug_for_flaky_test(self, flaky_test):
     bugs = self._tool.bugs.queries.fetch_bugs_matching_search(
         search_string=flaky_test)
     if not bugs:
         return None
     # Match any bugs which are from known bots or the email this bot is using.
     allowed_emails = self._bot_emails | set([self._bugzilla_email])
     bugs = list(
         filter(lambda bug: bug.reporter_email() in allowed_emails, bugs))
     if not bugs:
         return None
     if len(bugs) > 1:
         # FIXME: There are probably heuristics we could use for finding
         # the right bug instead of the first, like open vs. closed.
         _log.warn(
             "Found %s %s matching '%s' filed by a bot, using the first." %
             (string_utils.pluralize(
                 len(bugs), "bug"), [bug.id() for bug in bugs], flaky_test))
     return bugs[0]
Example #17
    def _run_test_subset(self,
                         test_inputs,  # type: List[TestInput]
                         device_type,  # type: Optional[DeviceType]
                         ):
        try:
            enabled_pixel_tests_in_retry = False
            initial_results = self._run_tests(test_inputs, self._options.repeat_each, self._options.iterations, int(self._options.child_processes), retrying=False, device_type=device_type)

            tests_to_retry = self._tests_to_retry(initial_results, include_crashes=self._port.should_retry_crashes())
            # Don't retry failures when interrupted by user or failures limit exception.
            retry_failures = self._options.retry_failures and not (initial_results.interrupted or initial_results.keyboard_interrupted)
            if retry_failures and tests_to_retry:
                enabled_pixel_tests_in_retry = self._force_pixel_tests_if_needed()
                if enabled_pixel_tests_in_retry:
                    retry_test_inputs = [self._test_input_for_file(test_input.test, device_type=device_type)
                                         for test_input in test_inputs
                                         if test_input.test.test_path in tests_to_retry]
                else:
                    retry_test_inputs = [test_input
                                         for test_input in test_inputs
                                         if test_input.test.test_path in tests_to_retry]

                _log.info('')
                _log.info("Retrying %s ..." % pluralize(len(tests_to_retry), "unexpected failure"))
                _log.info('')
                retry_results = self._run_tests(retry_test_inputs,
                                                repeat_each=1,
                                                iterations=1,
                                                num_workers=1,
                                                retrying=True,
                                                device_type=device_type)

                if enabled_pixel_tests_in_retry:
                    self._options.pixel_tests = False
            else:
                retry_results = None
        finally:
            self._clean_up_run()

        return (initial_results, retry_results, enabled_pixel_tests_in_retry)
Example #18
    def run(self, args):
        num_failed_uploads = 0

        device_type_list = self._port.supported_device_types()
        try:
            tests_to_run_by_device, aggregate_tests_to_skip = self._collect_tests(args, device_type_list)
        except IOError:
            # This is raised if --test-list doesn't exist
            return test_run_results.RunDetails(exit_code=-1)

        aggregate_tests_to_run = set()  # type: Set[Test]
        for v in tests_to_run_by_device.values():
            aggregate_tests_to_run.update(v)

        skipped_tests_by_path = defaultdict(set)
        for test in aggregate_tests_to_skip:
            skipped_tests_by_path[test.test_path].add(test)

        # If a test is marked skipped but was explicitly requested, run it anyway.
        if self._options.skipped != 'always':
            for arg in args:
                if arg in skipped_tests_by_path:
                    tests = skipped_tests_by_path[arg]
                    tests_to_run_by_device[device_type_list[0]].extend(tests)
                    aggregate_tests_to_run |= tests
                    aggregate_tests_to_skip -= tests
                    del skipped_tests_by_path[arg]

        aggregate_tests = aggregate_tests_to_run | aggregate_tests_to_skip

        self._printer.print_found(len(aggregate_tests),
                                  len(aggregate_tests_to_run),
                                  self._options.repeat_each,
                                  self._options.iterations)
        start_time = time.time()

        # Check to see if all tests we are running are skipped.
        if aggregate_tests == aggregate_tests_to_skip:
            # XXX: this is currently identical to the following if, which likely isn't intended
            _log.error("All tests skipped.")
            return test_run_results.RunDetails(exit_code=0, skipped_all_tests=True)

        # Check to make sure we have no tests to run that are not skipped.
        if not aggregate_tests_to_run:
            _log.critical('No tests to run.')
            return test_run_results.RunDetails(exit_code=-1)

        self._printer.write_update("Checking build ...")
        if not self._port.check_build():
            _log.error("Build check failed")
            return test_run_results.RunDetails(exit_code=-1)

        if self._options.clobber_old_results:
            self._clobber_old_results()

        # Create the output directory if it doesn't already exist.
        self._port.host.filesystem.maybe_make_directory(self._results_directory)

        needs_http = any(test.needs_http_server for tests in itervalues(tests_to_run_by_device) for test in tests)
        needs_web_platform_test_server = any(test.needs_wpt_server for tests in itervalues(tests_to_run_by_device) for test in tests)
        needs_websockets = any(test.needs_websocket_server for tests in itervalues(tests_to_run_by_device) for test in tests)
        self._runner = LayoutTestRunner(self._options, self._port, self._printer, self._results_directory,
                                        needs_http=needs_http, needs_web_platform_test_server=needs_web_platform_test_server, needs_websockets=needs_websockets)

        initial_results = None
        retry_results = None
        enabled_pixel_tests_in_retry = False

        max_child_processes_for_run = 1
        child_processes_option_value = self._options.child_processes
        uploads = []

        for device_type in device_type_list:
            self._options.child_processes = min(self._port.max_child_processes(device_type=device_type), int(child_processes_option_value or self._port.default_child_processes(device_type=device_type)))

            _log.info('')
            if not self._options.child_processes:
                _log.info('Skipping {} because {} is not available'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), str(device_type)))
                _log.info('')
                continue

            max_child_processes_for_run = max(self._options.child_processes, max_child_processes_for_run)

            self._printer.print_baseline_search_path(device_type=device_type)

            _log.info(u'Running {}{}'.format(pluralize(len(tests_to_run_by_device[device_type]), 'test'), u' for {}'.format(device_type) if device_type else ''))
            _log.info('')
            start_time_for_device = time.time()
            if not tests_to_run_by_device[device_type]:
                continue

            test_inputs = [self._test_input_for_file(test, device_type=device_type)
                           for test in tests_to_run_by_device[device_type]]

            if not self._set_up_run(test_inputs, device_type=device_type):
                return test_run_results.RunDetails(exit_code=-1)

            configuration = self._port.configuration_for_upload(self._port.target_host(0))
            if not configuration.get('flavor', None):  # The --result-report-flavor argument should override wk1/wk2
                configuration['flavor'] = 'wk2' if self._options.webkit_test_runner else 'wk1'
            temp_initial_results, temp_retry_results, temp_enabled_pixel_tests_in_retry = self._run_test_subset(test_inputs, device_type=device_type)

            skipped_results = TestRunResults(self._expectations[device_type], len(aggregate_tests_to_skip))
            for skipped_test in set(aggregate_tests_to_skip):
                skipped_result = test_results.TestResult(skipped_test.test_path)
                skipped_result.type = test_expectations.SKIP
                skipped_results.add(skipped_result, expected=True)
            temp_initial_results = temp_initial_results.merge(skipped_results)

            if self._options.report_urls:
                self._printer.writeln('\n')
                self._printer.write_update('Preparing upload data ...')

                upload = Upload(
                    suite=self._options.suite or 'layout-tests',
                    configuration=configuration,
                    details=Upload.create_details(options=self._options),
                    commits=self._port.commits_for_upload(),
                    timestamp=start_time,
                    run_stats=Upload.create_run_stats(
                        start_time=start_time_for_device,
                        end_time=time.time(),
                        tests_skipped=temp_initial_results.remaining + temp_initial_results.expected_skips,
                    ),
                    results=self._results_to_upload_json_trie(self._expectations[device_type], temp_initial_results),
                )
                for hostname in self._options.report_urls:
                    self._printer.write_update('Uploading to {} ...'.format(hostname))
                    if not upload.upload(hostname, log_line_func=self._printer.writeln):
                        num_failed_uploads += 1
                    else:
                        uploads.append(upload)
                self._printer.writeln('Uploads completed!')

            initial_results = initial_results.merge(temp_initial_results) if initial_results else temp_initial_results
            retry_results = retry_results.merge(temp_retry_results) if retry_results else temp_retry_results
            enabled_pixel_tests_in_retry |= temp_enabled_pixel_tests_in_retry

            if (initial_results and (initial_results.interrupted or initial_results.keyboard_interrupted)) or \
                    (retry_results and (retry_results.interrupted or retry_results.keyboard_interrupted)):
                break

        # Used for final logging, max_child_processes_for_run is most relevant here.
        self._options.child_processes = max_child_processes_for_run

        self._runner.stop_servers()

        end_time = time.time()
        result = self._end_test_run(start_time, end_time, initial_results, retry_results, enabled_pixel_tests_in_retry)

        if self._options.report_urls and uploads:
            self._printer.writeln('\n')
            self._printer.write_update('Preparing to upload test archive ...')

            with self._filesystem.mkdtemp() as temp:
                archive = self._filesystem.join(temp, 'test-archive')
                shutil.make_archive(archive, 'zip', self._results_directory)

                for upload in uploads:
                    for hostname in self._options.report_urls:
                        self._printer.write_update('Uploading archive to {} ...'.format(hostname))
                        if not upload.upload_archive(hostname, self._filesystem.open_binary_file_for_reading(archive + '.zip'), log_line_func=self._printer.writeln):
                            num_failed_uploads += 1

        if num_failed_uploads:
            result.exit_code = -1
        return result
Example #19
    def run_tests(self,
                  expectations,
                  test_inputs,
                  num_workers,
                  retrying,
                  device_type=None):
        self._expectations = expectations
        self._test_inputs = list(test_inputs)

        self._retrying = retrying

        # FIXME: rename all variables to test_run_results or some such ...
        run_results = TestRunResults(self._expectations, len(test_inputs))
        self._current_run_results = run_results
        self.printer.num_tests = len(test_inputs)
        self.printer.num_started = 0

        if not retrying:
            self.printer.print_expected(
                run_results,
                self._expectations.model().get_tests_with_result_type)

        self.printer.write_update('Sharding tests ...')
        all_shards = self._sharder.shard_tests(
            test_inputs, int(self._options.child_processes),
            self._options.fully_parallel)

        num_workers = min(num_workers, len(all_shards))
        self.printer.print_workers_and_shards(num_workers, len(all_shards))

        if self._options.dry_run:
            return run_results

        self.printer.write_update('Starting {} ...'.format(
            pluralize(num_workers, "worker")))

        devices = None
        if getattr(self._port, 'DEVICE_MANAGER', None):
            devices = dict(
                available_devices=self._port.DEVICE_MANAGER.AVAILABLE_DEVICES,
                initialized_devices=self._port.DEVICE_MANAGER.INITIALIZED_DEVICES,
            )

        try:
            LayoutTestRunner.instance = self
            with TaskPool(
                    workers=num_workers,
                    setup=setup_shard,
                    setupkwargs=dict(
                        port=self._port,
                        devices=devices,
                        results_directory=self._results_directory,
                        retrying=self._retrying,
                    ),
                    teardown=teardown_shard,
            ) as pool:
                for shard in all_shards:
                    pool.do(
                        run_shard,
                        shard,
                        callback=lambda value: self._annotate_results_with_additional_failures(value),
                    )
                pool.wait()

        except TestRunInterruptedException as e:
            _log.warning(e.reason)
            run_results.interrupted = True
        except KeyboardInterrupt:
            self.printer.flush()
            self.printer.writeln('Interrupted, exiting ...')
            run_results.keyboard_interrupted = True
        except Exception as e:
            _log.debug('{}("{}") raised, exiting'.format(
                e.__class__.__name__, str(e)))
            raise
        finally:
            LayoutTestRunner.instance = None

        return run_results
Example #20
    def main(cls, args, repository, identifier_template=None, **kwargs):
        if not repository.path:
            sys.stderr.write(
                'Cannot canonicalize commits on a remote repository\n')
            return 1
        if not repository.is_git:
            sys.stderr.write(
                'Commits can only be canonicalized on a Git repository\n')
            return 1

        branch = repository.branch
        if not branch:
            sys.stderr.write('Failed to determine current branch\n')
            return -1

        num_commits_to_canonicalize = args.number
        if not num_commits_to_canonicalize:
            result = run([
                repository.executable(),
                'rev-list',
                '--count',
                '--no-merges',
                '{remote}/{branch}..{branch}'.format(remote=args.remote,
                                                     branch=branch),
            ],
                         capture_output=True,
                         cwd=repository.root_path)
            if result.returncode:
                sys.stderr.write('Failed to find local commits\n')
                return -1
            num_commits_to_canonicalize = int(result.stdout.rstrip())
        if num_commits_to_canonicalize <= 0:
            print('No local commits to be edited')
            return 0
        log.warning('{} to be edited...'.format(
            string_utils.pluralize(num_commits_to_canonicalize, 'commit')))

        base = repository.find('{}~{}'.format(branch,
                                              num_commits_to_canonicalize))
        log.info('Base commit is {} (ref {})'.format(base, base.hash))

        log.debug(
            'Saving contributors to temp file to be picked up by child processes'
        )
        contributors = os.path.join(tempfile.gettempdir(),
                                    '{}-contributors.json'.format(os.getpid()))
        try:
            with open(contributors, 'w') as file:
                repository.contributors.save(file)

            message_filter = [
                '--msg-filter',
                "{} {} '{}'".format(
                    sys.executable,
                    os.path.join(os.path.dirname(__file__), 'message.py'),
                    identifier_template or 'Identifier: {}',
                ),
            ] if args.identifier else []

            with open(os.devnull, 'w') as devnull:
                subprocess.check_call(
                    [
                        repository.executable(),
                        'filter-branch',
                        '-f',
                        '--env-filter',
                        '''{overwrite_message}
committerOutput=$({python} {committer_py} {contributor_json})
KEY=''
VALUE=''
for word in $committerOutput; do
    if [[ $word == GIT_* ]] ; then
        if [[ $KEY == GIT_* ]] ; then
            {setting_message}
            printf -v $KEY "${{VALUE::$((${{#VALUE}} - 1))}}"
            KEY=''
            VALUE=''
        fi
    fi
    if [[ "$KEY" == "" ]] ; then
        KEY="$word"
    else
        VALUE="$VALUE$word "
    fi
done
if [[ $KEY == GIT_* ]] ; then
    {setting_message}
    printf -v $KEY "${{VALUE::$((${{#VALUE}} - 1))}}"
fi'''.format(
                            overwrite_message='' if log.level > logging.INFO
                            else 'echo "Overwriting $GIT_COMMIT"',
                            python=sys.executable,
                            committer_py=os.path.join(
                                os.path.dirname(__file__), 'committer.py'),
                            contributor_json=contributors,
                            setting_message='' if log.level > logging.DEBUG
                            else 'echo "    $KEY=$VALUE"',
                        ),
                    ] + message_filter + ['{}...{}'.format(branch, base.hash)],
                    cwd=repository.root_path,
                    env={
                        'FILTER_BRANCH_SQUELCH_WARNING': '1',
                        'PYTHONPATH': ':'.join(sys.path)
                    },
                    stdout=devnull if log.level > logging.WARNING else None,
                    stderr=devnull if log.level > logging.WARNING else None,
                )

        except subprocess.CalledProcessError:
            sys.stderr.write('Failed to modify local commit messages\n')
            return -1

        finally:
            os.remove(contributors)

        print('{} successfully canonicalized!'.format(
            string_utils.pluralize(num_commits_to_canonicalize, 'commit')))

        return 0