# ===== Example 1 =====
    def RunCommand(self):
        """Command entry point for the rm command.

        Parses sub-options, expands the target URLs, and removes the matched
        objects (optionally all versions, optionally recursively).

        Raises:
          CommandException: if arguments are invalid or removal fails and
            -f (continue on error) was not specified.
        """
        # self.recursion_requested is initialized in command.py (so it can be
        # checked in parent class for all commands).
        self.continue_on_error = False
        self.read_args_from_stdin = False
        self.all_versions = False
        if self.sub_opts:
            for o, unused_a in self.sub_opts:
                if o == '-a':
                    self.all_versions = True
                elif o == '-f':
                    self.continue_on_error = True
                elif o == '-I':
                    self.read_args_from_stdin = True
                elif o == '-r' or o == '-R':
                    # Recursive removal implies removing all versions as well.
                    self.recursion_requested = True
                    self.all_versions = True

        if self.read_args_from_stdin:
            if self.args:
                raise CommandException(
                    'No arguments allowed with the -I flag.')
            url_strs = StdinIterator()
        else:
            if not self.args:
                raise CommandException(
                    'The rm command (without -I) expects at '
                    'least one URL.')
            url_strs = self.args

        # For a recursive removal, remember which top-level buckets were
        # targeted: their emptiness should not be treated as a failure (they
        # get deleted after their contents), so "No URLs matched" errors for
        # them are tolerated below.
        bucket_urls_to_delete = []
        bucket_strings_to_delete = []
        if self.recursion_requested:
            bucket_fields = ['id']
            for url_str in url_strs:
                url = StorageUrlFromString(url_str)
                if url.IsBucket() or url.IsProvider():
                    for blr in self.WildcardIterator(url_str).IterBuckets(
                            bucket_fields=bucket_fields):
                        bucket_urls_to_delete.append(blr.storage_url)
                        bucket_strings_to_delete.append(url_str)

        self.preconditions = PreconditionsFromHeaders(self.headers or {})

        # Used to track if any files failed to be removed.
        self.everything_removed_okay = True

        try:
            # Expand wildcards, dirs, buckets, and bucket subdirs in URLs.
            name_expansion_iterator = NameExpansionIterator(
                self.command_name,
                self.debug,
                self.logger,
                self.gsutil_api,
                url_strs,
                self.recursion_requested,
                project_id=self.project_id,
                all_versions=self.all_versions,
                continue_on_error=self.continue_on_error
                or self.parallel_operations)

            # Perform remove requests in parallel (-m) mode, if requested, using
            # configured number of parallel processes and threads. Otherwise,
            # perform requests with sequential function calls in current process.
            self.Apply(_RemoveFuncWrapper,
                       name_expansion_iterator,
                       _RemoveExceptionHandler,
                       fail_on_error=(not self.continue_on_error))

        # Assuming the bucket has versioning enabled, url's that don't map to
        # objects should throw an error even with all_versions, since the prior
        # round of deletes only sends objects to a history table.
        # This assumption that rm -a is only called for versioned buckets should be
        # corrected, but the fix is non-trivial.
        except CommandException as e:
            # Don't raise if there are buckets to delete -- it's valid to say:
            #   gsutil rm -r gs://some_bucket
            # if the bucket is empty.
            if not bucket_urls_to_delete and not self.continue_on_error:
                raise
            # Reset the failure count if we failed due to an empty bucket that we're
            # going to delete.
            msg = 'No URLs matched: '
            if msg in str(e):
                parts = str(e).split(msg)
                if len(parts) == 2 and parts[1] in bucket_strings_to_delete:
                    ResetFailureCount()
        # BUG FIX: 'except ServiceException, e:' is Python 2-only syntax and a
        # SyntaxError under Python 3 (the rest of this block already uses the
        # 'as' form). The bound exception was unused, so the binding is dropped.
        except ServiceException:
            if not self.continue_on_error:
                raise
# ===== Example 2 =====
    def RunCommand(self):
        """Command entry point for the test command.

        Runs the selected gsutil test suites, either sequentially or with
        parallel worker processes, optionally under coverage measurement.

        Returns:
          0 if all tests passed, 1 otherwise.

        Raises:
          CommandException: on invalid options, missing S3 credentials,
            missing coverage module, or un-importable test names.
        """
        failfast = False
        list_tests = False
        max_parallel_tests = _DEFAULT_TEST_PARALLEL_PROCESSES
        perform_coverage = False
        sequential_only = False
        if self.sub_opts:
            for o, a in self.sub_opts:
                if o == '-b':
                    tests.util.USE_MULTIREGIONAL_BUCKETS = True
                elif o == '-c':
                    perform_coverage = True
                elif o == '-f':
                    failfast = True
                elif o == '-l':
                    list_tests = True
                elif o == ('--' + _SEQUENTIAL_ISOLATION_FLAG):
                    # Called to isolate a single test in a separate process.
                    # Don't try to isolate it again (would lead to an infinite loop).
                    sequential_only = True
                elif o == '-p':
                    # BUG FIX: long() does not exist in Python 3 (and this
                    # block already uses Python 3 print() calls); int() handles
                    # arbitrarily large values on both versions.
                    max_parallel_tests = int(a)
                elif o == '-s':
                    if not tests.util.HAS_S3_CREDS:
                        raise CommandException(
                            'S3 tests require S3 credentials. Please '
                            'add appropriate credentials to your .boto '
                            'file and re-run.')
                    tests.util.RUN_S3_TESTS = True
                elif o == '-u':
                    tests.util.RUN_INTEGRATION_TESTS = False

        if perform_coverage and not coverage:
            raise CommandException(
                'Coverage has been requested but the coverage module was not found. '
                'You can install it with "pip install coverage".')

        if (tests.util.RUN_S3_TESTS
                and max_parallel_tests > _DEFAULT_S3_TEST_PARALLEL_PROCESSES):
            self.logger.warn(
                'Reducing parallel tests to %d due to S3 maximum bucket '
                'limitations.', _DEFAULT_S3_TEST_PARALLEL_PROCESSES)
            max_parallel_tests = _DEFAULT_S3_TEST_PARALLEL_PROCESSES

        test_names = sorted(GetTestNames())
        if list_tests and not self.args:
            print('Found %d test names:' % len(test_names))
            print(' ', '\n  '.join(sorted(test_names)))
            return 0

        # Set list of commands to test if supplied.
        if self.args:
            commands_to_test = []
            for name in self.args:
                if name in test_names or name.split('.')[0] in test_names:
                    commands_to_test.append('gslib.tests.test_%s' % name)
                else:
                    commands_to_test.append(name)
        else:
            commands_to_test = [
                'gslib.tests.test_%s' % name for name in test_names
            ]

        # Installs a ctrl-c handler that tries to cleanly tear down tests.
        unittest.installHandler()

        loader = unittest.TestLoader()

        if commands_to_test:
            suite = unittest.TestSuite()
            for command_name in commands_to_test:
                try:
                    suite_for_current_command = loader.loadTestsFromName(
                        command_name)
                    suite.addTests(suite_for_current_command)
                except (ImportError, AttributeError) as e:
                    msg = (
                        'Failed to import test code from file %s. TestLoader provided '
                        'this error:\n\n%s' % (command_name, str(e)))

                    # Try to give a better error message; by default, unittest swallows
                    # ImportErrors and only shows that an import failed, not why. E.g.:
                    # "'module' object has no attribute 'test_cp'
                    try:
                        __import__(command_name)
                    except Exception as e:
                        stack_trace = traceback.format_exc()
                        err = re.sub('\\n', '\n    ', stack_trace)
                        msg += '\n\nAdditional traceback:\n\n%s' % (err)

                    raise CommandException(msg)

        if list_tests:
            test_names = GetTestNamesFromSuites(suite)
            print('Found %d test names:' % len(test_names))
            print(' ', '\n  '.join(sorted(test_names)))
            return 0

        if logging.getLogger().getEffectiveLevel() <= logging.INFO:
            verbosity = 1
        else:
            verbosity = 2
            logging.disable(logging.ERROR)

        if perform_coverage:
            # We want to run coverage over the gslib module, but filter out the test
            # modules and any third-party code. We also filter out anything under the
            # temporary directory. Otherwise, the gsutil update test (which copies
            # code to the temporary directory) gets included in the output.
            coverage_controller = coverage.coverage(source=['gslib'],
                                                    omit=[
                                                        'gslib/third_party/*',
                                                        'gslib/tests/*',
                                                        tempfile.gettempdir() +
                                                        '*',
                                                    ])
            coverage_controller.erase()
            coverage_controller.start()

        num_parallel_failures = 0
        sequential_success = False

        (sequential_tests, isolated_tests, parallel_unit_tests,
         parallel_integration_tests) = (SplitParallelizableTestSuite(suite))

        # Since parallel integration tests are run in a separate process, they
        # won't get the override to tests.util, so skip them here.
        if not tests.util.RUN_INTEGRATION_TESTS:
            parallel_integration_tests = []

        logging.debug('Sequential tests to run: %s', sequential_tests)
        logging.debug('Isolated tests to run: %s', isolated_tests)
        logging.debug('Parallel unit tests to run: %s', parallel_unit_tests)
        logging.debug('Parallel integration tests to run: %s',
                      parallel_integration_tests)

        # If we're running an already-isolated test (spawned in isolation by a
        # previous test process), or we have no parallel tests to run,
        # just run sequentially. For now, unit tests are always run sequentially.
        run_tests_sequentially = (sequential_only
                                  or (len(parallel_integration_tests) <= 1
                                      and not isolated_tests))

        # Disable analytics for the duration of testing. This is set as an
        # environment variable so that the subprocesses will also not report.
        os.environ['GSUTIL_TEST_ANALYTICS'] = '1'

        if run_tests_sequentially:
            total_tests = suite.countTestCases()
            resultclass = MakeCustomTestResultClass(total_tests)

            runner = unittest.TextTestRunner(verbosity=verbosity,
                                             resultclass=resultclass,
                                             failfast=failfast)
            ret = runner.run(suite)
            sequential_success = ret.wasSuccessful()
        else:
            if max_parallel_tests == 1:
                # We can't take advantage of parallelism, though we may have tests that
                # need isolation.
                sequential_tests += parallel_integration_tests
                parallel_integration_tests = []

            sequential_start_time = time.time()
            # TODO: For now, run unit tests sequentially because they are fast.
            # We could potentially shave off several seconds of execution time
            # by executing them in parallel with the integration tests.
            if len(sequential_tests) + len(parallel_unit_tests):
                print('Running %d tests sequentially.' %
                      (len(sequential_tests) + len(parallel_unit_tests)))
                sequential_tests_to_run = sequential_tests + parallel_unit_tests
                suite = loader.loadTestsFromNames(
                    sorted(
                        [test_name for test_name in sequential_tests_to_run]))
                num_sequential_tests = suite.countTestCases()
                resultclass = MakeCustomTestResultClass(num_sequential_tests)
                runner = unittest.TextTestRunner(verbosity=verbosity,
                                                 resultclass=resultclass,
                                                 failfast=failfast)

                ret = runner.run(suite)
                sequential_success = ret.wasSuccessful()
                sequential_skipped = ret.skipped
            else:
                num_sequential_tests = 0
                sequential_success = True
            sequential_time_elapsed = time.time() - sequential_start_time

            # At this point, all tests get their own process so just treat the
            # isolated tests as parallel tests.
            parallel_integration_tests += isolated_tests
            num_parallel_tests = len(parallel_integration_tests)

            if not num_parallel_tests:
                pass
            else:
                # NOTE(review): this unconditionally resets sequential_skipped,
                # discarding ret.skipped collected above when sequential tests
                # also ran -- confirm whether that loss is intentional.
                sequential_skipped = []
                num_processes = min(max_parallel_tests, num_parallel_tests)
                if num_parallel_tests > 1 and max_parallel_tests > 1:
                    message = 'Running %d tests in parallel mode (%d processes).'
                    if num_processes > _DEFAULT_TEST_PARALLEL_PROCESSES:
                        message += (
                            ' Please be patient while your CPU is incinerated. '
                            'If your machine becomes unresponsive, consider reducing '
                            'the amount of parallel test processes by running '
                            '\'gsutil test -p <num_processes>\'.')
                    print(('\n'.join(
                        textwrap.wrap(message %
                                      (num_parallel_tests, num_processes)))))
                else:
                    print(
                        ('Running %d tests sequentially in isolated processes.'
                         % num_parallel_tests))
                (num_parallel_failures,
                 parallel_time_elapsed) = self.RunParallelTests(
                     parallel_integration_tests, max_parallel_tests,
                     coverage_controller.data_files.filename
                     if perform_coverage else None)
                self.PrintTestResults(num_sequential_tests, sequential_success,
                                      sequential_skipped,
                                      sequential_time_elapsed,
                                      num_parallel_tests,
                                      num_parallel_failures,
                                      parallel_time_elapsed)

        if perform_coverage:
            coverage_controller.stop()
            coverage_controller.combine()
            coverage_controller.save()
            print(('Coverage information was saved to: %s' %
                   coverage_controller.data_files.filename))

        # Re-enable analytics to report the test command.
        os.environ['GSUTIL_TEST_ANALYTICS'] = '0'

        if sequential_success and not num_parallel_failures:
            ResetFailureCount()
            return 0
        return 1
# ===== Example 3 =====
class RmCommand(Command):
    """Implementation of gsutil rm command."""

    # Command specification. See base class for documentation.
    command_spec = Command.CreateCommandSpec(
        'rm',
        command_name_aliases=['del', 'delete', 'remove'],
        usage_synopsis=_SYNOPSIS,
        min_args=0,
        max_args=NO_MAX,
        supported_sub_args='afIrR',
        file_url_ok=False,
        provider_url_ok=False,
        urls_start_arg=0,
        gs_api_support=[ApiSelector.XML, ApiSelector.JSON],
        gs_default_api=ApiSelector.JSON,
        argparse_arguments=[CommandArgument.MakeZeroOrMoreCloudURLsArgument()])
    # Help specification. See help_provider.py for documentation.
    help_spec = Command.HelpSpec(
        help_name='rm',
        help_name_aliases=['del', 'delete', 'remove'],
        help_type='command_help',
        help_one_line_summary='Remove objects',
        help_text=_DETAILED_HELP_TEXT,
        subcommand_help_text={},
    )

    def RunCommand(self):
        """Command entry point for the rm command.

        Removes matched objects (optionally all versions, optionally
        recursively), cleans up web-UI "_$folder$" placeholder objects, and
        finally deletes any buckets targeted by a recursive removal.

        Returns:
          0 on success.

        Raises:
          CommandException: if arguments are invalid or removal fails and
            -f (continue on error) was not specified.
        """
        # self.recursion_requested is initialized in command.py (so it can be
        # checked in parent class for all commands).
        self.continue_on_error = False
        self.read_args_from_stdin = False
        self.all_versions = False
        if self.sub_opts:
            for o, unused_a in self.sub_opts:
                if o == '-a':
                    self.all_versions = True
                elif o == '-f':
                    self.continue_on_error = True
                elif o == '-I':
                    self.read_args_from_stdin = True
                elif o == '-r' or o == '-R':
                    # Recursive removal implies removing all versions as well.
                    self.recursion_requested = True
                    self.all_versions = True

        if self.read_args_from_stdin:
            if self.args:
                raise CommandException(
                    'No arguments allowed with the -I flag.')
            url_strs = StdinIterator()
        else:
            if not self.args:
                raise CommandException(
                    'The rm command (without -I) expects at '
                    'least one URL.')
            url_strs = self.args

        # For a recursive removal, remember which top-level buckets were
        # targeted so they can be deleted after their contents, and so
        # "No URLs matched" errors for already-empty buckets are tolerated.
        bucket_urls_to_delete = []
        bucket_strings_to_delete = []
        if self.recursion_requested:
            bucket_fields = ['id']
            for url_str in url_strs:
                url = StorageUrlFromString(url_str)
                if url.IsBucket() or url.IsProvider():
                    for blr in self.WildcardIterator(url_str).IterBuckets(
                            bucket_fields=bucket_fields):
                        bucket_urls_to_delete.append(blr.storage_url)
                        bucket_strings_to_delete.append(url_str)

        self.preconditions = PreconditionsFromHeaders(self.headers or {})

        # Used to track if any files failed to be removed.
        self.everything_removed_okay = True

        try:
            # Expand wildcards, dirs, buckets, and bucket subdirs in URLs.
            name_expansion_iterator = NameExpansionIterator(
                self.command_name,
                self.debug,
                self.logger,
                self.gsutil_api,
                url_strs,
                self.recursion_requested,
                project_id=self.project_id,
                all_versions=self.all_versions,
                continue_on_error=self.continue_on_error
                or self.parallel_operations)

            # Perform remove requests in parallel (-m) mode, if requested, using
            # configured number of parallel processes and threads. Otherwise,
            # perform requests with sequential function calls in current process.
            self.Apply(_RemoveFuncWrapper,
                       name_expansion_iterator,
                       _RemoveExceptionHandler,
                       fail_on_error=(not self.continue_on_error))

        # Assuming the bucket has versioning enabled, url's that don't map to
        # objects should throw an error even with all_versions, since the prior
        # round of deletes only sends objects to a history table.
        # This assumption that rm -a is only called for versioned buckets should be
        # corrected, but the fix is non-trivial.
        except CommandException as e:
            # Don't raise if there are buckets to delete -- it's valid to say:
            #   gsutil rm -r gs://some_bucket
            # if the bucket is empty.
            if not bucket_urls_to_delete and not self.continue_on_error:
                raise
            # Reset the failure count if we failed due to an empty bucket that we're
            # going to delete.
            msg = 'No URLs matched: '
            if msg in str(e):
                parts = str(e).split(msg)
                if len(parts) == 2 and parts[1] in bucket_strings_to_delete:
                    ResetFailureCount()
        # BUG FIX: 'except ServiceException, e:' is Python 2-only syntax and a
        # SyntaxError under Python 3 (the rest of this method already uses the
        # 'as' form). The bound exception was unused, so the binding is dropped.
        except ServiceException:
            if not self.continue_on_error:
                raise

        if not self.everything_removed_okay and not self.continue_on_error:
            raise CommandException('Some files could not be removed.')

        # If this was a gsutil rm -r command covering any bucket subdirs,
        # remove any dir_$folder$ objects (which are created by various web UI
        # tools to simulate folders).
        if self.recursion_requested:
            had_previous_failures = GetFailureCount() > 0
            folder_object_wildcards = []
            for url_str in url_strs:
                url = StorageUrlFromString(url_str)
                if url.IsObject():
                    folder_object_wildcards.append('%s**_$folder$' % url_str)
            if folder_object_wildcards:
                self.continue_on_error = True
                try:
                    name_expansion_iterator = NameExpansionIterator(
                        self.command_name,
                        self.debug,
                        self.logger,
                        self.gsutil_api,
                        folder_object_wildcards,
                        self.recursion_requested,
                        project_id=self.project_id,
                        all_versions=self.all_versions)
                    # When we're removing folder objects, always continue on error
                    self.Apply(_RemoveFuncWrapper,
                               name_expansion_iterator,
                               _RemoveFoldersExceptionHandler,
                               fail_on_error=False)
                except CommandException as e:
                    # Ignore exception from name expansion due to an absent folder file.
                    if not e.reason.startswith('No URLs matched:'):
                        raise
                if not had_previous_failures:
                    ResetFailureCount()

        # Now that all data has been deleted, delete any bucket URLs.
        for url in bucket_urls_to_delete:
            self.logger.info('Removing %s...', url)

            @Retry(NotEmptyException, tries=3, timeout_secs=1)
            def BucketDeleteWithRetry():
                # Retried because object deletes may not yet be consistent
                # when the bucket delete is first attempted.
                self.gsutil_api.DeleteBucket(url.bucket_name,
                                             provider=url.scheme)

            BucketDeleteWithRetry()

        return 0
# ===== Example 4 =====
    def RunCommand(self):
        """Command entry point for the test command (Python 2 variant).

        NOTE: this block is Python 2-only code (print statements, long(),
        xrange()); it cannot run under Python 3 as written.

        Runs the selected gsutil test suites, either sequentially or -- when
        -m was given -- with parallel worker subprocesses, optionally under
        coverage measurement.

        Returns:
          0 if all tests passed, 1 otherwise.

        Raises:
          CommandException: on invalid options, missing modules/credentials,
            or un-importable test names.
        """
        if not unittest:
            raise CommandException(
                'On Python 2.6, the unittest2 module is required '
                'to run the gsutil tests.')

        # Parse sub-options into local flags / module-level test settings.
        failfast = False
        list_tests = False
        max_parallel_tests = DEFAULT_TEST_PARALLEL_PROCESSES
        perform_coverage = False
        if self.sub_opts:
            for o, a in self.sub_opts:
                if o == '-c':
                    perform_coverage = True
                elif o == '-f':
                    failfast = True
                elif o == '-l':
                    list_tests = True
                elif o == '-p':
                    max_parallel_tests = long(a)
                elif o == '-s':
                    if not tests.util.HAS_S3_CREDS:
                        raise CommandException(
                            'S3 tests require S3 credentials. Please '
                            'add appropriate credentials to your .boto '
                            'file and re-run.')
                    tests.util.RUN_S3_TESTS = True
                elif o == '-u':
                    tests.util.RUN_INTEGRATION_TESTS = False

        if perform_coverage and not coverage:
            raise CommandException(
                'Coverage has been requested but the coverage module was not found. '
                'You can install it with "pip install coverage".')

        if self.parallel_operations:
            if IS_WINDOWS:
                raise CommandException('-m test is not supported on Windows.')
            elif (tests.util.RUN_S3_TESTS
                  and max_parallel_tests > DEFAULT_S3_TEST_PARALLEL_PROCESSES):
                # S3 limits the number of buckets; cap parallelism accordingly.
                self.logger.warn(
                    'Reducing parallel tests to %d due to S3 maximum bucket '
                    'limitations.', DEFAULT_S3_TEST_PARALLEL_PROCESSES)
                max_parallel_tests = DEFAULT_S3_TEST_PARALLEL_PROCESSES

        # With -l and no explicit test args, just list every known test name.
        test_names = sorted(GetTestNames())
        if list_tests and not self.args:
            print 'Found %d test names:' % len(test_names)
            print ' ', '\n  '.join(sorted(test_names))
            return 0

        # Set list of commands to test if supplied.
        if self.args:
            commands_to_test = []
            for name in self.args:
                if name in test_names or name.split('.')[0] in test_names:
                    commands_to_test.append('gslib.tests.test_%s' % name)
                else:
                    commands_to_test.append(name)
        else:
            commands_to_test = [
                'gslib.tests.test_%s' % name for name in test_names
            ]

        # Installs a ctrl-c handler that tries to cleanly tear down tests.
        unittest.installHandler()

        loader = unittest.TestLoader()

        if commands_to_test:
            try:
                suite = loader.loadTestsFromNames(commands_to_test)
            except (ImportError, AttributeError) as e:
                raise CommandException('Invalid test argument name: %s' % e)

        # With -l and explicit args, list only the names in the loaded suite.
        if list_tests:
            test_names = GetTestNamesFromSuites(suite)
            print 'Found %d test names:' % len(test_names)
            print ' ', '\n  '.join(sorted(test_names))
            return 0

        # Quiet logging gets verbose test output; otherwise suppress errors.
        if logging.getLogger().getEffectiveLevel() <= logging.INFO:
            verbosity = 1
        else:
            verbosity = 2
            logging.disable(logging.ERROR)

        if perform_coverage:
            # We want to run coverage over the gslib module, but filter out the test
            # modules and any third-party code. We also filter out anything under the
            # temporary directory. Otherwise, the gsutil update test (which copies
            # code to the temporary directory) gets included in the output.
            coverage_controller = coverage.coverage(
                source=['gslib'],
                omit=[
                    'gslib/third_party/*', 'gslib/tests/*',
                    tempfile.gettempdir() + '*'
                ])
            coverage_controller.erase()
            coverage_controller.start()

        num_parallel_failures = 0
        if self.parallel_operations:
            sequential_tests, parallel_integration_tests, parallel_unit_tests = (
                SplitParallelizableTestSuite(suite))

            sequential_start_time = time.time()
            # TODO: For now, run unit tests sequentially because they are fast.
            # We could potentially shave off several seconds of execution time
            # by executing them in parallel with the integration tests.
            # Note that parallelism_framework unit tests cannot be run in a
            # subprocess.
            print 'Running %d tests sequentially.' % (len(sequential_tests) +
                                                      len(parallel_unit_tests))
            sequential_tests_to_run = sequential_tests + parallel_unit_tests
            suite = loader.loadTestsFromNames(
                sorted([test_name for test_name in sequential_tests_to_run]))
            num_sequential_tests = suite.countTestCases()
            resultclass = MakeCustomTestResultClass(num_sequential_tests)
            runner = unittest.TextTestRunner(verbosity=verbosity,
                                             resultclass=resultclass,
                                             failfast=failfast)
            ret = runner.run(suite)

            num_parallel_tests = len(parallel_integration_tests)
            max_processes = min(max_parallel_tests, num_parallel_tests)

            print('\n'.join(
                textwrap.wrap(
                    'Running %d integration tests in parallel mode (%d processes)! '
                    'Please be patient while your CPU is incinerated. '
                    'If your machine becomes unresponsive, consider reducing '
                    'the amount of parallel test processes by running '
                    '\'gsutil -m test -p <num_processes>\'.' %
                    (num_parallel_tests, max_processes))))
            # Bookkeeping for the worker-subprocess polling loop below.
            process_list = []
            process_done = []
            process_results = [
            ]  # Tuples of (name, return code, stdout, stderr)
            hang_detection_counter = 0
            completed_as_of_last_log = 0
            parallel_start_time = last_log_time = time.time()
            test_index = CreateTestProcesses(
                parallel_integration_tests, 0, process_list, process_done,
                max_parallel_tests, coverage_controller.data.filename
                if perform_coverage else None)
            # Poll workers until every parallel test has produced a result.
            while len(process_results) < num_parallel_tests:
                for proc_num in xrange(len(process_list)):
                    # Skip processes already harvested or still running
                    # (poll() is None while the subprocess is alive).
                    if process_done[proc_num] or process_list[proc_num].poll(
                    ) is None:
                        continue
                    process_done[proc_num] = True
                    stdout, stderr = process_list[proc_num].communicate()
                    # TODO: Differentiate test failures from errors.
                    if process_list[proc_num].returncode != 0:
                        num_parallel_failures += 1
                    process_results.append(
                        (parallel_integration_tests[proc_num],
                         process_list[proc_num].returncode, stdout, stderr))
                # Spawn more workers if tests remain unscheduled.
                if len(process_list) < num_parallel_tests:
                    test_index = CreateTestProcesses(
                        parallel_integration_tests, test_index, process_list,
                        process_done, max_parallel_tests,
                        coverage_controller.data.filename
                        if perform_coverage else None)
                if len(process_results) < num_parallel_tests:
                    # Log progress at most every 5 seconds; if no test has
                    # completed across several consecutive logs, report which
                    # tests are still running (possible hang).
                    if time.time() - last_log_time > 5:
                        print '%d/%d finished - %d failures' % (
                            len(process_results), num_parallel_tests,
                            num_parallel_failures)
                        if len(process_results) == completed_as_of_last_log:
                            hang_detection_counter += 1
                        else:
                            completed_as_of_last_log = len(process_results)
                            hang_detection_counter = 0
                        if hang_detection_counter > 4:
                            still_running = []
                            for proc_num in xrange(len(process_list)):
                                if not process_done[proc_num]:
                                    still_running.append(
                                        parallel_integration_tests[proc_num])
                            print 'Still running: %s' % still_running
                        last_log_time = time.time()
                    time.sleep(1)
            process_run_finish_time = time.time()
            # Dump stderr of every failed parallel test for diagnosis.
            if num_parallel_failures:
                for result in process_results:
                    if result[1] != 0:
                        new_stderr = result[3].split('\n')
                        print 'Results for failed test %s:' % result[0]
                        for line in new_stderr:
                            print line

            # TODO: Properly track test skips.
            print 'Parallel tests complete. Success: %s Fail: %s' % (
                num_parallel_tests - num_parallel_failures,
                num_parallel_failures)
            print(
                'Ran %d tests in %.3fs (%d sequential in %.3fs, %d parallel in %.3fs)'
                % (num_parallel_tests + num_sequential_tests,
                   float(process_run_finish_time - sequential_start_time),
                   num_sequential_tests,
                   float(parallel_start_time - sequential_start_time),
                   num_parallel_tests,
                   float(process_run_finish_time - parallel_start_time)))
            print
            if not num_parallel_failures and ret.wasSuccessful():
                print 'OK'
            else:
                if num_parallel_failures:
                    print 'FAILED (parallel tests)'
                if not ret.wasSuccessful():
                    print 'FAILED (sequential tests)'
        else:
            # Non-parallel mode: run the whole suite in this process.
            total_tests = suite.countTestCases()
            resultclass = MakeCustomTestResultClass(total_tests)

            runner = unittest.TextTestRunner(verbosity=verbosity,
                                             resultclass=resultclass,
                                             failfast=failfast)
            ret = runner.run(suite)

        if perform_coverage:
            coverage_controller.stop()
            coverage_controller.combine()
            coverage_controller.save()
            print('Coverage information was saved to: %s' %
                  coverage_controller.data.filename)

        if ret.wasSuccessful() and not num_parallel_failures:
            ResetFailureCount()
            return 0
        return 1