Example #1
    def RunCommand(self):
        """Command entry point for the test command."""
        failfast = False
        list_tests = False
        max_parallel_tests = _DEFAULT_TEST_PARALLEL_PROCESSES
        perform_coverage = False
        sequential_only = False
        if self.sub_opts:
            for o, a in self.sub_opts:
                if o == '-b':
                    tests.util.USE_MULTIREGIONAL_BUCKETS = True
                elif o == '-c':
                    perform_coverage = True
                elif o == '-f':
                    failfast = True
                elif o == '-l':
                    list_tests = True
                elif o == ('--' + _SEQUENTIAL_ISOLATION_FLAG):
                    # Called to isolate a single test in a separate process.
                    # Don't try to isolate it again (would lead to an infinite loop).
                    sequential_only = True
                elif o == '-p':
                    max_parallel_tests = int(a)
                elif o == '-s':
                    if not tests.util.HAS_S3_CREDS:
                        raise CommandException(
                            'S3 tests require S3 credentials. Please '
                            'add appropriate credentials to your .boto '
                            'file and re-run.')
                    tests.util.RUN_S3_TESTS = True
                elif o == '-u':
                    tests.util.RUN_INTEGRATION_TESTS = False

        if perform_coverage and not coverage:
            raise CommandException(
                'Coverage has been requested but the coverage module was not found. '
                'You can install it with "pip install coverage".')

        if (tests.util.RUN_S3_TESTS
                and max_parallel_tests > _DEFAULT_S3_TEST_PARALLEL_PROCESSES):
            self.logger.warn(
                'Reducing parallel tests to %d due to S3 maximum bucket '
                'limitations.', _DEFAULT_S3_TEST_PARALLEL_PROCESSES)
            max_parallel_tests = _DEFAULT_S3_TEST_PARALLEL_PROCESSES

        test_names = sorted(GetTestNames())
        if list_tests and not self.args:
            print('Found %d test names:' % len(test_names))
            print(' ', '\n  '.join(sorted(test_names)))
            return 0

        # Set list of commands to test if supplied.
        if self.args:
            commands_to_test = []
            for name in self.args:
                if name in test_names or name.split('.')[0] in test_names:
                    commands_to_test.append('gslib.tests.test_%s' % name)
                else:
                    commands_to_test.append(name)
        else:
            commands_to_test = [
                'gslib.tests.test_%s' % name for name in test_names
            ]

        # Installs a ctrl-c handler that tries to cleanly tear down tests.
        unittest.installHandler()

        loader = unittest.TestLoader()

        if commands_to_test:
            suite = unittest.TestSuite()
            for command_name in commands_to_test:
                try:
                    suite_for_current_command = loader.loadTestsFromName(
                        command_name)
                    suite.addTests(suite_for_current_command)
                except (ImportError, AttributeError) as e:
                    msg = (
                        'Failed to import test code from file %s. TestLoader provided '
                        'this error:\n\n%s' % (command_name, str(e)))

                    # Try to give a better error message; by default, unittest swallows
                    # ImportErrors and only shows that an import failed, not why. E.g.:
                    # "'module' object has no attribute 'test_cp'
                    try:
                        __import__(command_name)
                    except Exception as e:
                        stack_trace = traceback.format_exc()
                        err = re.sub('\\n', '\n    ', stack_trace)
                        msg += '\n\nAdditional traceback:\n\n%s' % (err)

                    raise CommandException(msg)

        if list_tests:
            test_names = GetTestNamesFromSuites(suite)
            print('Found %d test names:' % len(test_names))
            print(' ', '\n  '.join(sorted(test_names)))
            return 0

        if logging.getLogger().getEffectiveLevel() <= logging.INFO:
            verbosity = 1
        else:
            verbosity = 2
            logging.disable(logging.ERROR)

        if perform_coverage:
            # We want to run coverage over the gslib module, but filter out the test
            # modules and any third-party code. We also filter out anything under the
            # temporary directory. Otherwise, the gsutil update test (which copies
            # code to the temporary directory) gets included in the output.
            coverage_controller = coverage.coverage(source=['gslib'],
                                                    omit=[
                                                        'gslib/third_party/*',
                                                        'gslib/tests/*',
                                                        tempfile.gettempdir() +
                                                        '*',
                                                    ])
            coverage_controller.erase()
            coverage_controller.start()

        num_parallel_failures = 0
        sequential_success = False

        (sequential_tests, isolated_tests, parallel_unit_tests,
         parallel_integration_tests) = (SplitParallelizableTestSuite(suite))

        # Since parallel integration tests are run in a separate process, they
        # won't get the override to tests.util, so skip them here.
        if not tests.util.RUN_INTEGRATION_TESTS:
            parallel_integration_tests = []

        logging.debug('Sequential tests to run: %s', sequential_tests)
        logging.debug('Isolated tests to run: %s', isolated_tests)
        logging.debug('Parallel unit tests to run: %s', parallel_unit_tests)
        logging.debug('Parallel integration tests to run: %s',
                      parallel_integration_tests)

        # If we're running an already-isolated test (spawned in isolation by a
        # previous test process), or we have no parallel tests to run,
        # just run sequentially. For now, unit tests are always run sequentially.
        run_tests_sequentially = (sequential_only
                                  or (len(parallel_integration_tests) <= 1
                                      and not isolated_tests))

        # Disable analytics for the duration of testing. This is set as an
        # environment variable so that the subprocesses will also not report.
        os.environ['GSUTIL_TEST_ANALYTICS'] = '1'

        if run_tests_sequentially:
            total_tests = suite.countTestCases()
            resultclass = MakeCustomTestResultClass(total_tests)

            runner = unittest.TextTestRunner(verbosity=verbosity,
                                             resultclass=resultclass,
                                             failfast=failfast)
            ret = runner.run(suite)
            sequential_success = ret.wasSuccessful()
        else:
            if max_parallel_tests == 1:
                # We can't take advantage of parallelism, though we may have tests that
                # need isolation.
                sequential_tests += parallel_integration_tests
                parallel_integration_tests = []

            sequential_start_time = time.time()
            # TODO: For now, run unit tests sequentially because they are fast.
            # We could potentially shave off several seconds of execution time
            # by executing them in parallel with the integration tests.
            if len(sequential_tests) + len(parallel_unit_tests):
                print('Running %d tests sequentially.' %
                      (len(sequential_tests) + len(parallel_unit_tests)))
                sequential_tests_to_run = sequential_tests + parallel_unit_tests
                suite = loader.loadTestsFromNames(
                    sorted(sequential_tests_to_run))
                num_sequential_tests = suite.countTestCases()
                resultclass = MakeCustomTestResultClass(num_sequential_tests)
                runner = unittest.TextTestRunner(verbosity=verbosity,
                                                 resultclass=resultclass,
                                                 failfast=failfast)

                ret = runner.run(suite)
                sequential_success = ret.wasSuccessful()
                sequential_skipped = ret.skipped
            else:
                num_sequential_tests = 0
                sequential_success = True
                sequential_skipped = []
            sequential_time_elapsed = time.time() - sequential_start_time

            # At this point, all tests get their own process so just treat the
            # isolated tests as parallel tests.
            parallel_integration_tests += isolated_tests
            num_parallel_tests = len(parallel_integration_tests)

            if not num_parallel_tests:
                pass
            else:
                num_processes = min(max_parallel_tests, num_parallel_tests)
                if num_parallel_tests > 1 and max_parallel_tests > 1:
                    message = 'Running %d tests in parallel mode (%d processes).'
                    if num_processes > _DEFAULT_TEST_PARALLEL_PROCESSES:
                        message += (
                            ' Please be patient while your CPU is incinerated. '
                            'If your machine becomes unresponsive, consider reducing '
                            'the amount of parallel test processes by running '
                            '\'gsutil test -p <num_processes>\'.')
                    print('\n'.join(
                        textwrap.wrap(
                            message % (num_parallel_tests, num_processes))))
                else:
                    print('Running %d tests sequentially in isolated '
                          'processes.' % num_parallel_tests)
                (num_parallel_failures,
                 parallel_time_elapsed) = self.RunParallelTests(
                     parallel_integration_tests, max_parallel_tests,
                     coverage_controller.data_files.filename
                     if perform_coverage else None)
                self.PrintTestResults(num_sequential_tests, sequential_success,
                                      sequential_skipped,
                                      sequential_time_elapsed,
                                      num_parallel_tests,
                                      num_parallel_failures,
                                      parallel_time_elapsed)

        if perform_coverage:
            coverage_controller.stop()
            coverage_controller.combine()
            coverage_controller.save()
            print('Coverage information was saved to: %s' %
                  coverage_controller.data_files.filename)

        # Re-enable analytics to report the test command.
        os.environ['GSUTIL_TEST_ANALYTICS'] = '0'

        if sequential_success and not num_parallel_failures:
            ResetFailureCount()
            return 0
        return 1
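
Most of the sequential path above is standard unittest plumbing: dotted test names are loaded into one suite via TestLoader.loadTestsFromName and handed to a TextTestRunner. The following minimal sketch shows that pattern in isolation; the inline test case and the run_named_tests helper are hypothetical stand-ins, not gsutil code.

import unittest


class _ExampleTest(unittest.TestCase):
    """Hypothetical stand-in for a gslib.tests.test_* module."""

    def test_passes(self):
        self.assertEqual(1 + 1, 2)


def run_named_tests(names, failfast=False, verbosity=1):
    """Loads dotted test names into one suite and runs it."""
    loader = unittest.TestLoader()
    suite = unittest.TestSuite()
    for name in names:
        # loadTestsFromName resolves module, class, or method dotted paths.
        suite.addTests(loader.loadTestsFromName(name))
    runner = unittest.TextTestRunner(verbosity=verbosity, failfast=failfast)
    return 0 if runner.run(suite).wasSuccessful() else 1


if __name__ == '__main__':
    # Load the inline test case by its dotted name in this module.
    raise SystemExit(run_named_tests(['__main__._ExampleTest']))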
Example #2
  def RunCommand(self):
    if not unittest:
      raise CommandException('On Python 2.6, the unittest2 module is required '
                             'to run the gsutil tests.')

    failfast = False
    list_tests = False
    if self.sub_opts:
      for o, _ in self.sub_opts:
        if o == '-u':
          tests.util.RUN_INTEGRATION_TESTS = False
        elif o == '-f':
          failfast = True
        elif o == '-l':
          list_tests = True

    if list_tests and not self.args:
      test_files = os.listdir(TESTS_DIR)
      matcher = re.compile(r'^test_(?P<name>.*).py$')
      test_names = []
      for fname in test_files:
        m = matcher.match(fname)
        if m:
          test_names.append(m.group('name'))
      print 'Found %d test names:' % len(test_names)
      print ' ', '\n  '.join(sorted(test_names))
      return 0

    # Set list of commands to test if supplied.
    commands_to_test = []
    if self.args:
      for name in self.args:
        if os.path.exists(os.path.join(TESTS_DIR, 'test_%s.py' % name)):
          commands_to_test.append('gslib.tests.test_%s' % name)
        elif os.path.exists(
            os.path.join(TESTS_DIR, 'test_%s.py' % name.split('.')[0])):
          commands_to_test.append('gslib.tests.test_%s' % name)
        else:
          commands_to_test.append(name)

    # Installs a ctrl-c handler that tries to cleanly tear down tests.
    unittest.installHandler()

    loader = unittest.TestLoader()

    if commands_to_test:
      try:
        suite = loader.loadTestsFromNames(commands_to_test)
      except (ImportError, AttributeError) as e:
        raise CommandException('Invalid test argument name: %s' % e)
    else:
      suite = loader.discover(TESTS_DIR)

    if list_tests:
      suites = [suite]
      test_names = []
      while suites:
        suite = suites.pop()
        for test in suite:
          if isinstance(test, unittest.TestSuite):
            suites.append(test)
          else:
            # str.lstrip() strips a character set, not a prefix, so
            # remove the module prefix explicitly instead.
            name = test.id()
            if name.startswith('gslib.tests.test_'):
              name = name[len('gslib.tests.test_'):]
            test_names.append(name)
      print 'Found %d test names:' % len(test_names)
      print ' ', '\n  '.join(sorted(test_names))
      return 0

    if logging.getLogger().getEffectiveLevel() <= logging.INFO:
      verbosity = 1
    else:
      verbosity = 2
      logging.disable(logging.ERROR)

    total_tests = suite.countTestCases()
    resultclass = MakeCustomTestResultClass(total_tests)

    runner = unittest.TextTestRunner(verbosity=verbosity,
                                     resultclass=resultclass, failfast=failfast)
    ret = runner.run(suite)
    if ret.wasSuccessful():
      return 0
    return 1
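
When no test names are supplied, this version falls back to unittest discovery. The sketch below, assuming a hypothetical tests_dir argument, shows discovery plus the iterative suite-flattening used in the listing branch, along with the prefix removal the original lstrip call was aiming for.

import unittest


def list_discovered_tests(tests_dir):
  """Returns the sorted ids of every test found under tests_dir."""
  loader = unittest.TestLoader()
  # discover() imports each matching file and returns nested TestSuite
  # objects, so flatten them iteratively with a worklist.
  suites = [loader.discover(tests_dir, pattern='test_*.py')]
  names = []
  while suites:
    suite = suites.pop()
    for test in suite:
      if isinstance(test, unittest.TestSuite):
        suites.append(test)
      else:
        name = test.id()
        if name.startswith('gslib.tests.test_'):
          name = name[len('gslib.tests.test_'):]
        names.append(name)
  return sorted(names)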
Example #3
    def RunCommand(self):
        """Command entry point for the test command."""
        if not unittest:
            raise CommandException(
                'On Python 2.6, the unittest2 module is required '
                'to run the gsutil tests.')

        failfast = False
        list_tests = False
        max_parallel_tests = DEFAULT_TEST_PARALLEL_PROCESSES
        perform_coverage = False
        if self.sub_opts:
            for o, a in self.sub_opts:
                if o == '-c':
                    perform_coverage = True
                elif o == '-f':
                    failfast = True
                elif o == '-l':
                    list_tests = True
                elif o == '-p':
                    max_parallel_tests = long(a)
                elif o == '-s':
                    if not tests.util.HAS_S3_CREDS:
                        raise CommandException(
                            'S3 tests require S3 credentials. Please '
                            'add appropriate credentials to your .boto '
                            'file and re-run.')
                    tests.util.RUN_S3_TESTS = True
                elif o == '-u':
                    tests.util.RUN_INTEGRATION_TESTS = False

        if perform_coverage and not coverage:
            raise CommandException(
                'Coverage has been requested but the coverage module was not found. '
                'You can install it with "pip install coverage".')

        if self.parallel_operations:
            if IS_WINDOWS:
                raise CommandException('-m test is not supported on Windows.')
            elif (tests.util.RUN_S3_TESTS
                  and max_parallel_tests > DEFAULT_S3_TEST_PARALLEL_PROCESSES):
                self.logger.warn(
                    'Reducing parallel tests to %d due to S3 maximum bucket '
                    'limitations.', DEFAULT_S3_TEST_PARALLEL_PROCESSES)
                max_parallel_tests = DEFAULT_S3_TEST_PARALLEL_PROCESSES

        test_names = sorted(GetTestNames())
        if list_tests and not self.args:
            print 'Found %d test names:' % len(test_names)
            print ' ', '\n  '.join(sorted(test_names))
            return 0

        # Set list of commands to test if supplied.
        if self.args:
            commands_to_test = []
            for name in self.args:
                if name in test_names or name.split('.')[0] in test_names:
                    commands_to_test.append('gslib.tests.test_%s' % name)
                else:
                    commands_to_test.append(name)
        else:
            commands_to_test = [
                'gslib.tests.test_%s' % name for name in test_names
            ]

        # Installs a ctrl-c handler that tries to cleanly tear down tests.
        unittest.installHandler()

        loader = unittest.TestLoader()

        if commands_to_test:
            try:
                suite = loader.loadTestsFromNames(commands_to_test)
            except (ImportError, AttributeError) as e:
                raise CommandException('Invalid test argument name: %s' % e)

        if list_tests:
            test_names = GetTestNamesFromSuites(suite)
            print 'Found %d test names:' % len(test_names)
            print ' ', '\n  '.join(sorted(test_names))
            return 0

        if logging.getLogger().getEffectiveLevel() <= logging.INFO:
            verbosity = 1
        else:
            verbosity = 2
            logging.disable(logging.ERROR)

        if perform_coverage:
            # We want to run coverage over the gslib module, but filter out the test
            # modules and any third-party code. We also filter out anything under the
            # temporary directory. Otherwise, the gsutil update test (which copies
            # code to the temporary directory) gets included in the output.
            coverage_controller = coverage.coverage(
                source=['gslib'],
                omit=[
                    'gslib/third_party/*', 'gslib/tests/*',
                    tempfile.gettempdir() + '*'
                ])
            coverage_controller.erase()
            coverage_controller.start()

        num_parallel_failures = 0
        if self.parallel_operations:
            sequential_tests, parallel_integration_tests, parallel_unit_tests = (
                SplitParallelizableTestSuite(suite))

            sequential_start_time = time.time()
            # TODO: For now, run unit tests sequentially because they are fast.
            # We could potentially shave off several seconds of execution time
            # by executing them in parallel with the integration tests.
            # Note that parallelism_framework unit tests cannot be run in a
            # subprocess.
            print 'Running %d tests sequentially.' % (len(sequential_tests) +
                                                      len(parallel_unit_tests))
            sequential_tests_to_run = sequential_tests + parallel_unit_tests
            suite = loader.loadTestsFromNames(
                sorted(sequential_tests_to_run))
            num_sequential_tests = suite.countTestCases()
            resultclass = MakeCustomTestResultClass(num_sequential_tests)
            runner = unittest.TextTestRunner(verbosity=verbosity,
                                             resultclass=resultclass,
                                             failfast=failfast)
            ret = runner.run(suite)

            num_parallel_tests = len(parallel_integration_tests)
            max_processes = min(max_parallel_tests, num_parallel_tests)

            print('\n'.join(
                textwrap.wrap(
                    'Running %d integration tests in parallel mode (%d processes)! '
                    'Please be patient while your CPU is incinerated. '
                    'If your machine becomes unresponsive, consider reducing '
                    'the amount of parallel test processes by running '
                    '\'gsutil -m test -p <num_processes>\'.' %
                    (num_parallel_tests, max_processes))))
            process_list = []
            process_done = []
            # Tuples of (name, return code, stdout, stderr).
            process_results = []
            hang_detection_counter = 0
            completed_as_of_last_log = 0
            parallel_start_time = last_log_time = time.time()
            test_index = CreateTestProcesses(
                parallel_integration_tests, 0, process_list, process_done,
                max_parallel_tests, coverage_controller.data.filename
                if perform_coverage else None)
            while len(process_results) < num_parallel_tests:
                for proc_num in xrange(len(process_list)):
                    if (process_done[proc_num]
                            or process_list[proc_num].poll() is None):
                        continue
                    process_done[proc_num] = True
                    stdout, stderr = process_list[proc_num].communicate()
                    # TODO: Differentiate test failures from errors.
                    if process_list[proc_num].returncode != 0:
                        num_parallel_failures += 1
                    process_results.append(
                        (parallel_integration_tests[proc_num],
                         process_list[proc_num].returncode, stdout, stderr))
                if len(process_list) < num_parallel_tests:
                    test_index = CreateTestProcesses(
                        parallel_integration_tests, test_index, process_list,
                        process_done, max_parallel_tests,
                        coverage_controller.data.filename
                        if perform_coverage else None)
                if len(process_results) < num_parallel_tests:
                    if time.time() - last_log_time > 5:
                        print '%d/%d finished - %d failures' % (
                            len(process_results), num_parallel_tests,
                            num_parallel_failures)
                        if len(process_results) == completed_as_of_last_log:
                            hang_detection_counter += 1
                        else:
                            completed_as_of_last_log = len(process_results)
                            hang_detection_counter = 0
                        if hang_detection_counter > 4:
                            still_running = []
                            for proc_num in xrange(len(process_list)):
                                if not process_done[proc_num]:
                                    still_running.append(
                                        parallel_integration_tests[proc_num])
                            print 'Still running: %s' % still_running
                        last_log_time = time.time()
                    time.sleep(1)
            process_run_finish_time = time.time()
            if num_parallel_failures:
                for result in process_results:
                    if result[1] != 0:
                        new_stderr = result[3].split('\n')
                        print 'Results for failed test %s:' % result[0]
                        for line in new_stderr:
                            print line

            # TODO: Properly track test skips.
            print 'Parallel tests complete. Success: %s Fail: %s' % (
                num_parallel_tests - num_parallel_failures,
                num_parallel_failures)
            print(
                'Ran %d tests in %.3fs (%d sequential in %.3fs, %d parallel in %.3fs)'
                % (num_parallel_tests + num_sequential_tests,
                   float(process_run_finish_time - sequential_start_time),
                   num_sequential_tests,
                   float(parallel_start_time - sequential_start_time),
                   num_parallel_tests,
                   float(process_run_finish_time - parallel_start_time)))
            print
            if not num_parallel_failures and ret.wasSuccessful():
                print 'OK'
            else:
                if num_parallel_failures:
                    print 'FAILED (parallel tests)'
                if not ret.wasSuccessful():
                    print 'FAILED (sequential tests)'
        else:
            total_tests = suite.countTestCases()
            resultclass = MakeCustomTestResultClass(total_tests)

            runner = unittest.TextTestRunner(verbosity=verbosity,
                                             resultclass=resultclass,
                                             failfast=failfast)
            ret = runner.run(suite)

        if perform_coverage:
            coverage_controller.stop()
            coverage_controller.combine()
            coverage_controller.save()
            print('Coverage information was saved to: %s' %
                  coverage_controller.data.filename)

        if ret.wasSuccessful() and not num_parallel_failures:
            ResetFailureCount()
            return 0
        return 1
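
At its core, the parallel branch is a spawn, poll, and reap loop over subprocess.Popen objects. Below is a compact, self-contained sketch of that loop under stated assumptions: run_in_subprocesses is a hypothetical helper, and 'python -m unittest <name>' stands in for whatever command gsutil's CreateTestProcesses actually spawns.

import subprocess
import sys
import time


def run_in_subprocesses(test_names, max_procs):
    """Runs each named test in its own process, polling until all exit."""
    pending = list(test_names)
    running = {}  # test name -> Popen
    results = {}  # test name -> return code
    while pending or running:
        # Top up the worker pool.
        while pending and len(running) < max_procs:
            name = pending.pop()
            running[name] = subprocess.Popen(
                [sys.executable, '-m', 'unittest', name],
                stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        # Reap finished workers; poll() returns None while a child runs.
        for name, proc in list(running.items()):
            if proc.poll() is not None:
                proc.communicate()  # drain whatever output remains
                results[name] = proc.returncode
                del running[name]
        time.sleep(1)
    return results

One caveat: a child that fills its pipe buffer before exiting will block forever under this scheme, so a hardened version would drain stdout and stderr concurrently (threads or temporary files) instead of only after poll() reports an exit.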