Example #1
    def handle_noargs(self, **options):
        database = read_database()
        if not database:
            self.stdout.write("No database found\n")
            return
        self.stdout.write("Last run test results\n")
        self.stdout.write("=====================\n\n")
        self.stdout.write("\n")
        self.stdout.write("Timings\n\n")
        for test, duration in sorted(database.get('timings', {}).items(),
                                     key=lambda x: -x[1]):
            self.stdout.write(
                "{duration:-7.3f} {test}\n".format(
                    test=test, duration=duration
                )
            )
        self.stdout.write("\n")
        self.stdout.write("Failed tests:\n\n")
        for failed in database.get('failed', []):
            self.stdout.write('    ' + failed + '\n')
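Example #1 relies on a read_database() helper that is not shown on this page (Example #2 below reads the same data and writes it back via write_database()). A minimal sketch, assuming the "database" is simply a JSON file in the working directory; the file name and the json/os usage are assumptions, not taken from the original source:

import json
import os

DATABASE_PATH = '.test_database.json'  # hypothetical file name


def read_database():
    # Return an empty dict when no previous run has been recorded, so callers
    # can rely on database.get(...) defaults.
    if not os.path.exists(DATABASE_PATH):
        return {}
    with open(DATABASE_PATH) as fobj:
        return json.load(fobj)


def write_database(data):
    # 'data' carries the 'timings', 'failed' and 'last_run' keys built at the
    # end of Example #2.
    with open(DATABASE_PATH, 'w') as fobj:
        json.dump(data, fobj)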
Example #2
    def handle(self, *test_labels, **options):
        from django.conf import settings

        if not options['migrate']:
            settings.MIGRATION_MODULES = DisableMigrations()

        if options['vanilla']:
            return DjangoTest().handle(*test_labels, **options)

        from django.test.utils import get_runner

        if 'south' in settings.INSTALLED_APPS:
            from south.management.commands import patch_for_test_db_setup
            patch_for_test_db_setup()

        # load test runner class
        TestRunner = get_runner(settings, options.get('testrunner'))

        options['verbosity'] = int(options.get('verbosity'))

        if options.get('liveserver') is not None:
            os.environ['DJANGO_LIVE_TEST_SERVER_ADDRESS'] = options['liveserver']
            del options['liveserver']

        database = read_database()

        # Re-run using last configuration
        if options['retest'] and database.get('last_run', None):
            last_run = database['last_run']
            options['parallel'] = last_run['parallel']
            options['isolate'] = last_run['isolate']
            options['list_slow'] = last_run['list_slow']
            test_labels = last_run['labels']

        # Get a test runner instance. It won't actually be used to run
        # anything, but rather to build the suite so we can get the list of
        # test labels to run. Interactive mode is disabled since it doesn't
        # work with multiprocessing.
        options['interactive'] = False
        test_runner = TestRunner(**options)

        suite = test_runner.build_suite(test_labels)

        # Get an actual result class we can use
        pseudo_runner = unittest.TextTestRunner(
            resultclass=MultiProcessingTextTestResult
        )
        real_result = pseudo_runner._makeResult()

        all_test_labels = suite_to_labels(suite, real_result)

        # If there's nothing to run, let Django handle it.
        if not all_test_labels:
            return super(Command, self).handle(*test_labels, **options)

        if options['failed']:
            all_test_labels = [
                label for label in all_test_labels
                if label in database.get('failed', all_test_labels)
            ]

        if options['isolate']:
            # Isolate means one test (label) per task process.
            chunks = [
                [label] for label in all_test_labels
            ]
        elif options['parallel']:
            # Try to distribute the test labels equally across the available
            # CPU cores
            chunk_count = multiprocessing.cpu_count()
            weighted_chunks = [
                (database.get('timings', {}).get(label, 0), label)
                for label in all_test_labels
            ]
            # Try to split the tests into chunks of equal time, not equal size
            chunks = simple_weighted_partition(weighted_chunks, chunk_count)

            # filter empty chunks
            chunks = list(filter(bool, chunks))
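            # Illustration with made-up numbers (not from the original source):
            # given chunk_count == 2 and timings {'a': 5.0, 'b': 3.0, 'c': 2.0},
            # a weighted partition aims for chunks like [['a'], ['b', 'c']],
            # i.e. roughly five seconds of tests per worker rather than an
            # equal number of labels.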
        else:
            chunks = [all_test_labels]

        # Initialize shared values
        results_queue = multiprocessing.Queue()

        # Initialize pool
        pool = multiprocessing.Pool(
            initializer=init_task,
            initargs=(results_queue,)
        )

        start_time = time.time()

        # Send tasks to pool
        async_results = []
        for chunk_num, chunk in enumerate(chunks):
            async_results.append(pool.apply_async(
                run_tests,
                (chunk, TestRunner, options, chunk_num)
            ))

        # Wait for results to come in
        wait_for_tests_to_finish(real_result, async_results, results_queue)

        failed_executors = sum(
            0 if result.successful() else 1 for result in async_results
        )

        # Stop all tasks
        pool.close()
        pool.join()

        end_time = time.time()

        # Report the result; this is mostly taken from TextTestRunner.run
        time_taken = end_time - start_time

        real_result.printErrors()
        real_result.stream.writeln(real_result.separator2)
        real_result.stream.writeln(
            "Ran {number} test{plural} in {time:.3f}s".format(
                number=real_result.testsRun,
                plural="" if real_result.testsRun == 1 else "s",
                time=time_taken
            )
        )
        real_result.stream.writeln()

        # Record timings to database
        data = {
            'timings': database.get('timings', {}),
        }
        for test, timing in real_result.timings.items():
            data['timings'][test] = timing

        # Record failed tests to database
        data['failed'] = [
            test.qualname for test, _ in itertools.chain(
                real_result.failures,
                real_result.errors,
                [(test, None) for test in real_result.unexpectedSuccesses]
            )
        ]

        # Record config to database
        data['last_run'] = {
            'isolate': options['isolate'],
            'parallel': options['parallel'],
            'list_slow': options['list_slow'],
            'labels': all_test_labels,
        }

        # Show slowest tests
        if options['list_slow']:
            real_result.stream.writeln("Slowest tests:")
            slowest = sorted(
                ((timing, test) for test, timing in data['timings'].items()),
                reverse=True
            )
            for timing, test in slowest[:options['list_slow']]:
                real_result.stream.writeln(" %.3fs: %s" % (timing, test))
            real_result.stream.writeln()

        # Display info about failures etc
        infos = []
        skipped = len(real_result.skipped)
        expected_fails = len(real_result.expectedFailures)
        unexpected_successes = len(real_result.unexpectedSuccesses)
        if not real_result.wasSuccessful():
            real_result.stream.write("FAILED")
            failed = len(real_result.failures)
            errored = len(real_result.errors)
            if failed:
                infos.append("failures=%d" % failed)
            if errored:
                infos.append("errors=%d" % errored)
        elif failed_executors:
            real_result.stream.write("%d executors failed" % failed_executors)
        else:
            real_result.stream.write("OK")
        if skipped:
            infos.append("skipped=%d" % skipped)
        if expected_fails:
            infos.append("expected failures=%d" % expected_fails)
        if unexpected_successes:
            infos.append("unexpected successes=%d" % unexpected_successes)
        if infos:
            real_result.stream.writeln(" (%s)" % (", ".join(infos),))
        else:
            real_result.stream.write("\n")

        # Save the database
        write_database(data)

        # For easy integration, return the number of failed tests as the
        # return code. This means that if all tests passed, 0 is returned,
        # which happens to be the standard return code for "success".
        # The number of failed executors is added as well.
        return_code = sum(map(len, (
            real_result.failures,
            real_result.errors,
            real_result.unexpectedSuccesses,
            real_result.expectedFailures
        ))) + failed_executors
        sys.exit(return_code)
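Example #2 also calls simple_weighted_partition() and init_task(), which are defined elsewhere in the module. The sketch below shows one way they could look: a greedy, heaviest-first partitioner that always drops the next label into the currently lightest chunk, and a pool initializer that stashes the shared results queue in a module-level global for the worker function. The names and signatures match the call sites above, but the bodies are assumptions, not the original implementations.

def simple_weighted_partition(weighted_labels, chunk_count):
    # weighted_labels is a list of (duration, label) pairs; return chunk_count
    # lists of labels whose total durations are roughly equal.
    chunks = [[] for _ in range(chunk_count)]
    totals = [0.0] * chunk_count
    # Assign the heaviest labels first so the greedy choice stays balanced.
    for duration, label in sorted(weighted_labels, reverse=True):
        lightest = totals.index(min(totals))
        chunks[lightest].append(label)
        totals[lightest] += duration
    return chunks


_results_queue = None  # set in each worker process by init_task


def init_task(results_queue):
    # multiprocessing.Pool initializer: remember the queue shared with the
    # parent process so the worker-side test function can report back on it.
    global _results_queue
    _results_queue = results_queue

The run_tests, wait_for_tests_to_finish and MultiProcessingTextTestResult pieces that move individual test results over that queue are likewise defined elsewhere in the module and are not reconstructed here.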