Example #1
    def finalize(self, exit_code=0):
        '''
        Run the finalization procedures. Show the report, clean up the
        file system, etc.
        '''
        if self.options.no_report is False:
            self.print_overall_testsuite_report()
        self.post_execution_cleanup()
        # Brute-force approach to terminate this process and its children
        logging.getLogger(__name__).info('Terminating test suite child processes.')
        helpers.terminate_process_pid(os.getpid(), only_children=True)
        logging.getLogger(__name__).info(
            'Terminating test suite child processes if any are still found running.')
        helpers.terminate_process_pid(os.getpid(), only_children=True)
        logging.getLogger(__name__).info(
            'Test suite execution finalized with exit code: {0}'.format(exit_code))
        self.exit(exit_code)
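
The helpers.terminate_process_pid utility is not shown in these examples. As a rough illustration of what such a helper might do, here is a minimal sketch built on psutil; the function body, the timeout parameter, and the SIGTERM-then-SIGKILL escalation are assumptions for illustration, not Salt's actual implementation:

import psutil

def terminate_process_pid(pid, only_children=False, timeout=5):
    # Illustrative sketch only: terminate a process tree, escalating
    # from SIGTERM to SIGKILL for anything that refuses to exit.
    try:
        proc = psutil.Process(pid)
    except psutil.NoSuchProcess:
        return
    procs = proc.children(recursive=True)
    if not only_children:
        procs.append(proc)
    for child in procs:
        try:
            child.terminate()
        except psutil.NoSuchProcess:
            pass
    _, alive = psutil.wait_procs(procs, timeout=timeout)
    for child in alive:
        try:
            child.kill()
        except psutil.NoSuchProcess:
            pass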
Example #2
    def print_overall_testsuite_report(self):
        # Terminate lingering child processes unless the module-level
        # RUNTESTS_WITH_HARD_KILL flag is set, then delegate the report
        # to the parent class.
        if RUNTESTS_WITH_HARD_KILL is False:
            terminate_process_pid(os.getpid(), only_children=True)
        SaltCoverageTestingParser.print_overall_testsuite_report(self)
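
Example #3 below leans heavily on print_header from Salt's testing support code. A minimal sketch of how a helper with that keyword signature (sep, centered, inline, width) might behave, purely for illustration and not the real salt-testing implementation:

def print_header(header, sep=u'~', inline=False, centered=False, width=80):
    # Illustrative sketch only: pad or frame the header with the
    # separator character out to `width` columns.
    if inline:
        if centered:
            print(header.center(width, sep))
        else:
            print((header + sep * width)[:width])
    else:
        print(sep * width)
        print(header.center(width) if centered else header)
        print(sep * width)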
Example #3
    def print_overall_testsuite_report(self):
        '''
        Print a nicely formatted report about the test suite results
        '''
        # Brute-force approach to terminate this process and its children
        helpers.terminate_process_pid(os.getpid(), only_children=True)
        print()
        print_header(
            u'  Overall Tests Report  ', sep=u'=', centered=True, inline=True,
            width=self.options.output_columns
        )

        failures = errors = skipped = passed = 0
        no_problems_found = True
        for (name, results) in self.testsuite_results:
            failures += len(results.failures)
            errors += len(results.errors)
            skipped += len(results.skipped)
            passed += results.testsRun - len(
                results.failures + results.errors + results.skipped
            )

            if not results.failures and not results.errors and \
                    not results.skipped:
                continue

            no_problems_found = False

            print_header(
                u'*** {0}  '.format(name), sep=u'*', inline=True,
                width=self.options.output_columns
            )
            if results.skipped:
                print_header(
                    u' --------  Skipped Tests  ', sep='-', inline=True,
                    width=self.options.output_columns
                )
                maxlen = len(
                    max([testcase.id() for (testcase, reason) in
                         results.skipped], key=len)
                )
                fmt = u'   -> {0: <{maxlen}}  ->  {1}'
                for testcase, reason in results.skipped:
                    print(fmt.format(testcase.id(), reason, maxlen=maxlen))
                print_header(u' ', sep='-', inline=True,
                             width=self.options.output_columns)

            if results.errors:
                print_header(
                    u' --------  Tests with Errors  ', sep='-', inline=True,
                    width=self.options.output_columns
                )
                for testcase, reason in results.errors:
                    print_header(
                        u'   -> {0}  '.format(testcase.id()),
                        sep=u'.', inline=True,
                        width=self.options.output_columns
                    )
                    for line in reason.rstrip().splitlines():
                        print('       {0}'.format(line.rstrip()))
                    print_header(u'   ', sep=u'.', inline=True,
                                 width=self.options.output_columns)
                print_header(u' ', sep='-', inline=True,
                             width=self.options.output_columns)

            if results.failures:
                print_header(
                    u' --------  Failed Tests  ', sep='-', inline=True,
                    width=self.options.output_columns
                )
                for testcase, reason in results.failures:
                    print_header(
                        u'   -> {0}  '.format(testcase.id()),
                        sep=u'.', inline=True,
                        width=self.options.output_columns
                    )
                    for line in reason.rstrip().splitlines():
                        print('       {0}'.format(line.rstrip()))
                    print_header(u'   ', sep=u'.', inline=True,
                                 width=self.options.output_columns)
                print_header(u' ', sep='-', inline=True,
                             width=self.options.output_columns)

        if no_problems_found:
            print_header(
                u'***  No Problems Found While Running Tests  ',
                sep=u'*', inline=True, width=self.options.output_columns
            )

        print_header(u'', sep=u'=', inline=True,
                     width=self.options.output_columns)
        total = sum([passed, skipped, errors, failures])
        print(
            '{0} (total={1}, skipped={2}, passed={3}, failures={4}, '
            'errors={5}) '.format(
                (errors or failures) and 'FAILED' or 'OK',
                total, skipped, passed, failures, errors
            )
        )
        print_header(
            '  Overall Tests Report  ', sep='=', centered=True, inline=True,
            width=self.options.output_columns
        )
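
The per-suite arithmetic in the loop above (passed is testsRun minus the failures, errors, and skipped counts) can be checked against a plain unittest run; a small self-contained demo:

import unittest

class DemoTests(unittest.TestCase):
    def test_passes(self):
        self.assertTrue(True)

    def test_skipped(self):
        self.skipTest('demonstration skip')

    def test_fails(self):
        self.fail('demonstration failure')

suite = unittest.TestLoader().loadTestsFromTestCase(DemoTests)
results = unittest.TestResult()
suite.run(results)

# Same tally as in the report loop above.
passed = results.testsRun - len(results.failures + results.errors + results.skipped)
print('total={0}, skipped={1}, passed={2}, failures={3}, errors={4}'.format(
    results.testsRun, len(results.skipped), passed,
    len(results.failures), len(results.errors)))
# -> total=3, skipped=1, passed=1, failures=1, errors=0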