Example #1
0
    def handleJobStatus(self, job):
        """ Method to handle a job status """
        tester = job.getTester()
        if tester.isSilent():
            return

        # Still running: report a transient status line, record nothing
        if not job.isFinished():
            print(util.formatResult(job, self.options, result='RUNNING', caveats=False))
            return

        # The job finished; normalize its status before reporting
        tester = self.normalizeStatus(job)

        # Emit application output when the user asked for it
        self.printOutput(job)

        # Show the final status line, including caveats
        print(util.formatResult(job, self.options, caveats=True))

        timing = job.getTiming()

        # Remember the outcome for the 'Final Test Result' summary table
        self.test_table.append((job, tester.getStatus().status, timing))

        self.postRun(tester.specs, timing)

        # Bump the counter matching this job's final status
        if tester.isSkip():
            self.num_skipped += 1
        elif tester.isPass():
            self.num_passed += 1
        elif tester.isFail():
            self.num_failed += 1
        else:
            self.num_pending += 1
Example #2
0
    def handleJobStatus(self, job):
        """ Method to handle a job status """
        tester = job.getTester()
        if not tester.isSilent():
            if not job.isFinished():
                # In-flight job: show a RUNNING line and keep no record of it
                print(util.formatResult(job, self.options, result='RUNNING', caveats=False))
            else:
                # Completed job: normalize, print, and record the result
                tester = self.normalizeStatus(job)

                # Application output, when enabled
                self.printOutput(job)

                # Final status line with caveats
                print(util.formatResult(job, self.options, caveats=True))

                timing = job.getTiming()

                # Stash the outcome for the 'Final Test Result' summary
                self.test_table.append((job, tester.getStatus().status, timing))
                self.postRun(tester.specs, timing)

                # Tally the result bucket for the summary footer
                if tester.isSkip():
                    self.num_skipped += 1
                elif tester.isPass():
                    self.num_passed += 1
                elif tester.isFail():
                    self.num_failed += 1
                else:
                    self.num_pending += 1
Example #3
0
    def handleTestStatus(self, tester_data):
        """ Method to handle a testers status """
        tester = tester_data.getTester()

        # print and store those results
        result = self.printResult(tester_data)

        # Test is finished and had some results to print
        if result and tester.isFinished():
            timing = tester_data.getTiming()

            # Store these results to a table we will use when we print final results
            self.test_table.append((tester_data, result, timing))

            self.postRun(tester.specs, timing)
            # Tally the results of this test to be used in our Final Test Results footer
            if tester.isSkipped():
                self.num_skipped += 1
            elif tester.didPass():
                self.num_passed += 1
            elif tester.isQueued() or tester.isWaiting():
                self.num_pending += 1
            else:
                self.num_failed += 1

            # Write results to a file if asked to do so
            if not tester.isSkipped():
                # Record failures for a later --failed-tests re-run
                if not tester.didPass() and not self.options.failed_tests:
                    self.writeFailedTest.write(tester.getTestName() + '\n')

                # Verbose single-file output (--file)
                if self.options.file:
                    self.file.write(
                        util.formatResult(
                            tester_data, result, self.options, color=False) +
                        '\n')

                # Per-test output file (--sep-files / --fail-files / --ok-files)
                if self.options.sep_files or (self.options.fail_files
                                              and not tester.didPass()) or (
                                                  self.options.ok_files
                                                  and tester.didPass()):
                    fname = os.path.join(
                        tester.getTestDir(),
                        tester.getTestName().split('/')[-1] + '.' +
                        result[:6] + '.txt')
                    # Fixed: use a context manager so the handle is closed even
                    # if formatting raises (original open/close leaked on error)
                    with open(fname, 'w') as f:
                        f.write(
                            util.formatResult(tester_data,
                                              tester_data.getOutput(),
                                              self.options,
                                              color=False) + '\n')
Example #4
0
    def printResult(self, tester_data):
        """ Method to print a testers status to the screen """
        tester = tester_data.getTester()

        # Silent testers (and deleted ones, unless --extra-info) have nothing to show
        if tester.isSilent() or (tester.isDeleted() and not self.options.extra_info):
            return None

        # Dump the tester's captured output when verbose, or on a non-quiet failure
        if self.options.verbose or (tester.didFail() and not self.options.quiet):
            output = 'Working Directory: ' + tester.getTestDir() + '\nRunning command: ' + tester.getCommand(self.options) + '\n'
            output += tester_data.getOutput()
            output = output.replace('\r', '\n')  # replace the carriage returns with newlines
            lines = output.split('\n')

            # Color the test-name prefix to match the test status
            color = tester.getColor()

            if output != '':
                test_name = util.colorText(tester.getTestName() + ": ", color, colored=self.options.colored, code=self.options.code)
                print(test_name + ("\n" + test_name).join(lines))

        # Print the caveat-formatted status line and hand it back to the caller
        caveat_formatted_results = self.formatCaveats(tester)
        print(util.formatResult(tester_data, caveat_formatted_results, self.options))
        return caveat_formatted_results
Example #5
0
    def printResult(self, tester_data):
        """ Method to print a testers status to the screen """
        tester = tester_data.getTester()
        caveat_formatted_results = None

        # Print what ever status the tester has at the time
        if self.canPrint(tester):
            show_output = self.options.verbose or (tester.didFail()
                                                   and not self.options.quiet)
            if show_output:
                header = ('Working Directory: ' + tester.getTestDir() +
                          '\nRunning command: ' +
                          tester.getCommand(self.options) + '\n')
                # Normalize carriage returns to newlines before splitting
                output = (header + tester_data.getOutput()).replace('\r', '\n')

                if output != '':
                    # Prefix every line with the (status-colored) test name
                    prefix = util.colorText(tester.getTestName() + ": ",
                                            tester.getColor(),
                                            colored=self.options.colored,
                                            code=self.options.code)
                    print(prefix + ("\n" + prefix).join(output.split('\n')))

            caveat_formatted_results = self.formatCaveats(tester)
            print(util.formatResult(tester_data, caveat_formatted_results,
                                    self.options))
        return caveat_formatted_results
Example #6
0
    def cleanup(self):
        """ Print the final results table (when warranted) and the summary
        footer, then close any open result files. """
        # Print the results table again if a bunch of output was spewed to the screen between
        # tests as they were running
        if (self.options.verbose or (self.num_failed != 0 and not self.options.quiet)) and not self.options.dry_run:
            print('\n\nFinal Test Results:\n' + ('-' * (util.TERM_COLS-1)))
            # Sort by result so like outcomes (e.g. failures) group together
            for (tester_data, result, timing) in sorted(self.test_table, key=lambda x: x[1], reverse=True):
                print(util.formatResult(tester_data, result, self.options))

        # Total wall time for the whole run
        time = clock() - self.start_time

        print('-' * (util.TERM_COLS-1))

        # Mask off TestHarness error codes to report parser errors
        fatal_error = ''
        if self.error_code & Parser.getErrorCodeMask():
            fatal_error += ', <r>FATAL PARSER ERROR</r>'
        if self.error_code & ~Parser.getErrorCodeMask():
            fatal_error += ', <r>FATAL TEST HARNESS ERROR</r>'

        # Alert the user to their session file
        if self.options.queueing:
            # Fixed: this was a Python 2 print statement, a SyntaxError under
            # Python 3 (which the rest of this code targets)
            print('Your session file is %s' % self.options.session_file)

        # Print a different footer when performing a dry run
        if self.options.dry_run:
            print('Processed %d tests in %.1f seconds' % (self.num_passed+self.num_skipped, time))
            summary = '<b>%d would run</b>'
            summary += ', <b>%d would be skipped</b>'
            summary += fatal_error
            print(util.colorText( summary % (self.num_passed, self.num_skipped),  "", html = True, \
                             colored=self.options.colored, code=self.options.code ))

        else:
            print('Ran %d tests in %.1f seconds' % (self.num_passed+self.num_failed, time))

            # Color each count: green/cyan/red when non-zero, bold otherwise
            if self.num_passed:
                summary = '<g>%d passed</g>'
            else:
                summary = '<b>%d passed</b>'
            summary += ', <b>%d skipped</b>'
            if self.num_pending:
                summary += ', <c>%d pending</c>'
            else:
                summary += ', <b>%d pending</b>'
            if self.num_failed:
                summary += ', <r>%d FAILED</r>'
            else:
                summary += ', <b>%d failed</b>'
            summary += fatal_error

            print(util.colorText( summary % (self.num_passed, self.num_skipped, self.num_pending, self.num_failed),  "", html = True, \
                             colored=self.options.colored, code=self.options.code ))

        if self.file:
            self.file.close()

        # Close the failed_tests file
        # Fixed: compare against None with 'is not' (identity), not '!='
        if self.writeFailedTest is not None:
            self.writeFailedTest.close()
Example #7
0
    def cleanup(self):
        """ Print the final results table (when warranted) and the summary
        footer, then close any open result files. """
        # Print the results table again if a bunch of output was spewed to the screen between
        # tests as they were running
        if (self.options.verbose or
            (self.num_failed != 0
             and not self.options.quiet)) and not self.options.dry_run:
            print('\n\nFinal Test Results:\n' + ('-' * (util.TERM_COLS - 1)))
            # Sort by result so like outcomes (e.g. failures) group together
            for (tester_data, result, timing) in sorted(self.test_table,
                                                        key=lambda x: x[1],
                                                        reverse=True):
                print(util.formatResult(tester_data, result, self.options))

        # Total wall time for the whole run
        time = clock() - self.start_time

        print('-' * (util.TERM_COLS - 1))

        # Mask off TestHarness error codes to report parser errors
        fatal_error = ''
        if self.error_code & Parser.getErrorCodeMask():
            fatal_error += ', <r>FATAL PARSER ERROR</r>'
        if self.error_code & ~Parser.getErrorCodeMask():
            fatal_error += ', <r>FATAL TEST HARNESS ERROR</r>'

        # Print a different footer when performing a dry run
        if self.options.dry_run:
            print('Processed %d tests in %.1f seconds' %
                  (self.num_passed + self.num_skipped, time))
            summary = '<b>%d would run</b>'
            summary += ', <b>%d would be skipped</b>'
            summary += fatal_error
            print(util.colorText( summary % (self.num_passed, self.num_skipped),  "", html = True, \
                             colored=self.options.colored, code=self.options.code ))

        else:
            print('Ran %d tests in %.1f seconds' %
                  (self.num_passed + self.num_failed, time))

            # Color each count: green/red when non-zero, bold otherwise
            if self.num_passed:
                summary = '<g>%d passed</g>'
            else:
                summary = '<b>%d passed</b>'
            summary += ', <b>%d skipped</b>'
            if self.num_failed:
                summary += ', <r>%d FAILED</r>'
            else:
                summary += ', <b>%d failed</b>'
            summary += fatal_error

            print(util.colorText( summary % (self.num_passed, self.num_skipped, self.num_failed),  "", html = True, \
                             colored=self.options.colored, code=self.options.code ))

        if self.file:
            self.file.close()

        # Close the failed_tests file
        # Fixed: compare against None with 'is not' (identity), not '!='
        if self.writeFailedTest is not None:
            self.writeFailedTest.close()
Example #8
0
    def handleTestStatus(self, tester_data):
        """ Method to handle a testers status """
        tester = tester_data.getTester()

        # print and store those results
        result = self.printResult(tester_data)

        # Test is finished and had some results to print
        if result and tester.isFinished():
            timing = tester_data.getTiming()

            # Store these results to a table we will use when we print final results
            self.test_table.append( (tester_data, result, timing) )

            self.postRun(tester.specs, timing)
            # Tally the results of this test to be used in our Final Test Results footer
            if tester.isSkipped():
                self.num_skipped += 1
            elif tester.didPass():
                self.num_passed += 1
            elif tester.isQueued() or tester.isWaiting():
                self.num_pending += 1
            else:
                self.num_failed += 1

            # Write results to a file if asked to do so
            if not tester.isSkipped():
                # Record failures for a later --failed-tests re-run
                if not tester.didPass() and not self.options.failed_tests:
                    self.writeFailedTest.write(tester.getTestName() + '\n')

                # Verbose single-file output (--file)
                if self.options.file:
                    self.file.write(util.formatResult( tester_data, result, self.options, color=False) + '\n')

                # Per-test output file (--sep-files / --fail-files / --ok-files)
                if self.options.sep_files or (self.options.fail_files and not tester.didPass()) or (self.options.ok_files and tester.didPass()):
                    fname = os.path.join(tester.getTestDir(), tester.getTestName().split('/')[-1] + '.' + result[:6] + '.txt')
                    # Fixed: use a context manager so the handle is closed even
                    # if formatting raises (original open/close leaked on error)
                    with open(fname, 'w') as f:
                        f.write(util.formatResult( tester_data, tester_data.getOutput(), self.options, color=False) + '\n')
Example #9
0
    def handleJobStatus(self, job):
        """
        The Scheduler is calling back the TestHarness to inform us of a status change.
        The job may or may not be finished yet (RUNNING), or failing, passing, etc.
        """
        if job.isSilent():
            return

        if not job.isFinished():
            # Transient status: show RUNNING and record nothing yet
            print(util.formatResult(job, self.options, result='RUNNING', caveats=False))
            return

        # Finished: fold the job's exit code into the harness-wide error code
        status, message, color, exit_code = job.getJointStatus()
        self.error_code |= exit_code

        # Emit application output when the user asked for it
        self.printOutput(job, color)

        # Show the final status line, including caveats
        print(util.formatResult(job, self.options, caveats=True))

        timing = job.getTiming()

        # Remember the outcome for the 'Final Test Result' summary table
        self.test_table.append((job, exit_code, timing))

        self.postRun(job.specs, timing)

        # Bump the counter matching this job's final status
        if job.isSkip():
            self.num_skipped += 1
        elif job.isPass():
            self.num_passed += 1
        elif job.isFail():
            self.num_failed += 1
        else:
            self.num_pending += 1
Example #10
0
    def writeResults(self):
        """
        Write test results to disc in whatever fashion the user has requested
        (session JSON, --file, and per-test --sep-files* outputs).

        Does nothing when --failed-tests is in use, so the stored results of
        the previous run are preserved for the re-run.
        """
        if self.options.failed_tests:
            return

        all_jobs = self.scheduler.retrieveJobs()

        # Record the input file name that was used
        self.options.results_storage['INPUT_FILE_NAME'] = self.options.input_file_name

        # Write some useful data to our results_storage
        for job in all_jobs:
            tester = job.getTester()

            # If queueing, do not store silent results in session file
            if tester.isSilent() and self.options.queueing:
                continue

            # Create empty key based on TestDir, or re-initialize with existing data so we can append to it
            test_dir = tester.getTestDir()
            self.options.results_storage[test_dir] = self.options.results_storage.get(test_dir, {})
            self.options.results_storage[test_dir][tester.getTestName()] = {'NAME'      : job.getTestNameShort(),
                                                                            'LONG_NAME' : tester.getTestName(),
                                                                            'TIMING'    : job.getTiming(),
                                                                            'STATUS'    : tester.getStatus().status,
                                                                            'FAIL'      : tester.isFail(),
                                                                            'COLOR'     : tester.getStatus().color,
                                                                            'CAVEATS'   : list(tester.getCaveats()),
                                                                            'OUTPUT'    : job.getOutput(),
                                                                            'COMMAND'   : tester.getCommand(self.options)}

            # Additional data to store (overwrites any previous matching keys)
            self.options.results_storage[test_dir].update(job.getMetaData())

        if self.options.output_dir:
            self.results_storage = os.path.join(self.options.output_dir, self.results_storage)

        # NOTE(review): this tests the truthiness of the results dict (always
        # non-empty once INPUT_FILE_NAME is set), not a user option — confirm intent
        if self.options.results_storage:
            try:
                with open(self.results_storage, 'w') as data_file:
                    json.dump(self.options.results_storage, data_file, indent=2)
            except UnicodeDecodeError:
                print('\nERROR: Unable to write results due to unicode decode/encode error')

                # write to a plain file to aid in reproducing error
                with open(self.results_storage + '.unicode_error' , 'w') as f:
                    f.write(self.options.results_storage)

                sys.exit(1)
            except IOError:
                print('\nERROR: Unable to write results due to permissions')
                sys.exit(1)

        try:
            # Write one file, with verbose information (--file)
            if self.options.file:
                with open(os.path.join(self.output_dir, self.options.file), 'w') as f:
                    for job in all_jobs:
                        tester = job.getTester()

                        # Do not write information about silent tests
                        if tester.isSilent():
                            continue

                        formatted_results = util.formatResult( job, self.options, result=job.getOutput(), color=False)
                        f.write(formatted_results + '\n')

            # Write a separate file for each test with verbose information (--sep-files, --sep-files-ok, --sep-files-fail)
            if ((self.options.ok_files and self.num_passed)
                or (self.options.fail_files and self.num_failed)):
                for job in all_jobs:
                    tester = job.getTester()

                    if self.options.output_dir:
                        output_dir = self.options.output_dir
                    else:
                        output_dir = tester.getTestDir()

                    # Yes, by design test dir will be a part of the output file name
                    output_file = os.path.join(output_dir, '.'.join([os.path.basename(tester.getTestDir()),
                                                                     job.getTestNameShort(),
                                                                     tester.getStatus().status,
                                                                     'txt']))

                    formatted_results = util.formatResult(job, self.options, result=job.getOutput(), color=False)

                    # Passing tests
                    if self.options.ok_files and tester.isPass():
                        with open(output_file, 'w') as f:
                            f.write(formatted_results)

                    # Failing tests
                    if self.options.fail_files and tester.isFail():
                        with open(output_file, 'w') as f:
                            f.write(formatted_results)

        except IOError:
            print('Permission error while writing results to disc')
            sys.exit(1)
        # Fixed: a bare 'except:' also swallowed SystemExit/KeyboardInterrupt;
        # limit the catch-all to ordinary exceptions
        except Exception:
            print('Error while writing results to disc')
            sys.exit(1)
Example #11
0
    def cleanup(self):
        """
        Print parser errors, optionally re-print the final results table,
        emit the summary footer, and (for real runs) write results to disc.
        """
        # Not interesting in printing any final results if we are cleaning up old queue manager runs
        if self.options.queue_cleanup:
            try:
                os.remove(self.results_storage)
            except OSError:
                # The session file may already be gone; removal is best-effort
                pass
            return

        # Print the results table again if a bunch of output was spewed to the screen between
        # tests as they were running
        if len(self.parse_errors) > 0:
            print('\n\nParser Errors:\n' + ('-' * (util.TERM_COLS)))
            for err in self.parse_errors:
                print(util.colorText(err, 'RED', html=True, colored=self.options.colored, code=self.options.code))

        if (self.options.verbose or (self.num_failed != 0 and not self.options.quiet)) and not self.options.dry_run:
            print('\n\nFinal Test Results:\n' + ('-' * (util.TERM_COLS)))
            # Sorted by result so like outcomes (e.g. failures) group together
            for (job, result, timing) in sorted(self.test_table, key=lambda x: x[1], reverse=True):
                print(util.formatResult(job, self.options, caveats=True))

        # Total wall time for the whole run
        time = clock() - self.start_time

        print('-' * (util.TERM_COLS))

        # Mask off TestHarness error codes to report parser errors
        fatal_error = ''
        if self.error_code:
            fatal_error += ', <r>FATAL TEST HARNESS ERROR</r>'
        if len(self.parse_errors) > 0:
            fatal_error += ', <r>FATAL PARSER ERROR</r>'
            self.error_code = 1

        # Alert the user to their session file
        if self.options.queueing:
            print('Your session file is %s' % self.results_storage)

        # Print a different footer when performing a dry run
        if self.options.dry_run:
            print('Processed %d tests in %.1f seconds.' % (self.num_passed+self.num_skipped, time))
            summary = '<b>%d would run</b>'
            summary += ', <b>%d would be skipped</b>'
            summary += fatal_error
            print(util.colorText( summary % (self.num_passed, self.num_skipped),  "", html = True, \
                             colored=self.options.colored, code=self.options.code ))

        else:
            print('Ran %d tests in %.1f seconds.' % (self.num_passed+self.num_failed, time))

            # Color each count: green/cyan/red when non-zero, bold otherwise
            if self.num_passed:
                summary = '<g>%d passed</g>'
            else:
                summary = '<b>%d passed</b>'
            summary += ', <b>%d skipped</b>'
            if self.num_pending:
                summary += ', <c>%d pending</c>'
            else:
                summary += ', <b>%d pending</b>'
            if self.num_failed:
                summary += ', <r>%d FAILED</r>'
            else:
                summary += ', <b>%d failed</b>'
            if self.scheduler.maxFailures():
                summary += '\n<r>MAX FAILURES REACHED</r>'

            summary += fatal_error

            print(util.colorText( summary % (self.num_passed, self.num_skipped, self.num_pending, self.num_failed),  "", html = True, \
                             colored=self.options.colored, code=self.options.code ))

            # Perform any write-to-disc operations
            self.writeResults()
Example #12
0
    def writeResults(self):
        """
        Write test results to disc in whatever fashion the user has requested
        (session JSON, --file, and per-test --sep-files* outputs).

        Does nothing when --failed-tests is in use, so the stored results of
        the previous run are preserved for the re-run.
        """
        if self.options.failed_tests:
            return

        all_jobs = self.scheduler.retrieveJobs()

        # Record the input file name that was used
        self.options.results_storage['INPUT_FILE_NAME'] = self.options.input_file_name

        # Write some useful data to our results_storage
        for job in all_jobs:
            tester = job.getTester()

            # If queueing, do not store silent results in session file
            if tester.isSilent() and self.options.queueing:
                continue

            # Create empty key based on TestDir, or re-initialize with existing data so we can append to it
            test_dir = tester.getTestDir()
            self.options.results_storage[test_dir] = self.options.results_storage.get(test_dir, {})
            self.options.results_storage[test_dir][tester.getTestName()] = {'NAME'      : job.getTestNameShort(),
                                                                            'LONG_NAME' : tester.getTestName(),
                                                                            'TIMING'    : job.getTiming(),
                                                                            'STATUS'    : tester.getStatus().status,
                                                                            'FAIL'      : tester.isFail(),
                                                                            'COLOR'     : tester.getStatus().color,
                                                                            'CAVEATS'   : list(tester.getCaveats()),
                                                                            'OUTPUT'    : job.getOutput(),
                                                                            'COMMAND'   : tester.getCommand(self.options)}

            # Additional data to store (overwrites any previous matching keys)
            self.options.results_storage[test_dir].update(job.getMetaData())

        if self.options.output_dir:
            self.results_storage = os.path.join(self.options.output_dir, self.results_storage)

        # NOTE(review): this tests the truthiness of the results dict (always
        # non-empty once INPUT_FILE_NAME is set), not a user option — confirm intent
        if self.options.results_storage:
            try:
                with open(self.results_storage, 'w') as data_file:
                    json.dump(self.options.results_storage, data_file, indent=2)
            except UnicodeDecodeError:
                print('\nERROR: Unable to write results due to unicode decode/encode error')

                # write to a plain file to aid in reproducing error
                with open(self.results_storage + '.unicode_error' , 'w') as f:
                    f.write(self.options.results_storage)

                sys.exit(1)
            except IOError:
                print('\nERROR: Unable to write results due to permissions')
                sys.exit(1)

        try:
            # Write one file, with verbose information (--file)
            if self.options.file:
                with open(os.path.join(self.output_dir, self.options.file), 'w') as f:
                    for job in all_jobs:
                        tester = job.getTester()

                        # Do not write information about silent tests
                        if tester.isSilent():
                            continue

                        formatted_results = util.formatResult( job, self.options, result=job.getOutput(), color=False)
                        f.write(formatted_results + '\n')

            # Write a separate file for each test with verbose information (--sep-files, --sep-files-ok, --sep-files-fail)
            if ((self.options.ok_files and self.num_passed)
                or (self.options.fail_files and self.num_failed)):
                for job in all_jobs:
                    tester = job.getTester()

                    if self.options.output_dir:
                        output_dir = self.options.output_dir
                    else:
                        output_dir = tester.getTestDir()

                    # Yes, by design test dir will be a part of the output file name
                    output_file = os.path.join(output_dir, '.'.join([os.path.basename(tester.getTestDir()),
                                                                     job.getTestNameShort(),
                                                                     tester.getStatus().status,
                                                                     'txt']))

                    formatted_results = util.formatResult(job, self.options, result=job.getOutput(), color=False)

                    # Passing tests
                    if self.options.ok_files and tester.isPass():
                        with open(output_file, 'w') as f:
                            f.write(formatted_results)

                    # Failing tests
                    if self.options.fail_files and tester.isFail():
                        with open(output_file, 'w') as f:
                            f.write(formatted_results)

        except IOError:
            print('Permission error while writing results to disc')
            sys.exit(1)
        # Fixed: a bare 'except:' also swallowed SystemExit/KeyboardInterrupt;
        # limit the catch-all to ordinary exceptions
        except Exception:
            print('Error while writing results to disc')
            sys.exit(1)
Example #13
0
    def cleanup(self):
        """
        Print parser errors, optionally re-print the final results table,
        emit the summary footer, and (for real runs) write results to disc.
        """
        # Not interesting in printing any final results if we are cleaning up old queue manager runs
        if self.options.queue_cleanup:
            try:
                os.remove(self.results_storage)
            except OSError:
                # The session file may already be gone; removal is best-effort
                pass
            return

        # Print the results table again if a bunch of output was spewed to the screen between
        # tests as they were running
        if len(self.parse_errors) > 0:
            print('\n\nParser Errors:\n' + ('-' * (util.TERM_COLS)))
            for err in self.parse_errors:
                print(util.colorText(err, 'RED', html=True, colored=self.options.colored, code=self.options.code))

        if (self.options.verbose or (self.num_failed != 0 and not self.options.quiet)) and not self.options.dry_run:
            print('\n\nFinal Test Results:\n' + ('-' * (util.TERM_COLS)))
            # Sorted by result so like outcomes (e.g. failures) group together
            for (job, result, timing) in sorted(self.test_table, key=lambda x: x[1], reverse=True):
                print(util.formatResult(job, self.options, caveats=True))

        # Total wall time for the whole run
        time = clock() - self.start_time

        print('-' * (util.TERM_COLS))

        # Mask off TestHarness error codes to report parser errors
        fatal_error = ''
        if self.error_code:
            fatal_error += ', <r>FATAL TEST HARNESS ERROR</r>'
        if len(self.parse_errors) > 0:
            fatal_error += ', <r>FATAL PARSER ERROR</r>'
            self.error_code = 1

        # Alert the user to their session file
        if self.options.queueing:
            print('Your session file is %s' % self.results_storage)

        # Print a different footer when performing a dry run
        if self.options.dry_run:
            print('Processed %d tests in %.1f seconds.' % (self.num_passed+self.num_skipped, time))
            summary = '<b>%d would run</b>'
            summary += ', <b>%d would be skipped</b>'
            summary += fatal_error
            print(util.colorText( summary % (self.num_passed, self.num_skipped),  "", html = True, \
                             colored=self.options.colored, code=self.options.code ))

        else:
            print('Ran %d tests in %.1f seconds.' % (self.num_passed+self.num_failed, time))

            # Color each count: green/cyan/red when non-zero, bold otherwise
            if self.num_passed:
                summary = '<g>%d passed</g>'
            else:
                summary = '<b>%d passed</b>'
            summary += ', <b>%d skipped</b>'
            if self.num_pending:
                summary += ', <c>%d pending</c>'
            else:
                summary += ', <b>%d pending</b>'
            if self.num_failed:
                summary += ', <r>%d FAILED</r>'
            else:
                summary += ', <b>%d failed</b>'
            if self.scheduler.maxFailures():
                summary += '\n<r>MAX FAILURES REACHED</r>'

            summary += fatal_error

            print(util.colorText( summary % (self.num_passed, self.num_skipped, self.num_pending, self.num_failed),  "", html = True, \
                             colored=self.options.colored, code=self.options.code ))

            # Perform any write-to-disc operations
            self.writeResults()