Example #1
    def setUp(self):
        self.shortDescription()

        # Setup the timer and other custom init jazz
        self.fixture_metrics.total_tests += 1
        self.test_metrics = TestRunMetrics()
        self.test_metrics.timer.start()

        # Log header information
        self.fixture_log.info("{0}".format('=' * 56))
        self.fixture_log.info("Test Case.: {0}".format(self._testMethodName))
        self.fixture_log.info("Created.At: {0}".format(self.test_metrics.timer.
                                                       start_time))
        self.fixture_log.info("{0}".format(self.logDescription()))
        self.fixture_log.info("{0}".format('=' * 56))

        """ @todo: Get rid of this hard coded value for the statistics """
        # set up the stats log
        self.stats_log = PBStatisticsLog(
            "{0}.statistics.csv".format(
                self._testMethodName),
            "{0}/../statistics/".format(engine_config.log_directory))

        # Let the base handle whatever hoodoo it needs
        unittest.TestCase.setUp(self)
Example #2
    def start_test_metrics(self, class_name, test_name, test_description=None):
        """Creates a new Metrics object and starts reporting to it.  Useful
        for creating metrics for individual tests.
        """

        test_description = test_description or "No Test description."
        self.metrics.total_tests += 1
        self.test_metrics = TestRunMetrics()
        self.test_metrics.timer.start()
        root_log_dir = os.environ['CAFE_ROOT_LOG_PATH']
        self.stats_log = PBStatisticsLog(
            "{0}.{1}.statistics.csv".format(class_name, test_name),
            "{0}/statistics/".format(root_log_dir))

        log_info_block(self.logger.log,
                       [('Test Case', test_name),
                        ('Created At', self.metrics.timer.start_time),
                        (test_description, '')])
Example #3
    def setUp(self):
        # Setup the timer and other custom init jazz
        self.fixture_metrics.total_tests += 1
        self.test_metrics = TestRunMetrics()
        self.test_metrics.timer.start()

        # Log header information
        self.fixture_log.info("{0}".format('=' * 56))
        self.fixture_log.info("Test Case.: {0}".format(self._testMethodName))
        self.fixture_log.info("Created.At: {0}".format(
            self.test_metrics.timer.start_time))
        if self.shortDescription():
            self.fixture_log.info("{0}".format(self.shortDescription()))
        self.fixture_log.info("{0}".format('=' * 56))
        ''' @todo: Get rid of this hard coded value for the statistics '''
        # set up the stats log
        self.stats_log = PBStatisticsLog(
            "{0}.statistics.csv".format(self._testMethodName),
            "{0}/../statistics/".format(engine_config.log_directory))

        # Let the base handle whatever hoodoo it needs
        unittest.TestCase.setUp(self)
Example #4
    def setUp(self):
        self.shortDescription()

        # Setup the timer and other custom init jazz
        self.fixture_metrics.total_tests += 1
        self.test_metrics = TestRunMetrics()
        self.test_metrics.timer.start()

        # Log header information
        self.fixture_log.info("{0}".format('=' * 56))
        self.fixture_log.info("Test Case.: {0}".format(self._testMethodName))
        self.fixture_log.info("Created.At: {0}".format(
            self.test_metrics.timer.start_time))
        self.fixture_log.info("{0}".format(self.logDescription()))
        self.fixture_log.info("{0}".format('=' * 56))
        """ @todo: Get rid of this hard coded value for the statistics """
        # set up the stats log
        root_log_dir = os.environ['CAFE_ROOT_LOG_PATH']
        self.stats_log = PBStatisticsLog(
            "{0}.statistics.csv".format(self._testMethodName),
            "{0}/statistics/".format(root_log_dir))

        # Let the base handle whatever hoodoo it needs
        unittest.TestCase.setUp(self)
Example #5
    def start_test_metrics(self, class_name, test_name, test_description=None):
        """Creates a new Metrics object and starts reporting to it.  Useful
        for creating metrics for individual tests.
        """

        test_description = test_description or "No Test description."
        self.metrics.total_tests += 1
        self.test_metrics = TestRunMetrics()
        self.test_metrics.timer.start()
        root_log_dir = os.environ['CAFE_ROOT_LOG_PATH']
        self.stats_log = PBStatisticsLog(
            "{0}.{1}.statistics.csv".format(class_name, test_name),
            "{0}/statistics/".format(root_log_dir))

        log_info_block(
            self.logger.log,
            [('Test Case', test_name),
             ('Created At', self.metrics.timer.start_time),
             (test_description, '')])
Example #6
class FixtureReporter(object):
    """Provides logging and metrics reporting for any test fixture"""
    def __init__(self, parent_object):
        self.logger = _FixtureLogger(parent_object)
        self.metrics = TestRunMetrics()
        self.report_name = str(get_object_namespace(parent_object))

    def start(self):
        """Starts logging and metrics reporting for the fixture"""
        self.logger.start()
        self.metrics.timer.start()

        log_info_block(self.logger.log,
                       [('Fixture', self.report_name),
                        ('Created At', self.metrics.timer.start_time)])

    def stop(self):
        """Logs all collected metrics and stats, then stops logging and metrics
        reporting for the fixture.
        """

        self.metrics.timer.stop()
        if (self.metrics.total_passed == self.metrics.total_tests):
            self.metrics.result = TestResultTypes.PASSED
        else:
            self.metrics.result = TestResultTypes.FAILED

        log_info_block(
            self.logger.log,
            [('Fixture', self.report_name), ('Result', self.metrics.result),
             ('Start Time', self.metrics.timer.start_time),
             ('Elapsed Time', self.metrics.timer.get_elapsed_time()),
             ('Total Tests', self.metrics.total_tests),
             ('Total Passed', self.metrics.total_passed),
             ('Total Failed', self.metrics.total_failed),
             ('Total Errored', self.metrics.total_errored)])
        self.logger.stop()

    def start_test_metrics(self, class_name, test_name, test_description=None):
        """Creates a new Metrics object and starts reporting to it.  Useful
        for creating metrics for individual tests.
        """

        test_description = test_description or "No Test description."
        self.metrics.total_tests += 1
        self.test_metrics = TestRunMetrics()
        self.test_metrics.timer.start()
        root_log_dir = os.environ['CAFE_ROOT_LOG_PATH']
        self.stats_log = PBStatisticsLog(
            "{0}.{1}.statistics.csv".format(class_name, test_name),
            "{0}/statistics/".format(root_log_dir))

        log_info_block(self.logger.log,
                       [('Test Case', test_name),
                        ('Created At', self.metrics.timer.start_time),
                        (test_description, '')])

    def stop_test_metrics(self, test_name, test_result):
        """Stops reporting on the Metrics object created in start_test_metrics.
        Logs all collected metrics.
        Useful for logging metrics for an individual test at its conclusion.
        """
        try:
            self.test_metrics.timer.stop()
        except AttributeError:
            warn("\nTest metrics not being logged! "
                 "stop_test_metrics is being called without "
                 "start_test_metrics having been previously called.\n\n")
            log_info_block(self.logger.log, [
                ('Test Case', test_name), ('Result', test_result),
                ('Start Time', "Unknown, start_test_metrics was not called"),
                ('Elapsed Time', "Unknown, start_test_metrics was not called")
            ])
            return

        if test_result == TestResultTypes.PASSED:
            self.metrics.total_passed += 1

        if test_result == TestResultTypes.ERRORED:
            self.metrics.total_errored += 1

        if test_result == TestResultTypes.FAILED:
            self.metrics.total_failed += 1

        self.test_metrics.result = test_result

        log_info_block(
            self.logger.log,
            [('Test Case', test_name), ('Result', self.test_metrics.result),
             ('Start Time', self.test_metrics.timer.start_time),
             ('Elapsed Time', self.test_metrics.timer.get_elapsed_time())])
        self.stats_log.report(self.test_metrics)
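
For orientation, here is a minimal sketch of how the FixtureReporter above is driven through one test. It assumes the framework objects shown in the example (FixtureReporter, TestResultTypes) are importable and that CAFE_ROOT_LOG_PATH points at a writable directory; the fixture class, test name, and path below are placeholders.

import os

os.environ.setdefault('CAFE_ROOT_LOG_PATH', '/tmp/cafe-logs')  # placeholder path


class ExampleFixture(object):
    """Stand-in fixture; the reporter only uses it to derive a report name."""


reporter = FixtureReporter(ExampleFixture())
reporter.start()  # open the fixture log and start the fixture timer

# Per-test bookkeeping: opens ExampleFixture.test_example.statistics.csv under
# $CAFE_ROOT_LOG_PATH/statistics/ and logs the test header block.
reporter.start_test_metrics('ExampleFixture', 'test_example',
                            'Illustrative test')
# ... run the test body here ...
reporter.stop_test_metrics('test_example', TestResultTypes.PASSED)

reporter.stop()  # log fixture totals and stop the fixture logger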
Example #7
class FixtureReporter(object):
    """Provides logging and metrics reporting for any test fixture"""

    def __init__(self, parent_object):
        self.logger = _FixtureLogger(parent_object)
        self.metrics = TestRunMetrics()
        self.report_name = str(get_object_namespace(parent_object))

    def start(self):
        """Starts logging and metrics reporting for the fixture"""
        self.logger.start()
        self.metrics.timer.start()

        log_info_block(
            self.logger.log,
            [('Fixture', self.report_name),
             ('Created At', self.metrics.timer.start_time)])

    def stop(self):
        """Logs all collected metrics and stats, then stops logging and metrics
        reporting for the fixture.
        """

        self.metrics.timer.stop()
        if (self.metrics.total_passed == self.metrics.total_tests):
            self.metrics.result = TestResultTypes.PASSED
        else:
            self.metrics.result = TestResultTypes.FAILED

        log_info_block(
            self.logger.log,
            [('Fixture', self.report_name),
             ('Result', self.metrics.result),
             ('Start Time', self.metrics.timer.start_time),
             ('Elapsed Time', self.metrics.timer.get_elapsed_time()),
             ('Total Tests', self.metrics.total_tests),
             ('Total Passed', self.metrics.total_passed),
             ('Total Failed', self.metrics.total_failed),
             ('Total Errored', self.metrics.total_errored)])
        self.logger.stop()

    def start_test_metrics(self, class_name, test_name, test_description=None):
        """Creates a new Metrics object and starts reporting to it.  Useful
        for creating metrics for individual tests.
        """

        test_description = test_description or "No Test description."
        self.metrics.total_tests += 1
        self.test_metrics = TestRunMetrics()
        self.test_metrics.timer.start()
        root_log_dir = os.environ['CAFE_ROOT_LOG_PATH']
        self.stats_log = PBStatisticsLog(
            "{0}.{1}.statistics.csv".format(class_name, test_name),
            "{0}/statistics/".format(root_log_dir))

        log_info_block(
            self.logger.log,
            [('Test Case', test_name),
             ('Created At', self.metrics.timer.start_time),
             (test_description, '')])

    def stop_test_metrics(self, test_name, test_result):
        """Stops reporting on the Metrics object created in start_test_metrics.
        Logs all collected metrics.
        Useful for logging metrics for an individual test at its conclusion.
        """

        self.test_metrics.timer.stop()

        if test_result == TestResultTypes.PASSED:
            self.metrics.total_passed += 1

        if test_result == TestResultTypes.ERRORED:
            self.metrics.total_errored += 1

        if test_result == TestResultTypes.FAILED:
            self.metrics.total_failed += 1

        self.test_metrics.result = test_result

        log_info_block(
            self.logger.log,
            [('Test Case', test_name),
             ('Result', self.test_metrics.result),
             ('Start Time', self.test_metrics.timer.start_time),
             ('Elapsed Time', self.test_metrics.timer.get_elapsed_time())])
        self.stats_log.report(self.test_metrics)
Example #8
class BaseTestFixture(unittest.TestCase):
    """
    @summary: Foundation for TestRepo Test Fixture.
    @note: This is the base class for ALL test cases in TestRepo. Add new
           functionality carefully.
    @see: http://docs.python.org/library/unittest.html#unittest.TestCase
    """
    def shortDescription(self):
        """
        @summary: Returns a formatted description of the test
        """
        short_desc = None

        if os.environ["VERBOSE"] == "true" and self._testMethodDoc:
            temp = self._testMethodDoc.strip("\n")
            short_desc = re.sub(r"[ ]{2,}", "", temp).strip("\n")
        return short_desc

    def logDescription(self):
        log_desc = None
        if self._testMethodDoc:
            log_desc = "\n{0}".format(
                re.sub(r"[ ]{2,}", "", self._testMethodDoc).strip("\n"))
        return log_desc

    @classmethod
    def assertClassSetupFailure(cls, message):
        """
        @summary: Use this if you need to fail from a Test Fixture's
                  setUpClass() method
        """
        cls.fixture_log.error("FATAL: %s:%s" % (cls.__name__, message))
        raise AssertionError("FATAL: %s:%s" % (cls.__name__, message))

    @classmethod
    def assertClassTeardownFailure(cls, message):
        """
        @summary: Use this if you need to fail from a Test Fixture's
                  tearDownClass() method
        """
        cls.fixture_log.error("FATAL: %s:%s" % (cls.__name__, message))
        raise AssertionError("FATAL: %s:%s" % (cls.__name__, message))

    @classmethod
    def setUpClass(cls):
        super(BaseTestFixture, cls).setUpClass()

        #Master Config Provider

        #Setup root log handler only if the root logger doesn't already have one
        if cclogging.getLogger('').handlers == []:
            cclogging.getLogger('').addHandler(
                cclogging.setup_new_cchandler('cc.master'))

        #Setup fixture log, which is really just a copy of the master log
        #for the duration of this test fixture
        cls.fixture_log = cclogging.getLogger('')
        cls._fixture_log_handler = cclogging.setup_new_cchandler(
            cclogging.get_object_namespace(cls))
        cls.fixture_log.addHandler(cls._fixture_log_handler)

        """
        @todo: Upgrade the metrics to be more unittest compatible.
        Currently the unittest results are not available at the fixture level,
        only the test case or the test suite and runner level.
        """
        # Setup the fixture level metrics
        cls.fixture_metrics = TestRunMetrics()
        cls.fixture_metrics.timer.start()

        # Report
        cls.fixture_log.info("{0}".format('=' * 56))
        cls.fixture_log.info("Fixture...: {0}".format(
                             str(cclogging.get_object_namespace(cls))))
        cls.fixture_log.info("Created At: {0}"
                             .format(cls.fixture_metrics.timer.start_time))
        cls.fixture_log.info("{0}".format('=' * 56))

    @classmethod
    def tearDownClass(cls):
        # Kill the timers and calculate the metrics objects
        cls.fixture_metrics.timer.stop()
        if (cls.fixture_metrics.total_passed ==
                cls.fixture_metrics.total_tests):
            cls.fixture_metrics.result = TestResultTypes.PASSED
        else:
            cls.fixture_metrics.result = TestResultTypes.FAILED

        # Report
        cls.fixture_log.info("{0}".format('=' * 56))
        cls.fixture_log.info("Fixture.....: {0}".format(
                             str(cclogging.get_object_namespace(cls))))
        cls.fixture_log.info("Result......: {0}"
                             .format(cls.fixture_metrics.result))
        cls.fixture_log.info("Start Time..: {0}"
                             .format(cls.fixture_metrics.timer.start_time))
        cls.fixture_log.info(
            "Elapsed Time: {0}".format(
                cls.fixture_metrics.timer.get_elapsed_time()))
        cls.fixture_log.info("Total Tests.: {0}"
                             .format(cls.fixture_metrics.total_tests))
        cls.fixture_log.info("Total Passed: {0}"
                             .format(cls.fixture_metrics.total_passed))
        cls.fixture_log.info("Total Failed: {0}"
                             .format(cls.fixture_metrics.total_failed))
        cls.fixture_log.info("{0}".format('=' * 56))

        #Remove the fixture log handler from the fixture log
        cls.fixture_log.removeHandler(cls._fixture_log_handler)

        #Call super teardown after we've finished our additions to teardown
        super(BaseTestFixture, cls).tearDownClass()

    def setUp(self):
        self.shortDescription()

        # Setup the timer and other custom init jazz
        self.fixture_metrics.total_tests += 1
        self.test_metrics = TestRunMetrics()
        self.test_metrics.timer.start()

        # Log header information
        self.fixture_log.info("{0}".format('=' * 56))
        self.fixture_log.info("Test Case.: {0}".format(self._testMethodName))
        self.fixture_log.info("Created.At: {0}".format(self.test_metrics.timer.
                                                       start_time))
        self.fixture_log.info("{0}".format(self.logDescription()))
        self.fixture_log.info("{0}".format('=' * 56))

        """ @todo: Get rid of this hard coded value for the statistics """
        # set up the stats log
        self.stats_log = PBStatisticsLog(
            "{0}.statistics.csv".format(
                self._testMethodName),
            "{0}/../statistics/".format(engine_config.log_directory))

        # Let the base handle whatever hoodoo it needs
        unittest.TestCase.setUp(self)

    def tearDown(self):
        # Kill the timer and other custom destroy jazz
        self.test_metrics.timer.stop()

        """
        @todo: This MUST be upgraded this from resultForDoCleanups into a
               better pattern or working with the result object directly.
               This is related to the todo in L{TestRunMetrics}
        """
        # Build metrics
        if any(r for r in self._resultForDoCleanups.failures
               if self._test_name_matches_result(self._testMethodName, r)):
            self.fixture_metrics.total_failed += 1
            self.test_metrics.result = TestResultTypes.FAILED
        elif any(r for r in self._resultForDoCleanups.errors
                 if self._test_name_matches_result(self._testMethodName, r)):
            self.fixture_metrics.total_failed += 1
            self.test_metrics.result = TestResultTypes.ERRORED
        else:
            self.fixture_metrics.total_passed += 1
            self.test_metrics.result = TestResultTypes.PASSED

        # Report
        self.fixture_log.info("{0}".format('=' * 56))
        self.fixture_log.info("Test Case...: {0}".
                              format(self._testMethodName))
        self.fixture_log.info("Result......: {0}".
                              format(self.test_metrics.result))
        self.fixture_log.info("Start Time...: {0}".
                              format(self.test_metrics.timer.start_time))
        self.fixture_log.info(
            "Elapsed Time: {0}".format(
                self.test_metrics.timer.get_elapsed_time()))
        self.fixture_log.info("{0}".format('=' * 56))

        # Write out our statistics
        self.stats_log.report(self.test_metrics)

        # Let the base handle whatever hoodoo it needs
        super(BaseTestFixture, self).tearDown()

    def _test_name_matches_result(self, name, test_result):
        """Checks if a test result matches a specific test name."""
        # Try to get the result portion of the tuple
        try:
            result = test_result[0]
        except IndexError:
            return False

        # Verify the object has the correct property
        if hasattr(result, '_testMethodName'):
            return result._testMethodName == name
        else:
            return False
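
To show where these hooks fire in practice, here is a hypothetical test case built on the BaseTestFixture above; the class name, test name, and assertion are illustrative only, not part of the framework.

class ExampleQuotaTest(BaseTestFixture):

    def test_quota_is_positive(self):
        """Verify the configured quota is a positive number."""
        # By this point setUp() has incremented fixture_metrics.total_tests,
        # started self.test_metrics.timer and logged the '=' header banner.
        self.assertGreater(1, 0)
        # tearDown() will classify this run via _resultForDoCleanups, log the
        # result block, and write a row to
        # <log_directory>/../statistics/test_quota_is_positive.statistics.csv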
Example #9
class BaseTestFixture(unittest.TestCase):
    '''
    @summary: Foundation for TestRepo Test Fixture.
    @note: This is the base class for ALL test cases in TestRepo. Add new
           functionality carefully.
    @see: http://docs.python.org/library/unittest.html#unittest.TestCase
    '''
    @classmethod
    def assertClassSetupFailure(cls, message):
        '''
        @summary: Use this if you need to fail from a Test Fixture's
                  setUpClass() method
        '''
        cls.fixture_log.error("FATAL: %s:%s" % (cls.__name__, message))
        raise AssertionError("FATAL: %s:%s" % (cls.__name__, message))

    @classmethod
    def assertClassTeardownFailure(cls, message):
        '''
        @summary: Use this if you need to fail from a Test Fixture's
                  tearDownClass() method
        '''
        cls.fixture_log.error("FATAL: %s:%s" % (cls.__name__, message))
        raise AssertionError("FATAL: %s:%s" % (cls.__name__, message))

    def shortDescription(self):
        '''
        @summary: Returns a one-line description of the test 
        '''
        if self._testMethodDoc is not None:
            if self._testMethodDoc.startswith("\n") is True:
                self._testMethodDoc = " ".join(
                    self._testMethodDoc.splitlines()).strip()
        return unittest.TestCase.shortDescription(self)

    @classmethod
    def setUpClass(cls):
        super(BaseTestFixture, cls).setUpClass()

        #Master Config Provider

        #Setup root log handler only if the root logger doesn't already have one
        if cclogging.getLogger('').handlers == []:
            cclogging.getLogger('').addHandler(
                cclogging.setup_new_cchandler('cc.master'))

        #Setup fixture log, which is really just a copy of the master log
        #for the duration of this test fixture
        cls.fixture_log = cclogging.getLogger('')
        cls._fixture_log_handler = cclogging.setup_new_cchandler(
            cclogging.get_object_namespace(cls))
        cls.fixture_log.addHandler(cls._fixture_log_handler)
        '''
        @todo: Upgrade the metrics to be more unittest compatible.
        Currently the unittest results are not available at the fixture level,
        only the test case or the test suite and runner level.
        '''
        # Setup the fixture level metrics
        cls.fixture_metrics = TestRunMetrics()
        cls.fixture_metrics.timer.start()

        # Report
        cls.fixture_log.info("{0}".format('=' * 56))
        cls.fixture_log.info("Fixture...: {0}".format(
            str(cclogging.get_object_namespace(cls))))
        cls.fixture_log.info("Created At: {0}".format(
            cls.fixture_metrics.timer.start_time))
        cls.fixture_log.info("{0}".format('=' * 56))

    @classmethod
    def tearDownClass(cls):
        # Kill the timers and calculate the metrics objects
        cls.fixture_metrics.timer.stop()
        if (cls.fixture_metrics.total_passed ==
                cls.fixture_metrics.total_tests):
            cls.fixture_metrics.result = TestResultTypes.PASSED
        else:
            cls.fixture_metrics.result = TestResultTypes.FAILED

        # Report
        cls.fixture_log.info("{0}".format('=' * 56))
        cls.fixture_log.info("Fixture.....: {0}".format(
            str(cclogging.get_object_namespace(cls))))
        cls.fixture_log.info("Result......: {0}".format(
            cls.fixture_metrics.result))
        cls.fixture_log.info("Start Time..: {0}".format(
            cls.fixture_metrics.timer.start_time))
        cls.fixture_log.info("Elapsed Time: {0}".format(
            cls.fixture_metrics.timer.get_elapsed_time()))
        cls.fixture_log.info("Total Tests.: {0}".format(
            cls.fixture_metrics.total_tests))
        cls.fixture_log.info("Total Passed: {0}".format(
            cls.fixture_metrics.total_passed))
        cls.fixture_log.info("Total Failed: {0}".format(
            cls.fixture_metrics.total_failed))
        cls.fixture_log.info("{0}".format('=' * 56))

        #Remove the fixture log handler from the fixture log
        cls.fixture_log.removeHandler(cls._fixture_log_handler)

        #Call super teardown after we've finished our additions to teardown
        super(BaseTestFixture, cls).tearDownClass()

    def setUp(self):
        # Setup the timer and other custom init jazz
        self.fixture_metrics.total_tests += 1
        self.test_metrics = TestRunMetrics()
        self.test_metrics.timer.start()

        # Log header information
        self.fixture_log.info("{0}".format('=' * 56))
        self.fixture_log.info("Test Case.: {0}".format(self._testMethodName))
        self.fixture_log.info("Created.At: {0}".format(
            self.test_metrics.timer.start_time))
        if self.shortDescription():
            self.fixture_log.info("{0}".format(self.shortDescription()))
        self.fixture_log.info("{0}".format('=' * 56))
        ''' @todo: Get rid of this hard coded value for the statistics '''
        # set up the stats log
        self.stats_log = PBStatisticsLog(
            "{0}.statistics.csv".format(self._testMethodName),
            "{0}/../statistics/".format(engine_config.log_directory))

        # Let the base handle whatever hoodoo it needs
        unittest.TestCase.setUp(self)

    def tearDown(self):
        # Kill the timer and other custom destroy jazz
        self.test_metrics.timer.stop()
        '''
        @todo: This MUST be upgraded from resultForDoCleanups to a better
               pattern, or to working with the result object directly.
               This is related to the todo in L{TestRunMetrics}
        '''
        # Build metrics
        if self._resultForDoCleanups.wasSuccessful():
            self.fixture_metrics.total_passed += 1
            self.test_metrics.result = TestResultTypes.PASSED
        else:
            self.fixture_metrics.total_failed += 1
            self.test_metrics.result = TestResultTypes.FAILED

        # Report
        self.fixture_log.info("{0}".format('=' * 56))
        self.fixture_log.info("Test Case...: {0}".format(self._testMethodName))
        self.fixture_log.info("Result......: {0}".format(
            self.test_metrics.result))
        self.fixture_log.info("Start Time...: {0}".format(
            self.test_metrics.timer.start_time))
        self.fixture_log.info("Elapsed Time: {0}".format(
            self.test_metrics.timer.get_elapsed_time()))
        self.fixture_log.info("{0}".format('=' * 56))

        # Write out our statistics
        self.stats_log.report(self.test_metrics)

        # Let the base handle whatever hoodoo it needs
        super(BaseTestFixture, self).tearDown()
Example #10
class BaseTestFixture(unittest.TestCase):
    '''
    @summary: Foundation for TestRepo Test Fixture.
    @note: This is the base class for ALL test cases in TestRepo. Add new
           functionality carefully.
    @see: http://docs.python.org/library/unittest.html#unittest.TestCase
    '''
    @classmethod
    def assertClassSetupFailure(cls, message):
        '''
        @summary: Use this if you need to fail from a Test Fixture's
                  setUpClass() method
        '''
        cls.fixture_log.error("FATAL: %s:%s" % (cls.__name__, message))
        raise AssertionError("FATAL: %s:%s" % (cls.__name__, message))

    @classmethod
    def assertClassTeardownFailure(cls, message):
        '''
        @summary: Use this if you need to fail from a Test Fixture's
                  tearDownClass() method
        '''
        cls.fixture_log.error("FATAL: %s:%s" % (cls.__name__, message))
        raise AssertionError("FATAL: %s:%s" % (cls.__name__, message))

    def shortDescription(self):
        '''
        @summary: Returns a one-line description of the test
        '''
        if self._testMethodDoc is not None:
            if self._testMethodDoc.startswith("\n") is True:
                self._testMethodDoc = " ".join(
                    self._testMethodDoc.splitlines()).strip()
        return unittest.TestCase.shortDescription(self)

    @classmethod
    def setUpClass(cls):
        super(BaseTestFixture, cls).setUpClass()

        #Master Config Provider

        #Setup root log handler only if the root logger doesn't already have one
        if cclogging.getLogger('').handlers == []:
            cclogging.getLogger('').addHandler(
                cclogging.setup_new_cchandler('cc.master'))

        #Setup fixture log, which is really just a copy of the master log
        #for the duration of this test fixture
        cls.fixture_log = cclogging.getLogger('')
        cls._fixture_log_handler = cclogging.setup_new_cchandler(
            cclogging.get_object_namespace(cls))
        cls.fixture_log.addHandler(cls._fixture_log_handler)

        '''
        @todo: Upgrade the metrics to be more unittest compatible.
        Currently the unittest results are not available at the fixture level,
        only the test case or the test suite and runner level.
        '''
        # Setup the fixture level metrics
        cls.fixture_metrics = TestRunMetrics()
        cls.fixture_metrics.timer.start()

        # Report
        cls.fixture_log.info("{0}".format('=' * 56))
        cls.fixture_log.info("Fixture...: {0}".format(
                             str(cclogging.get_object_namespace(cls))))
        cls.fixture_log.info("Created At: {0}"
                             .format(cls.fixture_metrics.timer.start_time))
        cls.fixture_log.info("{0}".format('=' * 56))

    @classmethod
    def tearDownClass(cls):
        # Kill the timers and calculate the metrics objects
        cls.fixture_metrics.timer.stop()
        if (cls.fixture_metrics.total_passed ==
                cls.fixture_metrics.total_tests):
            cls.fixture_metrics.result = TestResultTypes.PASSED
        else:
            cls.fixture_metrics.result = TestResultTypes.FAILED

        # Report
        cls.fixture_log.info("{0}".format('=' * 56))
        cls.fixture_log.info("Fixture.....: {0}".format(
                             str(cclogging.get_object_namespace(cls))))
        cls.fixture_log.info("Result......: {0}"
                             .format(cls.fixture_metrics.result))
        cls.fixture_log.info("Start Time..: {0}"
                             .format(cls.fixture_metrics.timer.start_time))
        cls.fixture_log.info(
            "Elapsed Time: {0}".format(
                cls.fixture_metrics.timer.get_elapsed_time()))
        cls.fixture_log.info("Total Tests.: {0}"
                             .format(cls.fixture_metrics.total_tests))
        cls.fixture_log.info("Total Passed: {0}"
                             .format(cls.fixture_metrics.total_passed))
        cls.fixture_log.info("Total Failed: {0}"
                             .format(cls.fixture_metrics.total_failed))
        cls.fixture_log.info("{0}".format('=' * 56))

        #Remove the fixture log handler from the fixture log
        cls.fixture_log.removeHandler(cls._fixture_log_handler)

        #Call super teardown after we've finished our additions to teardown
        super(BaseTestFixture, cls).tearDownClass()

    def setUp(self):
        # Setup the timer and other custom init jazz
        self.fixture_metrics.total_tests += 1
        self.test_metrics = TestRunMetrics()
        self.test_metrics.timer.start()

        # Log header information
        self.fixture_log.info("{0}".format('=' * 56))
        self.fixture_log.info("Test Case.: {0}".format(self._testMethodName))
        self.fixture_log.info("Created.At: {0}".format(self.test_metrics.timer.
                                                       start_time))
        if self.shortDescription():
            self.fixture_log.info("{0}".format(self.shortDescription()))
        self.fixture_log.info("{0}".format('=' * 56))

        ''' @todo: Get rid of this hard coded value for the statistics '''
        # set up the stats log
        self.stats_log = PBStatisticsLog(
            "{0}.statistics.csv".format(
                self._testMethodName),
            "{0}/../statistics/".format(engine_config.log_directory))

        # Let the base handle whatever hoodoo it needs
        unittest.TestCase.setUp(self)

    def tearDown(self):
        # Kill the timer and other custom destroy jazz
        self.test_metrics.timer.stop()

        '''
        @todo: This MUST be upgraded from resultForDoCleanups to a better
               pattern, or to working with the result object directly.
               This is related to the todo in L{TestRunMetrics}
        '''
        # Build metrics
        if self._resultForDoCleanups.wasSuccessful():
            self.fixture_metrics.total_passed += 1
            self.test_metrics.result = TestResultTypes.PASSED
        else:
            self.fixture_metrics.total_failed += 1
            self.test_metrics.result = TestResultTypes.FAILED

        # Report
        self.fixture_log.info("{0}".format('=' * 56))
        self.fixture_log.info("Test Case...: {0}".
                              format(self._testMethodName))
        self.fixture_log.info("Result......: {0}".
                              format(self.test_metrics.result))
        self.fixture_log.info("Start Time...: {0}".
                              format(self.test_metrics.timer.start_time))
        self.fixture_log.info(
            "Elapsed Time: {0}".format(
                self.test_metrics.timer.get_elapsed_time()))
        self.fixture_log.info("{0}".format('=' * 56))

        # Write out our statistics
        self.stats_log.report(self.test_metrics)

        # Let the base handle whatever hoodoo it needs
        super(BaseTestFixture, self).tearDown()
Example #11
class BaseTestFixture(unittest.TestCase):
    """
    @summary: Foundation for TestRepo Test Fixture.
    @note: This is the base class for ALL test cases in TestRepo. Add new
           functionality carefully.
    @see: http://docs.python.org/library/unittest.html#unittest.TestCase
    """
    def shortDescription(self):
        """
        @summary: Returns a formatted description of the test
        """
        short_desc = None

        if os.environ["VERBOSE"] == "true" and self._testMethodDoc:
            temp = self._testMethodDoc.strip("\n")
            short_desc = re.sub(r"[ ]{2,}", "", temp).strip("\n")
        return short_desc

    def logDescription(self):
        log_desc = None
        if self._testMethodDoc:
            log_desc = "\n{0}".format(
                re.sub(r"[ ]{2,}", "", self._testMethodDoc).strip("\n"))
        return log_desc

    @classmethod
    def assertClassSetupFailure(cls, message):
        """
        @summary: Use this if you need to fail from a Test Fixture's
                  setUpClass() method
        """
        cls.fixture_log.error("FATAL: %s:%s" % (cls.__name__, message))
        raise AssertionError("FATAL: %s:%s" % (cls.__name__, message))

    @classmethod
    def assertClassTeardownFailure(cls, message):
        """
        @summary: Use this if you need to fail from a Test Fixture's
                  tearDownClass() method
        """
        cls.fixture_log.error("FATAL: %s:%s" % (cls.__name__, message))
        raise AssertionError("FATAL: %s:%s" % (cls.__name__, message))

    @classmethod
    def setUpClass(cls):
        super(BaseTestFixture, cls).setUpClass()

        #Master Config Provider

        #Setup root log handler if the root logger doesn't already have one
        master_log_file_name = os.getenv('CAFE_MASTER_LOG_FILE_NAME')
        if cclogging.getLogger('').handlers == []:
            cclogging.getLogger('').addHandler(
                cclogging.setup_new_cchandler(master_log_file_name))

        #Setup fixture log, which is really just a copy of the master log
        #for the duration of this test fixture
        cls.fixture_log = cclogging.getLogger('')
        cls._fixture_log_handler = cclogging.setup_new_cchandler(
            cclogging.get_object_namespace(cls))
        cls.fixture_log.addHandler(cls._fixture_log_handler)
        """
        @todo: Upgrade the metrics to be more unittest compatible.
        Currently the unittest results are not available at the fixture level,
        only the test case or the test suite and runner level.
        """
        # Setup the fixture level metrics
        cls.fixture_metrics = TestRunMetrics()
        cls.fixture_metrics.timer.start()

        # Report
        cls.fixture_log.info("{0}".format('=' * 56))
        cls.fixture_log.info("Fixture...: {0}".format(
            str(cclogging.get_object_namespace(cls))))
        cls.fixture_log.info("Created At: {0}".format(
            cls.fixture_metrics.timer.start_time))
        cls.fixture_log.info("{0}".format('=' * 56))

    @classmethod
    def tearDownClass(cls):
        # Kill the timers and calculate the metrics objects
        cls.fixture_metrics.timer.stop()
        if (cls.fixture_metrics.total_passed ==
                cls.fixture_metrics.total_tests):
            cls.fixture_metrics.result = TestResultTypes.PASSED
        else:
            cls.fixture_metrics.result = TestResultTypes.FAILED

        # Report
        cls.fixture_log.info("{0}".format('=' * 56))
        cls.fixture_log.info("Fixture.....: {0}".format(
            str(cclogging.get_object_namespace(cls))))
        cls.fixture_log.info("Result......: {0}".format(
            cls.fixture_metrics.result))
        cls.fixture_log.info("Start Time..: {0}".format(
            cls.fixture_metrics.timer.start_time))
        cls.fixture_log.info("Elapsed Time: {0}".format(
            cls.fixture_metrics.timer.get_elapsed_time()))
        cls.fixture_log.info("Total Tests.: {0}".format(
            cls.fixture_metrics.total_tests))
        cls.fixture_log.info("Total Passed: {0}".format(
            cls.fixture_metrics.total_passed))
        cls.fixture_log.info("Total Failed: {0}".format(
            cls.fixture_metrics.total_failed))
        cls.fixture_log.info("{0}".format('=' * 56))

        #Remove the fixture log handler from the fixture log
        cls.fixture_log.removeHandler(cls._fixture_log_handler)

        #Call super teardown after we've finished our additions to teardown
        super(BaseTestFixture, cls).tearDownClass()

    def setUp(self):
        self.shortDescription()

        # Setup the timer and other custom init jazz
        self.fixture_metrics.total_tests += 1
        self.test_metrics = TestRunMetrics()
        self.test_metrics.timer.start()

        # Log header information
        self.fixture_log.info("{0}".format('=' * 56))
        self.fixture_log.info("Test Case.: {0}".format(self._testMethodName))
        self.fixture_log.info("Created.At: {0}".format(
            self.test_metrics.timer.start_time))
        self.fixture_log.info("{0}".format(self.logDescription()))
        self.fixture_log.info("{0}".format('=' * 56))
        """ @todo: Get rid of this hard coded value for the statistics """
        # set up the stats log
        root_log_dir = os.environ['CAFE_ROOT_LOG_PATH']
        self.stats_log = PBStatisticsLog(
            "{0}.statistics.csv".format(self._testMethodName),
            "{0}/statistics/".format(root_log_dir))

        # Let the base handle whatever hoodoo it needs
        unittest.TestCase.setUp(self)

    def tearDown(self):
        # Kill the timer and other custom destroy jazz
        self.test_metrics.timer.stop()
        """
        @todo: This MUST be upgraded this from resultForDoCleanups into a
               better pattern or working with the result object directly.
               This is related to the todo in L{TestRunMetrics}
        """
        # Build metrics
        if any(r for r in self._resultForDoCleanups.failures
               if self._test_name_matches_result(self._testMethodName, r)):
            self.fixture_metrics.total_failed += 1
            self.test_metrics.result = TestResultTypes.FAILED
        elif any(r for r in self._resultForDoCleanups.errors
                 if self._test_name_matches_result(self._testMethodName, r)):
            self.fixture_metrics.total_failed += 1
            self.test_metrics.result = TestResultTypes.ERRORED
        else:
            self.fixture_metrics.total_passed += 1
            self.test_metrics.result = TestResultTypes.PASSED

        # Report
        self.fixture_log.info("{0}".format('=' * 56))
        self.fixture_log.info("Test Case...: {0}".format(self._testMethodName))
        self.fixture_log.info("Result......: {0}".format(
            self.test_metrics.result))
        self.fixture_log.info("Start Time...: {0}".format(
            self.test_metrics.timer.start_time))
        self.fixture_log.info("Elapsed Time: {0}".format(
            self.test_metrics.timer.get_elapsed_time()))
        self.fixture_log.info("{0}".format('=' * 56))

        # Write out our statistics
        self.stats_log.report(self.test_metrics)

        # Let the base handle whatever hoodoo it needs
        super(BaseTestFixture, self).tearDown()

    def _test_name_matches_result(self, name, test_result):
        """Checks if a test result matches a specific test name."""
        # Try to get the result portion of the tuple
        try:
            result = test_result[0]
        except IndexError:
            return False

        # Verify the object has the correct property
        if hasattr(result, '_testMethodName'):
            return result._testMethodName == name
        else:
            return False
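
Example #11 pulls its configuration from environment variables instead of engine_config; below is a short sketch of the environment it expects before the fixture runs. The values are placeholders, not framework defaults.

import os

os.environ['CAFE_MASTER_LOG_FILE_NAME'] = 'cc.master'  # name for the root log handler
os.environ['CAFE_ROOT_LOG_PATH'] = '/tmp/cafe-logs'    # statistics CSVs go to <path>/statistics/
os.environ['VERBOSE'] = 'true'                         # shortDescription() returns the docstring only when "true"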