Example #1
    def stop_test_metrics(self, test_name, test_result):
        """Stops reporting on the Metrics object created in start_test_metrics.
        Logs all collected metrics.
        Useful for logging metrics for an individual test at the test's conclusion.
        """

        self.test_metrics.timer.stop()

        if test_result == TestResultTypes.PASSED:
            self.metrics.total_passed += 1

        if test_result == TestResultTypes.ERRORED:
            self.metrics.total_errored += 1

        if test_result == TestResultTypes.FAILED:
            self.metrics.total_failed += 1

        self.test_metrics.result = test_result

        log_info_block(
            self.logger.log,
            [('Test Case', test_name),
             ('Result', self.test_metrics.result),
             ('Start Time', self.test_metrics.timer.start_time),
             ('Elapsed Time', self.test_metrics.timer.get_elapsed_time())])
        self.stats_log.report(self.test_metrics)
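
These examples tally each outcome on the fixture-level metrics object by comparing test_result against TestResultTypes constants. As a minimal stand-in for illustration (assumed here; the real opencafe class may define these differently):

    class TestResultTypes(object):
        # Hypothetical stand-in: plain string constants matching the names
        # the examples compare against.
        PASSED = 'Passed'
        FAILED = 'Failed'
        ERRORED = 'Errored'
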
Example #2
    def start(self):
        """Starts logging and metrics reporting for the fixture"""
        self.logger.start()
        self.metrics.timer.start()

        log_info_block(self.logger.log,
                       [('Fixture', self.report_name),
                        ('Created At', self.metrics.timer.start_time)])
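
These hooks assume a timer object exposing start(), stop(), start_time, and get_elapsed_time(). A minimal sketch under that assumption (not the actual opencafe timer):

    from datetime import datetime

    class SketchTimer(object):
        """Hypothetical timer with the interface the examples rely on."""

        def __init__(self):
            self.start_time = None
            self.stop_time = None

        def start(self):
            self.start_time = datetime.now()

        def stop(self):
            self.stop_time = datetime.now()

        def get_elapsed_time(self):
            # Elapsed time as a timedelta; None if the timer never ran
            if self.start_time is None or self.stop_time is None:
                return None
            return self.stop_time - self.start_time
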
Example #3
    def start_test_metrics(self, class_name, test_name, test_description=None):
        """Creates a new Metrics object and starts reporting to it.  Useful
        for creating metrics for individual tests.
        """

        test_description = test_description or "No Test description."
        self.metrics.total_tests += 1
        self.test_metrics = TestRunMetrics()
        self.test_metrics.timer.start()
        root_log_dir = os.environ['CAFE_ROOT_LOG_PATH']
        self.stats_log = PBStatisticsLog(
            "{0}.{1}.statistics.csv".format(class_name, test_name),
            "{0}/statistics/".format(root_log_dir))

        log_info_block(self.logger.log,
                       [('Test Case', test_name),
                        ('Created At', self.metrics.timer.start_time),
                        (test_description, '')])
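
Taken together, the hooks are designed to be called in a fixed order around each test, and start_test_metrics requires the CAFE_ROOT_LOG_PATH environment variable. A sketch of the expected flow, assuming a hypothetical `fixture` object exposing the methods shown in these examples:

    import os

    # start_test_metrics reads this variable; the path is illustrative
    os.environ.setdefault('CAFE_ROOT_LOG_PATH', '/tmp/cafe_logs')

    fixture.start()  # fixture-level logging and metrics begin
    fixture.start_test_metrics('SampleSuite', 'test_example')
    # ... test body runs here ...
    fixture.stop_test_metrics('test_example', TestResultTypes.PASSED)
    fixture.stop()  # logs fixture totals and stops the logger
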
Example #4
    def run_command(self, cmd, *args):
        """Sends a command directly to this instance's command line
        @param cmd: Command to send to the command line
        @type cmd: C{str}
        @param args: Optional list of args to be passed with the command
        @type args: C{list}
        @raise exception: If unable to close process after running the command
        @return: The full response details from the command line
        @rtype: L{CommandLineResponse}
        @note: PRIVATE. Can be overridden in a child class
        """

        # Wait for the process to complete and then read the output
        os_response = self.run_command_async(cmd, *args)
        std_out, std_err = os_response.process.communicate()
        os_response.standard_out = str(std_out).splitlines()
        os_response.standard_error = str(std_err).splitlines()
        os_response.return_code = os_response.process.returncode

        info = [
            ("return code", logsafe_str(os_response.return_code)),
            ("standard out",
             logsafe_str("\n{0}".format("\n".join(os_response.standard_out)))),
            ("standard error",
             logsafe_str("\n{0}".format("\n".join(
                 os_response.standard_error))))
        ]
        log_info_block(self._log,
                       info,
                       heading='COMMAND LINE RESPONSE',
                       log_level=DEBUG,
                       one_line=True)

        # Clean up the process to avoid any leakage/wonkiness with
        # stdout/stderr
        try:
            os_response.process.kill()
        except OSError:
            # An OS Error is valid if the process has exited. We only
            # need to be concerned about other exceptions
            sys.exc_clear()

        os_response.process = None
        return os_response
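
Note that sys.exc_clear() exists only on Python 2; Python 3 removed it because exception state is cleared automatically when the except block exits. On Python 3 the cleanup would simply be:

    try:
        os_response.process.kill()
    except OSError:
        # The process has already exited; nothing to clean up
        pass
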
Example #5
    def stop(self):
        """Logs all collected metrics and stats, then stops logging and metrics
        reporting for the fixture.
        """

        self.metrics.timer.stop()
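        # Only total_passed is compared against total_tests below, so
        # errored tests also leave the fixture marked FAILED.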
        if self.metrics.total_passed == self.metrics.total_tests:
            self.metrics.result = TestResultTypes.PASSED
        else:
            self.metrics.result = TestResultTypes.FAILED

        log_info_block(
            self.logger.log,
            [('Fixture', self.report_name), ('Result', self.metrics.result),
             ('Start Time', self.metrics.timer.start_time),
             ('Elapsed Time', self.metrics.timer.get_elapsed_time()),
             ('Total Tests', self.metrics.total_tests),
             ('Total Passed', self.metrics.total_passed),
             ('Total Failed', self.metrics.total_failed),
             ('Total Errored', self.metrics.total_errored)])
        self.logger.stop()
Example #6
    def _build_command(self, cmd, *args):
        # Process command we received
        command = "{0} {1}".format(self.base_command,
                                   cmd) if self.base_command else cmd
        if args and args[0]:
            for arg in args[0]:
                # Append each optional arg to the command string
                command = "{0} {1}".format(command, arg)

        keys = set(os.environ).intersection(self.env_var_dict)
        set_env_vars = dict([(k, os.environ[k]) for k in keys])

        info = [("command", logsafe_str(command)), ("args", logsafe_str(args)),
                ("set env vars", logsafe_str(set_env_vars))]

        log_info_block(self._log,
                       info,
                       heading='COMMAND LINE REQUEST',
                       log_level=DEBUG,
                       one_line=True)

        return command
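
For illustration, with hypothetical values the builder produces a flat command string:

    # Assuming an instance `cli` with cli.base_command == 'git':
    #
    #     cli._build_command('status', ['-s', '-b'])
    #     # -> "git status -s -b"
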
Example #7
File: base.py  Project: dwalleck/opencafe
    def stop_test_metrics(self, test_name, test_result):
        """Stops reporting on the Metrics object created in start_test_metrics.
        Logs all collected metrics.
        Useful for logging metrics for an individual test at the test's conclusion.
        """
        try:
            self.test_metrics.timer.stop()
        except AttributeError:
            warn(
                "\nTest metrics not being logged! "
                "stop_test_metrics is being called without "
                "start_test_metrics having been previously called.\n\n")
            log_info_block(
                self.logger.log,
                [('Test Case', test_name),
                 ('Result', test_result),
                 ('Start Time', "Unknown, start_test_metrics was not called"),
                 ('Elapsed Time', "Unknown, start_test_metrics was not called")
                 ])
            return

        if test_result == TestResultTypes.PASSED:
            self.metrics.total_passed += 1

        if test_result == TestResultTypes.ERRORED:
            self.metrics.total_errored += 1

        if test_result == TestResultTypes.FAILED:
            self.metrics.total_failed += 1

        self.test_metrics.result = test_result

        log_info_block(
            self.logger.log,
            [('Test Case', test_name),
             ('Result', self.test_metrics.result),
             ('Start Time', self.test_metrics.timer.start_time),
             ('Elapsed Time', self.test_metrics.timer.get_elapsed_time())])
        self.stats_log.report(self.test_metrics)
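
Every example above funnels its output through log_info_block. Inferring only from the call sites (a logging callable, a list of (label, value) pairs, and optional heading, log_level, and one_line parameters), a compatible sketch might look like the following; this is an assumption, not the actual opencafe implementation:

    from logging import INFO

    def log_info_block(log, info, heading='INFO', log_level=INFO,
                       one_line=False):
        # Hypothetical reimplementation inferred from the call sites above;
        # the real opencafe helper may format its output differently.
        separator = '=' * 50
        lines = [separator, heading, separator]
        lines += ['{0}: {1}'.format(label, value) for label, value in info]
        lines.append(separator)
        if one_line:
            # Emit the whole block as a single log record
            log(log_level, '\n' + '\n'.join(lines))
        else:
            for line in lines:
                log(log_level, line)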