Example #1
0
    def compile_results(self, run_time, datagen_time, results):
        """Summarizes results and writes results to file if --result used"""
        all_results = []
        result_dict = {"tests": 0, "errors": 0, "failures": 0}
        for dic in results:
            result = dic["result"]
            tests = [suite for suite in self.suites
                     if suite.cafe_uuid == dic["cafe_uuid"]][0]
            result_parser = SummarizeResults(
                result_dict=vars(result), tests=tests, execution_time=run_time,
                datagen_time=datagen_time)
            all_results += result_parser.gather_results()
            summary = result_parser.summary_result()
            for key in result_dict:
                result_dict[key] += summary[key]

            if result.stream.buf.strip():
                # this line can be replaced to add an extensible stdout/err log
                sys.stderr.write("{0}\n\n".format(
                    result.stream.buf.strip()))

        if self.cl_args.result is not None:
            reporter = Reporter(result_parser, all_results)
            reporter.generate_report(
                self.cl_args.result, self.cl_args.result_directory)
        return self.print_results(
            run_time=run_time, datagen_time=datagen_time, **result_dict)
Example #2
0
    def compile_results(self, run_time, datagen_time, results):
        """Summarizes results and writes results to file if --result used"""
        all_results = []
        result_dict = {"tests": 0, "errors": 0, "failures": 0, "skipped": 0}
        for dic in results:
            result = dic["result"]
            tests = [
                suite for suite in self.suites
                if suite.cafe_uuid == dic["cafe_uuid"]
            ][0]
            result_parser = SummarizeResults(result_dict=vars(result),
                                             tests=tests,
                                             execution_time=run_time,
                                             datagen_time=datagen_time)
            all_results += result_parser.gather_results()
            summary = result_parser.summary_result()
            for key in result_dict:
                result_dict[key] += summary[key]

            if result.stream.buf.strip():
                # this line can be replaced to add an extensible stdout/err log
                sys.stderr.write("{0}\n\n".format(result.stream.buf.strip()))

        if self.cl_args.result is not None:
            reporter = Reporter(result_parser, all_results)
            reporter.generate_report(self.cl_args.result,
                                     self.cl_args.result_directory)
        return self.print_results(run_time=run_time,
                                  datagen_time=datagen_time,
                                  **result_dict)
Example #3
0
    def run_serialized(self,
                       master_suite,
                       test_runner,
                       result_type=None,
                       results_path=None):

        exit_code = 0
        unittest.installHandler()
        start_time = time.time()
        result = test_runner.run(master_suite)
        total_execution_time = time.time() - start_time

        if result_type is not None:
            result_parser = SummarizeResults(vars(result), master_suite,
                                             total_execution_time)
            all_results = result_parser.gather_results()
            reporter = Reporter(result_parser=result_parser,
                                all_results=all_results)
            reporter.generate_report(result_type=result_type,
                                     path=results_path)

        self._log_results(result)
        if not result.wasSuccessful():
            exit_code = 1

        return exit_code
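run_serialized above times a single test_runner.run() call and derives the process exit code from wasSuccessful(); report generation is optional. A standalone sketch of that core pattern using only the standard library (the trivial test case is invented for illustration):

import time
import unittest


class _ExampleTest(unittest.TestCase):
    # Invented test case, only here to give the runner something to execute.
    def test_passes(self):
        self.assertTrue(True)


suite = unittest.TestLoader().loadTestsFromTestCase(_ExampleTest)
runner = unittest.TextTestRunner(verbosity=0)

start_time = time.time()
result = runner.run(suite)
total_execution_time = time.time() - start_time

exit_code = 0 if result.wasSuccessful() else 1
print(total_execution_time, exit_code)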
Example #4
0
    def setUp(self):
        """ Creates a SummarizeResults parser with fake tests and initializes
        the reporter. Also creates a directory for the created reports.
        """
        test_suite = unittest.suite.TestSuite()
        test_suite.addTest(FakeTests('test_report_pass'))
        test_suite.addTest(FakeTests('test_report_fail'))
        test_suite.addTest(FakeTests('test_report_skip'))
        test_suite.addTest(FakeTests('test_report_error'))

        self.failure_trace = 'Traceback: ' + str(uuid4())
        self.skip_msg = str(uuid4())
        self.error_trace = 'Traceback: ' + str(uuid4())
        result = {
            'testsRun': 4,
            'errors': [(FakeTests('test_report_error'), self.error_trace)],
            'skipped': [(FakeTests('test_report_skip'), self.skip_msg)],
            'failures': [(FakeTests('test_report_fail'), self.failure_trace)]}

        self.result_parser = SummarizeResults(
            master_testsuite=test_suite, result_dict=result,
            execution_time=1.23)
        self.all_results = self.result_parser.gather_results()
        self.reporter = Reporter(
            result_parser=self.result_parser, all_results=self.all_results,)

        self.results_dir = os.getcwd() + os.path.sep + 'test-reporting-results'
        if not os.path.exists(self.results_dir):
            os.makedirs(self.results_dir)
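This setUp (and the reporting tests later in this listing) relies on a FakeTests helper that is not shown in any of these examples. A plausible minimal stub, assuming it is simply a unittest.TestCase whose method names match the ones added to the suite; the method bodies are guesses for illustration only, since the result dictionary is built by hand anyway:

import unittest


class FakeTests(unittest.TestCase):
    # Hypothetical stub; only the method names matter for the reporting tests,
    # because the result dictionary in setUp is constructed manually.
    def test_report_pass(self):
        pass

    def test_report_fail(self):
        self.fail("intentional failure")

    def test_report_skip(self):
        self.skipTest("intentional skip")

    def test_report_error(self):
        raise Exception("intentional error")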
Example #5
0
    def run_parallel(self,
                     test_suites,
                     test_runner,
                     result_type=None,
                     results_path=None):

        exit_code = 0
        proc = None
        unittest.installHandler()
        processes = []
        manager = Manager()
        results = manager.dict()
        start = time.time()

        test_mapping = {}
        for test_suite in test_suites:
            # Give each test suite a uuid so it can be
            # matched to the correct test result
            test_id = str(uuid.uuid4())
            test_mapping[test_id] = test_suite

            proc = Process(target=self.execute_test,
                           args=(test_runner, test_id, test_suite, results))
            processes.append(proc)
            proc.start()

        for proc in processes:
            proc.join()

        finish = time.time()

        errors, failures, _ = self.dump_results(start, finish, results)

        if result_type is not None:
            all_results = []
            for test_id, result in list(results.items()):
                tests = test_mapping[test_id]
                result_parser = SummarizeResults(vars(result), tests,
                                                 (finish - start))
                all_results += result_parser.gather_results()

            reporter = Reporter(result_parser=result_parser,
                                all_results=all_results)
            reporter.generate_report(result_type=result_type,
                                     path=results_path)

        if failures or errors:
            exit_code = 1

        return exit_code
Example #6
0
    def run_parallel(
            self, test_suites, test_runner, result_type=None,
            results_path=None):

        exit_code = 0
        proc = None
        unittest.installHandler()
        processes = []
        manager = Manager()
        results = manager.dict()
        start = time.time()

        test_mapping = {}
        for test_suite in test_suites:
            # Give each test suite a uuid so it can be
            # matched to the correct test result
            test_id = str(uuid.uuid4())
            test_mapping[test_id] = test_suite

            proc = Process(
                target=self.execute_test,
                args=(test_runner, test_id, test_suite, results))
            processes.append(proc)
            proc.start()

        for proc in processes:
            proc.join()

        finish = time.time()

        errors, failures, _ = self.dump_results(start, finish, results)

        if result_type is not None:
            all_results = []
            for test_id, result in results.items():
                tests = test_mapping[test_id]
                result_parser = SummarizeResults(
                    vars(result), tests, (finish - start))
                all_results += result_parser.gather_results()

            reporter = Reporter(
                result_parser=result_parser, all_results=all_results)
            reporter.generate_report(
                result_type=result_type, path=results_path)

        if failures or errors:
            exit_code = 1

        return exit_code
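Both run_parallel variants above key each suite with a uuid so results returned through the shared Manager dict can be matched to the suite that produced them, regardless of which process finishes first. A reduced sketch of that bookkeeping, with a trivial stand-in worker instead of execute_test:

import uuid
from multiprocessing import Manager, Process


def _fake_worker(test_id, suite_size, results):
    # Stand-in for execute_test: record a pickleable summary under the suite's id.
    results[test_id] = {"tests": suite_size, "failures": 0}


if __name__ == "__main__":
    suites = {"suite_a": 3, "suite_b": 5}  # invented suites: name -> test count

    manager = Manager()
    results = manager.dict()
    test_mapping = {}
    processes = []

    for name, size in suites.items():
        test_id = str(uuid.uuid4())
        test_mapping[test_id] = name
        proc = Process(target=_fake_worker, args=(test_id, size, results))
        processes.append(proc)
        proc.start()

    for proc in processes:
        proc.join()

    # Completion order does not matter: each result is looked up by its uuid.
    for test_id, summary in results.items():
        print(test_mapping[test_id], summary)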
Example #7
0
    def setUp(self):
        """ Creates a SummarizeResults parser with fake tests and initializes
        the reporter. Also creates a directory for the created reports.
        """
        test_suite = unittest.suite.TestSuite()
        test_suite.addTest(FakeTests("test_report_pass"))
        test_suite.addTest(FakeTests("test_report_fail"))
        test_suite.addTest(FakeTests("test_report_skip"))
        test_suite.addTest(FakeTests("test_report_error"))

        self.failure_trace = "Traceback: " + str(uuid4())
        self.skip_msg = str(uuid4())
        self.error_trace = "Traceback: " + str(uuid4())
        result = {
            "testsRun": 4,
            "errors": [(FakeTests("test_report_error"), self.error_trace)],
            "skipped": [(FakeTests("test_report_skip"), self.skip_msg)],
            "failures": [(FakeTests("test_report_fail"), self.failure_trace)],
        }

        self.result_parser = SummarizeResults(
            tests=test_suite, result_dict=result, execution_time=1.23, datagen_time=4.56
        )
        self.all_results = self.result_parser.gather_results()
        self.reporter = Reporter(result_parser=self.result_parser, all_results=self.all_results)

        self.results_dir = os.getcwd() + os.path.sep + "test-reporting-results"
        if not os.path.exists(self.results_dir):
            os.makedirs(self.results_dir)
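The runner code passes vars(result) to SummarizeResults, while the tests above build an equivalent result_dict by hand. A quick standard-library sketch showing that vars() on a real unittest result exposes the same keys (testsRun, errors, failures, skipped); the one-test suite here is invented for illustration:

import unittest


class _OneTest(unittest.TestCase):
    # Invented single passing test, only used to produce a real TestResult.
    def test_passes(self):
        pass


suite = unittest.TestLoader().loadTestsFromTestCase(_OneTest)
result = unittest.TextTestRunner(verbosity=0).run(suite)

summary = vars(result)
# The runner passes vars(result) as result_dict; the hand-built dict in setUp
# mirrors these keys.
print(summary["testsRun"], summary["errors"], summary["failures"], summary["skipped"])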
Example #8
0
    def setUp(self):
        """ Creates a SummarizeResults parser with fake tests and initializes
        the reporter. Also creates a directory for the created reports.
        """
        test_suite = unittest.suite.TestSuite()
        test_suite.addTest(FakeTests('test_report_pass'))
        test_suite.addTest(FakeTests('test_report_fail'))
        test_suite.addTest(FakeTests('test_report_skip'))
        test_suite.addTest(FakeTests('test_report_error'))

        self.failure_trace = 'Traceback: ' + str(uuid4())
        self.skip_msg = str(uuid4())
        self.error_trace = 'Traceback: ' + str(uuid4())
        result = {'testsRun': 4,
                  'errors': [(FakeTests('test_report_error'),
                             self.error_trace)],
                  'skipped': [(FakeTests('test_report_skip'),
                               self.skip_msg)],
                  'failures': [(FakeTests('test_report_fail'),
                                self.failure_trace)]}

        self.result_parser = SummarizeResults(master_testsuite=test_suite,
                                              result_dict=result,
                                              execution_time=1.23)
        self.all_results = self.result_parser.gather_results()
        self.reporter = Reporter(result_parser=self.result_parser,
                                 all_results=self.all_results,)

        self.results_dir = os.getcwd() + '/test-reporting-results'
        if not os.path.exists(self.results_dir):
            os.makedirs(self.results_dir)
Example #9
0
    def run_parallel(self, test_suites, test_runner, result_type=None,
                     results_path=None):
        exit_code = 0
        proc = None
        unittest.installHandler()
        processes = []
        manager = Manager()
        results = manager.list()
        start = time.time()

        for test_suite in test_suites:
            proc = Process(target=execute_test, args=(
                test_runner,
                test_suite,
                results))
            processes.append(proc)
            proc.start()

        for proc in processes:
            proc.join()

        finish = time.time()

        errors, failures, _ = dump_results(start, finish, results)

        if result_type is not None:
            all_results = []
            for tests, result in zip(test_suites, results):
                result_parser = SummarizeResults(
                    vars(result),
                    tests,
                    (finish - start))
                all_results += result_parser.gather_results()

            reporter = Reporter(result_parser=result_parser,
                                all_results=all_results)
            reporter.generate_report(result_type=result_type,
                                     path=results_path)

        if failures or errors:
            exit_code = 1

        return exit_code
Example #10
0
    def run_serialized(self, master_suite, test_runner, result_type=None,
                       results_path=None):
        exit_code = 0
        unittest.installHandler()
        start_time = time.time()
        result = test_runner.run(master_suite)
        total_execution_time = time.time() - start_time

        if result_type is not None:
            result_parser = SummarizeResults(vars(result), master_suite,
                                             total_execution_time)
            all_results = result_parser.gather_results()
            reporter = Reporter(result_parser=result_parser,
                                all_results=all_results)
            reporter.generate_report(result_type=result_type,
                                     path=results_path)

        log_results(result)
        if not result.wasSuccessful():
            exit_code = 1

        return exit_code
Example #11
0
class ReportingTests(unittest.TestCase):
    def setUp(self):
        """ Creates a SummarizeResults parser with fake tests and initializes
        the reporter. Also creates a directory for the created reports.
        """
        test_suite = unittest.suite.TestSuite()
        test_suite.addTest(FakeTests("test_report_pass"))
        test_suite.addTest(FakeTests("test_report_fail"))
        test_suite.addTest(FakeTests("test_report_skip"))
        test_suite.addTest(FakeTests("test_report_error"))

        self.failure_trace = "Traceback: " + str(uuid4())
        self.skip_msg = str(uuid4())
        self.error_trace = "Traceback: " + str(uuid4())
        result = {
            "testsRun": 4,
            "errors": [(FakeTests("test_report_error"), self.error_trace)],
            "skipped": [(FakeTests("test_report_skip"), self.skip_msg)],
            "failures": [(FakeTests("test_report_fail"), self.failure_trace)],
        }

        self.result_parser = SummarizeResults(
            tests=test_suite, result_dict=result, execution_time=1.23, datagen_time=4.56
        )
        self.all_results = self.result_parser.gather_results()
        self.reporter = Reporter(result_parser=self.result_parser, all_results=self.all_results)

        self.results_dir = os.getcwd() + os.path.sep + "test-reporting-results"
        if not os.path.exists(self.results_dir):
            os.makedirs(self.results_dir)

    def _file_contains_test_info(self, file_path):
        """ Checks for generic test information (names and messages)
        in the specified report file.
        """
        return self._file_contains(
            file_path=file_path,
            target_strings=[
                "test_report_pass",
                "test_report_fail",
                "test_report_skip",
                "test_report_error",
                self.failure_trace,
                self.skip_msg,
                self.error_trace,
            ],
        )

    def _file_contains(self, file_path, target_strings):
        """ Checks that the specified file contains all strings in the
        target_strings list.
        """
        not_found = []
        with open(file_path) as in_file:
            contents = in_file.read()
        for target_string in target_strings:
            if target_string not in contents:
                not_found.append(target_string)
        if len(not_found) > 0:
            return (False, not_found)
        return (True, not_found)

    @tags("smoke", "cli", execution="slow, fast", suite="test, integration")
    def test_create_json_report(self):
        """ Creates a json report and checks that the created report contains
        the proper test information.
        """
        self.reporter.generate_report(result_type="json", path=self.results_dir)
        results_file = self.results_dir + os.path.sep + "results.json"
        self.assertTrue(os.path.exists(results_file))
        results = self._file_contains_test_info(file_path=results_file)
        self.assertTrue(
            results[0],
            "{0} not found in report results file".format(", ".join(results[1])),
        )

    @tags("cli", execution="slow")
    def test_create_xml_report(self):
        """ Creates an xml report and checks that the created report contains
        the proper test information.
        """
        self.reporter.generate_report(result_type="xml", path=self.results_dir)
        results_file = self.results_dir + os.path.sep + "results.xml"
        self.assertTrue(os.path.exists(results_file))
        results = self._file_contains_test_info(file_path=results_file)
        self.assertTrue(
            results[0],
            "{0} not found in report results file".format(", ".join(results[1])),
        )

    @tags("smoke", "cli", "functional", execution="fast")
    def test_create_json_report_w_file_name(self):
        """ Creates a json report with a specified file name and checks that
        the created report contains the proper test information.
        """
        results_file = self.results_dir + os.path.sep + str(uuid4()) + ".json"
        self.reporter.generate_report(result_type="json", path=results_file)
        self.assertTrue(os.path.exists(results_file))
        results = self._file_contains_test_info(file_path=results_file)
        self.assertTrue(
            results[0],
            "{0} not found in report results file".format(", ".join(results[1])),
        )

    @tags("cli", "functional")
    def test_create_xml_report_w_file_name(self):
        """ Creates an xml report with a specified file name and checks that
        the created report contains the proper test information.
        """
        results_file = self.results_dir + os.path.sep + str(uuid4()) + ".xml"
        self.reporter.generate_report(result_type="xml", path=results_file)
        self.assertTrue(os.path.exists(results_file))
        results = self._file_contains_test_info(file_path=results_file)
        self.assertTrue(
            results[0],
            "{0} not found in report results file".format(", ".join(results[1])),
        )

    def test_timing_metrics_in_json_report(self):
        """
        Creates a json report and verifies that the created
        report contains timing metrics.
        """
        self.reporter.generate_report(result_type="json", path=self.results_dir)
        results_file = self.results_dir + os.path.sep + "results.json"
        self.assertTrue(os.path.exists(results_file))
        results = self._file_contains(
            file_path=results_file,
            target_strings=["datagen_time", "total_time"],
        )
        self.assertTrue(
            results[0],
            "{0} not found in report results file".format(", ".join(results[1])),
        )

    def test_timing_metrics_in_xml_report(self):
        """
        Creates an xml report and verifies that the created
        report contains timing metrics.
        """
        self.reporter.generate_report(result_type="xml", path=self.results_dir)
        results_file = self.results_dir + os.path.sep + "results.xml"
        self.assertTrue(os.path.exists(results_file))
        results = self._file_contains(
            file_path=results_file,
            target_strings=["datagen_time", "total_time"],
        )
        self.assertTrue(
            results[0],
            "{0} not found in report results file".format(", ".join(results[1])),
        )

    def tearDown(self):
        """ Deletes created reports and directories. """
        if os.path.exists(self.results_dir):
            self.results_dir = shutil.rmtree(self.results_dir)
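The @tags decorator used on the test methods above comes from the test framework and its implementation is not shown in these examples. As a generic illustration only (not the real cafe decorator), a decorator with that calling convention could attach positional tags and keyword attributes to a test method roughly like this:

def tags(*tag_names, **attrs):
    """Hypothetical stand-in for the framework's @tags decorator."""
    def decorator(func):
        # Attribute names here are invented; the real decorator may differ.
        func.__test_tags__ = list(tag_names)
        func.__test_attrs__ = dict(attrs)
        return func
    return decorator


@tags("smoke", "cli", execution="fast")
def test_example():
    pass


print(test_example.__test_tags__)   # ['smoke', 'cli']
print(test_example.__test_attrs__)  # {'execution': 'fast'}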
Example #12
0
class ReportingTests(unittest.TestCase):
    def setUp(self):
        """ Creates a SummarizeResults parser with fake tests and initializes
        the reporter. Also creates a directory for the created reports.
        """
        test_suite = unittest.suite.TestSuite()
        test_suite.addTest(FakeTests('test_report_pass'))
        test_suite.addTest(FakeTests('test_report_fail'))
        test_suite.addTest(FakeTests('test_report_skip'))
        test_suite.addTest(FakeTests('test_report_error'))

        self.failure_trace = 'Traceback: ' + str(uuid4())
        self.skip_msg = str(uuid4())
        self.error_trace = 'Traceback: ' + str(uuid4())
        result = {'testsRun': 4,
                  'errors': [(FakeTests('test_report_error'),
                             self.error_trace)],
                  'skipped': [(FakeTests('test_report_skip'),
                               self.skip_msg)],
                  'failures': [(FakeTests('test_report_fail'),
                                self.failure_trace)]}

        self.result_parser = SummarizeResults(master_testsuite=test_suite,
                                              result_dict=result,
                                              execution_time=1.23)
        self.all_results = self.result_parser.gather_results()
        self.reporter = Reporter(result_parser=self.result_parser,
                                 all_results=self.all_results,)

        self.results_dir = os.getcwd() + '/test-reporting-results'
        if not os.path.exists(self.results_dir):
            os.makedirs(self.results_dir)

    def _file_contains_test_info(self, file_path):
        """ Checks for generic test information (names and messages)
        in the specified report file.
        """
        return self._file_contains(
            file_path=file_path,
            target_strings=['test_report_pass', 'test_report_fail',
                            'test_report_skip', 'test_report_error',
                            self.failure_trace, self.skip_msg,
                            self.error_trace])

    def _file_contains(self, file_path, target_strings):
        """ Checks that the specified file contains all strings in the
        target_strings list.
        """
        with open(file_path) as in_file:
            contents = in_file.read()
        return all(target_string in contents
                   for target_string in target_strings)

    def test_create_json_report(self):
        """ Creates a json report and checks that the created report contains
        the proper test information.
        """
        self.reporter.generate_report(result_type='json',
                                      path=self.results_dir)
        results_file = self.results_dir + '/results.json'
        self.assertTrue(os.path.exists(results_file))
        self.assertTrue(self._file_contains_test_info(file_path=results_file))

    def test_create_xml_report(self):
        """ Creates an xml report and checks that the created report contains
        the proper test information.
        """
        self.reporter.generate_report(result_type='xml',
                                      path=self.results_dir)
        results_file = self.results_dir + '/results.xml'
        self.assertTrue(os.path.exists(results_file))
        self.assertTrue(self._file_contains_test_info(file_path=results_file))

    def test_create_json_report_w_file_name(self):
        """ Creates a json report with a specified file name and checks that
        the created report contains the proper test information.
        """
        results_file = self.results_dir + '/' + str(uuid4()) + '.json'
        self.reporter.generate_report(result_type='json',
                                      path=results_file)
        self.assertTrue(os.path.exists(results_file))
        self.assertTrue(self._file_contains_test_info(file_path=results_file))

    def test_create_xml_report_w_file_name(self):
        """ Creates an xml report with a specified file name and checks that
        the created report contains the proper test information.
        """
        results_file = self.results_dir + '/' + str(uuid4()) + '.xml'
        self.reporter.generate_report(result_type='xml',
                                      path=results_file)
        self.assertTrue(os.path.exists(results_file))
        self.assertTrue(self._file_contains_test_info(file_path=results_file))

    def tearDown(self):
        """ Deletes created reports and directories. """
        if os.path.exists(self.results_dir):
            self.results_dir = shutil.rmtree(self.results_dir)
Example #13
0
class ReportingTests(unittest.TestCase):

    def setUp(self):
        """ Creates a SummarizeResults parser with fake tests and initializes
        the reporter. Also creates a directory for the created reports.
        """
        test_suite = unittest.suite.TestSuite()
        test_suite.addTest(FakeTests('test_report_pass'))
        test_suite.addTest(FakeTests('test_report_fail'))
        test_suite.addTest(FakeTests('test_report_skip'))
        test_suite.addTest(FakeTests('test_report_error'))

        self.failure_trace = 'Traceback: ' + str(uuid4())
        self.skip_msg = str(uuid4())
        self.error_trace = 'Traceback: ' + str(uuid4())
        result = {
            'testsRun': 4,
            'errors': [(FakeTests('test_report_error'), self.error_trace)],
            'skipped': [(FakeTests('test_report_skip'), self.skip_msg)],
            'failures': [(FakeTests('test_report_fail'), self.failure_trace)]}

        self.result_parser = SummarizeResults(
            master_testsuite=test_suite, result_dict=result,
            execution_time=1.23)
        self.all_results = self.result_parser.gather_results()
        self.reporter = Reporter(
            result_parser=self.result_parser, all_results=self.all_results,)

        self.results_dir = os.getcwd() + os.path.sep + 'test-reporting-results'
        if not os.path.exists(self.results_dir):
            os.makedirs(self.results_dir)

    def _file_contains_test_info(self, file_path):
        """ Checks for generic test information (names and messages)
        in the specified report file.
        """
        return self._file_contains(
            file_path=file_path, target_strings=[
                'test_report_pass', 'test_report_fail', 'test_report_skip',
                'test_report_error', self.failure_trace, self.skip_msg,
                self.error_trace])

    def _file_contains(self, file_path, target_strings):
        """ Checks that the specified file contains all strings in the
        target_strings list.
        """
        with open(file_path) as in_file:
            contents = in_file.read()
        return all(target_string in contents
                   for target_string in target_strings)

    @tags('smoke', 'cli', execution='slow, fast', suite="test, integration")
    def test_create_json_report(self):
        """ Creates a json report and checks that the created report contains
        the proper test information.
        """
        self.reporter.generate_report(
            result_type='json', path=self.results_dir)
        results_file = self.results_dir + os.path.sep + 'results.json'
        self.assertTrue(os.path.exists(results_file))
        self.assertTrue(self._file_contains_test_info(file_path=results_file))

    @tags("cli", execution='slow')
    def test_create_xml_report(self):
        """ Creates an xml report and checks that the created report contains
        the proper test information.
        """
        self.reporter.generate_report(result_type='xml', path=self.results_dir)
        results_file = self.results_dir + os.path.sep + 'results.xml'
        self.assertTrue(os.path.exists(results_file))
        self.assertTrue(self._file_contains_test_info(file_path=results_file))

    @tags('smoke', 'cli', 'functional', execution='fast')
    def test_create_json_report_w_file_name(self):
        """ Creates a json report with a specified file name and checks that
        the created report contains the proper test information.
        """
        results_file = self.results_dir + os.path.sep + str(uuid4()) + '.json'
        self.reporter.generate_report(result_type='json', path=results_file)
        self.assertTrue(os.path.exists(results_file))
        self.assertTrue(self._file_contains_test_info(file_path=results_file))

    @tags('cli', 'functional')
    def test_create_xml_report_w_file_name(self):
        """ Creates an xml report with a specified file name and checks that
        the created report contains the proper test information.
        """
        results_file = self.results_dir + os.path.sep + str(uuid4()) + '.xml'
        self.reporter.generate_report(result_type='xml', path=results_file)
        self.assertTrue(os.path.exists(results_file))
        self.assertTrue(self._file_contains_test_info(file_path=results_file))

    def tearDown(self):
        """ Deletes created reports and directories. """
        if os.path.exists(self.results_dir):
            self.results_dir = shutil.rmtree(self.results_dir)
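The _file_contains helper, per its docstring, should only pass when every target string is present in the report. A quick self-contained check of that all-strings behaviour using a throwaway file (the tempfile usage is only for illustration and is not part of the original tests):

import tempfile


def file_contains(file_path, target_strings):
    # Same all-strings check as the helper above, written as a free function.
    with open(file_path) as in_file:
        contents = in_file.read()
    return all(target in contents for target in target_strings)


with tempfile.NamedTemporaryFile('w', suffix='.json', delete=False) as out_file:
    out_file.write('{"name": "test_report_pass", "result": "passed"}')

print(file_contains(out_file.name, ['test_report_pass']))             # True
print(file_contains(out_file.name, ['test_report_pass', 'missing']))  # False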