Esempio n. 1
0
    def _collect_test_suite(self, scenario_result):
        """Build a junit-xml TestSuite mirroring *scenario_result*.

        A GeneralError produces a one-case suite carrying only the error
        message; otherwise every test result becomes a TestCase with one
        failure/error entry per non-passing assertion result.
        """
        if isinstance(scenario_result, GeneralError):
            error_case = TestCase("", "")
            error_case.add_error_info(scenario_result.message)
            error_suite = TestSuite("", "")
            error_suite.test_cases.append(error_case)
            return error_suite

        suite = TestSuite(scenario_result.name)
        for test_result in scenario_result.test_results:
            case = TestCase(test_result.name, test_result.name)
            for assertion in test_result.results:
                if isinstance(assertion, Failed):
                    case.add_failure_info(
                        "ASSERTION {} failed".format(assertion.pretty_name),
                        "EXPECTED {}\nGOT {}".format(assertion.expected,
                                                     assertion.actual))
                elif isinstance(assertion, (Error, ConnectionError)):
                    case.add_error_info(
                        "ASSERTION {} failed".format(assertion.pretty_name),
                        "ERROR {}".format(assertion.error))
                elif isinstance(assertion, Passed):
                    pass
                # TODO: What to do below?
                else:
                    raise Exception("Unknown state")
            suite.test_cases.append(case)
        return suite
Esempio n. 2
0
 def test_multiple_errors(self):
     """Tests multiple errors in one test case"""
     case = TestCase('Multiple error', allow_multiple_subelements=True)
     case.add_error_info("First error", "First error message")
     first_error = {
         "message": "First error",
         "output": "First error message",
         "type": "error"
     }
     (_, read_cases) = serialize_and_read(TestSuite('test', [case]))[0]
     verify_test_case(self, read_cases[0], {'name': 'Multiple error'},
                      errors=[first_error])

     case.add_error_info("Second error", "Second error message")
     second_error = {
         "message": "Second error",
         "output": "Second error message",
         "type": "error"
     }
     (_, read_cases) = serialize_and_read(TestSuite('test', [case]))[0]
     verify_test_case(self, read_cases[0], {'name': 'Multiple error'},
                      errors=[first_error, second_error])
Esempio n. 3
0
    def process_report(requests, thresholds):
        """Convert aggregated request and threshold results into a JUnit report.

        :param requests: mapping of request id -> dict with 'request_name',
            'OK' (pass count) and 'KO' (fail count) keys.
        :param thresholds: iterable of dicts with 'scope', 'target', 'value',
            'metric', 'threshold' and 'status' keys.

        Writes two suites ("Functional errors " and "Thresholds ") to
        /tmp/reports/jmeter.xml.
        """
        functional_test_cases, threshold_test_cases = [], []
        test_suites = []
        # Iterate values directly instead of re-indexing requests[req] on
        # every access; str() inside format() was redundant (format calls str).
        for req in requests.values():
            passed, failed = req['OK'], req['KO']
            if failed != 0:
                case = TestCase(
                    name=req['request_name'],
                    stdout="PASSED: {}. FAILED: {}".format(passed, failed),
                    stderr="FAILED: {}".format(failed))
                case.add_failure_info("Request failed {} times".format(failed))
            else:
                case = TestCase(
                    name=req['request_name'],
                    stdout="PASSED: {}".format(passed),
                    stderr="FAILED: {}".format(failed))
            functional_test_cases.append(case)

        test_suites.append(TestSuite("Functional errors ", functional_test_cases))

        for th in thresholds:
            case = TestCase(
                name="Threshold for {}, target - {}".format(th['scope'],
                                                            th['target']),
                stdout="Value: {} {}. Threshold value: {} {}".format(
                    th['value'], th['metric'], th['threshold'], th['metric']))
            if th['status'] == 'FAILED':
                case.add_failure_info(
                    "{} for {} exceeded threshold of {} {}. Test result - {} {}"
                    .format(th['target'], th['scope'], th['threshold'],
                            th['metric'], th['value'], th['metric']))
            threshold_test_cases.append(case)

        test_suites.append(TestSuite("Thresholds ", threshold_test_cases))
        with open("/tmp/reports/jmeter.xml", 'w') as f:
            TestSuite.to_file(f, test_suites, prettyprint=True)
Esempio n. 4
0
 def test_multiple_failures(self):
     """Tests multiple failures in one test case"""
     case = TestCase('Multiple failures', allow_multiple_subelements=True)
     case.add_failure_info("First failure", "First failure message")
     first_failure = {
         "message": "First failure",
         "output": "First failure message",
         "type": "failure"
     }
     (_, read_cases) = serialize_and_read(TestSuite('test', [case]))[0]
     verify_test_case(self, read_cases[0], {'name': 'Multiple failures'},
                      failures=[first_failure])

     case.add_failure_info("Second failure", "Second failure message")
     second_failure = {
         "message": "Second failure",
         "output": "Second failure message",
         "type": "failure"
     }
     (_, read_cases) = serialize_and_read(TestSuite('test', [case]))[0]
     verify_test_case(self, read_cases[0], {'name': 'Multiple failures'},
                      failures=[first_failure, second_failure])
Esempio n. 5
0
def main():
    """Parse clair scanner output and emit a JUnit XML report.

    Reads the clair JSON report and an optional setup-error log, turns
    setup errors and each vulnerability (grouped by severity) into test
    cases, and writes the resulting suites to ``args.output``.

    Bug fixes vs. the previous version:
    - ``clair_parsed_file`` / ``clair_parsed_error_file`` could be unbound
      when a file was missing or parsing failed; both are now initialized.
    - The SetupError message referenced ``vuln["link"]`` before any ``vuln``
      existed (NameError); it now reports the setup log contents.
    - The SetupError suite was appended twice and the final severity suite
      was never appended.
    - The parse-failure handler logged "Exiting." but kept running; it now
      returns.
    - ``os.path.join("clair-scanner-logs", "/clair_setup_errors.log")``
      discarded the directory because the second component was absolute.
    """
    args = parse_args()
    clair_parsed_file = None
    clair_parsed_error_file = None
    error_log_path = os.path.join("clair-scanner-logs",
                                  "clair_setup_errors.log")
    try:
        if os.path.exists(args.clairfile):
            with open(args.clairfile) as clairfile:
                clair_parsed_file = json.load(clairfile)
        if os.path.exists(error_log_path):
            with open(error_log_path, 'r') as clairfile_errors:
                clair_parsed_error_file = clairfile_errors.readlines()
    except Exception:
        logger.exception("Failed to parse clair / clair_error file.  Exiting.")
        return

    test_suites = []
    if clair_parsed_error_file:
        setup_suite = TestSuite("SetupError")
        setup_step = TestCase(name="SetupError",
                              classname="SetupError",
                              status="unapproved",
                              stderr=clair_parsed_error_file)
        setup_step.log = clair_parsed_error_file
        setup_step.category = "SetupError"
        setup_step.failure_type = "unapproved"
        setup_step.failure_message = (
            "Please have the following security issue reviewed by Splunk: {}"
            .format(clair_parsed_error_file))
        setup_step.failure_output = clair_parsed_error_file
        setup_suite.test_cases.append(setup_step)
        test_suites.append(setup_suite)

    current_sorted_level = None
    current_suite = None
    vulnerabilities = clair_parsed_file["vulnerabilities"] if clair_parsed_file else []
    for vuln in vulnerabilities:
        if current_sorted_level != vuln["severity"]:
            # Severity changed: flush the previous suite and start a new one.
            if current_suite:
                test_suites.append(current_suite)
            current_suite = TestSuite(name=vuln["severity"])
            current_sorted_level = vuln["severity"]
        new_step = TestCase(name=vuln["vulnerability"],
                            classname=vuln["severity"],
                            status="unapproved",
                            url=vuln["link"],
                            stderr=vuln["description"])
        new_step.log = vuln
        new_step.category = vuln["severity"]
        new_step.failure_type = "unapproved"
        new_step.failure_message = "Please have the following security issue reviewed by Splunk: {}".format(
            vuln["link"])
        new_step.failure_output = vuln["description"]
        current_suite.test_cases.append(new_step)
    # Flush the last severity suite (previously lost).
    if current_suite:
        test_suites.append(current_suite)
    # try to write new file
    try:
        with open(args.output, 'w') as outfile:
            outfile.write(TestSuite.to_xml_string(test_suites))
    except Exception:
        logger.exception("Failed saving file.")
Esempio n. 6
0
    def test_init_failure_type(self):
        """Tests failure info with only a type, then with a message added."""
        case = TestCase('Failure-Type')
        case.add_failure_info(failure_type='com.example.Error')
        (_, read_cases) = serialize_and_read(TestSuite('test', [case]))[0]
        verify_test_case(self, read_cases[0], {'name': 'Failure-Type'})

        case.add_failure_info("failure message")
        (_, read_cases) = serialize_and_read(TestSuite('test', [case]))[0]
        verify_test_case(self, read_cases[0], {'name': 'Failure-Type'},
                         failure_message="failure message",
                         failure_type='com.example.Error')
Esempio n. 7
0
    def test_multiple_suites_to_string(self):
        """Serializes two suites at once and checks both round-trip."""
        suites = serialize_and_read([
            TestSuite('suite1', [TestCase('Test1')]),
            TestSuite('suite2', [TestCase('Test2')])
        ])

        expected = [('suite1', 'Test1'), ('suite2', 'Test2')]
        for index, (suite_name, case_name) in enumerate(expected):
            self.assertEqual(suite_name,
                             suites[index][0].attributes['name'].value)
            verify_test_case(self, suites[index][1][0], {'name': case_name})
Esempio n. 8
0
    def test_init_error_type(self):
        """Tests error info with only a type, then with a message added."""
        case = TestCase('Error-Type')
        case.add_error_info(error_type='com.example.Error')
        (_, read_cases) = serialize_and_read(TestSuite('test', [case]))[0]
        verify_test_case(self, read_cases[0], {'name': 'Error-Type'})

        case.add_error_info("error message")
        (_, read_cases) = serialize_and_read(TestSuite('test', [case]))[0]
        verify_test_case(self, read_cases[0], {'name': 'Error-Type'},
                         error_message="error message",
                         error_type='com.example.Error')
Esempio n. 9
0
def generate_junit_report(test_name, total_thresholds, report_name):
    """Build a JUnit XML report from threshold evaluation details.

    Each entry of ``total_thresholds["details"]`` becomes one test case;
    a non-empty 'message' marks that case as FAILED. Returns the report
    file name (written under ``{REPORT_PATH}/junit``).
    """
    file_name = f"junit_report_{report_name}.xml"
    logger.info(f"Generate report {file_name}")

    test_cases = []
    for item in total_thresholds["details"]:
        failure_message = item['message']
        summary = (
            f"{item['scope']} {item['name'].lower()} {item['aggregation']} {item['actual']} "
            f"{item['rule']} {item['expected']}")
        case = TestCase(item['name'],
                        classname=f"{item['scope']}",
                        status="PASSED",
                        stdout=summary)
        if failure_message:
            case.status = "FAILED"
            case.add_failure_info(failure_message)
        test_cases.append(case)

    suite = TestSuite(test_name, test_cases)
    os.makedirs(f"{REPORT_PATH}/junit", exist_ok=True)
    with open(f"{REPORT_PATH}/junit/{file_name}", 'w') as f:
        TestSuite.to_file(f, [suite], prettyprint=True)

    return file_name
Esempio n. 10
0
    def test_init_unicode(self):
        """Round-trips a test case whose fields contain non-ASCII text."""
        name = decode('Test äöü', 'utf-8')
        classname = decode('some.class.name.äöü', 'utf-8')
        stdout = decode('I am stdöüt!', 'utf-8')
        stderr = decode('I am stdärr!', 'utf-8')
        skipped_message = decode('Skipped äöü', 'utf-8')
        skipped_output = decode('I skippäd!', 'utf-8')
        error_message = decode('Skipped error äöü', 'utf-8')
        error_output = decode('I skippäd with an error!', 'utf-8')

        case = TestCase(name=name,
                        classname=classname,
                        elapsed_sec=123.345,
                        stdout=stdout,
                        stderr=stderr)
        case.add_skipped_info(message=skipped_message, output=skipped_output)
        case.add_error_info(message=error_message, output=error_output)

        (_, read_cases) = serialize_and_read(TestSuite('Test Unicode',
                                                       [case]))[0]
        verify_test_case(self,
                         read_cases[0], {
                             'name': name,
                             'classname': classname,
                             'time': ("%f" % 123.345)
                         },
                         stdout=stdout,
                         stderr=stderr,
                         skipped_message=skipped_message,
                         skipped_output=skipped_output,
                         error_message=error_message,
                         error_output=error_output)
Esempio n. 11
0
 def test_init_skipped_output(self):
     """Tests a skipped entry carrying only output text."""
     case = TestCase('Skipped-Output')
     case.add_skipped_info(output="I skipped!")
     (_, read_cases) = serialize_and_read(TestSuite('test', [case]))[0]
     verify_test_case(self, read_cases[0], {'name': 'Skipped-Output'},
                      skipped_output="I skipped!")
Esempio n. 12
0
 def test_init_skipped_message(self):
     """Tests a skipped entry carrying only a message."""
     case = TestCase('Skipped-Message')
     case.add_skipped_info("skipped message")
     (_, read_cases) = serialize_and_read(TestSuite('test', [case]))[0]
     verify_test_case(self, read_cases[0], {'name': 'Skipped-Message'},
                      skipped_message="skipped message")
Esempio n. 13
0
 def run_tests(self):
     """Run every registered test and collect the results into a TestSuite.

     Tests whose description contains the "[.]" marker are recorded as
     skipped; all others are executed with their output captured, timed,
     and attached to the corresponding TestCase (stdout on success, a
     failure entry otherwise).
     """
     test_cases = []
     for test in self.tests:
         desc = test['desc']
         name = test['name']
         index = test['id']
         test_case = TestCase(name, self.name)
         if '[.]' in desc:
             print('skipping test "{}"'.format(name))
             test_case.add_skipped_info(message="Skipped test marked with [.]")
         else:
             test_output = StringIO()
             # Redirect the subprocess log into our capture buffer for the
             # duration of this one test.
             self.sp.logfile = test_output
             t_start = time.time()
             result = self.run_test(index)
             t_stop = time.time()
             self.sp.logfile = None
             test_case.elapsed_sec = t_stop - t_start
             debug_print('test output was:')
             debug_print(test_output.getvalue())
             if result == BSTestRunner.SUCCESS:
                 test_case.stdout = test_output.getvalue()
                 print('test "{}" passed'.format(name))
             else:
                 print('test "{}" failed'.format(name))
                 test_case.add_failure_info('Test failed',
                                            output=test_output.getvalue())
             test_output.close()
         # Idiomatic append (was "test_cases += [test_case];" with a stray
         # semicolon).
         test_cases.append(test_case)
     return TestSuite(self.name, test_cases)
Esempio n. 14
0
    def v2_playbook_on_stats(self, stats):
        """
        Callback fired once the playbook run is over.

        Since only a single playbook runs at a time, logging is complete
        at this point, so the collected test cases are aggregated into a
        jUnit suite and serialized to a randomly named XML file under the
        configured log directory.

        :param stats: statistics about the run
        """
        suite = TestSuite(self.playbook_name, self.test_cases)

        base_dir = getenv('OCT_CONFIG_HOME',
                          abspath(join(expanduser('~'), '.config')))
        log_dir = abspath(join(base_dir, 'origin-ci-tool', 'logs', 'junit'))
        if not exists(log_dir):
            mkdir(log_dir)

        log_filename = ''
        # Try up to ten random names, keeping the first unused one.
        for _ in range(10):
            random_name = ''.join(choice(ascii_letters) for _ in range(10))
            log_filename = join(log_dir, '{}.xml'.format(random_name))
            if not exists(log_filename):
                # TODO: determine a better way to do this
                break

        with open(log_filename, 'w') as result_file:
            TestSuite.to_file(result_file, [suite])
Esempio n. 15
0
def exporter_junit(test_result_ext, test_suite_properties=None):
    """! Export test results in JUnit XML compliant format
    @details This function will import junit_xml library to perform report conversion
    @return String containing Junit XML formatted test result output
    """
    from junit_xml import TestSuite, TestCase

    test_suites = []
    for target in sorted(test_result_ext.keys()):
        target_results = test_result_ext[target]
        test_cases = []
        for test in sorted(target_results.keys()):
            outcome = target_results[test]
            # One test case per (target, test) pair, with captured output.
            tc = TestCase(test,
                          'test.%s.%s' % (target, test),
                          outcome['elapsed_time'],
                          outcome['single_test_output'],
                          '')
            verdict = outcome['single_test_result']
            if verdict == 'FAIL':
                tc.add_failure_info(verdict, outcome['single_test_output'])
            elif verdict != 'OK':
                tc.add_error_info(verdict, outcome['single_test_output'])
            test_cases.append(tc)
        test_suites.append(TestSuite("test.suite.%s" % target, test_cases))
    return TestSuite.to_xml_string(test_suites)
Esempio n. 16
0
    def get_test_suites(self):
        """Group all check records into one jUnit TestSuite per check name."""
        cases_by_check = {}
        all_records = self.passed_checks + self.failed_checks + self.skipped_checks
        for record in all_records:
            check_name = record.check_name
            case_list = cases_by_check.setdefault(check_name, [])

            test_case = TestCase(name="{} {} {}".format(self.check_type,
                                                        check_name,
                                                        record.resource),
                                 file=record.file_path,
                                 classname=record.check_class)
            result = record.check_result['result']
            if result == CheckResult.FAILED:
                test_case.add_failure_info(
                    "Resource \"{}\" failed in check \"{}\"".format(
                        record.resource, check_name))
            if result == CheckResult.SKIPPED:
                test_case.add_skipped_info(
                    "Resource \"{}\" skipped in check \"{}\"\n Suppress comment: {}"
                    .format(record.resource, check_name,
                            record.check_result['suppress_comment']))
            case_list.append(test_case)
        return [
            TestSuite(name=check,
                      test_cases=cases,
                      package=cases[0].classname)
            for check, cases in cases_by_check.items()
        ]
Esempio n. 17
0
 def test_init_failure_output(self):
     """Tests a failure entry carrying only output text."""
     case = TestCase('Failure-Output')
     case.add_failure_info(output="I failed!")
     (_, read_cases) = serialize_and_read(TestSuite('test', [case]))[0]
     verify_test_case(self, read_cases[0], {'name': 'Failure-Output'},
                      failure_output="I failed!")
Esempio n. 18
0
 def test_init_failure_message(self):
     """Tests a failure entry carrying only a message."""
     case = TestCase('Failure-Message')
     case.add_failure_info("failure message")
     (_, read_cases) = serialize_and_read(TestSuite('test', [case]))[0]
     verify_test_case(self, read_cases[0], {'name': 'Failure-Message'},
                      failure_message="failure message")
Esempio n. 19
0
 def test_assertions(self):
     """Serializes a case with an assertions count and checks the attribute."""
     suites = serialize_and_read(
         TestSuite(name='test',
                   test_cases=[TestCase(name='Test1', assertions=5)]))[0]
     # assertEquals is a deprecated alias; use assertEqual.
     self.assertEqual(
         '5', suites[0].getElementsByTagName('testcase')
         [0].attributes['assertions'].value)
Esempio n. 20
0
    def test_attribute_disable(self):
        """A disabled test case should be counted in the suite's 'disabled' attribute."""
        disabled_case = TestCase('Disabled-Test')
        disabled_case.is_enabled = False
        suites = serialize_and_read([TestSuite('suite1', [disabled_case])])

        self.assertEqual('1', suites[0][0].attributes['disabled'].value)
Esempio n. 21
0
def print_result_cache_junitxml(dict_synonyms, suspicious_policy,
                                untested_policy):
    """Print every cached mutation result as a JUnit XML document.

    :param dict_synonyms: passed through to get_unified_diff.
    :param suspicious_policy: how to report OK_SUSPICIOUS mutants
        ('ignore' suppresses them; otherwise names an add_*_info method).
    :param untested_policy: same, for UNTESTED mutants.
    """
    test_cases = []
    # Renamed ambiguous single-letter 'l' (PEP 8 / E741).
    # NOTE(review): groupby only groups *consecutive* rows — this assumes the
    # Mutant query yields rows ordered by source file; confirm upstream.
    mutants = list(select(x for x in Mutant))
    for filename, file_mutants in groupby(
            mutants, key=lambda x: x.line.sourcefile.filename):
        for mutant in file_mutants:
            tc = TestCase("Mutant #{}".format(mutant.id),
                          file=filename,
                          line=mutant.line.line_number,
                          stdout=mutant.line.line)
            status = mutant.status
            # A mutant has exactly one status, so the checks are exclusive.
            if status == BAD_SURVIVED:
                tc.add_failure_info(message=status,
                                    output=get_unified_diff(
                                        mutant.id, dict_synonyms))
            elif status == BAD_TIMEOUT:
                tc.add_error_info(message=status,
                                  error_type="timeout",
                                  output=get_unified_diff(
                                      mutant.id, dict_synonyms))
            elif status == OK_SUSPICIOUS and suspicious_policy != 'ignore':
                report = getattr(tc, 'add_{}_info'.format(suspicious_policy))
                report(message=status,
                       output=get_unified_diff(mutant.id, dict_synonyms))
            elif status == UNTESTED and untested_policy != 'ignore':
                report = getattr(tc, 'add_{}_info'.format(untested_policy))
                report(message=status,
                       output=get_unified_diff(mutant.id, dict_synonyms))

            test_cases.append(tc)

    ts = TestSuite("mutmut", test_cases)
    print(TestSuite.to_xml_string([ts]))
def save_result_xml(exit_code, test_cases, result_file_path):
    """Write *test_cases* as a single-suite JUnit XML file.

    A positive *exit_code* first stamps every case with an error entry.
    """
    if exit_code > 0:
        for case in test_cases:
            case.add_error_info('Test run failed.')
    suite = TestSuite("Azure Arc Conformance Suite", test_cases)
    with open(result_file_path, 'w') as out:
        TestSuite.to_file(out, [suite], prettyprint=False)
Esempio n. 23
0
    def test_single_suite_no_test_cases_utf8(self):
        """Serializes an empty UTF-8 suite to a file and checks its attributes."""
        properties = {'foö': 'bär'}
        package = 'mypäckage'
        timestamp = 1398382805

        suite = TestSuite(name='äöü',
                          test_cases=[],
                          hostname='löcalhost',
                          id='äöü',
                          properties=properties,
                          package=package,
                          timestamp=timestamp)
        (ts, _) = serialize_and_read(suite,
                                     to_file=True,
                                     prettyprint=True,
                                     encoding='utf-8')[0]
        self.assertEqual('testsuite', ts.tagName)
        self.assertEqual(decode(package, 'utf-8'),
                         ts.attributes['package'].value)
        self.assertEqual(str(timestamp), ts.attributes['timestamp'].value)
        first_property = ts.childNodes[0].childNodes[0]
        self.assertEqual(decode('foö', 'utf-8'),
                         first_property.attributes['name'].value)
        self.assertEqual(decode('bär', 'utf-8'),
                         first_property.attributes['value'].value)
Esempio n. 24
0
 def test_init_classname(self):
     """Tests construction with both a name and a classname."""
     (_, read_cases) = serialize_and_read(
         TestSuite('test', [TestCase('Test1', 'some.class.name')]))[0]
     verify_test_case(self, read_cases[0],
                      {'name': 'Test1', 'classname': 'some.class.name'})
Esempio n. 25
0
    def exporter_junit_ioper(self,
                             test_result_ext,
                             test_suite_properties=None):
        """Export ioper results as a JUnit XML string, one suite per platform."""
        from junit_xml import TestSuite, TestCase
        test_suites = []

        for platform in sorted(test_result_ext.keys()):
            # {platform : ['Platform', 'Result', 'Scope', 'Description'])
            platform_cases = []
            for result, name, scope, description in test_result_ext[platform]:
                # One zero-duration case per result row, with the
                # description captured as stdout.
                tc = TestCase(name,
                              'test.ioper.%s.%s.%s' % (platform, name, scope),
                              0,
                              description,
                              '')
                if result == 'FAIL':
                    tc.add_failure_info(description, description)
                elif result == 'ERROR':
                    tc.add_error_info(description, description)
                elif result == 'SKIP':
                    tc.add_skipped_info(description, description)
                platform_cases.append(tc)
            test_suites.append(
                TestSuite("test.suite.ioper.%s" % (platform), platform_cases))
        return TestSuite.to_xml_string(test_suites)
Esempio n. 26
0
 def test_init_error_output(self):
     """Tests an error entry carrying only output text."""
     case = TestCase('Error-Output')
     case.add_error_info(output="I errored!")
     (_, read_cases) = serialize_and_read(TestSuite('test', [case]))[0]
     verify_test_case(self, read_cases[0], {'name': 'Error-Output'},
                      error_output="I errored!")
Esempio n. 27
0
def main():
  """Benchmark three swift-apis build configurations and write xUnit output."""
  parser = argparse.ArgumentParser()
  parser.add_argument('swift', help='path to swift executable')
  parser.add_argument('output', help='where to write xUnit output')
  args = parser.parse_args()

  build = [args.swift, 'build']
  test_cases = [
      benchmark(TestCase('debug build'),
                build + ['--product', 'TensorFlow']),
      benchmark(TestCase('release build'),
                build + ['-c', 'release', '--product', 'TensorFlow']),
      # The point of "release build -Onone" is to compile TensorFlow in
      # "-whole-module-optimization" mode without "-O".
      benchmark(TestCase('release build -Onone'),
                build + ['-c', 'release', '--product', 'TensorFlow',
                         '-Xswiftc', '-Onone']),
  ]

  test_suite = TestSuite('swift-apis compile time', test_cases)

  with open(args.output, 'w') as f:
    TestSuite.to_file(f, [test_suite])
Esempio n. 28
0
def junit_report(results):
    """Write browser autoupdate results as a JUnit XML file next to this script.

    Each result dict needs 'browser', 'version', 'testCaseNum',
    'elapsedTime', 'result' and 'data' keys; a 'result' or 'data' value of
    'Failed' marks the case as failed.
    """
    test_cases = []
    for result in results:
        passed = result["result"] != 'Failed' and result["data"] != 'Failed'
        if passed:
            stdout = "result: {}, data: {}".format(result["result"],
                                                   result["data"])
        else:
            stdout = "result: Failed, data: Failed"
        # Construct the case once instead of duplicating the call per branch.
        tc = TestCase(
            name="browser: {}, version: {}".format(result["browser"],
                                                   result["version"]),
            classname='TestCaseNum {}'.format(result["testCaseNum"]),
            elapsed_sec=round(result["elapsedTime"], 1),
            stdout=stdout)
        if not passed:
            tc.add_failure_info("Failed")
        test_cases.append(tc)
    ts = TestSuite("autoupdate", test_cases)
    report_path = os.path.abspath(os.path.dirname(__file__)) + '/junit_output.xml'
    with open(report_path, 'w') as f:
        TestSuite.to_file(f, [ts])
Esempio n. 29
0
    def get_test_suites(self, use_bc_ids=False) -> List[TestSuite]:
        """Group all check records into one jUnit TestSuite per check id/name."""
        grouped: defaultdict = defaultdict(list)
        all_records = self.passed_checks + self.failed_checks + self.skipped_checks
        for record in all_records:
            check_name = f"{record.get_output_id(use_bc_ids)}/{record.check_name}"

            case = TestCase(name=f"{self.check_type} {check_name} {record.resource}",
                            file=record.file_path,
                            classname=record.check_class)
            result = record.check_result["result"]
            if result == CheckResult.FAILED:
                if record.file_path and record.file_line_range:
                    case.add_failure_info(
                        f"Resource {record.resource} failed in check {check_name} - {record.file_path}:{record.file_line_range} - Guideline: {record.guideline}"
                    )
                else:
                    case.add_failure_info(
                        f"Resource {record.resource} failed in check {check_name}"
                    )
            if result == CheckResult.SKIPPED:
                case.add_skipped_info(
                    f'Resource {record.resource} skipped in check {check_name} \n Suppress comment: {record.check_result["suppress_comment"]} - Guideline: {record.guideline}'
                )

            grouped[check_name].append(case)
        return [
            TestSuite(name=check,
                      test_cases=cases,
                      package=cases[0].classname)
            for check, cases in grouped.items()
        ]
Esempio n. 30
0
 def test_init_error_message(self):
     """Tests an error entry carrying only a message."""
     case = TestCase('Error-Message')
     case.add_error_info("error message")
     (_, read_cases) = serialize_and_read(TestSuite('test', [case]))[0]
     verify_test_case(self, read_cases[0], {'name': 'Error-Message'},
                      error_message="error message")