def serialize_and_read(test_suites, to_file=False, prettyprint=None):
    """writes the test suite to an XML string and then re-reads it using minidom,
       returning => (test suite element, list of test case elements)"""
    try:
        iter(test_suites)
    except TypeError:
        test_suites = [test_suites]

    if to_file:
        fd, filename = tempfile.mkstemp(text=True)
        with os.fdopen(fd, 'w') as f:
            TestSuite.to_file(f, test_suites)

        print("Serialized XML to temp file [%s]" % filename)
        xmldoc = minidom.parse(filename)
        os.remove(filename)
    else:
        if prettyprint is not None:
            xml_string = TestSuite.to_xml_string(test_suites, prettyprint=prettyprint)
        else:
            xml_string = TestSuite.to_xml_string(test_suites)
        print("Serialized XML to string:\n%s" % xml_string)
        xmldoc = minidom.parseString(xml_string)

    ret = []
    suites = xmldoc.getElementsByTagName("testsuites")[0]
    for suite in suites.getElementsByTagName("testsuite"):
        cases = suite.getElementsByTagName("testcase")
        ret.append((suite, cases))
    return ret
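
As a quick usage sketch (names are illustrative; TestSuite and TestCase are assumed to come from junit_xml), a test can round-trip a suite through this helper and assert against the returned minidom elements:

def test_round_trip_sketch():
    # serialize a one-case suite and unpack the first (suite, cases) tuple
    suite, cases = serialize_and_read(TestSuite('suite1', [TestCase('Test1')]))[0]
    assert suite.getAttribute('name') == 'suite1'
    assert cases[0].getAttribute('name') == 'Test1'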
Example #4
    def test_to_xml_string_test_suites_not_a_list(self):
        test_suites = TestSuite('suite1', [TestCase('Test1')])

        try:
            TestSuite.to_xml_string(test_suites)
        except Exception as exc:
            self.assertEqual(str(exc),
                             'test_suites must be a list of test suites')
Example #5
    def exporter_junit_ioper(self, test_result_ext, test_suite_properties=None):
        from junit_xml import TestSuite, TestCase

        test_suites = []
        test_cases = []

        for platform in sorted(test_result_ext.keys()):
            # {platform : ['Platform', 'Result', 'Scope', 'Description']}
            test_cases = []
            for tr_result in test_result_ext[platform]:
                result, name, scope, description = tr_result

                classname = "test.ioper.%s.%s.%s" % (platform, name, scope)
                elapsed_sec = 0
                _stdout = description
                _stderr = ""
                # Test case
                tc = TestCase(name, classname, elapsed_sec, _stdout, _stderr)
                # Test case extra failure / error info
                if result == "FAIL":
                    tc.add_failure_info(description, _stdout)
                elif result == "ERROR":
                    tc.add_error_info(description, _stdout)
                elif result == "SKIP" or result == "NOT_SUPPORTED":
                    tc.add_skipped_info(description, _stdout)

                test_cases.append(tc)
            ts = TestSuite("test.suite.ioper.%s" % (platform), test_cases)
            test_suites.append(ts)
        return TestSuite.to_xml_string(test_suites)
Example #6
def main():
    parser = argparse.ArgumentParser(description='dummy test')
    parser.add_argument('-classes', type=int, default=5, help='number of classes')
    parser.add_argument('-testcases', type=int, default=10, help='number of testcases')
    parser.add_argument('-pass_rate', type=int, default=75, help='pass rate')
    parser.add_argument('-error_rate', type=int, default=20, help='error rate')
    parser.add_argument('-failure_rate', type=int, default=10, help='failure rate')
    parser.add_argument('-skip_rate', type=int, default=10, help='skip rate')
    parser.add_argument('-outputfile', type=str, default='test_results.xml', help='output file')
    parser.add_argument('-print', action='store_true', help='print the test results')
    args = parser.parse_args()

    ts = TestSuite(name='my test suite', hostname=platform.node(), timestamp=datetime.now())
    for i in range(args.classes):
        for j in range(args.testcases):
            tc = TestCase(classname=f"myclass{i}",
                          name=f"mytest{j}",
                          elapsed_sec=random.randint(100, 1000),
                          stdout = "stdout output",
                          stderr = "stderr output")
            if random.randint(0, 100) < args.pass_rate:
                if random.randint(0, 100) < args.error_rate:
                    tc.add_error_info(message=f"error {i} {j}", output="error output message", error_type="ERR1")
                elif random.randint(0, 100) < args.failure_rate:
                    tc.add_failure_info(message=f"failure {i} {j}", output="failure output message", failure_type="FAIL1")
                elif random.randint(0, 100) < args.skip_rate:
                    tc.add_skipped_info(message=f"skipped {i} {j}", output="skipped output message")
            ts.test_cases.append(tc)

    # pretty printing is on by default but can be disabled using prettyprint=False
    if args.print:
        print(TestSuite.to_xml_string([ts]))

    with open(args.outputfile, 'w') as f:
        TestSuite.to_file(f, [ts], prettyprint=True)
Example #7
def generate_junit_xml(inputfile):
    target = None
    suite = None
    infos = []
    errors = []
    testcases = []

    for line in inputfile:
        tag = line[0:3]
        props = line[3:].split(':')
        if tag == "[!]":
            if len(props) == 2:
                if props[0].strip().lower() == "target":
                    target = os.path.basename(props[1].strip())
                elif props[0].strip().lower() == "group":
                    suite = props[1].strip()
                else:
                    infos.append(line)
            else:
                infos.append(line)
        if tag == "[x]":
            errors.append(line)
        if tag == "[+]":
            testcases.append(TestCase(name=props[0].strip(), classname=target, stdout=line))
        if tag == "[-]":
            tc = TestCase(name=props[0].strip(), classname=target)
            tc.add_failure_info(message=props[1].strip(), output=line, failure_type="failed")
            testcases.append(tc)

    ts = TestSuite(name=suite, test_cases=testcases, stdout="\n".join(infos), stderr="\n".join(errors))
    return TestSuite.to_xml_string([ts])
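
A hypothetical input log for this parser (the three-character tag prefixes drive the dispatch above; the target, group, and test names are made up):

sample_lines = [
    "[!] Target: firmware.bin",
    "[!] Group: smoke",
    "[+] boot_ok",
    "[-] flash_write: timeout waiting for ack",
    "[x] device disconnected during teardown",
]
print(generate_junit_xml(sample_lines))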
Example #8
def main():
    cwd = os.getcwd()
    args = parse_args()
    clair_parsed_file = {"vulnerabilities": []}
    clair_parsed_error_file = None
    try:
        if os.path.exists(args.clairfile):
            with open(args.clairfile) as clairfile:
                clair_parsed_file = json.load(clairfile)
        setup_errors_log = os.path.join("clair-scanner-logs",
                                        "clair_setup_errors.log")
        if os.path.exists(setup_errors_log):
            with open(setup_errors_log, 'r') as clairfile_errors:
                clair_parsed_error_file = clairfile_errors.readlines()
    except Exception:
        logger.exception("Failed to parse clair / clair_error file.  Exiting.")
        sys.exit(1)

    current_sorted_level = None
    current_suite = None
    test_suites = []
    if clair_parsed_error_file:
        current_suite = TestSuite("SetupError")
        new_step = TestCase(name="SetupError",
                            classname="SetupError",
                            status="unapproved",
                            stderr=clair_parsed_error_file)
        new_step.log = clair_parsed_error_file
        new_step.category = "SetupError"
        new_step.failure_type = "unapproved"
        new_step.failure_message = "Please have the following setup errors reviewed by Splunk."
        new_step.failure_output = clair_parsed_error_file
        current_suite.test_cases.append(new_step)
        test_suites.append(current_suite)
        # reset so the vulnerability loop below does not re-append this suite
        current_suite = None
    for vuln in clair_parsed_file["vulnerabilities"]:
        if current_sorted_level != vuln["severity"]:
            if current_suite:
                test_suites.append(current_suite)
            current_suite = TestSuite(name=vuln["severity"])
            current_sorted_level = vuln["severity"]
        new_step = TestCase(name=vuln["vulnerability"],
                            classname=vuln["severity"],
                            status="unapproved",
                            url=vuln["link"],
                            stderr=vuln["description"])
        new_step.log = vuln
        new_step.category = vuln["severity"]
        new_step.failure_type = "unapproved"
        new_step.failure_message = "Please have the following security issue reviewed by Splunk: {}".format(
            vuln["link"])
        new_step.failure_output = vuln["description"]
        current_suite.test_cases.append(new_step)
    # append the suite for the last severity level
    if current_suite:
        test_suites.append(current_suite)
    # try to write new file
    try:
        with open(args.output, 'w') as outfile:
            outfile.write(TestSuite.to_xml_string(test_suites))
    except Exception:
        logger.exception("Failed to save file.")
Example #9
def main():
    SignalHandler.init()
    args = parseArguments()
    scriptFiles = getScriptFiles(args.start, args.scriptName)
    testSuits = createTestSuits(scriptFiles, args.start)
    markTestSkipedByPattern(testSuits, args.pattern)
    genTestExecuter = [TestExecuter(ts, tc, args.upgradeScriptFile)
                       for ts in testSuits for tc in ts.test_cases
                       if not tc.is_skipped()]
    if args.jobs is None:
        runner = Serial(genTestExecuter, failfast=args.failfast)
    else:
        runner = Parallel(genTestExecuter, failfast=args.failfast,
                          max_workers=args.jobs)
    startTime = time.time()
    processStartTime = time.process_time()
    res = runner.run(".", printer=Printer(args.verbose is not None))
    processTime = time.process_time() - processStartTime
    executionTime = time.time() - startTime
    if (args.failfast and res is False) or SignalHandler.failFast():
        for ts in testSuits:
            for tc in ts.test_cases:
                if tc.elapsed_sec is None:
                    if SignalHandler.failFast():
                        tc.add_skipped_info("skipped due to " + SignalHandler.signalName())
                    else:
                        tc.add_skipped_info("skipped due to --failfast argument")
    report = getReport(testSuits, processTime, executionTime)
    Printer(args.verbose is not None).print(report["text"])
    if args.reportfile:
        ensure_dir(args.reportfile)
        with open(args.reportfile, "w") as fw:
            fw.write(TestSuite.to_xml_string(testSuits))
    exit(report["error"] > 0)
Example #10
def parse(infile, outfile, format_type, classname, suitename):
    testcases = list()
    testcase_logs = list()
    current = None
    test_block_delimiter = known_formats[format_type]['tb_delimiter']

    # separate log file into test blocks by test block delimiter
    for line in infile:
        if test_block_delimiter(line):
            if current:  # non-empty list
                testcase_logs.append(current)
            current = list()
        if current is not None:
            current.append(line)

    # add the last block, if any
    if current:
        testcase_logs.append(current)

    # create test cases from test blocks
    for entry in testcase_logs:
        testcases.append(known_formats[format_type]['test_parser'](entry,
                                                                   classname))

    # generate test suite result using provided test cases
    test_suite = TestSuite(suitename, testcases)

    # get rid of unnecessary 'disabled' strings in formatted xml string
    s = TestSuite.to_xml_string([test_suite])
    s = s.replace(' disabled=\"0\"', '')

    # write xml to outfile
    outfile.write(s)
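
For reference, known_formats is expected to map each format name to a block-start predicate ('tb_delimiter') and a block parser ('test_parser') that returns a TestCase; a minimal hypothetical entry might look like this:

known_formats = {
    'gtest': {
        # a new test block starts on lines like "[ RUN      ] Suite.Name"
        'tb_delimiter': lambda line: line.startswith('[ RUN'),
        # build a TestCase from the accumulated block of log lines
        'test_parser': lambda block, classname: TestCase(
            name=block[0].strip(), classname=classname, stdout=''.join(block)),
    },
}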
Example #11
def exporter_junit(test_result_ext, test_suite_properties=None):
    """! Export test results in JUnit XML compliant format
    @details This function will import junit_xml library to perform report conversion
    @return String containing Junit XML formatted test result output
    """
    from junit_xml import TestSuite, TestCase

    test_suites = []
    test_cases = []

    targets = sorted(test_result_ext.keys())
    for target in targets:
        test_cases = []
        tests = sorted(test_result_ext[target].keys())
        for test in tests:
            test_results = test_result_ext[target][test]
            classname = 'test.%s.%s' % (target, test)
            elapsed_sec = test_results['elapsed_time']
            _stdout = test_results['single_test_output']
            _stderr = ''
            # Test case
            tc = TestCase(test, classname, elapsed_sec, _stdout, _stderr)
            # Test case extra failure / error info
            if test_results['single_test_result'] == 'FAIL':
                message = test_results['single_test_result']
                tc.add_failure_info(message, _stdout)
            elif test_results['single_test_result'] != 'OK':
                message = test_results['single_test_result']
                tc.add_error_info(message, _stdout)

            test_cases.append(tc)
        ts = TestSuite("test.suite.%s" % target, test_cases)
        test_suites.append(ts)
    return TestSuite.to_xml_string(test_suites)
Example #12
def writeJUnitSLAContent(slas, test, filepath):

    logger = logging.getLogger("root")
    if slas is None: return

    logger.info("writeJUnitSLAContent: " + test.id + " to " + filepath)

    try:
        indicators = slas['indicators']
        perrun = slas['perrun']
        perinterval = slas['perinterval']

        suites = []

        for sla in perrun:
            ts = getSLATestSuites(test, "PerRun", sla)
            suites.append(ts)

        for sla in perinterval:
            ts = getSLATestSuites(test, "PerInterval", sla)
            suites.append(ts)

        logger.debug(TestSuite.to_xml_string(suites))

        with open(filepath, 'w') as f:
            TestSuite.to_file(f, suites, prettyprint=True)

        return True
    except Exception:
        logger.exception("Unexpected error at 'writeJUnitSLAContent'")

    return False
Example #13
    def exporter_junit_ioper(self,
                             test_result_ext,
                             test_suite_properties=None):
        from junit_xml import TestSuite, TestCase
        test_suites = []
        test_cases = []

        for platform in sorted(test_result_ext.keys()):
            # {platform : ['Platform', 'Result', 'Scope', 'Description']}
            test_cases = []
            for tr_result in test_result_ext[platform]:
                result, name, scope, description = tr_result

                classname = 'test.ioper.%s.%s.%s' % (platform, name, scope)
                elapsed_sec = 0
                _stdout = description
                _stderr = ''
                # Test case
                tc = TestCase(name, classname, elapsed_sec, _stdout, _stderr)
                # Test case extra failure / error info
                if result == 'FAIL':
                    tc.add_failure_info(description, _stdout)
                elif result == 'ERROR':
                    tc.add_error_info(description, _stdout)
                elif result == 'SKIP':
                    tc.add_skipped_info(description, _stdout)

                test_cases.append(tc)
            ts = TestSuite("test.suite.ioper.%s" % (platform), test_cases)
            test_suites.append(ts)
        return TestSuite.to_xml_string(test_suites)
Example #14
def _write_junit_report(site, config, output_file, total_time):
    pages = site.pages
    test_cases = []

    for results, resource in pages.items():
        origins = [source.origin.geturl() for source in resource.sources]
        if resource.status == 200:
            test_case = TestCase(name=resource.url_split.geturl(),
                                 classname=results.hostname,
                                 elapsed_sec=resource.response_time,
                                 stdout=resource.status,
                                 status="passed")
        else:
            stderr_message = "Link found on:\n{}".format("\n".join(origins))
            test_case = TestCase(name=resource.url_split.geturl(),
                                 classname=results.hostname,
                                 elapsed_sec=resource.response_time,
                                 stderr=stderr_message,
                                 status="failed")
            if resource.exception:
                message = str(resource.exception)
            else:
                message = "Expected 200 OK but got {}".format(resource.status)
            test_case.add_failure_info(message=message,
                                       failure_type="UnexpectedStatusCode")
        test_cases.append(test_case)
    test_suite = TestSuite("pylinkvalidator test suite", test_cases)
    output_file.write(TestSuite.to_xml_string([test_suite]))
    print_summary(site, config, total_time)
Example #15
def parse(infile, outfile, format_type, classname, suitename):
    testcases = list()
    testcase_logs = list()
    current = None
    test_block_delimiter = known_formats[format_type]['tb_delimiter']

    # separate log file into test blocks by test block delimiter
    for line in infile:
        if test_block_delimiter(line):
            if current: # non-empty list
                testcase_logs.append(current)
            current = list()
        if current is not None:
            current.append(line)

    # add the last block, if any
    if current:
        testcase_logs.append(current)

    # create test cases from test blocks
    for entry in testcase_logs:
        testcases.append(known_formats[format_type]['test_parser'](entry, classname))

    # generate test suite result using provided test cases
    test_suite = TestSuite(suitename, testcases)

    # get rid of unnecessary 'disabled' strings in formatted xml string
    s = TestSuite.to_xml_string([test_suite])
    s = s.replace(' disabled=\"0\"', '')

    # write xml to outfile
    outfile.write(s)
Example #16
def print_result_cache_junitxml(dict_synonyms, suspicious_policy,
                                untested_policy):
    test_cases = []
    l = list(select(x for x in Mutant))
    for filename, mutants in groupby(l,
                                     key=lambda x: x.line.sourcefile.filename):
        for mutant in mutants:
            tc = TestCase("Mutant #{}".format(mutant.id),
                          file=filename,
                          line=mutant.line.line_number,
                          stdout=mutant.line.line)
            if mutant.status == BAD_SURVIVED:
                tc.add_failure_info(message=mutant.status,
                                    output=get_unified_diff(
                                        mutant.id, dict_synonyms))
            if mutant.status == BAD_TIMEOUT:
                tc.add_error_info(message=mutant.status,
                                  error_type="timeout",
                                  output=get_unified_diff(
                                      mutant.id, dict_synonyms))
            if mutant.status == OK_SUSPICIOUS:
                if suspicious_policy != 'ignore':
                    func = getattr(tc, 'add_{}_info'.format(suspicious_policy))
                    func(message=mutant.status,
                         output=get_unified_diff(mutant.id, dict_synonyms))
            if mutant.status == UNTESTED:
                if untested_policy != 'ignore':
                    func = getattr(tc, 'add_{}_info'.format(untested_policy))
                    func(message=mutant.status,
                         output=get_unified_diff(mutant.id, dict_synonyms))

            test_cases.append(tc)

    ts = TestSuite("mutmut", test_cases)
    print(TestSuite.to_xml_string([ts]))
Example #17
    def create_report(self,
                      report_file=None,
                      suite_name='Test Suite',
                      suite_package='root'):
        """
        Create a test suite and write report to file specified
        in report_file.  Returns the resulting report string (or None if not
        activated.)
        """
        if self.activated:
            ts = [
                TestSuite(suite_name, self.test_cases, package=suite_package)
            ]

            xmls = TestSuite.to_xml_string(ts, prettyprint=True)
            if report_file:
                if hasattr(report_file, 'write'):
                    # assume file-like
                    report_file.write(xmls)
                else:
                    # force an .xml extension on plain filenames
                    if '.xml' not in report_file.casefold():
                        report_file = report_file.split('.')[0] + '.xml'
                    # Note: depending on system, mode may need to be set to wb
                    with open(report_file, mode='w') as f:
                        f.write(xmls)
                    print('Test report written to {}'.format(
                        os.path.abspath(f.name)))
            return xmls
Example #18
def generate_junit_report_from_cfn_nag(report):
    """Generate Test Case from cfn_nag report"""

    total_failures = 0
    test_cases = []

    for file_findings in report:
        for violation in file_findings["file_results"]['violations']:
            total_failures += 1
            for i, resource_id in enumerate(violation['logical_resource_ids']):

                test_case = TestCase("%s - %s" %
                                     (violation['id'], violation['message']),
                                     classname=resource_id)

                test_case.add_failure_info(
                    output="%s#L%s" %
                    (file_findings['filename'], violation['line_numbers'][i]))

                test_cases.append(test_case)

    test_suite = TestSuite("cfn-nag test suite", test_cases)

    if total_failures > 0:
        # touch an empty marker file so callers can detect failures
        f = open("CFN_NAG_FAILURE", "a")
        f.close()

    return TestSuite.to_xml_string([test_suite], prettyprint=False)
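
The report shape this function expects, inferred from the lookups above, matches cfn_nag's JSON output; a made-up minimal input (note the function also touches a CFN_NAG_FAILURE marker file when violations exist):

sample_report = [{
    "filename": "templates/app.yaml",
    "file_results": {
        "violations": [{
            "id": "F3",
            "message": "IAM role should not allow * action",
            "logical_resource_ids": ["AppRole"],
            "line_numbers": [42],
        }]
    },
}]
print(generate_junit_report_from_cfn_nag(sample_report))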
Example #19
def build_junit_xml_output(rule_matches: List[RuleMatch],
                           rules: FrozenSet[Rule]) -> str:
    """
    Format matches in JUnit XML format.
    """
    test_cases = [match.to_junit_xml() for match in rule_matches]
    ts = TestSuite("semgrep results", test_cases)
    return cast(str, TestSuite.to_xml_string([ts]))
Example #20
    def execute(self, log, keyvals, testDef):
        testDef.logger.verbose_print("JunitXML Reporter")
        # pickup the options
        cmds = {}
        testDef.parseOptions(log, self.options, keyvals, cmds)
        if cmds['filename'] is not None:
            self.fh = open(cmds['filename'] if os.path.isabs(cmds['filename']) \
                           else os.path.join(testDef.options['scratchdir'],cmds['filename']), 'w')
        if testDef.options['description'] is not None:
            print(testDef.options['description'], file=self.fh)
            print(file=self.fh)
       
        # Use the Junit classname field to store the list of inifiles
        try:
            classname = testDef.log['inifiles']
        except KeyError:
            classname = None
        # get the entire log of results
        fullLog = testDef.logger.getLog(None)
        testCases = []
        # TODO: ain't nobody got time for that.  8-).
        time = 0
        for lg in fullLog:
            if 'stdout' in lg and lg['stdout'] is not None:
                stdout = "\n".join(lg['stdout'])
            else:
                stdout = None
            if 'stderr' in lg and lg['stderr'] is not None:
                stderr = "\n".join(lg['stderr'])
            else:
                stderr = None
            if 'time' in lg and lg['time'] is not None:
                time = lg['time']
            else:
                time = 0
            tc = TestCase(lg['section'], classname, time, stdout, stderr)
            try:
                if 0 != lg['status']:
                    # Find sections prefixed with 'TestRun'
                    if re.match("TestRun", lg['section']):
                        tc.add_failure_info("Test reported failure")
                    else:
                        tc.add_error_info("Test error")
            except KeyError:
                sys.exit(lg['section'] + " is missing status!")
            testCases.append(tc)

        # TODO:  Pull in the resource manager jobid.
        jobid = "job1"
        ts = TestSuite(jobid, testCases)
        print(TestSuite.to_xml_string([ts]), file=self.fh)

        if cmds['filename'] is not None:
            self.fh.close()
        log['status'] = 0
        return
Example #21
    def execute(self, log, keyvals, testDef):
        testDef.logger.verbose_print("JunitXML Reporter")
        # pickup the options
        cmds = {}
        testDef.parseOptions(log, self.options, keyvals, cmds)
        if cmds['filename'] is not None:
            self.fh = open(cmds['filename'] if os.path.isabs(cmds['filename']) \
                           else os.path.join(cmds['scratch'],cmds['filename']), 'w')
        if testDef.options['description'] is not None:
            print(testDef.options['description'], file=self.fh)
            print(file=self.fh)
       
        # Use the Junit classname field to store the list of inifiles
        try:
            classname = testDef.log['inifiles']
        except KeyError:
            classname = None
        # get the entire log of results
        fullLog = testDef.logger.getLog(None)
        testCases = []
        # TODO: ain't nobody got time for that.  8-).
        time = 0
        for lg in fullLog:
            if 'stdout' in lg and lg['stdout'] is not None:
                stdout = "\n".join(lg['stdout'])
            else:
                stdout = None
            if 'stderr' in lg and lg['stderr'] is not None:
                stderr = "\n".join(lg['stderr'])
            else:
                stderr = None
            if 'time' in lg and lg['time'] is not None:
                time = lg['time']
            else:
                time = 0
            tc = TestCase(lg['section'], classname, time, stdout, stderr)
            try:
                if 0 != lg['status']:
                    # Find sections prefixed with 'TestRun'
                    if re.match("TestRun", lg['section']):
                        tc.add_failure_info("Test reported failure")
                    else:
                        tc.add_error_info("Test error")
            except KeyError:
                sys.exit(lg['section'] + " is missing status!")
            testCases.append(tc)

        # TODO:  Pull in the resource manager jobid.
        jobid = "job1"
        ts = TestSuite(jobid, testCases)
        print(TestSuite.to_xml_string([ts]), file=self.fh)

        if cmds['filename'] is not None:
            self.fh.close()
        log['status'] = 0
        return
Example #22
def run_api_tests(args, data_format):
    endpoints = []
    for i in range(len(args.host)):
        endpoints.append({"host": args.host[i], "port": args.port[i], "version": args.version[i]})
    results = run_tests(args.suite, endpoints, [args.selection])
    if data_format == "xml":
        formatted_test_results = format_test_results(results, endpoints, "junit", args)
        return TestSuite.to_xml_string([formatted_test_results], prettyprint=True)
    else:
        formatted_test_results = format_test_results(results, endpoints, "json", args)
        return json.loads(formatted_test_results)
Example #23
    def exporter_junit(self, test_result_ext, test_suite_properties=None):
        """ Export test results in JUnit XML compliant format
        """
        from junit_xml import TestSuite, TestCase
        test_suites = []
        test_cases = []

        targets = sorted(test_result_ext.keys())
        for target in targets:
            toolchains = sorted(test_result_ext[target].keys())
            for toolchain in toolchains:
                test_cases = []
                tests = sorted(test_result_ext[target][toolchain].keys())
                for test in tests:
                    test_results = test_result_ext[target][toolchain][test]
                    for test_res in test_results:
                        test_ids = sorted(test_res.keys())
                        for test_no in test_ids:
                            test_result = test_res[test_no]
                            name = test_result['description']
                            classname = '%s.%s.%s.%s' % (self.package, target,
                                                         toolchain,
                                                         test_result['id'])
                            elapsed_sec = test_result['elapsed_time']
                            _stdout = test_result['output']

                            if 'target_name_unique' in test_result:
                                _stderr = test_result['target_name_unique']
                            else:
                                _stderr = test_result['target_name']

                            # Test case
                            tc = TestCase(name, classname, elapsed_sec,
                                          _stdout, _stderr)

                            # Test case extra failure / error info
                            message = test_result['result']
                            if test_result['result'] == 'FAIL':
                                tc.add_failure_info(message, _stdout)
                            elif test_result['result'] == 'SKIP' or test_result[
                                    "result"] == 'NOT_SUPPORTED':
                                tc.add_skipped_info(message, _stdout)
                            elif test_result['result'] != 'OK':
                                tc.add_error_info(message, _stdout)

                            test_cases.append(tc)

                ts = TestSuite(
                    "test.suite.%s.%s" % (target, toolchain),
                    test_cases,
                    properties=test_suite_properties[target][toolchain])
                test_suites.append(ts)
        return TestSuite.to_xml_string(test_suites)
Example #24
    def take_action(self, args):
        test_cases = []
        if args.playbook is not None:
            playbooks = args.playbook
            results = (models.TaskResult().query
                       .join(models.Task)
                       .filter(models.TaskResult.task_id == models.Task.id)
                       .filter(models.Task.playbook_id.in_(playbooks)))
        else:
            results = models.TaskResult().query.all()

        for result in results:
            task_name = result.task.name
            if not task_name:
                task_name = result.task.action
            additional_results = {
                'host': result.host.name,
                'playbook_path': result.task.playbook.path
            }
            result_str = jsonutils.dumps(additional_results)
            test_path = \
                u'{playbook_file}.{play_name}'.format(
                    playbook_file=os.path.basename(result.task.playbook.path),
                    play_name=result.task.play.name)
            test_case = TestCase(
                name=task_name,
                classname=test_path,
                elapsed_sec=result.duration.seconds,
                stdout=result_str)
            if result.status == 'skipped':
                test_case.add_skipped_info(message=result.result)
            elif ((result.status in ('failed', 'unreachable') and
                    result.ignore_errors is False and
                    'EXPECTED FAILURE' not in task_name and
                    'TOGGLE RESULT' not in task_name) or
                    (result.status == 'ok' and 'TOGGLE RESULT' in task_name)):
                test_case.add_failure_info(message=result.result)
            test_cases.append(test_case)
        test_suite = TestSuite('Ansible Tasks', test_cases)

        # TODO: junit_xml doesn't order the TestCase parameters.
        # This makes it so the order of the parameters for the same exact
        # TestCase is not guaranteed to be the same and thus results in a
        # different stdout (or file). This is easily reproducible on Py3.
        xml_string = six.text_type(test_suite.to_xml_string([test_suite]))
        if args.output_file == '-':
            if six.PY2:
                sys.stdout.write(encodeutils.safe_encode(xml_string))
            else:
                sys.stdout.buffer.write(encodeutils.safe_encode(xml_string))
        else:
            with open(args.output_file, 'wb') as f:
                f.write(encodeutils.safe_encode(xml_string))
Example #25
def get_junit_xml(res):
    # notes = res.get_notes_by_tag(MCDPManualConstants.NOTE_TAG_WARNING)
    notes = res.get_notes_by_tag(MCDPManualConstants.NOTE_TAG_ERROR)

    test_cases = []
    for i, note in enumerate(notes):
        tc = junit_test_case_from_note(i, note)
        test_cases.append(tc)

    ts = TestSuite("notes", test_cases)

    return TestSuite.to_xml_string([ts])
Example #26
    def exporter_junit(self, test_result_ext, test_suite_properties=None):
        """ Export test results in JUnit XML compliant format
        """
        from junit_xml import TestSuite, TestCase

        test_suites = []
        test_cases = []

        targets = sorted(test_result_ext.keys())
        for target in targets:
            toolchains = sorted(test_result_ext[target].keys())
            for toolchain in toolchains:
                test_cases = []
                tests = sorted(test_result_ext[target][toolchain].keys())
                for test in tests:
                    test_results = test_result_ext[target][toolchain][test]
                    for test_res in test_results:
                        test_ids = sorted(test_res.keys())
                        for test_no in test_ids:
                            test_result = test_res[test_no]
                            name = test_result["description"]
                            classname = "%s.%s.%s.%s" % (self.package, target, toolchain, test_result["id"])
                            elapsed_sec = test_result["elapsed_time"]
                            _stdout = test_result["output"]

                            if "target_name_unique" in test_result:
                                _stderr = test_result["target_name_unique"]
                            else:
                                _stderr = test_result["target_name"]

                            # Test case
                            tc = TestCase(name, classname, elapsed_sec, _stdout, _stderr)

                            # Test case extra failure / error info
                            message = test_result["result"]
                            if test_result["result"] == "FAIL":
                                tc.add_failure_info(message, _stdout)
                            elif test_result["result"] == "SKIP" or test_result["result"] == "NOT_SUPPORTED":
                                tc.add_skipped_info(message, _stdout)
                            elif test_result["result"] != "OK":
                                tc.add_error_info(message, _stdout)

                            test_cases.append(tc)

                ts = TestSuite(
                    "test.suite.%s.%s" % (target, toolchain),
                    test_cases,
                    properties=test_suite_properties[target][toolchain],
                )
                test_suites.append(ts)
        return TestSuite.to_xml_string(test_suites)
Example #27
    def execute(self, log, keyvals, testDef):
        testDef.logger.verbose_print("JunitXML Reporter")
        # pickup the options
        cmds = {}
        testDef.parseOptions(log, self.options, keyvals, cmds)
        if cmds['filename'] is not None:
            self.fh = open(cmds['filename'] if os.path.isabs(cmds['filename']) \
                           else os.path.join(testDef.options['scratchdir'],cmds['filename']), 'w')
       
        # get the entire log of results
        fullLog = testDef.logger.getLog(None)
        testCases = []
        time = 0
        for lg in fullLog:
            if 'stdout' in lg and lg['stdout'] is not None:
                stdout = "\n".join(lg['stdout'])
            else:
                stdout = None
            if 'stderr' in lg and lg['stderr'] is not None:
                stderr = "\n".join(lg['stderr'])
            else:
                stderr = None
            if 'time' in lg and lg['time'] is not None:
                time = lg['time']
            else:
                time = 0
            # Use the hostname of the system we are running on as root of the classname
            # Use the filename without the extension as the next layer of the classname
            hostname = os.uname()[1]
            classname = hostname + "." + cmds['filename'].split('.')[0]
            tc = TestCase(lg['section'], classname, time, stdout, stderr)
            try:
                if 0 != lg['status']:
                    # Find sections prefixed with 'TestRun'
                    if re.match("TestRun", lg['section']):
                        tc.add_failure_info("Test reported failure")
                    else:
                        tc.add_error_info("Test error")
            except KeyError:
                sys.exit(lg['section'] + " is missing status!")
            testCases.append(tc)

        # TODO:  Pull in the resource manager jobid.
        jobid = "job1"
        ts = TestSuite(jobid, testCases)
        print(TestSuite.to_xml_string([ts]), file=self.fh)

        if cmds['filename'] is not None:
            self.fh.close()
        log['status'] = 0
        return
Example #28
    def take_action(self, args):
        test_cases = []
        if args.playbook is not None:
            playbooks = args.playbook
            results = (models.TaskResult().query.join(models.Task).filter(
                models.TaskResult.task_id == models.Task.id).filter(
                    models.Task.playbook_id.in_(playbooks)))
        else:
            results = models.TaskResult().query.all()

        for result in results:
            task_name = result.task.name
            if not task_name:
                task_name = result.task.action
            additional_results = {
                'host': result.host.name,
                'playbook_path': result.task.playbook.path
            }
            result_str = jsonutils.dumps(additional_results)
            test_path = \
                u'{playbook_file}.{play_name}'.format(
                    playbook_file=os.path.basename(result.task.playbook.path),
                    play_name=result.task.play.name)
            test_case = TestCase(name=task_name,
                                 classname=test_path,
                                 elapsed_sec=result.duration.seconds,
                                 stdout=result_str)
            if result.status == 'skipped':
                test_case.add_skipped_info(message=result.result)
            elif ((result.status in ('failed', 'unreachable')
                   and result.ignore_errors is False
                   and 'EXPECTED FAILURE' not in task_name
                   and 'TOGGLE RESULT' not in task_name)
                  or (result.status == 'ok' and 'TOGGLE RESULT' in task_name)):
                test_case.add_failure_info(message=result.result)
            test_cases.append(test_case)
        test_suite = TestSuite('Ansible Tasks', test_cases)

        # TODO: junit_xml doesn't order the TestCase parameters.
        # This makes it so the order of the parameters for the same exact
        # TestCase is not guaranteed to be the same and thus results in a
        # different stdout (or file). This is easily reproducible on Py3.
        xml_string = six.text_type(test_suite.to_xml_string([test_suite]))
        if args.output_file == '-':
            if six.PY2:
                sys.stdout.write(encodeutils.safe_encode(xml_string))
            else:
                sys.stdout.buffer.write(encodeutils.safe_encode(xml_string))
        else:
            with open(args.output_file, 'wb') as f:
                f.write(encodeutils.safe_encode(xml_string))
Example #29
def serialize_and_read(test_suites,
                       to_file=False,
                       prettyprint=False,
                       encoding=None):
    """writes the test suite to an XML string and then re-reads it using minidom,
       returning => (test suite element, list of test case elements)"""
    try:
        iter(test_suites)
    except TypeError:
        test_suites = [test_suites]

    if to_file:
        fd, filename = tempfile.mkstemp(text=True)
        os.close(fd)
        with codecs.open(filename, mode='w', encoding=encoding) as f:
            TestSuite.to_file(f,
                              test_suites,
                              prettyprint=prettyprint,
                              encoding=encoding)
        print("Serialized XML to temp file [%s]" % filename)
        xmldoc = minidom.parse(filename)
        os.remove(filename)
    else:
        xml_string = TestSuite.to_xml_string(test_suites,
                                             prettyprint=prettyprint,
                                             encoding=encoding)
        if PY2:
            assert isinstance(xml_string, unicode)
        print("Serialized XML to string:\n%s" % xml_string)
        if encoding:
            xml_string = xml_string.encode(encoding)
        xmldoc = minidom.parseString(xml_string)

    def remove_blanks(node):
        for x in node.childNodes:
            if x.nodeType == minidom.Node.TEXT_NODE:
                if x.nodeValue:
                    x.nodeValue = x.nodeValue.strip()
            elif x.nodeType == minidom.Node.ELEMENT_NODE:
                remove_blanks(x)

    remove_blanks(xmldoc)
    xmldoc.normalize()

    ret = []
    suites = xmldoc.getElementsByTagName("testsuites")[0]
    for suite in suites.getElementsByTagName("testsuite"):
        cases = suite.getElementsByTagName("testcase")
        ret.append((suite, cases))
    return ret
Example #30
def process_to_xml(process, test_name, suite_name):
    t0 = time()
    stdout, stderr = process.communicate()
    duration = time() - t0
    tc = TestCase(
        name=test_name,
        elapsed_sec=duration,
        stdout=stdout.decode(),
        stderr=stderr.decode(),
    )
    if process.returncode != 0:
        tc.add_failure_info(stderr.decode())
    return TestSuite.to_xml_string(
        [TestSuite(name=suite_name, test_cases=[tc])])
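
A usage sketch with the standard library's subprocess module (the command is arbitrary): the process must be created with piped stdout/stderr so that communicate() above can capture the output.

import subprocess

proc = subprocess.Popen(["echo", "hello"],
                        stdout=subprocess.PIPE,
                        stderr=subprocess.PIPE)
print(process_to_xml(proc, test_name="echo_hello", suite_name="smoke"))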
Example #31
def create_junit_results(data, output=None, **kwargs):
    """
    Creates a Junit result, can write to a file if desired, or return xml string. (used by Jenkins)
    input either dict(dict(dict())) or dict(list(dict()))
    dict = {suite: {test: {stderr,stdout,time,class,err,fail,skip}}}
    list = {suite: [(test, {stderr,stdout,time,class,err,fail,skip})]}
    :param data: A dictionary with dict or list hierarchy
    :param output: A filename to write results to  /path/to/file/*.junit.xml
    :return: Returns an XML string if no output, else nothing.
    """
    log.debug('creating junit results: output={}'.format(output))
    stdout_format = kwargs.pop('stdout_format', None)
    test_class = kwargs.pop('test_class', None)
    package = kwargs.pop('package', None)
    from junit_xml import TestSuite, TestCase
    test_suites = []
    for suite, tests in data.items():
        test_cases = []
        for test, result in (tests if isinstance(tests, list) else tests.items()):
            tc = TestCase(test)
            stdout = result.get('stdout')
            if stdout_format is not None and callable(stdout_format):
                if hasattr(stdout_format, 'func_code') and 'kwargs' in stdout_format.func_code.co_varnames:
                    stdout = stdout_format(stdout, suite_name=suite, test_name=test, **kwargs)
                else:
                    stdout = stdout_format(stdout)
            tc.stdout = stdout
            tc.stderr = result.get('stderr')
            tc.elapsed_sec = result.get('time')
            tc.classname = result.get('class', test_class)
            err = result.get('err')
            if err:
                tc.add_error_info(*err if isinstance(err, (list, tuple)) else [err])
            fail = result.get('fail')
            if fail:
                tc.add_failure_info(*fail if isinstance(fail, (list, tuple)) else [fail])
            skip = result.get('skip')
            if skip:
                tc.add_skipped_info(*skip if isinstance(skip, (list, tuple)) else [skip])
            test_cases.append(tc)
        ts = TestSuite(suite, test_cases, package=package)
        test_suites.append(ts)

    if output:
        check_makedir(os.path.dirname(output))
        with open(output, 'w') as out:
            TestSuite.to_file(out, test_suites)
        return output
    else:
        return TestSuite.to_xml_string(test_suites)
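
A usage sketch of the dict-of-dicts input shape described in the docstring (suite, test names, and results are made up); with no output path the XML string is returned:

data = {
    'suite1': {
        'test_ok': {'stdout': 'all good', 'time': 0.2},
        'test_bad': {'stderr': 'boom', 'time': 1.1, 'fail': 'assertion failed'},
    },
}
print(create_junit_results(data))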
Example #32
def junit_xml(compmake_db):
    jobs = list(all_jobs(compmake_db))
    logger.info('Loaded %d jobs' % len(jobs))
    if len(jobs) < 10:
        logger.error('too few jobs')
        sys.exit(128)

    test_cases = []
    for job_id in jobs:
        tc = junit_test_case_from_compmake(compmake_db, job_id)
        test_cases.append(tc)

    ts = TestSuite("comptests_test_suite", test_cases)

    return TestSuite.to_xml_string([ts])
Example #33
    def show(self):
        measurement_data = "<measurement><name>Events</name><value>%s</value></measurement>" % (
            self.event_count)
        monkey_test = TestCase('monkey', stdout=measurement_data)

        # handle failure msg
        if self.is_success is False:
            monkey_test.add_failure_info(self.failure_msg)

        ts = TestSuite("com.skysoft.kkbox.android", [monkey_test])

        # pretty printing is on by default but can be disabled using prettyprint=False
        junit_data = TestSuite.to_xml_string([ts])

        print(junit_data)
Example #34
    def _generate_report(self):
        """ generate a TestSuite report from the collected TaskData and HostData """

        test_cases = []

        for task_uuid, task_data in self._task_data.items():
            for host_uuid, host_data in task_data.host_data.items():
                test_cases.append(self._build_test_case(task_data, host_data))

        test_suite = TestSuite(self._playbook_name, test_cases)
        report = TestSuite.to_xml_string([test_suite])

        output_file = os.path.join(self._output_dir, '%s-%s.xml' % (self._playbook_name, time.time()))

        with open(output_file, 'wb') as xml:
            xml.write(to_bytes(report, errors='surrogate_or_strict'))
Example #35
    def test_to_xml_string(self):
        test_suites = [TestSuite('suite1', [TestCase('Test1')]),
                       TestSuite('suite2', [TestCase('Test2')])]
        xml_string = TestSuite.to_xml_string(test_suites)
        expected_xml_string = textwrap.dedent("""
            <?xml version="1.0" ?>
            <testsuites>
            \t<testsuite errors="0" failures="0" name="suite1" skipped="0" tests="1" time="0">
            \t\t<testcase name="Test1"/>
            \t</testsuite>
            \t<testsuite errors="0" failures="0" name="suite2" skipped="0" tests="1" time="0">
            \t\t<testcase name="Test2"/>
            \t</testsuite>
            </testsuites>
        """.strip("\n"))
        self.assertEqual(xml_string, expected_xml_string)
Example #36
    def _generate_report(self):
        """ generate a TestSuite report from the collected TaskData and HostData """

        test_cases = []

        for task_uuid, task_data in self._task_data.items():
            for host_uuid, host_data in task_data.host_data.items():
                test_cases.append(self._build_test_case(task_data, host_data))

        test_suite = TestSuite(self._playbook_name, test_cases)
        report = TestSuite.to_xml_string([test_suite])

        output_file = os.path.join(self._output_dir, '%s-%s.xml' % (self._playbook_name, time.time()))

        with open(output_file, 'wb') as xml:
            xml.write(to_bytes(report, errors='strict'))
Example #37
    def exporter_junit(self, test_result_ext, test_suite_properties=None):
        """ Export test results in JUnit XML compliant format
        """
        from junit_xml import TestSuite, TestCase
        test_suites = []
        test_cases = []

        targets = sorted(test_result_ext.keys())
        for target in targets:
            toolchains = sorted(test_result_ext[target].keys())
            for toolchain in toolchains:
                test_cases = []
                tests = sorted(test_result_ext[target][toolchain].keys())
                for test in tests:
                    test_results = test_result_ext[target][toolchain][test]
                    for test_res in test_results:
                        test_ids = sorted(test_res.keys())
                        for test_no in test_ids:
                            test_result = test_res[test_no]
                            name = test_result['description']
                            classname = '%s.%s.%s.%s'% (self.package, target, toolchain, test_result['id'])
                            elapsed_sec = test_result['elapsed_time']
                            _stdout = test_result['output']

                            if 'target_name_unique' in test_result:
                                _stderr = test_result['target_name_unique']
                            else:
                                _stderr = test_result['target_name']

                            # Test case
                            tc = TestCase(name, classname, elapsed_sec, _stdout, _stderr)

                            # Test case extra failure / error info
                            message = test_result['result']
                            if test_result['result'] == 'FAIL':
                                tc.add_failure_info(message, _stdout)
                            elif test_result['result'] == 'SKIP' or test_result["result"] == 'NOT_SUPPORTED':
                                tc.add_skipped_info(message, _stdout)
                            elif test_result['result'] != 'OK':
                                tc.add_error_info(message, _stdout)

                            test_cases.append(tc)

                ts = TestSuite("test.suite.%s.%s"% (target, toolchain), test_cases, properties=test_suite_properties[target][toolchain])
                test_suites.append(ts)
        return TestSuite.to_xml_string(test_suites)
Example #38
def _test():
    test_case1 = TestCase('Testname1', 'SetupCluster.Name')
    test_case2 = TestCase('Testname2', 'SetupCluster.Name')
    test_case3 = TestCase('Testname3', 'SetupCluster.Misc')
    test_case4 = TestCase('Testname4', 'SetupCluster.Misc')

    test_cases = [test_case1, test_case2, test_case3, test_case4]
    ts = TestSuite("My Test Suite", test_cases)

    # Run and verify test case
    assertCase(test_case1, _exampleFunc(True))
    assertCase(test_case2, _exampleFunc(False))
    assertCase(test_case3, _exampleFunc(True))
    # Skip test case
    skipCase(test_case4, "Skip Testname4.", "Testname2 is failed.")

    print(ts.to_xml_string([ts]))
Example #39
    def test_to_xml_string(self):
        test_suites = [
            TestSuite('suite1', [TestCase('Test1')]),
            TestSuite('suite2', [TestCase('Test2')])
        ]
        xml_string = TestSuite.to_xml_string(test_suites)
        expected_xml_string = textwrap.dedent("""
            <?xml version="1.0" ?>
            <testsuites>
            \t<testsuite errors="0" failures="0" name="suite1" skipped="0" tests="1" time="0">
            \t\t<testcase name="Test1"/>
            \t</testsuite>
            \t<testsuite errors="0" failures="0" name="suite2" skipped="0" tests="1" time="0">
            \t\t<testcase name="Test2"/>
            \t</testsuite>
            </testsuites>
        """.strip("\n"))
        self.assertEqual(xml_string, expected_xml_string)
Example #40
    def test_to_xml_string(self):
        test_suites = [TestSuite(name='suite1', test_cases=[TestCase(name='Test1')]),
                       TestSuite(name='suite2', test_cases=[TestCase(name='Test2')])]
        xml_string = TestSuite.to_xml_string(test_suites)
        if PY2:
            self.assertTrue(isinstance(xml_string, unicode))
        expected_xml_string = textwrap.dedent("""
            <?xml version="1.0" ?>
            <testsuites disabled="0" errors="0" failures="0" tests="2" time="0.0">
            \t<testsuite disabled="0" errors="0" failures="0" name="suite1" skipped="0" tests="1" time="0">
            \t\t<testcase name="Test1"/>
            \t</testsuite>
            \t<testsuite disabled="0" errors="0" failures="0" name="suite2" skipped="0" tests="1" time="0">
            \t\t<testcase name="Test2"/>
            \t</testsuite>
            </testsuites>
        """.strip("\n"))  # NOQA
        self.assertEqual(xml_string, expected_xml_string)
Example #41
    def take_action(self, args):
        test_cases = []
        if args.playbook is not None:
            playbooks = args.playbook
            results = (models.TaskResult().query
                       .join(models.Task)
                       .filter(models.TaskResult.task_id == models.Task.id)
                       .filter(models.Task.playbook_id.in_(playbooks)))
        else:
            results = models.TaskResult().query.all()

        for result in results:
            task_name = result.task.name
            if not task_name:
                task_name = result.task.action
            additional_results = {
                'host': result.host.name,
                'playbook_path': result.task.playbook.path
            }
            result_str = json.dumps(additional_results)
            test_path = \
                "{playbook_file}.{play_name}".format(
                    playbook_file=os.path.basename(result.task.playbook.path),
                    play_name=result.task.play.name)
            test_case = TestCase(
                name=task_name,
                classname=test_path,
                elapsed_sec=result.duration.seconds,
                stdout=result_str)
            if result.status == "skipped":
                test_case.add_skipped_info(message=result.result)
            elif (result.status in ("failed", "unreachable") and
                    result.ignore_errors is False):
                test_case.add_failure_info(message=result.result)
            test_cases.append(test_case)
        test_suite = TestSuite("Ansible Tasks", test_cases)

        xml_string = test_suite.to_xml_string([test_suite])
        if args.output_file == "-":
            sys.stdout.write(xml_string)
        else:
            with open(args.output_file, "w") as f:
                f.write(xml_string)
Example #42
def main():
    # Read list of html files in from stdin line by line.
    # This is to enable piping result from `find`
    # Use a multiprocessing pool to get some parallelism
    pool = Pool()

    queries = []
    test_cases = []

    for line in sys.stdin:
        queries.append(pool.apply_async(run_test, [line.strip()]))

    for query in queries:
        test_cases.append(query.get(timeout=10))

    # Dump test results to stdout
    ts = [TestSuite("my test suite", test_cases)]
    print(TestSuite.to_xml_string(ts))
    return 0
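
run_test is not shown; a hypothetical version compatible with the pool above simply wraps the outcome for one HTML file in a TestCase (the validation itself is elided, and the os import is assumed):

def run_test(path):
    # hypothetical: validate one HTML file and record the outcome
    tc = TestCase(name=path, classname="html.validate")
    if not os.path.exists(path):
        tc.add_error_info("file not found")
    return tc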
Example #43
def serialize_and_read(test_suites, to_file=False, prettyprint=False, encoding=None):
    """writes the test suite to an XML string and then re-reads it using minidom,
       returning => (test suite element, list of test case elements)"""
    try:
        iter(test_suites)
    except TypeError:
        test_suites = [test_suites]

    if to_file:
        fd, filename = tempfile.mkstemp(text=True)
        os.close(fd)
        with codecs.open(filename, mode='w', encoding=encoding) as f:
            TestSuite.to_file(f, test_suites, prettyprint=prettyprint, encoding=encoding)
        print("Serialized XML to temp file [%s]" % filename)
        xmldoc = minidom.parse(filename)
        os.remove(filename)
    else:
        xml_string = TestSuite.to_xml_string(
            test_suites, prettyprint=prettyprint, encoding=encoding)
        if PY2:
            assert isinstance(xml_string, unicode)
        print("Serialized XML to string:\n%s" % xml_string)
        if encoding:
            xml_string = xml_string.encode(encoding)
        xmldoc = minidom.parseString(xml_string)

    def remove_blanks(node):
        for x in node.childNodes:
            if x.nodeType == minidom.Node.TEXT_NODE:
                if x.nodeValue:
                    x.nodeValue = x.nodeValue.strip()
            elif x.nodeType == minidom.Node.ELEMENT_NODE:
                remove_blanks(x)
    remove_blanks(xmldoc)
    xmldoc.normalize()

    ret = []
    suites = xmldoc.getElementsByTagName("testsuites")[0]
    for suite in suites.getElementsByTagName("testsuite"):
        cases = suite.getElementsByTagName("testcase")
        ret.append((suite, cases))
    return ret
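# Usage sketch for the helper above, assuming `suite` is a TestSuite built
# elsewhere:
#   suite_el, case_els = serialize_and_read(suite, prettyprint=True)[0]
#   print(suite_el.getAttribute('name'), len(case_els))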
Example #44
def junit_xml(compmake_db):
    from junit_xml import TestSuite

    jobs = list(all_jobs(compmake_db))
    logger.info('Loaded %d jobs' % len(jobs))
    N = 10
    if len(jobs) < N:
        logger.error('too few jobs (I expect at least %s)' % N)
        sys.exit(128)

    test_cases = []
    for job_id in jobs:
        tc = junit_test_case_from_compmake(compmake_db, job_id)
        test_cases.append(tc)

    ts = TestSuite("comptests_test_suite", test_cases)

    res = TestSuite.to_xml_string([ts])
    check_isinstance(res, six.text_type)
    return res
Example #45
def xmlwrite(target, everyline):
	xmltargetpath = os.getcwd() + os.sep + target + '_testresults.xml'
	xmltargetfile = open(xmltargetpath, 'w+')
	test_cases = []
	for line in everyline.split('\n'):
		words = line.split(' ')
		if line.find('success') != -1:
			time_taken = get_time(line.split('-')[-1][7:])
			name = words[0] + ' - ' + line.split('-')[1].split('name:')[1].strip()
			print name
			test_cases.append(TestCase(name, target, time_taken, None))
		elif line.find('failed') != -1:
			time_taken = get_time(line.split('-')[-1][7:])
			name = words[0] + ' - ' + line.split('-')[1].split('name:')[1].strip()
			message = ("-".join(line.split(' - ')[2:-1])).strip()
			tc = TestCase(name, target, time_taken, None)
			tc.add_failure_info(None, message)
			test_cases.append(tc)
	ts = TestSuite("testing this suite", test_cases)
	xmltargetfile.write(TestSuite.to_xml_string([ts]))
	xmltargetfile.close()
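# get_time is referenced above but not shown; a plausible sketch, assuming
# the trailing log field carries a duration such as '1.23s':
import re

def get_time(fragment):
	# Hypothetical reconstruction: pull the first float out of the
	# fragment, falling back to 0.0 when nothing matches.
	match = re.search(r'(\d+(?:\.\d+)?)', fragment)
	return float(match.group(1)) if match else 0.0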
Example #46
	def report(self, report_dir=None):
		test_cases = []
		if report_dir is None:
			report_dir = self.config.report_dir
		for report_file in os.listdir(report_dir):
			if report_file.endswith(".pkl"):
				f = open(os.path.join(report_dir, report_file), "rb")  # binary mode for pickled data
				result_dict = cPickle.load(f)
				f.close()
				tests = result_dict.keys()
				tests.sort()
				for test in tests:
					in_entry = result_dict[test]
					report_entry = TestCase(test, in_entry["CLASS_NAME"], in_entry["TIME"])
					if in_entry["RESULT"] == "FAIL":
						report_entry.add_failure_info(in_entry["MESSAGE"], in_entry["TRACE"])
					elif in_entry["RESULT"] == "ERROR":
						report_entry.add_error_info(in_entry["MESSAGE"], in_entry["TRACE"])
					test_cases.append(report_entry)
		ts = TestSuite("my test suite", test_cases)
		f_xml = open(os.path.join(report_dir, "results.xml"), "w")
		f_xml.write(TestSuite.to_xml_string([ts]))
		f_xml.close()
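# The .pkl files consumed above are assumed to map test names to dicts with
# CLASS_NAME/TIME/RESULT/MESSAGE/TRACE keys; a minimal sketch of the
# producing side (the helper name is hypothetical):
import cPickle

def write_report(path, results):
	# results: {test_name: {"CLASS_NAME": str, "TIME": float,
	#                       "RESULT": "PASS"|"FAIL"|"ERROR",
	#                       "MESSAGE": str, "TRACE": str}}
	f = open(path, "wb")
	cPickle.dump(results, f)
	f.close()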
Example #47
    def exporter_junit(self, test_result_ext, test_suite_properties=None):
        """ Export test results in JUnit XML compliant format
        """
        from junit_xml import TestSuite, TestCase
        test_suites = []
        test_cases = []

        toolchains = sorted(test_result_ext.keys())
        for toolchain in toolchains:
            targets = sorted(test_result_ext[toolchain].keys())
            for target in targets:
                test_cases = []
                tests = sorted(test_result_ext[toolchain][target].keys())
                for test in tests:
                    test_results = test_result_ext[toolchain][target][test]
                    test_ids = sorted(test_results.keys())
                    for test_no in test_ids:
                        test_result = test_results[test_no]
                        name = test_result['test_description']
                        classname = 'test.%s.%s.%s'% (target, toolchain, test_result['test_id'])
                        elapsed_sec = test_result['elapsed_time']
                        _stdout = test_result['single_test_output']
                        _stderr = ''
                        # Test case
                        tc = TestCase(name, classname, elapsed_sec, _stdout, _stderr)
                        # Test case extra failure / error info
                        if test_result['single_test_result'] == 'FAIL':
                            message = test_result['single_test_result']
                            tc.add_failure_info(message, _stdout)
                        elif test_result['single_test_result'] != 'OK':
                            message = test_result['single_test_result']
                            tc.add_error_info(message, _stdout)

                        test_cases.append(tc)
                ts = TestSuite("test.suite.%s.%s"% (target, toolchain), test_cases, properties=test_suite_properties[target][toolchain])
                test_suites.append(ts)
        return TestSuite.to_xml_string(test_suites)
Example #48
 def failureSimulation(self, failure_interval, wait_before_start, servers,
                       min_servers, servers_to_fail_simultaneously,
                       kill_method, initial_clean, junit_report):
   """
   Run the failure loop for a given role
   """
   if(servers == ""):
     print "--servers not specified!\n\n"
     return
   
   logging.debug("Failure interval: " + str(failure_interval))
   logging.debug("Wait before start: " + str(wait_before_start)) 
   logging.debug("Server list: " + servers)
   logging.debug("Minimum number of servers: " + str(min_servers))
   logging.debug("Number of servers to fail simultaneously: " + str(servers_to_fail_simultaneously)) 
   logging.debug("Kill method: " + kill_method)
   logging.debug("Initial clean: " + str(initial_clean)) 
   logging.debug("Role name: "+ self.roleName)
   logging.debug("Junit Report: "+ junit_report)
   
   testCases = []
   testNum = 0
   
   cluster = Cluster()
   serverArray = servers.split(",")
   cluster = cluster.getServersByHostname(serverArray)
   
   if min_servers >= cluster.getNumServers():
     raise ValueError("Minimum Number of servers is too high!\nMinimum Servers to stay up: "
                      +str(min_servers)+"\nNumber of "+self.roleName+" servers in cluster: "+str(cluster.getNumServers()))
   
   if servers_to_fail_simultaneously > cluster.getNumServers() - min_servers:
     raise ValueError("--servers_to_fail_simultaneously is set too high")
     
   if initial_clean:
     cluster.cleanProcess(self.roleName)
   
   while True:
     start = time()
     logging.debug("Sleeping for "+str(failure_interval)+" seconds")
     sleep(failure_interval)
     
     #pick random servers to kill
     serversToKill = sample(serverArray, servers_to_fail_simultaneously)
     logging.debug("Servers selected to kill: "+ ','.join(serversToKill))
     
     #Stop the running process based on kill_method
     for hostname in serversToKill:
       if kill_method == "restart" :
         logging.debug("Shutting down "+self.roleName + " on " +hostname)
         cluster.shutdownProcessOnHost(self.roleName, hostname)
       elif kill_method == "kill":
         logging.debug("Killing "+self.roleName + " on " +hostname)
         cluster.killProcessOnHost(self.roleName, hostname)
       else:
         if randint(0,1) == 0:
           logging.debug("Shutting down "+self.roleName + " on " +hostname)
           cluster.shutdownProcessOnHost(self.roleName, hostname)
         else:
           logging.debug("Killing "+self.roleName + " on " +hostname)
           cluster.killProcessOnHost(self.roleName, hostname)
     
     #Ensure the process has stopped
     for hostname in serversToKill:
       #Create basis for test case
       tc = TestCase('Test'+str(testNum), self.roleName+'FailureSimulator', time()-start, 
                     'Shutting down '+self.roleName+" with kill_method "+kill_method+" on host "+hostname, '')
       #If the process is still running, then try killing it one more time
       if(cluster.isProcessRunningOnHost(self.roleName, hostname)):
         logging.debug("Killing "+self.roleName + " on " +hostname+" one last time")
         cluster.killProcessOnHost(self.roleName, hostname)
         #If the process is *still* running then report a failure
         if(cluster.isProcessRunningOnHost(self.roleName, hostname)):
            tc.add_failure_info(self.roleName+" process is still running on "+hostname, "")
       testCases.append(tc)
       testNum+=1
       
     #Start the process again
     start = time()
     sleep(wait_before_start)
     for hostname in serversToKill:
       logging.debug("Starting " + self.roleName + " on " + hostname)
       cluster.startProcessOnHost(self.roleName, hostname)
     
     #Ensure the process has started, otherwise report a failure
     for hostname in serversToKill:
       tc = TestCase('Test'+str(testNum), self.roleName+'FailureSimulator', time()-start, 
                     'Starting '+self.roleName+" on host: "+hostname, '')
        if( not cluster.isProcessRunningOnHost(self.roleName, hostname)):
          tc.add_failure_info(self.roleName+" process failed to start on "+hostname, "")
       testCases.append(tc)
       testNum+=1
     
      if junit_report != "":
        logging.debug("Writing junit report to: "+junit_report)
        # Ensure the report's parent directory exists before writing
        report_parent = os.path.dirname(junit_report)
        if report_parent and not os.path.exists(report_parent):
          os.makedirs(report_parent)
        ts = TestSuite(self.roleName+" Test Suite", testCases)
        f = open(junit_report,'w')
        f.write(TestSuite.to_xml_string([ts]))
        f.close()
Example #49
from junit_xml import TestSuite, TestCase

test_cases1 = [TestCase('Test1', 'some.class.name', 123.345, 'I am stdout!', 'I am stderr!')]
test_cases2 = [TestCase('Test2', 'some.class.name', 123.345, 'I am stdout!', 'I am stderr!')]
test_cases3 = [TestCase('Test3', 'some.class.name', 'abc', 'I am stdout!', 'I am stderr!')]  # note: elapsed_sec is expected to be numeric; 'abc' is not a valid duration

ts1 = TestSuite("my test suite", test_cases1)
ts2 = TestSuite("my test suite", test_cases2)
ts3 = TestSuite("my test suite", test_cases3)
# pretty printing is on by default but can be disabled using prettyprint=False
print(TestSuite.to_xml_string([ts1]))
print(TestSuite.to_xml_string([ts2]))
print(TestSuite.to_xml_string([ts3]))
# you can also write the XML to a file without pretty printing it
with open('output1.xml', 'w') as f1:
    TestSuite.to_file(f1, [ts1], prettyprint=False)
with open('output2.xml', 'w') as f2:
    TestSuite.to_file(f2, [ts2], prettyprint=False)

# with open('output3.xml', 'w') as f3:
#     TestSuite.to_file(f3, [ts3], prettyprint=False)
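# to_file also accepts an encoding argument (as used by the
# serialize_and_read helper earlier in this document); a sketch writing
# UTF-8 output:
import codecs
with codecs.open('output_utf8.xml', mode='w', encoding='utf-8') as f4:
    TestSuite.to_file(f4, [ts1], prettyprint=False, encoding='utf-8')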
Example #50
from junit_xml import  TestCase, TestSuite

caso_prueba = [TestCase('Prueba1', 'some.class.name', 123.345, 'I am stdout', 'I am stderr')]
ts = TestSuite("My test case", caso_prueba)
print(TestSuite.to_xml_string([ts]))
Example #51
    >>> Running case #1: 'manifest'...
    >>> 'manifest': 1 passed, 0 failed

    >>> Running case #2: 'test_manifest_fragment'...

    >>> Running case #3: 'test_firware_fragment'...
    >>> 'test_firware_fragment': 1 passed, 0 failed

    >>> Running case #4: 'test_keytable'...
    dummy output
    >>> 'test_keytable': 0 passed, 1 failed

    >>> Test cases: 4 passed, 0 failed

    """
    print TestSuite.to_xml_string([TestSuite("name", parse_result(result))])

    result = \
    """
    {{__testcase_count;4}}
    >>> Running 4 test cases...

    >>> Running case #1: 'simple_get_hash'...
    {{__testcase_start;simple_get_hash}}

    >>> Running case #2: 'simple_get_date'...
    {{__testcase_start;simple_get_date}}
    {{__testcase_finish;simple_get_date;1;0}}
    >>> 'simple_get_date': 1 passed, 0 failed

    >>> Running case #3: 'simple_get_fragment'...
Example #52
  def __init__ (self,
                cmd_list,
                nprocs=1,
                out=sys.stdout,
                log=None,
                verbosity=DEFAULT_VERBOSITY,
                output_junit_xml=False) :
    if (log is None) : log = null_out()
    self.out = multi_out()
    self.log = log
    self.out.register("stdout", out)
    self.out.register("log", log)
    self.verbosity = verbosity
    self.quiet = (verbosity == 0)
    self.results = []

    # Filter cmd list for duplicates.
    self.cmd_list = []
    for cmd in cmd_list :
      if (not cmd in self.cmd_list) :
        self.cmd_list.append(cmd)
      else :
        print >> self.out, "Test %s repeated, skipping"%cmd

    # Set number of processors.
    if (nprocs is Auto) :
      nprocs = cpu_count()
    nprocs = min(nprocs, len(self.cmd_list))

    # Starting summary.
    if (self.verbosity > 0) :
      print >> self.out, "Running %d tests on %s processors:"%(len(self.cmd_list), nprocs)
      for cmd in self.cmd_list:
        print >> self.out, "  %s"%cmd
      print >> self.out, ""

    t_start = time.time()
    if nprocs > 1:
      # Run the tests with multiprocessing pool.
      pool = Pool(processes=nprocs)
      for command in self.cmd_list:
        pool.apply_async(
          run_command,
          [command, verbosity, out],
          callback=self.save_result)
      try:
        pool.close()
      except KeyboardInterrupt:
        print >> self.out, "Caught KeyboardInterrupt, terminating"
        pool.terminate()
      finally:
        pool.join()
    else:
      # Run tests serially.
      for command in self.cmd_list:
        rc = run_command(command, verbosity=verbosity, out=out)
        self.save_result(rc)

    # Print ending summary.
    t_end = time.time()
    print >> self.out, "="*80
    print >> self.out, ""
    print >> self.out, "Tests finished. Elapsed time: %.2fs" %(t_end-t_start)
    print >> self.out, ""
    extra_stderr = 0
    test_cases = []
    # Process results for errors and warnings.
    extra_stderr = len([result for result in self.results if result.stderr_lines])
    longjobs = [result for result in self.results if result.wall_time > MAX_TIME]
    warnings = [result for result in self.results if self.check_alert(result) == 1]
    failures = [result for result in self.results if self.check_alert(result) == 2]
    self.finished = len(self.results)
    self.failure = len(failures)
    self.warning = len(warnings)

    # Output JUnit XML
    if output_junit_xml:
      from junit_xml import TestSuite, TestCase
      for result in self.results:
        tc = TestCase(name=result.command,
                      classname=result.command,
                      elapsed_sec=result.wall_time,
                      stdout='\n'.join(result.stdout_lines),
                      stderr='\n'.join(result.stderr_lines))
        if result.return_code != 0:
          tc.add_failure_info(message='exit code %d' %result.return_code)
        #if len(result.stderr_lines):
          #tc.add_error_info(output='\n'.join(result.stderr_lines))
        test_cases.append(tc)
      ts = TestSuite("libtbx.run_tests_parallel", test_cases=test_cases)
      with open('output.xml', 'w') as f:
        print >> f, TestSuite.to_xml_string([ts], prettyprint=True)

    # Run time distribution.
    if (libtbx.env.has_module("scitbx")) :
      from scitbx.array_family import flex
      print >> self.out, "Distribution of test runtimes:"
      hist = flex.histogram(flex.double([result.wall_time for result in self.results]), n_slots=10)
      hist.show(f=self.out, prefix="  ", format_cutoffs="%.1fs")
      print >> self.out, ""

    # Long job warning.
    if longjobs:
      print >> self.out, ""
      print >> self.out, "Warning: the following jobs took at least %d seconds:"%MAX_TIME
      for result in sorted(longjobs, key=lambda result:result.wall_time):
        print >> self.out, "  %s: %.1fs"%(result.command, result.wall_time)
      print >> self.out, "Please try to reduce overall runtime - consider splitting up these tests."

    # Failures.
    if failures:
      print >> self.out, ""
      print >> self.out, "Error: the following jobs returned non-zero exit codes or suspicious stderr output:"
      print >> self.out, ""
      for result in warnings:
        self.display_result(result, alert=1, out=self.out, log_return=self.out, log_stderr=self.out)
      for result in failures:
        self.display_result(result, alert=2, out=self.out, log_return=self.out, log_stderr=self.out)
      print >> self.out, ""
      print >> self.out, "Please verify these tests manually."
      print >> self.out, ""

    # Summary
    print >> self.out, "Summary:"
    print >> self.out, "  Tests run                    :",self.finished
    print >> self.out, "  Failures                     :",self.failure
    print >> self.out, "  Warnings (possible failures) :",self.warning
    print >> self.out, "  Stderr output (discouraged)  :",extra_stderr
    if (self.finished != len(self.cmd_list)) :
      print >> self.out, "*" * 80
      print >> self.out, "  WARNING: NOT ALL TESTS FINISHED!"
      print >> self.out, "*" * 80
Example #53
def exporter_testcase_junit(test_result_ext, test_suite_properties=None):
    """! Export test results in JUnit XML compliant format
    @param test_result_ext Extended report from Greentea
    @param test_spec Dictionary of test build names to test suite properties
    @details This function will import junit_xml library to perform report conversion
    @return String containing Junit XML formatted test result output
    """
    from junit_xml import TestSuite, TestCase

    test_suites = []

    for target_name in test_result_ext:
        test_results = test_result_ext[target_name]
        for test_suite_name in test_results:
            test = test_results[test_suite_name]

            # tc_elapsed_sec = test['elapsed_time']
            tc_stdout = str() #test['single_test_output']

            try:
                tc_stdout = test['single_test_output'].decode('unicode_escape').encode('ascii','ignore')
            except UnicodeDecodeError as e:
                err_mgs = "(UnicodeDecodeError) exporter_testcase_junit:", str(e)
                tc_stdout = err_mgs
                print err_mgs

            # testcase_result stores info about test case results
            testcase_result = test['testcase_result']
            #   "testcase_result": {
            #       "STRINGS004": {
            #           "duration": 0.009999990463256836,
            #           "time_start": 1453073018.275,
            #           "time_end": 1453073018.285,
            #           "result": 1
            #       },

            test_cases = []

            for tc_name in sorted(testcase_result.keys()):
                duration = testcase_result[tc_name].get('duration', 0.0)
                utest_log = testcase_result[tc_name].get('utest_log', '')
                result_text = testcase_result[tc_name].get('result_text', "UNDEF")

                try:
                    tc_stderr = '\n'.join(utest_log).decode('unicode_escape').encode('ascii','ignore')
                except UnicodeDecodeError as e:
                    err_msg = "(UnicodeDecodeError) exporter_testcase_junit: " + str(e)
                    tc_stderr = err_msg
                    print err_msg

                tc_class = target_name + '.' + test_suite_name

                if result_text == 'SKIPPED':
                    # Skipped test cases do not have logs and we do not want to put
                    # whole log inside JUNIT for skipped test case
                    tc_stderr = str()

                tc = TestCase(tc_name, tc_class, duration, tc_stdout, tc_stderr)

                if result_text == 'FAIL':
                    tc.add_failure_info(result_text)
                elif result_text == 'SKIPPED':
                    tc.add_skipped_info(result_text)
                elif result_text != 'OK':
                    tc.add_error_info(result_text)

                test_cases.append(tc)

            ts_name = target_name
            test_build_properties = test_suite_properties.get(target_name) if test_suite_properties else None
            ts = TestSuite(ts_name, test_cases, properties=test_build_properties)
            test_suites.append(ts)

    return TestSuite.to_xml_string(test_suites)
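# The properties argument used above renders as a <properties> block of
# <property name="..." value="..."/> elements; a minimal standalone sketch
# with example values:
from junit_xml import TestSuite, TestCase

demo_props = {'build': '1.2.3', 'toolchain': 'GCC_ARM'}  # example values
demo_ts = TestSuite('demo.suite', [TestCase('smoke')], properties=demo_props)
print(TestSuite.to_xml_string([demo_ts]))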
Example #54
def get_junit_xml_string(n_passes=1, n_fails=1, n_skips=1, n_errors=1):
    cases = _gen_cases(n_passes, n_fails, n_skips, n_errors)
    suite = TestSuite("fake-junit-xml-suite", cases)
    return TestSuite.to_xml_string([suite])
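# _gen_cases is referenced above but not shown; a plausible reconstruction
# emitting the four outcome kinds:
from junit_xml import TestCase

def _gen_cases(n_passes, n_fails, n_skips, n_errors):
    cases = [TestCase('pass%d' % i) for i in range(n_passes)]
    for i in range(n_fails):
        tc = TestCase('fail%d' % i)
        tc.add_failure_info('failed')
        cases.append(tc)
    for i in range(n_skips):
        tc = TestCase('skip%d' % i)
        tc.add_skipped_info('skipped')
        cases.append(tc)
    for i in range(n_errors):
        tc = TestCase('error%d' % i)
        tc.add_error_info('errored')
        cases.append(tc)
    return cases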
Example #55
 def tests_finished(self):
     print(TestSuite.to_xml_string(self.test_suites))
Example #56
  def __init__ (self,
                cmd_list,
                nprocs=1,
                out=sys.stdout,
                log=None,
                quiet=False,
                output_junit_xml=False) :
    if (log is None) : log = null_out()
    self.out = multi_out()
    self.log = log
    self.out.register("stdout", out)
    self.out.register("log", log)
    self.quiet = quiet
    self.cmd_list = []
    for cmd in cmd_list :
      if (not cmd in self.cmd_list) :
        self.cmd_list.append(cmd)
      else :
        print >> self.out, "  test %s repeated, skipping" % cmd
    nprocs = min(nprocs, len(self.cmd_list))
    print >> self.out, "\n  Starting command list"
    print >> self.out, "    NProcs :",nprocs
    print >> self.out, "    Cmds   :",len(self.cmd_list)
    t_start = time.time()
    if nprocs>1:
      pool = Pool(processes=nprocs)
    self.results = []
    for command in self.cmd_list:
      if nprocs>1:
        pool.apply_async(
          run_command,
          [command, (not quiet), out],
          callback=self.save_result)
      else:
        rc = run_command(command, verbose=(not quiet), out=out)
        self.save_result(rc)
    if nprocs>1:
      try :
        try :
          pool.close()
        except KeyboardInterrupt :
          print >> self.out, "Caught KeyboardInterrupt, terminating"
          pool.terminate()
      finally :
        pool.join()
      print >> self.out, '\nProcesses have joined : %d\n' % len(self.results)
    t_end = time.time()
    print >> self.out, ""
    print >> self.out, "Elapsed time: %.2fs" %(t_end-t_start)
    print >> self.out, ""
    finished = 0
    warning = 0
    extra_stderr = 0
    failure = 0
    failures = []
    long_jobs = []
    long_runtimes = []
    runtimes = []
    if output_junit_xml:
      from junit_xml import TestSuite, TestCase
      test_cases = []
    for result in self.results :
      finished += 1
      runtimes.append(result.wall_time)
      if (result.return_code != 0) :
        failure += 1
        failures.append(result)
      else :
        if (len(result.error_lines) != 0) :
          warning += 1
          failures.append(result)
        if (len(result.stderr_lines) != 0):
          extra_stderr += 1
      if (result.wall_time > max_time) :
        long_jobs.append(result.command)
        long_runtimes.append(result.wall_time)
      if output_junit_xml:
        tc = TestCase(name=result.command,
                      classname=result.command,
                      elapsed_sec=result.wall_time,
                      stdout='\n'.join(result.stdout_lines),
                      stderr='\n'.join(result.stderr_lines))
        if result.return_code != 0:
          tc.add_failure_info(message='exit code %d' %result.return_code)
        #if len(result.stderr_lines):
          #tc.add_error_info(output='\n'.join(result.stderr_lines))
        test_cases.append(tc)

    if output_junit_xml:
      ts = TestSuite("libtbx.run_tests_parallel", test_cases=test_cases)
      with open('output.xml', 'w') as f:
        print >> f, TestSuite.to_xml_string([ts], prettyprint=True)

    if (libtbx.env.has_module("scitbx")) :
      from scitbx.array_family import flex
      print >> self.out, "Distribution of test runtimes:"
      hist = flex.histogram(flex.double(runtimes), n_slots=20)
      hist.show(f=self.out, prefix="  ", format_cutoffs="%.1fs")
      print >> self.out, ""
    if (len(long_jobs) > 0) :
      print >> self.out, ""
      print >> self.out, "WARNING: the following jobs took at least %d seconds:" % \
        max_time
      jobs_and_timings = list(zip(long_jobs, long_runtimes))
      jobs_and_timings.sort(key=lambda x: x[1])
      for cmd, runtime in jobs_and_timings :
        print >> self.out, "  " + cmd + " : %.1fs" % runtime
      print >> self.out, "Please try to reduce overall runtime - consider splitting up these tests."
    if (len(failures) > 0) :
      print >> self.out, ""
      print >> self.out, "ERROR: the following jobs returned non-zero exit codes or suspicious stderr output:"
      for result in failures :
        print >> self.out, ""
        print >> self.out, result.command + "(exit code %d):" % result.return_code
        for line in result.stderr_lines :
          print >> self.out, "  " + line
        for line in result.error_lines :
          print >> self.out, "  " + line
        print >> self.out, ""
      print >> self.out, "Please verify these tests manually."
      print >> self.out, ""
    print >> self.out, "Summary:"
    print >> self.out, "  Tests run                    :",finished
    print >> self.out, "  Failures                     :",failure
    print >> self.out, "  Warnings (possible failures) :",warning
    print >> self.out, "  Stderr output (discouraged)  :",extra_stderr
    if (finished != len(self.cmd_list)) :
      print >> self.out, "*" * 80
      print >> self.out, "  WARNING: NOT ALL TESTS FINISHED!"
      print >> self.out, "*" * 80
Example #57
def exporter_testcase_junit(test_result_ext, test_suite_properties=None):
    """! Export test results in JUnit XML compliant format
    @param test_result_ext Extended report from Greentea
    @param test_suite_properties Data from yotta module.json file
    @details This function will import junit_xml library to perform report conversion
    @return String containing Junit XML formatted test result output
    """
    from junit_xml import TestSuite, TestCase

    ym_name = test_suite_properties.get('name', 'unknown') if test_suite_properties else 'unknown'

    test_suites = []

    for target_name in test_result_ext:
        test_results = test_result_ext[target_name]
        for test_suite_name in test_results:
            test = test_results[test_suite_name]

            # tc_elapsed_sec = test['elapsed_time']
            tc_stdout = ''  #test['single_test_output']
            try:
                tc_stderr = test['single_test_output'].decode('unicode_escape').encode('ascii','ignore')
            except UnicodeDecodeError as e:
                print "exporter_testcase_junit:", str(e)

            # testcase_result stores info about test case results
            testcase_result = test['testcase_result']
            #   "testcase_result": {
            #       "STRINGS004": {
            #           "duration": 0.009999990463256836,
            #           "time_start": 1453073018.275,
            #           "time_end": 1453073018.285,
            #           "result": 1
            #       },

            test_cases = []

            for tc_name in sorted(testcase_result.keys()):
                duration = testcase_result[tc_name].get('duration', 0.0)
                # result = testcase_result[tc_name].get('result', 0)
                # passed = testcase_result[tc_name].get('passed', 0)
                # failed = testcase_result[tc_name].get('failed', 0)
                utest_log = testcase_result[tc_name].get('utest_log', '')
                result_text = testcase_result[tc_name].get('result_text', "UNDEF")

                try:
                    tc_stdout = '\n'.join(utest_log).decode('unicode_escape').encode('ascii','ignore')
                except UnicodeDecodeError as e:
                    print "exporter_testcase_junit:", str(e)

                tc_class = ym_name + '.' + target_name + '.' + test_suite_name
                tc = TestCase(tc_name, tc_class, duration, tc_stdout, tc_stderr)

                message = ''
                if result_text == 'FAIL':
                    tc.add_failure_info(message, tc_stdout)
                elif result_text != 'OK':
                    tc.add_error_info(message, tc_stdout)

                test_cases.append(tc)

            ts_name = ym_name + '.' + target_name
            ts = TestSuite(ts_name, test_cases)
            test_suites.append(ts)

    return TestSuite.to_xml_string(test_suites)
Example #58
        test_name = os.path.splitext(test_file)[0]
        expected_output = test_directory + os.sep + test_name + ".spec"

        if not os.path.isfile(expected_output):
            raise Exception("Could not find expected_output file for test %r" % test_name)

        # Actually run the test
        code, sout, serr = run_with_timeout([executable, test_directory + os.sep + test_file], os.getcwd(), timeout=10)

        if code != 0:
            failed = True
            log_errord(test_name, serr, sout)
        else:
            with open(expected_output, "r") as f:
                expected = f.read()

            if sout != expected:
                failed = True
                log_failed(test_name, sout, expected)
            else:
                log_passed(test_name)

    if xml_out:
        test_suite = TestSuite("Functional Tests", test_cases)
        print TestSuite.to_xml_string([test_suite])

    if failed and not xml_out:
        sys.exit(1)
    else:
        sys.exit(0)
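# run_with_timeout is referenced above but not shown; a plausible
# reconstruction using subprocess plus a kill timer, returning
# (exit_code, stdout, stderr):
import subprocess
import threading

def run_with_timeout(cmd, cwd, timeout=10):
    proc = subprocess.Popen(cmd, cwd=cwd,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    timer = threading.Timer(timeout, proc.kill)
    timer.start()
    try:
        sout, serr = proc.communicate()
    finally:
        timer.cancel()
    return proc.returncode, sout, serr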