Code example #1
import argparse
import platform
import random
from datetime import datetime

from junit_xml import TestCase, TestSuite


def main():
    parser = argparse.ArgumentParser(description='dummy test')
    parser.add_argument('-classes', type=int, default=5, help='number of classes')
    parser.add_argument('-testcases', type=int, default=10, help='number of testcases')
    parser.add_argument('-pass_rate', type=int, default=75, help='pass rate')
    parser.add_argument('-error_rate', type=int, default=20, help='error rate')
    parser.add_argument('-failure_rate', type=int, default=10, help='failure rate')
    parser.add_argument('-skip_rate', type=int, default=10, help='skip rate')
    parser.add_argument('-outputfile', type=str, default='test_results.xml', help='output file')
    parser.add_argument('-print', action='store_true', help='print the test results')
    args = parser.parse_args()

    ts = TestSuite(name='my test suite', hostname=platform.node(), timestamp=datetime.now())
    for i in range(args.classes):
        for j in range(args.testcases):
            tc = TestCase(classname=f"myclass{i}",
                          name=f"mytest{j}",
                          elapsed_sec=random.randint(100, 1000),
                          stdout = "stdout output",
                          stderr = "stderr output")
            if random.randint(0, 100) < args.pass_rate:
                if random.randint(0, 100) < args.error_rate:
                    tc.add_error_info(message=f"error {i} {j}", output="error output message", error_type="ERR1")
                elif random.randint(0, 100) < args.failure_rate:
                    tc.add_failure_info(message=f"failure {i} {j}", output="failure output message", failure_type="FAIL1")
                elif random.randint(0, 100) < args.skip_rate:
                    tc.add_skipped_info(message=f"skipped {i} {j}", output="skipped output message")
            ts.test_cases.append(tc)

    # pretty printing is on by default but can be disabled using prettyprint=False
    if args.print:
        print(TestSuite.to_xml_string([ts]))

    with open(args.outputfile, 'w') as f:
        TestSuite.to_file(f, [ts], prettyprint=True)
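As the comment in the example notes, pretty printing can also be disabled when serializing to a string. A minimal sketch, reusing the ts suite built above:

# compact single-line XML instead of the indented default
compact_xml = TestSuite.to_xml_string([ts], prettyprint=False)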
Code example #2
    def _collect_test_suite(self, scenario_result):
        if isinstance(scenario_result, GeneralError):
            test_case = TestCase("", "")
            test_case.add_error_info(scenario_result.message)
            test_suite = TestSuite("", "")
            test_suite.test_cases.append(test_case)
            return test_suite

        test_suite = TestSuite(scenario_result.name)
        for test_result in scenario_result.test_results:
            test_case = TestCase(test_result.name, test_result.name)
            for result in test_result.results:
                if isinstance(result, Failed):
                    test_case.add_failure_info("ASSERTION {} failed".format(result.pretty_name),
                                               "EXPECTED {}\nGOT {}".format(result.expected,
                                                                            result.actual))
                elif isinstance(result, (Error, ConnectionError)):
                    test_case.add_error_info("ASSERTION {} failed".format(result.pretty_name),
                                             "ERROR {}".format(result.error))
                elif isinstance(result, Passed):
                    pass
                # TODO: What to do below?
                else:
                    raise Exception("Unknown state")
            test_suite.test_cases.append(test_case)
        return test_suite
Code example #3
File: test_exporters.py  Project: yugo-ren/mbed
    def exporter_junit_ioper(self,
                             test_result_ext,
                             test_suite_properties=None):
        from junit_xml import TestSuite, TestCase
        test_suites = []
        test_cases = []

        for platform in sorted(test_result_ext.keys()):
            # {platform: ['Platform', 'Result', 'Scope', 'Description']}
            test_cases = []
            for tr_result in test_result_ext[platform]:
                result, name, scope, description = tr_result

                classname = 'test.ioper.%s.%s.%s' % (platform, name, scope)
                elapsed_sec = 0
                _stdout = description
                _stderr = ''
                # Test case
                tc = TestCase(name, classname, elapsed_sec, _stdout, _stderr)
                # Test case extra failure / error info
                if result == 'FAIL':
                    tc.add_failure_info(description, _stdout)
                elif result == 'ERROR':
                    tc.add_error_info(description, _stdout)
                elif result == 'SKIP':
                    tc.add_skipped_info(description, _stdout)

                test_cases.append(tc)
            ts = TestSuite("test.suite.ioper.%s" % (platform), test_cases)
            test_suites.append(ts)
        return TestSuite.to_xml_string(test_suites)
Code example #4
def generate_junit_xml(inputfile):
    target = None
    suite = None
    infos = []
    errors = []
    testcases = []

    for line in inputfile:
        tag = line[0:3]
        props = line[3:].split(':')
        if tag == "[!]":
            if len(props) == 2:
                if props[0].strip().lower() == "target":
                    target = os.path.basename(props[1].strip())
                elif props[0].strip().lower() == "group":
                    suite = props[1].strip()
                else:
                    infos.append(line)
            else:
                infos.append(line)
        if tag == "[x]":
            errors.append(line)
        if tag == "[+]":
            testcases.append(TestCase(name=props[0].strip(), classname=target, stdout=line))
        if tag == "[-]":
            tc = TestCase(name=props[0].strip(), classname=target)
            tc.add_failure_info(message=props[1].strip(), output=line, failure_type="failed")
            testcases.append(tc)

    ts = TestSuite(name=suite, test_cases=testcases, stdout="\n".join(infos), stderr="\n".join(errors))
    return TestSuite.to_xml_string([ts])
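For reference, a hypothetical input showing the tagged log format this parser expects (the sample lines below are invented from the tag handling above, not taken from a real log; os, TestCase and TestSuite are assumed to be imported as in the snippet):

import io

sample = io.StringIO(
    "[!] target: build/app.elf\n"   # sets the classname via os.path.basename
    "[!] group: smoke\n"            # sets the suite name
    "[+] test_boot: ok\n"           # a passing test case
    "[-] test_net: timeout\n"       # a failing case; "timeout" becomes the failure message
)
print(generate_junit_xml(sample))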
Code example #5
def test_init_illegal_unicode_char():
    tc = Case("Failure-Message")
    tc.add_failure_info(u("failure message with illegal unicode char: [\x02]"))
    ts, tcs = serialize_and_read(Suite("test", [tc]))[0]
    verify_test_case(
        tcs[0], {"name": "Failure-Message"},
        failure_message=u("failure message with illegal unicode char: []"))
Code example #6
def write_junitxml(output_junitxml, results):
    """Write output file as JUnitXML format"""
    if not JUNIT_XML_FOUND:
        log = logging.getLogger(__name__ + ".write_junitxml")
        log.warning('junitxml output disabled: the `junit_xml` python module '
                    'is missing.')
        return
    test_cases = []
    duration_re = re.compile(r'([0-9]+):([0-9]+):([0-9]+)\.([0-9]+)')
    for vitem in results:
        if vitem.get('Validations'):
            parsed_duration = 0
            test_duration = vitem.get('Duration', '')
            matched_duration = duration_re.match(test_duration)
            if matched_duration:
                parsed_duration = (int(matched_duration[1])*3600
                                   + int(matched_duration[2])*60
                                   + int(matched_duration[3])
                                   + float('0.{}'.format(matched_duration[4])))

            test_stdout = vitem.get('Status_by_Host', '')

            test_case = TestCase('validations', vitem['Validations'],
                                 parsed_duration, test_stdout)
            if vitem['Status'] == 'FAILED':
                test_case.add_failure_info('FAILED')
            test_cases.append(test_case)

    ts = TestSuite("Validations", test_cases)
    with open(output_junitxml, 'w') as output:
        output.write(to_xml_report_string([ts]))
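To make the duration arithmetic concrete, a hypothetical Duration value of "0:01:30.500" parses to 90.5 seconds:

m = duration_re.match("0:01:30.500")
# 0*3600 + 1*60 + 30 + float("0.500") == 90.5
assert int(m[1]) * 3600 + int(m[2]) * 60 + int(m[3]) + float("0.{}".format(m[4])) == 90.5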
Code example #7
File: mbed_report_api.py  Project: c1728p9/greentea
def exporter_junit(test_result_ext, test_suite_properties=None):
    """! Export test results in JUnit XML compliant format
    @details This function will import junit_xml library to perform report conversion
    @return String containing Junit XML formatted test result output
    """
    from junit_xml import TestSuite, TestCase

    test_suites = []
    test_cases = []

    targets = sorted(test_result_ext.keys())
    for target in targets:
        test_cases = []
        tests = sorted(test_result_ext[target].keys())
        for test in tests:
            test_results = test_result_ext[target][test]
            classname = 'test.%s.%s' % (target, test)
            elapsed_sec = test_results['elapsed_time']
            _stdout = test_results['single_test_output']
            _stderr = ''
            # Test case
            tc = TestCase(test, classname, elapsed_sec, _stdout, _stderr)
            # Test case extra failure / error info
            if test_results['single_test_result'] == 'FAIL':
                message = test_results['single_test_result']
                tc.add_failure_info(message, _stdout)
            elif test_results['single_test_result'] != 'OK':
                message = test_results['single_test_result']
                tc.add_error_info(message, _stdout)

            test_cases.append(tc)
        ts = TestSuite("test.suite.%s" % target, test_cases)
        test_suites.append(ts)
    return TestSuite.to_xml_string(test_suites)
Code example #8
File: nautest.py  Project: samgiles/naulang
 def log_failed(test_name, actual, expected):
     if xml_out:
         tc = TestCase(test_name, executable, 0, actual, "")
         tc.add_failure_info(message="Test Failed\nExpected:\n%s\nActual:\n%s" % (expected, actual))
         test_cases.append(tc)
     else:
         print "Test %s failed:\nexpected:\n%s\nactual:\n%s\n---------\n" % (test_name, expected, actual)
Code example #9
def run_test(instance, manifest, action):

    start = datetime.now()
    result = {}
    if 'launch' == action.name:
        pass
    else:
        instance.run_workflow(action.name, parameters=action.parameters)
        sleep(10) # Give some time to update status

    run_ok = instance.ready(timeout=120000)
    delta = duration(start)
    test = TestCase(action.name, manifest.name, delta, 'Workflow launched:' + manifest.source, '')

    if not run_ok:
        test.add_failure_info('Action ' + action.name + ' reached timeout before got "Running" state')
        test_cases.append(test)
        return False
    test_cases.append(test)

    log("Comparing results for action: %s" % action.name,'debug')
    for key in action.expected.keys():
        try:
            result = compare(action.expected[key], instance.returnValues[key])
        except KeyError:
            result['status'] = False
            result['details'] = "No key '%s' in returnValues" % key
        test = TestCase(action.name+'.'+key, manifest.name, 0, result['details'], '')
        if not result['status']:
            test.add_failure_info(key + ": " + result['details'])
        test_cases.append(test)
    return True
Code example #10
 def handle_event(self, context: ExecutionContext,
                  event: events.ExecutionEvent) -> None:
     if isinstance(event, events.Initialized):
         self.start_time = event.start_time
     if isinstance(event, events.AfterExecution):
         test_case = TestCase(
             f"{event.result.method} {event.result.path}",
             elapsed_sec=event.elapsed_time,
             allow_multiple_subelements=True,
         )
         if event.status == Status.failure:
             checks = deduplicate_failures(event.result.checks)
             for idx, check in enumerate(checks, 1):
                 # `check.message` is always not empty for events with `failure` status
                 test_case.add_failure_info(
                     message=f"{idx}. {check.message}")
         if event.status == Status.error:
             test_case.add_error_info(
                 message=event.result.errors[-1].exception,
                 output=event.result.errors[-1].exception_with_traceback)
         self.test_cases.append(test_case)
     if isinstance(event, events.Finished):
         test_suites = [
             TestSuite("schemathesis",
                       test_cases=self.test_cases,
                       hostname=platform.node())
         ]
         to_xml_report_file(file_descriptor=self.file_handle,
                            test_suites=test_suites,
                            prettyprint=True)
Code example #11
File: report.py  Project: tronxd/checkov
    def get_test_suites(self, use_bc_ids=False) -> List[TestSuite]:
        test_cases = defaultdict(list)
        test_suites = []
        records = self.passed_checks + self.failed_checks + self.skipped_checks
        for record in records:
            check_name = f"{record.get_output_id(use_bc_ids)}/{record.check_name}"

            test_name = f"{self.check_type} {check_name} {record.resource}"
            test_case = TestCase(name=test_name,
                                 file=record.file_path,
                                 classname=record.check_class)
            if record.check_result["result"] == CheckResult.FAILED:
                if record.file_path and record.file_line_range:
                    test_case.add_failure_info(
                        f"Resource {record.resource} failed in check {check_name} - {record.file_path}:{record.file_line_range} - Guideline: {record.guideline}"
                    )
                else:
                    test_case.add_failure_info(
                        f"Resource {record.resource} failed in check {check_name}"
                    )
            if record.check_result["result"] == CheckResult.SKIPPED:
                test_case.add_skipped_info(
                    f'Resource {record.resource} skipped in check {check_name} \n Suppress comment: {record.check_result["suppress_comment"]} - Guideline: {record.guideline}'
                )

            test_cases[check_name].append(test_case)
        for key in test_cases.keys():
            test_suites.append(
                TestSuite(
                    name=key,
                    test_cases=test_cases[key],
                    package=test_cases[key][0].classname,
                ))
        return test_suites
Code example #12
 def Reg_Create_Snow_order(self):
     test_case_result = TestCase(name="Reg_Create_Snow_order", classname=self.__class__.__name__)
     global serviceInstanceName
     provider = "snow"
     passed = False
     try:
         for i in range(1, 2):
             print "Placing order: " + str(i + 1)
             epoch_time = str(int(time.time()))
             serviceInstanceName = "testAPISnow" + epoch_time
             print "serviceInstanceName : " + str(serviceInstanceName)
             # To avoid 429 HTTP server error, we have to add a sleep between http requests
             time.sleep(4)
             print 111
             orderNumber, passed = self.api_client.createOrder(orderURL, serviceInstanceName, provider, i + 1)
             print 2222
             print orderNumber
             print 3333
             print passed
             time.sleep(4)
             if passed:
                 print "Approving order: " + str(orderNumber)
                 print 444
                 passed = self.api_client.approveOrder(orderNumber)
                 print 555
             if not passed:
                 responseBody = "Failure to create order"
                 print "Softlayer order creation failed. Approve will be skipped"
                 test_case_result.add_failure_info("Input " + str(i + 1) + " failed", responseBody)
                 break
     except:
         print "An Error Occured"
         passed=False
     status['APITest'] = passed
     return passed, test_case_result
Code example #13
File: runner.py  Project: BuzzBurrowes/Arduino
 def run_tests(self):
     test_cases = []
     for test in self.tests:
         desc = test['desc']
         name = test['name']
         index = test['id']
         test_case = TestCase(name, self.name)
         if '[.]' in desc:
             print('skipping test "{}"'.format(name))
             test_case.add_skipped_info(message="Skipped test marked with [.]")
         else:
             test_output = StringIO()
             self.sp.logfile = test_output
             t_start = time.time()
             result = self.run_test(index)
             t_stop = time.time()
             self.sp.logfile = None
             test_case.elapsed_sec = t_stop - t_start
             debug_print('test output was:')
             debug_print(test_output.getvalue())
             if result == BSTestRunner.SUCCESS:
                 test_case.stdout = test_output.getvalue()
                 print('test "{}" passed'.format(name))
             else:
                 print('test "{}" failed'.format(name))
                 test_case.add_failure_info('Test failed', output=test_output.getvalue())
             test_output.close()
          test_cases.append(test_case)
     return TestSuite(self.name, test_cases)
Code example #14
File: nmos-test.py  Project: thosil/nmos-testing
def write_test_results(results, args):
    exit_code = ExitCodes.OK
    test_cases = []
    for test_result in results["result"]:
        test_case = TestCase(test_result.name,
                             elapsed_sec=test_result.elapsed_time,
                             timestamp=test_result.timestamp)
        if test_result.name in args.ignore or test_result.state in [
                TestStates.DISABLED, TestStates.UNCLEAR, TestStates.MANUAL,
                TestStates.NA, TestStates.OPTIONAL
        ]:
            test_case.add_skipped_info(test_result.detail)
        elif test_result.state in [TestStates.WARNING, TestStates.FAIL]:
            test_case.add_failure_info(test_result.detail,
                                       failure_type=str(test_result.state))
            if test_result.state == TestStates.FAIL:
                exit_code = max(exit_code, ExitCodes.FAIL)
            elif test_result.state == TestStates.WARNING:
                exit_code = max(exit_code, ExitCodes.WARNING)
        elif test_result.state != TestStates.PASS:
            test_case.add_error_info(test_result.detail,
                                     error_type=str(test_result.state))
        test_cases.append(test_case)

    ts = TestSuite(results["name"] + ": " + results["base_url"], test_cases)
    with open(args.output, "w") as f:
        TestSuite.to_file(f, [ts], prettyprint=False)
        print(" * Test results written to file: {}".format(args.output))
    return exit_code
Code example #15
File: test_exporters.py  Project: jaustin/mbed
    def exporter_junit_ioper(self, test_result_ext, test_suite_properties=None):
        from junit_xml import TestSuite, TestCase

        test_suites = []
        test_cases = []

        for platform in sorted(test_result_ext.keys()):
            # {platform: ['Platform', 'Result', 'Scope', 'Description']}
            test_cases = []
            for tr_result in test_result_ext[platform]:
                result, name, scope, description = tr_result

                classname = "test.ioper.%s.%s.%s" % (platform, name, scope)
                elapsed_sec = 0
                _stdout = description
                _stderr = ""
                # Test case
                tc = TestCase(name, classname, elapsed_sec, _stdout, _stderr)
                # Test case extra failure / error info
                if result == "FAIL":
                    tc.add_failure_info(description, _stdout)
                elif result == "ERROR":
                    tc.add_error_info(description, _stdout)
                elif result == "SKIP" or result == "NOT_SUPPORTED":
                    tc.add_skipped_info(description, _stdout)

                test_cases.append(tc)
            ts = TestSuite("test.suite.ioper.%s" % (platform), test_cases)
            test_suites.append(ts)
        return TestSuite.to_xml_string(test_suites)
Code example #16
File: helper.py  Project: viennt01/backend
def junit_report(results):
    test_cases = []
    for result in results:
        if result["result"] != 'Failed' and result["data"] != 'Failed':
            tc = TestCase(
                name="browser: {}, version: {}".format(result["browser"],
                                                       result["version"]),
                classname='TestCaseNum {}'.format(result["testCaseNum"]),
                elapsed_sec=round(result["elapsedTime"], 1),
                stdout="result: {}, data: {}".format(result["result"],
                                                     result["data"]))
        else:
            tc = TestCase(
                name="browser: {}, version: {}".format(result["browser"],
                                                       result["version"]),
                classname='TestCaseNum {}'.format(result["testCaseNum"]),
                elapsed_sec=round(result["elapsedTime"], 1),
                stdout="result: Failed, data: Failed")
            tc.add_failure_info("Failed")
        test_cases.append(tc)
    ts = TestSuite("autoupdate", test_cases)
    with open(
            os.path.abspath(os.path.dirname(__file__)) + '/junit_output.xml',
            'w') as f:
        TestSuite.to_file(f, [ts])
Code example #17
def generate_junit_report_from_cfn_nag(report):

    """Generate test cases from a cfn_nag report"""

    total_failures = 0
    test_cases = []

    for file_findings in report:
        for violation in file_findings["file_results"]['violations']:
            total_failures += 1
            for i, resource_id in enumerate(violation['logical_resource_ids']):

                test_case = TestCase("%s - %s" %
                                     (violation['id'], violation['message']),
                                     classname=resource_id)

                test_case.add_failure_info(
                    output="%s#L%s" %
                    (file_findings['filename'], violation['line_numbers'][i]))

                test_cases.append(test_case)

    test_suite = TestSuite("cfn-nag test suite", test_cases)

    if total_failures > 0:
        f = open("CFN_NAG_FAILURE", "a")
        f.close()

    return TestSuite.to_xml_string([test_suite], prettyprint=False)
Code example #18
File: scalebench.py  Project: summergirl21/Barrelfish
 def write_testcase(self, build, machine, test, path, passed, start_ts,
                    end_ts):
     delta = end_ts - start_ts
     tc = {
         'name': test.name,
         'class': machine.getName(),
         'time_elapsed': delta.total_seconds(),
         'stdout': '\n'.join(self._harness.process_output(test, path)),
         'stderr': "",
         'passed': passed
     }
     if have_junit_xml:
         ju_tc = TestCase(
             tc['name'],
             tc['class'],
             tc['time_elapsed'],
             tc['stdout'],
         )
         if not passed:
             errors = self._harness.extract_errors(test, path)
             errorstr = 'Failed'
             if errors is not None and len(errors) > 0:
                 errorstr += ': ' + ''.join(
                     [unicode(l, errors='replace') for l in errors])
             ju_tc.add_failure_info(message=errorstr)
         return ju_tc
     else:
         return tc
Code example #19
File: test_data.py  Project: pombredanne/ci-tools
    def simple_report(self):
        """empty test report"""
        test_case_1 = TestCase("testcase1", elapsed_sec=1.5)

        test_case_2 = TestCase("testcase2", elapsed_sec=0.5)
        test_case_2.add_skipped_info("was skipped")

        test_case_3 = TestCase("testcase3", elapsed_sec=1.0)
        test_case_3.add_failure_info("failed")

        test_case_4 = TestCase("testcase4", elapsed_sec=0.25)
        test_case_4.add_error_info("errored")

        test_case_5 = TestCase("testcase5", elapsed_sec=0.1)

        test_cases = [
            test_case_1,
            test_case_2,
            test_case_3,
            test_case_4,
            test_case_5
        ]
        test_suites = [
            TestSuite('testsuite1', test_cases, timestamp=datetime.datetime.utcnow())
            ]
        return TestReport(NAME, {"module": test_suites}, BUILD_NUMBER, True)
Code example #20
 def test_init_failure_output(self):
     tc = TestCase('Failure-Output')
     tc.add_failure_info(output="I failed!")
     (ts, tcs) = serialize_and_read(TestSuite('test', [tc]))[0]
     verify_test_case(self,
                      tcs[0], {'name': 'Failure-Output'},
                      failure_output="I failed!")
Code example #21
 def test_init_failure(self):
     tc = TestCase('Failure-Message-and-Output')
     tc.add_failure_info("failure message", "I failed!")
     (ts, tcs) = serialize_and_read(TestSuite('test', [tc]))[0]
     verify_test_case(
         self, tcs[0], {'name': 'Failure-Message-and-Output'},
         failure_message="failure message", failure_output="I failed!")
Code example #22
 def test_multiple_failures(self):
     """Tests multiple failures in one test case"""
     tc = TestCase('Multiple failures', allow_multiple_subelements=True)
     tc.add_failure_info("First failure", "First failure message")
     (_, tcs) = serialize_and_read(TestSuite('test', [tc]))[0]
     verify_test_case(self,
                      tcs[0], {'name': 'Multiple failures'},
                      failures=[{
                          "message": "First failure",
                          "output": "First failure message",
                          "type": "failure"
                      }])
     tc.add_failure_info("Second failure", "Second failure message")
     (_, tcs) = serialize_and_read(TestSuite('test', [tc]))[0]
     verify_test_case(self,
                      tcs[0], {'name': 'Multiple failures'},
                      failures=[{
                          "message": "First failure",
                          "output": "First failure message",
                          "type": "failure"
                      }, {
                          "message": "Second failure",
                          "output": "Second failure message",
                          "type": "failure"
                      }])
Code example #23
File: output.py  Project: zuBux/drydock
 def write_xml_file(self):
     test_cases = []
     if os.path.isfile(self.output):
         logging.warn("File exists,deleting...")
         os.remove(self.output)
     with open(self.output, 'a') as f:
         for _, elements in self.log.items():
             for j in elements.viewitems():
                 if j[0] == 'date' or j[0] == 'profile' or j[0] == 'score':
                     # we really don't care
                     pass
                 else:
                     try:
                         test_case = TestCase(j[0], j[1]['descr'], '', '',
                                              '')
                         if j[1]['status'] == 'Fail':
                             test_case.add_failure_info(j[1]['output'])
                         else:
                             test_case = TestCase(j[0], '', '', '', '')
                         test_cases.append(test_case)
                     except KeyError:
                         # the world's smallest violin playin' for KeyError
                         pass
         ts = [TestSuite("Docker Security Benchmarks", test_cases)]
         TestSuite.to_file(f, ts)
Code example #24
File: cli_utils.py  Project: shreegowtham27/DrHeader
def file_junit_report(rules, report):
    """
    Output file Junit xml report

    :param rules: set of rules to verify
    :param report: report generated by drheader
    :return: None
    """

    test_cases = []

    for header in rules:
        tc = []
        for item in report:
            if item.get('rule') == header:
                violation = item.copy()
                violation.pop('rule')
                message = violation.pop('message')
                tc = TestCase(name=header + ' :: ' + message)
                tc.add_failure_info(message, violation)
                test_cases.append(tc)
        if not tc:
            tc = TestCase(name=header)
            test_cases.append(tc)

    os.makedirs('reports', exist_ok=True)
    with open('reports/junit.xml', 'w') as f:
        TestSuite.to_file(f,
                          [TestSuite(name='DrHeader', test_cases=test_cases)],
                          prettyprint=False)
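A hypothetical invocation, with input shapes inferred from how the function reads them (the header names and fields below are illustrative, not the actual drheader schema):

rules = ['X-Frame-Options', 'X-XSS-Protection']
report = [{'rule': 'X-Frame-Options',
           'message': 'Header not included in response',
           'severity': 'high'}]
file_junit_report(rules, report)  # writes reports/junit.xml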
Code example #25
File: Runner.py  Project: Light07/PythonAutomation
 def generateJUnitReport(self, lstRunResult, runResultDir):
     #create junit xml report file use junit-xml 1.4   pip install junit-xml
     resultFileName = runResultDir + os.path.sep + 'RunResult.xml'
     previousCaseModuleName = ''
     rowIndex = 0
     lstTestSuites = []
     testSuite = []
     for runResult in lstRunResult:
         #runResult (sheetName, moduleName, testCaseID, runResult, timeElapsedSec, failureMessage)
         #test
         testCaseName = runResult[2]
         className = runResult[1] + '.' + runResult[2]
         timeElapsedSec = runResult[4]
         failureMessage = runResult[5]
         testCase = TestCase(testCaseName, className, timeElapsedSec)
         testCase.add_failure_info(None, failureMessage)
         currTestCaseModuleName = runResult[1]
         if not currTestCaseModuleName == previousCaseModuleName:
             testSuite = TestSuite(currTestCaseModuleName)
             lstTestSuites.append(testSuite)
         testSuite.test_cases.append(testCase)
     #print TestSuite.to_xml_string(lstTestSuites)
     #Write the xml content to result file
     with open(runResultDir + os.path.sep + 'Result.xml', 'w') as f:
         TestSuite.to_file(f, lstTestSuites)
Code example #26
File: junitxml_utils.py  Project: sam-falvo/tetra
def _gen_cases(n_passes, n_fails, n_skips, n_errors):
    result = []
    for i in range(n_passes):
        case = TestCase(name='TestPassed%s' % i,
                        classname='generated.xml.test.case.passes',
                        elapsed_sec=rand_duration())
        result.append(case)

    for i in range(n_skips):
        case = TestCase(name='TestSkipped%s' % i,
                        classname='generated.xml.test.case.skips',
                        elapsed_sec=rand_duration())
        case.add_skipped_info(message=rand_string('skipped!'))
        result.append(case)

    for i in range(n_fails):
        case = TestCase(name='TestFailed%s' % i,
                        classname='generated.xml.test.case.fails',
                        elapsed_sec=rand_duration())
        case.add_failure_info(message=rand_string('failure!'))
        result.append(case)

    for i in range(n_errors):
        case = TestCase(name='TestErrored%s' % i,
                        classname='generated.xml.test.case.errors',
                        elapsed_sec=rand_duration())
        case.add_error_info(message=rand_string('error!'))
        result.append(case)

    return result
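A sketch of how the generator above might be wrapped into a complete report (assuming rand_duration and rand_string from the same module):

suite = TestSuite('generated.suite', _gen_cases(n_passes=3, n_fails=1, n_skips=1, n_errors=1))
print(TestSuite.to_xml_string([suite]))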
Code example #27
File: report.py  Project: shaneutt/checkov
    def get_test_suites(self):
        test_cases = {}
        test_suites = []
        records = self.passed_checks + self.failed_checks + self.skipped_checks
        for record in records:
            check_name = record.check_name
            if check_name not in test_cases:
                test_cases[check_name] = []

            test_name = "{} {} {}".format(self.check_type, check_name,
                                          record.resource)
            test_case = TestCase(name=test_name,
                                 file=record.file_path,
                                 classname=record.check_class)
            if record.check_result['result'] == CheckResult.FAILED:
                test_case.add_failure_info(
                    "Resource \"{}\" failed in check \"{}\"".format(
                        record.resource, check_name))
            if record.check_result['result'] == CheckResult.SKIPPED:
                test_case.add_skipped_info(
                    "Resource \"{}\" skipped in check \"{}\"\n Suppress comment: {}"
                    .format(record.resource, check_name,
                            record.check_result['suppress_comment']))
            test_cases[check_name].append(test_case)
        for key in test_cases.keys():
            test_suites.append(
                TestSuite(name=key,
                          test_cases=test_cases[key],
                          package=test_cases[key][0].classname))
        return test_suites
Code example #28
def test_init_failure_output():
    tc = Case("Failure-Output")
    tc.add_failure_info(output="I failed!")
    #pylint: disable=unused-variable
    ts, tcs = serialize_and_read(Suite("test", [tc]))[0]
    verify_test_case(tcs[0], {"name": "Failure-Output"},
                     failure_output="I failed!")
Code example #29
def test_multiple_failures():
    """Tests multiple failures in one test case"""
    tc = Case("Multiple failures", allow_multiple_subelements=True)
    tc.add_failure_info("First failure", "First failure message")
    (_, tcs) = serialize_and_read(Suite("test", [tc]))[0]
    verify_test_case(
        tcs[0],
        {"name": "Multiple failures"},
        failures=[{
            "message": "First failure",
            "output": "First failure message",
            "type": "failure"
        }],
    )
    tc.add_failure_info("Second failure", "Second failure message")
    (_, tcs) = serialize_and_read(Suite("test", [tc]))[0]
    verify_test_case(
        tcs[0],
        {"name": "Multiple failures"},
        failures=[
            {
                "message": "First failure",
                "output": "First failure message",
                "type": "failure"
            },
            {
                "message": "Second failure",
                "output": "Second failure message",
                "type": "failure"
            },
        ],
    )
Code example #30
File: converter.py  Project: yuanxin16/polycube
def create_junit_test_file(tests_file: str):
    tests = []
    for file in tests_file.split("\n"):
        file_log = "test_log_" + "_".join(file.split("_")[2:])
        test = file_log_parser(file_log)
        with open(file) as file_buffer:  # Use file to refer to the file object
            config = file_buffer.read()
            current_section = {}
            lines = config.split()
            date = lines[1]
            is_relaunch = lines[2]
            debug = lines[3]
            for _, test_name, status, _, _, duration in zip(
                    *[iter(lines[6:])] * 6):
                if duration == "false":
                    continue
                else:
                    duration = duration[:-1]
                if test_name in test:
                    tc = TestCase(name=test_name,
                                  elapsed_sec=int(duration),
                                  status=status.split("+")[0],
                                  stdout=test[test_name])
                    if status != "Passed++++":
                        tc.add_failure_info(output=test[test_name])
                else:
                    tc = TestCase(name=test_name,
                                  elapsed_sec=int(duration),
                                  status=status.split("+")[0])
                tests.append(tc)
    return tests
Code example #31
def _write_junit_report(site, config, output_file, total_time):
    pages = site.pages
    test_cases = []

    for results, resource in pages.items():
        origins = [source.origin.geturl() for source in resource.sources]
        if resource.status == 200:
            test_case = TestCase(name=resource.url_split.geturl(),
                                 classname=results.hostname,
                                 elapsed_sec=resource.response_time,
                                 stdout=resource.status,
                                 status="passed")
        else:
            stderr_message = "Link found on:\n{}".format("\n".join(origins))
            test_case = TestCase(name=resource.url_split.geturl(),
                                 classname=results.hostname,
                                 elapsed_sec=resource.response_time,
                                 stderr=stderr_message,
                                 status="failed")
            if resource.exception:
                message = str(resource.exception)
            else:
                message = "Expected 200 OK but got {}".format(resource.status)
            test_case.add_failure_info(message=message,
                                       failure_type="UnexpectedStatusCode")
        test_cases.append(test_case)
    test_suite = TestSuite("pylinkvalidator test suite", test_cases)
    output_file.write(TestSuite.to_xml_string([test_suite]))
    print_summary(site, config, total_time)
Code example #32
    def print_matches(self, matches, rules=None, filenames=None):
        """Output all the matches"""

        if not rules:
            return None

        test_cases = []
        for rule in rules.all_rules:
            if not rules.is_rule_enabled(rule):
                if not rule.id:
                    continue
                test_case = TestCase(
                    name='{0} {1}'.format(rule.id, rule.shortdesc))

                if rule.experimental:
                    test_case.add_skipped_info(
                        message='Experimental rule - not enabled')
                else:
                    test_case.add_skipped_info(message='Ignored rule')
                test_cases.append(test_case)
            else:
                test_case = TestCase(name='{0} {1}'.format(
                    rule.id, rule.shortdesc),
                                     allow_multiple_subelements=True,
                                     url=rule.source_url)
                for match in matches:
                    if match.rule.id == rule.id:
                        test_case.add_failure_info(
                            message=self._failure_format(match),
                            failure_type=match.message)
                test_cases.append(test_case)

        test_suite = TestSuite('CloudFormation Lint', test_cases)

        return to_xml_report_string([test_suite], prettyprint=True)
Code example #33
File: conftest.py  Project: lkeanfei/Assessment
@pytest.hookimpl(hookwrapper=True)  # required for the "outcome = yield" pattern below
def pytest_runtest_makereport(item, call):

    # print('runtest makerepot')
    outcome = yield
    rep = outcome.get_result()
    testScript = rep.fspath
    scriptName = os.path.basename(testScript)

    testname = ""
    try:
        testname = rep.item['name']
    except:
        testname = rep.nodeid

    if rep.when == 'call':
        scenario = rep.scenario
        steps = scenario["steps"]
        durInSeconds = rep.duration
        # for step in steps:
        #     print("********" + step["name"] + ": " + str(step["failed"]))

        stepsSummary = generateTestStepsSummary(steps)
        # print("********name " + testname + " : " + rep.outcome )
        # logging.getLogger().info('new Test CAse ' + testname)
        testCase = TestCase(testname, '', durInSeconds, stepsSummary)
        # logging.getLogger().info('Testname is ' + testname)

        if rep.failed:
            testCase.add_failure_info(rep.longreprtext)

        testCaseDictList[item.fspath.strpath].append(testCase)
Code example #34
def generate_junit_report(test_name, total_thresholds, report_name):
    test_cases = []
    file_name = f"junit_report_{report_name}.xml"
    logger.info(f"Generate report {file_name}")

    for item in total_thresholds["details"]:
        message = item['message']
        test_case = TestCase(
            item['name'],
            classname=f"{item['scope']}",
            status="PASSED",
            stdout=
            f"{item['scope']} {item['name'].lower()} {item['aggregation']} {item['actual']} "
            f"{item['rule']} {item['expected']}")
        if message:
            test_case.status = "FAILED"
            test_case.add_failure_info(message)
        test_cases.append(test_case)

    ts = TestSuite(test_name, test_cases)
    os.makedirs(f"{REPORT_PATH}/junit", exist_ok=True)
    with open(f"{REPORT_PATH}/junit/{file_name}", 'w') as f:
        TestSuite.to_file(f, [ts], prettyprint=True)

    return file_name
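The shape of total_thresholds can be read off the loop above; a hypothetical minimal input (REPORT_PATH and logger are assumed to be configured elsewhere in the module):

total_thresholds = {"details": [{
    "name": "Login", "scope": "all", "aggregation": "pct95",
    "actual": 520, "rule": "<", "expected": 500,
    "message": "Threshold exceeded",
}]}
generate_junit_report("perf", total_thresholds, "baseline")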
Code example #35
 def test_init_failure_message(self):
     tc = TestCase('Failure-Message')
     tc.add_failure_info("failure message")
     (ts, tcs) = serialize_and_read(TestSuite('test', [tc]))[0]
     verify_test_case(self,
                      tcs[0], {'name': 'Failure-Message'},
                      failure_message="failure message")
Code example #36
    def run_test_configs(self, project_name):
        mstdout = ['']
        test_cases = TestCase('run_test_configs', '', '', mstdout, '')
        mproj = self.get_project_by_name(project_name)
        configs = self.get_all_configs(mproj['id'])
        for config in configs:
            inconfig = config
            logging.info("-----------------------------")
            logging.info(inconfig)
            logging.info(inconfig['name'])
            mstdout.insert(-1, str(inconfig['name']))
            logging.info(inconfig['id'])
            mstdout.insert(-1, str(inconfig['id']))
            for item in inconfig['configs']:
                mitems = self.toutf8(item)
                logging.info(".." + mitems['id'])
                mstdout.insert(-1, mitems['id'])
                logging.info(".." + mitems['name'])
                mstdout.insert(-1, mitems['name'])
            frdmk64f_id = self.get_config_item_id_by_name(
                mproj['id'], inconfig['name'], "frdm_k64f")
            frdmk64_name = self.get_config_item_name_by_id(
                mproj['id'], inconfig['id'], frdmk64f_id)
            logging.info("frdmk64f id is %s" % (frdmk64f_id))
            mstdout.insert(-1, "frdmk64f id is %s" % (frdmk64f_id))
            if frdmk64f_id is None or frdmk64_name is None:
                test_cases.add_failure_info(
                    'get_config_item_id_by_name failure')

        return test_cases
Code example #37
File: cache.py  Project: jmdacruz/mutmut
def print_result_cache_junitxml(dict_synonyms, suspicious_policy,
                                untested_policy):
    test_cases = []
    l = list(select(x for x in Mutant))
    for filename, mutants in groupby(l,
                                     key=lambda x: x.line.sourcefile.filename):
        for mutant in mutants:
            tc = TestCase("Mutant #{}".format(mutant.id),
                          file=filename,
                          line=mutant.line.line_number,
                          stdout=mutant.line.line)
            if mutant.status == BAD_SURVIVED:
                tc.add_failure_info(message=mutant.status,
                                    output=get_unified_diff(
                                        mutant.id, dict_synonyms))
            if mutant.status == BAD_TIMEOUT:
                tc.add_error_info(message=mutant.status,
                                  error_type="timeout",
                                  output=get_unified_diff(
                                      mutant.id, dict_synonyms))
            if mutant.status == OK_SUSPICIOUS:
                if suspicious_policy != 'ignore':
                    func = getattr(tc, 'add_{}_info'.format(suspicious_policy))
                    func(message=mutant.status,
                         output=get_unified_diff(mutant.id, dict_synonyms))
            if mutant.status == UNTESTED:
                if untested_policy != 'ignore':
                    func = getattr(tc, 'add_{}_info'.format(untested_policy))
                    func(message=mutant.status,
                         output=get_unified_diff(mutant.id, dict_synonyms))

            test_cases.append(tc)

    ts = TestSuite("mutmut", test_cases)
    print(TestSuite.to_xml_string([ts]))
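The getattr dispatch above resolves a policy string to one of junit_xml's reporter methods; for example, a suspicious_policy of 'failure' is equivalent to this sketch:

tc = TestCase("Mutant #1")
func = getattr(tc, "add_{}_info".format("failure"))  # resolves to tc.add_failure_info
func(message="suspicious", output="(unified diff here)")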
Code example #38
File: regressiontest.py  Project: gdeskos/fluidity
    def test(self):
        def Trim(string):
            if len(string) > 4096:
                return string[:4096] + " ..."
            else:
                return string

        varsdict = {}
        self.log("Assigning variables:")
        for var in self.variables:
            tmpdict  = {}
            try:
              var.run(tmpdict)
            except:
              self.log("failure.")
              self.pass_status.append('F')
              return self.pass_status

            varsdict[var.name] = tmpdict[var.name]
            self.log("Assigning %s = %s" % (str(var.name), Trim(str(varsdict[var.name]))))

        if len(self.pass_tests) != 0:
            self.log("Running failure tests: ")
            for test in self.pass_tests:
                self.log("Running %s:" % test.name)
                status = test.run(varsdict)
                tc = TestCase(test.name,
                              '%s.%s' % (self.length,
                                         self.filename[:-4]))
                if status is True:
                    self.log("success.")
                    self.pass_status.append('P')
                elif status is False:
                    self.log("failure.")
                    self.pass_status.append('F')
                    tc.add_failure_info("Failure")
                else:
                    self.log("failure (info == %s)." % status)
                    self.pass_status.append('F')
                    tc.add_failure_info("Failure", status)
                self.xml_reports.append(tc)

        if len(self.warn_tests) != 0:
            self.log("Running warning tests: ")
            for test in self.warn_tests:
                self.log("Running %s:" % test.name)
                status = test.run(varsdict)
                if status is True:
                    self.log("success.")
                    self.warn_status.append('P')
                elif status is False:
                    self.log("warning.")
                    self.warn_status.append('W')
                else:
                    self.log("warning (info == %s)." % status)
                    self.warn_status.append('W')

        self.log(''.join(self.pass_status + self.warn_status))
        return self.pass_status + self.warn_status
Code example #39
 def test_init_illegal_unicode_char(self):
     tc = TestCase('Failure-Message')
     tc.add_failure_info(
         u("failure message with illegal unicode char: [\x02]"))
     (ts, tcs) = serialize_and_read(TestSuite('test', [tc]))[0]
     verify_test_case(
         self, tcs[0], {'name': 'Failure-Message'}, failure_message=u(
             "failure message with illegal unicode char: []"))
Code example #40
File: JunitXML.py  Project: ggouaillardet/mtt
    def execute(self, log, keyvals, testDef):
        testDef.logger.verbose_print("JunitXML Reporter")
        # pickup the options
        cmds = {}
        testDef.parseOptions(log, self.options, keyvals, cmds)
        if cmds['filename'] is not None:
            self.fh = open(cmds['filename'] if os.path.isabs(cmds['filename']) \
                           else os.path.join(cmds['scratch'],cmds['filename']), 'w')
        if testDef.options['description'] is not None:
            print(testDef.options['description'], file=self.fh)
            print(file=self.fh)
       
        # Use the Junit classname field to store the list of inifiles
        try:
            classname = testDef.log['inifiles']
        except KeyError:
            classname = None
        # get the entire log of results
        fullLog = testDef.logger.getLog(None)
        testCases = []
        # TODO: ain't nobody got time for that.  8-).
        time = 0
        for lg in fullLog:
            if 'stdout' in lg and lg['stdout'] is not None:
                stdout = "\n".join(lg['stdout'])
            else:
                stdout = None
            if 'stderr' in lg and lg['stderr'] is not None:
                stderr = "\n".join(lg['stderr'])
            else:
                stderr = None
            if 'time' in lg and lg['time'] is not None:
                time = lg['time']
            else:
                time = 0
            tc = TestCase(lg['section'], classname, time, stdout, stderr)
            try:
                if 0 != lg['status']:
                    # Find sections prefixed with 'TestRun'
                    if re.match("TestRun", lg['section']):
                        tc.add_failure_info("Test reported failure")
                    else:
                        tc.add_error_info("Test error")
            except KeyError:
                sys.exit(lg['section'] + " is missing status!")
            testCases.append(tc)

        # TODO:  Pull in the resource manager jobid.
        jobid = "job1"
        ts = TestSuite(jobid, testCases)
        print(TestSuite.to_xml_string([ts]), file=self.fh)

        if cmds['filename'] is not None:
            self.fh.close()
        log['status'] = 0
        return
Code example #41
File: parse_results.py  Project: a-ilango/libfabric
def generate_generic_test_case(name, classname, time, message, result):
    default_pass_message = 'Test passed, check build log for additional details'
    default_skip_message = 'Test skipped, check build log for additional details'
    tc = TestCase(name, classname, time,
                  default_pass_message if result == 'pass' else '')
    if result == 'fail':
        tc.add_failure_info(message=message)
    if result == 'skip':
        tc.add_skipped_info(message=default_skip_message)

    return tc
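A hypothetical call, showing how the result switch above maps onto JUnit elements:

tc = generate_generic_test_case("fi_msg", "libfabric.unit", 1.2, "transfer mismatch", "fail")
# tc now carries a failure element whose message is "transfer mismatch"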
Code example #42
File: generate.py  Project: dmsimard/ara
    def take_action(self, args):
        test_cases = []
        if args.playbook is not None:
            playbooks = args.playbook
            results = (models.TaskResult().query
                       .join(models.Task)
                       .filter(models.TaskResult.task_id == models.Task.id)
                       .filter(models.Task.playbook_id.in_(playbooks)))
        else:
            results = models.TaskResult().query.all()

        for result in results:
            task_name = result.task.name
            if not task_name:
                task_name = result.task.action
            additional_results = {
                'host': result.host.name,
                'playbook_path': result.task.playbook.path
            }
            result_str = jsonutils.dumps(additional_results)
            test_path = \
                u'{playbook_file}.{play_name}'.format(
                    playbook_file=os.path.basename(result.task.playbook.path),
                    play_name=result.task.play.name)
            test_case = TestCase(
                name=task_name,
                classname=test_path,
                elapsed_sec=result.duration.seconds,
                stdout=result_str)
            if result.status == 'skipped':
                test_case.add_skipped_info(message=result.result)
            elif ((result.status in ('failed', 'unreachable') and
                    result.ignore_errors is False and
                    'EXPECTED FAILURE' not in task_name and
                    'TOGGLE RESULT' not in task_name) or
                    (result.status == 'ok' and 'TOGGLE RESULT' in task_name)):
                test_case.add_failure_info(message=result.result)
            test_cases.append(test_case)
        test_suite = TestSuite('Ansible Tasks', test_cases)

        # TODO: junit_xml doesn't order the TestCase parameters.
        # This makes it so the order of the parameters for the same exact
        # TestCase is not guaranteed to be the same and thus results in a
        # different stdout (or file). This is easily reproducible on Py3.
        xml_string = six.text_type(test_suite.to_xml_string([test_suite]))
        if args.output_file == '-':
            if six.PY2:
                sys.stdout.write(encodeutils.safe_encode(xml_string))
            else:
                sys.stdout.buffer.write(encodeutils.safe_encode(xml_string))
        else:
            with open(args.output_file, 'wb') as f:
                f.write(encodeutils.safe_encode(xml_string))
Code example #43
    def test_init_failure_type(self):
        tc = TestCase('Failure-Type')
        tc.add_failure_info(failure_type='com.example.Error')
        (ts, tcs) = serialize_and_read(TestSuite('test', [tc]))[0]
        verify_test_case(self, tcs[0], {'name': 'Failure-Type'})

        tc.add_failure_info("failure message")
        (ts, tcs) = serialize_and_read(TestSuite('test', [tc]))[0]
        verify_test_case(
            self, tcs[0], {'name': 'Failure-Type'},
            failure_message="failure message",
            failure_type='com.example.Error')
Code example #44
File: runner.py  Project: 4m1g0/Arduino
 def run_tests(self):
     test_cases = []
     should_update_env = True
     for test in self.tests:
         desc = test['desc']
         name = test['name']
         index = test['id']
         test_case = TestCase(name, self.name)
         if '[.]' in desc:
             print('skipping test "{}"'.format(name))
             test_case.add_skipped_info(message="Skipped test marked with [.]")
         else:
             test_output = StringIO()
             self.sp.logfile = test_output
             print('running test "{}"'.format(name))
             if should_update_env:
                 res = self.update_env(self.env_vars)
                 if res != BSTestRunner.SUCCESS:
                     print('failed to set environment variables')
                      break
                 should_update_env = False
             if name in self.mocks:
                 debug_print('setting up mocks')
                 self.mocks[name]['request_env'] = self.request_env
                 self.mocks[name]['setup']()
                 extra_env = mock_decorators.get_all_envs(name)
                 if extra_env is not None:
                     self.update_env(extra_env)
             t_start = time.time()
             result = self.run_test(index)
             if name in self.mocks:
                 debug_print('tearing down mocks')
                 try:
                     self.mocks[name]['teardown']()
                 except AssertionError:
                     debug_print('teardown assert failure')
                     result = BSTestRunner.FAIL
             t_stop = time.time()
             self.sp.logfile = None
             test_case.elapsed_sec = t_stop - t_start
             debug_print('test output was:')
             debug_print(test_output.getvalue())
             if result == BSTestRunner.SUCCESS:
                 test_case.stdout = test_output.getvalue()
                 print('test "{}" passed'.format(name))
             else:
                 print('test "{}" failed'.format(name))
                 test_case.add_failure_info('Test failed', output=test_output.getvalue())
                 should_update_env = True
             test_output.close()
          test_cases.append(test_case)
     return TestSuite(self.name, test_cases)
Code example #45
File: test_exporters.py  Project: jaustin/mbed
    def exporter_junit(self, test_result_ext, test_suite_properties=None):
        """ Export test results in JUnit XML compliant format
        """
        from junit_xml import TestSuite, TestCase

        test_suites = []
        test_cases = []

        targets = sorted(test_result_ext.keys())
        for target in targets:
            toolchains = sorted(test_result_ext[target].keys())
            for toolchain in toolchains:
                test_cases = []
                tests = sorted(test_result_ext[target][toolchain].keys())
                for test in tests:
                    test_results = test_result_ext[target][toolchain][test]
                    for test_res in test_results:
                        test_ids = sorted(test_res.keys())
                        for test_no in test_ids:
                            test_result = test_res[test_no]
                            name = test_result["description"]
                            classname = "%s.%s.%s.%s" % (self.package, target, toolchain, test_result["id"])
                            elapsed_sec = test_result["elapsed_time"]
                            _stdout = test_result["output"]

                            if "target_name_unique" in test_result:
                                _stderr = test_result["target_name_unique"]
                            else:
                                _stderr = test_result["target_name"]

                            # Test case
                            tc = TestCase(name, classname, elapsed_sec, _stdout, _stderr)

                            # Test case extra failure / error info
                            message = test_result["result"]
                            if test_result["result"] == "FAIL":
                                tc.add_failure_info(message, _stdout)
                            elif test_result["result"] == "SKIP" or test_result["result"] == "NOT_SUPPORTED":
                                tc.add_skipped_info(message, _stdout)
                            elif test_result["result"] != "OK":
                                tc.add_error_info(message, _stdout)

                            test_cases.append(tc)

                ts = TestSuite(
                    "test.suite.%s.%s" % (target, toolchain),
                    test_cases,
                    properties=test_suite_properties[target][toolchain],
                )
                test_suites.append(ts)
        return TestSuite.to_xml_string(test_suites)
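To make the nested traversal above concrete, here is a hedged sketch of the input shape exporter_junit appears to expect, inferred purely from the loops and key lookups (the target, toolchain, and test names are made up):

# made-up example data, shaped to match the traversal above
test_result_ext = {
    'K64F': {                      # target
        'GCC_ARM': {               # toolchain
            'MBED_A1': [           # test name, mapping to a list of runs
                {0: {'description': 'basic test',
                     'id': 'MBED_A1',
                     'elapsed_time': 1.25,
                     'output': 'test output',
                     'target_name': 'K64F',
                     'result': 'OK'}},
            ],
        },
    },
}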
Code Example #46
def process(config):
    for mnf in config.keys():
        log("Processing manifest: %s" % mnf, 'info')

        if 'submodule' in config[mnf]:
            sub = config[mnf].pop('submodule')
            sub_manifest = guess_manifest(str(mnf), sub['source'])
            sub_app = qubell.organization.application(name=sub['name'])
            sub_app.upload(sub_manifest)

        if 'source' in config[mnf]:
            manifest = guess_manifest(str(mnf), config[mnf].pop('source'))
        else:
            error("Missing manifest source directive")
            continue  # without a manifest there is nothing to launch

        if list(config[mnf])[0] != 'launch':
            error('Launch action missing or not first')

        parameters = config[mnf]['launch'].get('parameters', {})
        settings = config[mnf]['launch'].get('settings', {})

        if qubell.appid:
            app = qubell.organization.application(id=qubell.appid, manifest=manifest)
        elif qubell.appname:
            app = qubell.organization.application(name=qubell.appname, manifest=manifest)
        else:
            app = qubell.organization.application(name=manifest.name, manifest=manifest)
        assert app.upload(manifest)
        instance = qubell.organization.create_instance(application=app, parameters=parameters, **settings)
        assert instance

        for action in config[mnf].keys():
            parameters = config[mnf][action].get('parameters', {})
            if not run_test(instance, manifest, Action(action, parameters, config[mnf][action]['expected'])):
                test = TestCase(action, manifest.name, 0, 'Manifest: ' + manifest.source, 'failed to start')
                test.add_failure_info(str(instance))
                test_cases.append(test)
                return False
Code Example #47
File: junit_reporter.py  Project: JahanviB/Zopkio
    def _generate_junit_xml(self, config_name):
        testcases = []
        tests = self.data_source.get_test_results(config_name)
        for test in tests:
            test_time = 0
            if test.func_end_time is not None and test.func_start_time is not None:
                test_time = test.func_end_time - test.func_start_time
            tc = TestCase(test.name, config_name, test_time, test.description, test.message)
            if 'failed' in test.result:
                tc.add_failure_info(test.result)
            elif 'skipped' in test.result:
                tc.add_skipped_info(test.result)
            testcases.append(tc)
        testsuite = TestSuite(config_name + '_' + self.name, testcases)
        return testsuite
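The positional TestCase call above is easy to misread: junit_xml takes name, classname, elapsed_sec, stdout and stderr in that order, so the test description and message land in the stdout/stderr slots. The same call with the slots named, using hypothetical values:

from junit_xml import TestCase

# hypothetical example values; the trailing comments map each slot
# back to the variables used in the method above
tc = TestCase(name='test_login',           # test.name
              classname='sample_config',   # config_name
              elapsed_sec=1.5,             # test_time
              stdout='verifies login',     # test.description
              stderr='assertion message')  # test.message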
Code Example #48
File: test_exporters.py  Project: DanKupiniak/mbed
    def exporter_junit(self, test_result_ext, test_suite_properties=None):
        """ Export test results in JUnit XML compliant format
        """
        from junit_xml import TestSuite, TestCase
        test_suites = []

        targets = sorted(test_result_ext.keys())
        for target in targets:
            toolchains = sorted(test_result_ext[target].keys())
            for toolchain in toolchains:
                test_cases = []
                tests = sorted(test_result_ext[target][toolchain].keys())
                for test in tests:
                    test_results = test_result_ext[target][toolchain][test]
                    for test_res in test_results:
                        test_ids = sorted(test_res.keys())
                        for test_no in test_ids:
                            test_result = test_res[test_no]
                            name = test_result['description']
                            classname = '%s.%s.%s.%s' % (self.package, target, toolchain, test_result['id'])
                            elapsed_sec = test_result['elapsed_time']
                            _stdout = test_result['output']

                            if 'target_name_unique' in test_result:
                                _stderr = test_result['target_name_unique']
                            else:
                                _stderr = test_result['target_name']

                            # Test case
                            tc = TestCase(name, classname, elapsed_sec, _stdout, _stderr)

                            # Test case extra failure / error info
                            message = test_result['result']
                            if test_result['result'] == 'FAIL':
                                tc.add_failure_info(message, _stdout)
                            elif test_result['result'] in ('SKIP', 'NOT_SUPPORTED'):
                                tc.add_skipped_info(message, _stdout)
                            elif test_result['result'] != 'OK':
                                tc.add_error_info(message, _stdout)

                            test_cases.append(tc)

                ts = TestSuite("test.suite.%s.%s"% (target, toolchain), test_cases, properties=test_suite_properties[target][toolchain])
                test_suites.append(ts)
        return TestSuite.to_xml_string(test_suites)
Code Example #49
File: reporter.py  Project: dkentw/pi-tester
    def _output_normal(self, test_result):
        # Needs refactoring
        if test_result == {}:
            print('[what?!] there are no test results; what is the test case id?')
        else:
            print()
            xml_test_suites = []
            summary_dict = self._get_summary_dict(test_result)
            self.report_create_time = time.strftime('%Y%m%d_%H%M%S', time.localtime())
            for case_classify in test_result.keys():
                xml_test_cases = []
                if 'result' in test_result[case_classify].keys():
                    # Generate HTML report
                    self._generate_html_file(
                        case_classify, test_result[case_classify]['result'],
                        test_result[case_classify]['summary'])

                    # Save the result into the CSV
                    self._output_result_to_csv(test_result)

                    # Show in console
                    print('{0} {1} {2}'.format('=' * 16, case_classify, '=' * 16))
                    test_case_result = test_result[case_classify]['result']
                    for case_id in test_case_result.keys():
                        print('[{0}][{1}] {2}, {3}, {4}'.format(case_classify, case_id,
                                                                test_case_result[case_id][0],
                                                                test_case_result[case_id][1],
                                                                str(test_case_result[case_id][2])))

                        # Produce XML file
                        test_case = TestCase(case_id, case_classify, int(test_case_result[case_id][2]))
                        if test_case_result[case_id][0] in ('Fail', 'Error'):
                            try:
                                test_case.add_failure_info('msg: ' + test_case_result[case_id][1])
                            except TypeError:
                                # the message may not be a string; fall back to its repr
                                test_case.add_failure_info('msg: ' + str(test_case_result[case_id]))

                        xml_test_cases.append(test_case)

                    xml_test_suites.append(TestSuite(case_classify, xml_test_cases))
                    with open(os.path.join(self.latest_reports_dir, case_classify + '.xml'), 'w') as f:
                        TestSuite.to_file(f, xml_test_suites, prettyprint=True)

            self._generate_summary_html_file(summary_dict)
            print('{0} {1} {2}'.format('=' * 16, 'Summary', '=' * 16))
            pprint.pprint(summary_dict)
Code Example #50
File: runner.py  Project: sajedts/pipedrive
def run_tests(TestClass):
    setup_logging()
    # Verify that we have needed input
    assert base_url, 'Please provide a value for base_url in runner.py'
    assert chromedriver_location, 'Please provide a value for chromedriver_location in runner.py'
    assert credentials['email'], 'Please provide credentials in runner.py'
    assert credentials['password'], 'Please provide credentials in runner.py'

    # WebDriver setup
    driver = webdriver.Chrome(chromedriver_location)

    # Init the provided class
    myTestClass = TestClass(driver, base_url, credentials)
    logging.info("Running: %s", myTestClass.__class__.__name__)

    # Run all the methods containing 'test' in name
    test_cases = []
    for object_name in dir(myTestClass):
        if 'test' in object_name:

            test_method = getattr(myTestClass, object_name)
            if not callable(test_method):
                continue  # skip attributes that are not callable

            this_test_case = TestCase(object_name, myTestClass.__class__.__name__)
            logging.info('#######################################################')
            logging.info("Running %s", object_name)

            try:
                # Run the actual method
                test_method()
            except Exception as e:
                logging.info('TEST FAILED')
                logging.exception(e)
                this_test_case.add_failure_info(output=traceback.format_exc())
            else:
                logging.info("TEST PASSED")

            test_cases.append(this_test_case)
            logging.info('#######################################################')

    # Write the results to junit xml
    write_xml(test_cases, myTestClass.__class__.__name__)
Code Example #51
File: junit.py  Project: ernstp/ansible
    def _build_test_case(self, task_data, host_data):
        """ build a TestCase from the given TaskData and HostData """

        name = '[%s] %s: %s' % (host_data.name, task_data.play, task_data.name)
        duration = host_data.finish - task_data.start

        if self._task_class == 'true':
            junit_classname = re.sub(r'\.yml:[0-9]+$', '', task_data.path)
        else:
            junit_classname = task_data.path

        if host_data.status == 'included':
            return TestCase(name, junit_classname, duration, host_data.result)

        res = host_data.result._result
        rc = res.get('rc', 0)
        dump = self._dump_results(res, indent=0)
        dump = self._cleanse_string(dump)

        if host_data.status == 'ok':
            return TestCase(name, junit_classname, duration, dump)

        test_case = TestCase(name, junit_classname, duration)

        if host_data.status == 'failed':
            if 'exception' in res:
                message = res['exception'].strip().split('\n')[-1]
                output = res['exception']
                test_case.add_error_info(message, output)
            elif 'msg' in res:
                message = res['msg']
                test_case.add_failure_info(message, dump)
            else:
                test_case.add_failure_info('rc=%s' % rc, dump)
        elif host_data.status == 'skipped':
            if 'skip_reason' in res:
                message = res['skip_reason']
            else:
                message = 'skipped'
            test_case.add_skipped_info(message)

        return test_case
Code Example #52
File: test_parser.py  Project: Mraedis/VHDL
def xmlwrite(target, everyline):
	xmltargetpath = os.getcwd() + os.sep + target + '_testresults.xml'
	xmltargetfile = open(xmltargetpath, 'w+')
	test_cases = []
	for line in everyline.split('\n'):
		words = line.split(' ')
		if line.find('success') != -1:
			time_taken = get_time(line.split('-')[-1][7:])
			name = words[0] + ' - ' + line.split('-')[1].split('name:')[1].strip()
			print(name)
			test_cases.append(TestCase(name, target, time_taken, None))
		elif line.find('failed') != -1:
			time_taken = get_time(line.split('-')[-1][7:])
			name = words[0] + ' - ' + line.split('-')[1].split('name:')[1].strip()
			message = ("-".join(line.split(' - ')[2:-1])).strip()
			tc = TestCase(name, target, time_taken, None)
			tc.add_failure_info(None, message)
			test_cases.append(tc)
	ts = TestSuite("testing this suite", test_cases)
	xmltargetfile.write(TestSuite.to_xml_string([ts]))
	xmltargetfile.close()
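The string slicing above implies a fairly rigid log format. A reconstructed sample of the lines xmlwrite expects (hypothetical, inferred from the parsing, not taken from the project):

# hypothetical log lines, reconstructed from the split() calls above
everyline = ("TB001 - name: counter_test - success - time: 25ns\n"
             "TB002 - name: fifo_test - assertion failed at 10 ns - time: 40ns")
# xmlwrite('my_target', everyline) would then record one passing
# and one failing test case.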
Code Example #53
File: reporter.py  Project: jumboTest/test
	def report(self, report_dir=None):
		test_cases = []
		if report_dir is None:
			report_dir = self.config.report_dir
		for report_file in os.listdir(report_dir):
			if report_file.endswith(".pkl"):
				# pickle files should be opened in binary mode
				with open(os.path.join(report_dir, report_file), "rb") as f:
					result_dict = cPickle.load(f)
				for test in sorted(result_dict.keys()):
					in_entry = result_dict[test]
					report_entry = TestCase(test, in_entry["CLASS_NAME"], in_entry["TIME"])
					if in_entry["RESULT"] == "FAIL":
						report_entry.add_failure_info(in_entry["MESSAGE"], in_entry["TRACE"])
					elif in_entry["RESULT"] == "ERROR":
						report_entry.add_error_info(in_entry["MESSAGE"], in_entry["TRACE"])
					test_cases.append(report_entry)
		ts = TestSuite("my test suite", test_cases)
		with open(os.path.join(report_dir, "results.xml"), "w") as f_xml:
			f_xml.write(TestSuite.to_xml_string([ts]))
Code Example #54
def generate_junit_xml(file_name='junit.xml'):
    results = monitor_runner.get_latest_status()
    print(results)

    test_suites = []

    # test_cases = [TestCase('Test1', 'some.class.name', 123.345, 'I am stdout!', 'I am stderr!')]
    # ts = TestSuite("my test suite", test_cases)
    for testsuite_name in results:
        test_cases = []
        for test_case_name in results[testsuite_name]:
            try:
                name = results[testsuite_name][test_case_name]['name']
            except KeyError:
                name = '.'

            success = results[testsuite_name][test_case_name]['success']

            try:
                elapsed_sec = results[testsuite_name][test_case_name]['response_time'].total_seconds()
            except (KeyError, AttributeError):
                # key missing, or response_time is not a timedelta
                elapsed_sec = -1
            tc = TestCase(
                name=name,
                classname='{}.{}'.format(testsuite_name, test_case_name),
                elapsed_sec=elapsed_sec,
                stdout='{}'.format(success),
            )
            if success is False:
                tc.add_failure_info('Failed')
            test_cases.append(tc)
        ts = TestSuite(testsuite_name, test_cases)
        test_suites.append(ts)

    with open(file_name, "w", encoding='utf-8-sig') as f:
        TestSuite.to_file(f, test_suites, prettyprint=True)
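A hedged sketch of the results structure the function above appears to consume, inferred from the dictionary lookups (monitor_runner is this project's own module; the names and values here are made up):

from datetime import timedelta

# made-up example data, shaped to match the lookups above
results = {
    'api-monitors': {                        # test suite name
        'health-check': {                    # test case name
            'name': 'GET /health',
            'success': True,
            'response_time': timedelta(seconds=0.42),
        },
    },
}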
Code Example #55
File: test_exporters.py  Project: 1deus/tmk_keyboard
    def exporter_junit(self, test_result_ext, test_suite_properties=None):
        """ Export test results in JUnit XML compliant format
        """
        from junit_xml import TestSuite, TestCase
        test_suites = []

        toolchains = sorted(test_result_ext.keys())
        for toolchain in toolchains:
            targets = sorted(test_result_ext[toolchain].keys())
            for target in targets:
                test_cases = []
                tests = sorted(test_result_ext[toolchain][target].keys())
                for test in tests:
                    test_results = test_result_ext[toolchain][target][test]
                    test_ids = sorted(test_results.keys())
                    for test_no in test_ids:
                        test_result = test_results[test_no]
                        name = test_result['test_description']
                        classname = 'test.%s.%s.%s' % (target, toolchain, test_result['test_id'])
                        elapsed_sec = test_result['elapsed_time']
                        _stdout = test_result['single_test_output']
                        _stderr = ''
                        # Test case
                        tc = TestCase(name, classname, elapsed_sec, _stdout, _stderr)
                        # Test case extra failure / error info
                        if test_result['single_test_result'] == 'FAIL':
                            message = test_result['single_test_result']
                            tc.add_failure_info(message, _stdout)
                        elif test_result['single_test_result'] != 'OK':
                            message = test_result['single_test_result']
                            tc.add_error_info(message, _stdout)

                        test_cases.append(tc)
                ts = TestSuite("test.suite.%s.%s"% (target, toolchain), test_cases, properties=test_suite_properties[target][toolchain])
                test_suites.append(ts)
        return TestSuite.to_xml_string(test_suites)
Code Example #56
def junit_test_case_from_compmake(db, job_id):
    from junit_xml import TestCase
    cache = get_job_cache(job_id, db=db)
    if cache.state == Cache.DONE:  # and cache.done_iterations > 1:
        # elapsed_sec = cache.walltime_used
        elapsed_sec = cache.cputime_used
    else:
        elapsed_sec = None

    check_isinstance(cache.captured_stderr, (type(None), six.text_type))
    check_isinstance(cache.captured_stdout, (type(None), six.text_type))
    check_isinstance(cache.exception, (type(None), six.text_type))
    stderr = remove_escapes(cache.captured_stderr)
    stdout = remove_escapes(cache.captured_stdout)

    tc = TestCase(name=job_id, classname=None, elapsed_sec=elapsed_sec,
                  stdout=stdout, stderr=stderr)

    if cache.state == Cache.FAILED:
        message = cache.exception
        output = cache.exception + "\n" + cache.backtrace
        tc.add_failure_info(message, output)

    return tc
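remove_escapes, check_isinstance, and the Cache/job helpers come from compmake's own utilities and are not shown here. As a rough idea of what the escape stripping needs to accomplish, an illustrative stand-in (an assumption, not compmake's actual implementation):

import re

# illustrative stand-in, not compmake's actual code: ANSI escape
# sequences (colors, cursor movement) break XML output, so strip
# them before handing captured text to junit_xml.
ANSI_ESCAPE = re.compile(r'\x1b\[[0-9;]*[A-Za-z]')

def remove_escapes(s):
    return ANSI_ESCAPE.sub('', s) if s is not None else None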
Code Example #57
File: output.py  Project: thanasisk/drydock
    def write_xml_file(self):
        test_cases = []
        if os.path.isfile(self.output):
            logging.warning("File exists, deleting...")
            os.remove(self.output)
        with open(self.output, "a") as f:
            for _, elements in self.log.items():
                for key, value in elements.items():
                    if key in ("date", "profile", "score"):
                        # we really don't care about these entries
                        continue
                    try:
                        test_case = TestCase(key, value["descr"], "", "", "")
                        if value["status"] == "Fail":
                            test_case.add_failure_info(value["output"])
                        else:
                            test_case = TestCase(key, "", "", "", "")
                        test_cases.append(test_case)
                    except KeyError:
                        # the world's smallest violin playin' for KeyError
                        pass
            ts = [TestSuite("Docker Security Benchmarks", test_cases)]
            TestSuite.to_file(f, ts)