Example #1
    def generateHtmlView(self):
        try:
            self.__logger.debug(
                "=== Test Results Folder : %s===" %
                self.__xunitOutFolder)
            # Create an HTML table with the columns 'SNo', 'TCName', 'Result', 'RunTime'
            t = HTML.table(header_row=['SNo', 'TCName', 'Result', 'RunTime'])
            test_suites = []
            no = 1
            if self.__xunitOutFolder:
                if os.path.isdir(self.__xunitOutFolder):
                    for item in os.listdir(self.__xunitOutFolder):
                        if os.path.isfile(self.__xunitOutFolder + "/" + item) and item.startswith("test") and item.endswith("xml"):
                            test_suites.append(item)
                for report in test_suites:
                    with open(self.__xunitOutFolder + "/" + report) as f:
                        self.__ts, self.__tr = parse(f)
                    for tc in self.__ts:
                        t.rows.append([no, tc.classname + "_" + tc.methodname, tc.result, tc.time.total_seconds()])
                        no += 1
            return t
        except Exception as e:
            self.__logger.debug(
                "\nParsing Xunit Test Output Failed : %s" %
                Codes.GetDetailExceptionInfo(e))
            return Codes.FAILED
Example #2
def main(xunit, origin):
    with open(xunit, "r") as fh:
        ts, tr = xunitparser.parse(fh)

    # Get test results for each module
    modules = {}
    other_testcases = []
    for tc in ts:
        if tc.classname.startswith("weboob."):
            other_testcases.append(repr(tc))
            continue
        module = tc.classname.split(".")[0]
        # In the following, we consider
        # bad > skipped > good
        # and only update a module's status according to this order
        if tc.good:
            if tc.skipped:
                # Set to skipped only if previous test was good
                if module not in modules or modules[module] == "good":
                    modules[module] = "skipped"
            else:
                # Set to good only if no previous result
                if module not in modules:
                    modules[module] = "good"
        else:
            # Always set to bad on failed test
            modules[module] = "bad"
    # Aggregate results by test result rather than by module
    results = {"good": [], "bad": [], "skipped": []}
    for module in modules:
        results[modules[module]].append(module)
    return {"origin": origin, "modules": results, "others": other_testcases}
Example #3
    def getTotalTests(self):
        if self.hypervisor.lower() != "simulator":
            tag = "tags=%s" % self.zoneType
        else:
            tag = "tags=selfservice,'!BugId'"
        bash("%s/testenv/bin/nosetests-2.7 --with-xunit --xunit-file=totalTests.xml -w %s/test/integration/smoke -a %s --collect-only " % (self.basedir, self.basedir, tag))
        ts, tr = parse(self.basedir + "/totalTests.xml")
Example #4
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--reports-dir",
        default=os.path.join(REPO_ROOT, "artifacts", "xml-unittests-output"),
        help="Path to the directory where the JUnit XML reports can be found",
    )
    parser.add_argument(
        "output_file",
        help=(
            "Path to the file containing the failed tests listing to be fed to"
            " --names-files"),
    )
    options = parser.parse_args()
    total_xml_reports = 0
    failures = set()
    for fname in sorted(glob.glob(os.path.join(options.reports_dir, "*.xml"))):
        total_xml_reports += 1
        with open(fname) as rfh:
            test_suite, test_result = xunitparser.parse(rfh)
            if not test_result.errors and not test_result.failures:
                continue
            for test in test_suite:
                if test.bad:
                    failures.add(
                        "{classname}.{methodname}".format(**test.__dict__))

    if not total_xml_reports:
        parser.exit(status=1, message="No JUnit XML files were parsed")

    with open(options.output_file, "w") as wfh:
        wfh.write(os.linesep.join(sorted(failures)))

    parser.exit(status=0)
Example #5
def download_badge(junit_xml: str='reports/junit/junit.xml', dest_folder: str='reports/junit'):

    makedirs(dest_folder, exist_ok=True)

    # read the junit test file
    ts, tr = xunitparser.parse(open(junit_xml))
    runned = tr.testsRun
    failed = len(tr.failures)

    success_percentage = round((runned - failed) * 100 / runned)
    if success_percentage < 50:
        color = 'red'
    elif success_percentage < 75:
        color = 'orange'
    elif success_percentage < 90:
        color = 'green'
    else:
        color = 'brightgreen'
    url = 'https://img.shields.io/badge/tests-' + str(success_percentage) + '%25-' + color + '.svg'

    dest_file = path.join(dest_folder, 'junit-badge.svg')

    print('Generating junit badge from : ' + url)
    response = requests.get(url, stream=True)
    with open(dest_file, 'wb') as out_file:
        response.raw.decode_content = True
        shutil.copyfileobj(response.raw, out_file)
    del response
Example #6
    def _on_post_junitxml(self, req, resp, **kwargs):
        resp.status = falcon.HTTP_201

        suite, _ = xunitparser.parse(req.stream)
        results = [
            Result.from_junit_xml_test_case(case, **kwargs) for case in suite
        ]
Example #7
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        '--reports-dir',
        default=os.path.join(REPO_ROOT, 'artifacts', 'xml-unittests-output'),
        help='Path to the directory where the JUnit XML reports can be found')
    parser.add_argument(
        'output_file',
        help=(
            'Path to the file containing the failed tests listing to be fed to'
            ' --names-files'),
    )
    options = parser.parse_args()
    total_xml_reports = 0
    failures = set()
    for fname in sorted(glob.glob(os.path.join(options.reports_dir, '*.xml'))):
        total_xml_reports += 1
        with open(fname) as rfh:
            test_suite, test_result = xunitparser.parse(rfh)
            if not test_result.errors and not test_result.failures:
                continue
            for test in test_suite:
                if test.bad:
                    failures.add(
                        '{classname}.{methodname}'.format(**test.__dict__))

    if not total_xml_reports:
        parser.exit(status=1, message='No JUnit XML files were parsed')

    with open(options.output_file, 'w') as wfh:
        wfh.write(os.linesep.join(sorted(failures)))

    parser.exit(status=0)
Example #8
def parse_xunit(fname):
    """
    :param fname: The filename which contains the xunit results generated by
                  nosetests (when running tests or collecting them).
    """
    ts, tr = xunitparser.parse(open(fname))
    return ts, tr
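
Because xunitparser.parse also accepts an open file object (as most of the other examples show), a variant that closes the handle deterministically looks like this; a sketch, not a change to the example above:

def parse_xunit_closed(fname):
    # Same return values as parse_xunit, but the file is closed on exit.
    with open(fname) as fh:
        return xunitparser.parse(fh)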
Example #9
def parseResults(results: str) -> list:
    failures = []
    if results is not None:
        tc, tr = xunitparser.parse(results)
        for t in tr.failures:
            label = t[0]
            failures.append(int(label[:label.find('.')]))
    return failures
Example #10
    def test_anyFailures(self):
        xmlfile_path = self.setUp()
        testSuites, testResults = xunitparser.parse(open(xmlfile_path[0]))
        print(testResults.wasSuccessful())

        self.assertTrue(
            testResults.wasSuccessful(),
            msg="There were failures in cocotb tests, inspect results.xml file"
        )
Example #11
    def _on_post_junitxml(self, req, resp, **kwargs):
        resp.status = falcon.HTTP_201

        suite, _ = xunitparser.parse(req.stream)
        results = [
            Result.from_junit_xml_test_case(case, **kwargs)
            for case in suite
        ]
        response_data = Result.create_many(results, **kwargs)
        resp.body = json.dumps(response_data)
Example #12
    def parse_results(self):
        """
        Parse the XUnit results. Return a 2-item dict:
        'result': :py:class:`xunitparser.TestResult`
        'suite': :py:class:`xunitparser.TestSuite`
        """
        if not os.path.exists(self.results_file):
            logger.error("ERROR: results file does not exist.")
            return None
        ts, r = xunitparser.parse(open(self.results_file))
        return {'result': r, 'suite': ts}
Example #13
    def assertXUnitResults(self, in_file):
        ts, tr = xunitparser.parse(open(in_file))
        for tc in ts:
            parts = tc.methodname.split("_")
            if not tc.good:
                if tc.message.find("AssertionError") != -1:
                    regex = re.compile(r'(AssertionError\s*:.*)', re.MULTILINE)
                    m = regex.search(tc.message)
                    if m is not None:
                        raise AssertionError(m.group(1) + "\r\n" + tc.trace)
                else:
                    raise AssertionError(tc.trace)
Example #14
    def parseReportFiles(self, files):
        self.failures = []
        self.errors = []
        self.skips = []
        self.total = 0
        for report_file in files:
            with open(report_file) as report:
                ts, tr = xunitparser.parse(report)
                self.failures.extend(tr.failures)
                self.errors.extend(tr.errors)
                self.skips.extend(tr.skipped)
                self.total += tr.testsRun

        failures_count = len(self.failures)
        errors_count = len(self.errors)
        skips_count = len(self.skips)
        total = self.total

        count = failures_count + errors_count

        text = [self.name]
        text2 = ""

        if not count:
            results = SUCCESS
            if total:
                text += ["%d %s" %
                         (total,
                          total == 1 and "test" or "tests"),
                         "passed"]
            else:
                text += ["no tests", "run"]
        else:
            results = FAILURE
            text.append("Total %d test(s)" % total)
            if failures_count:
                text.append("%d %s" %
                            (failures_count,
                             failures_count == 1 and "failure" or "failures"))
            if errors_count:
                text.append("%d %s" %
                            (errors_count,
                             errors_count == 1 and "error" or "errors"))
            text2 = "%d %s" % (count, (count == 1 and 'test' or 'tests'))

        if skips_count:
            text.append("%d %s" % (skips_count,
                                   skips_count == 1 and "skip" or "skips"))

        self.results = results
        self.text = text
        self.text2 = [text2]
Example #15
    def parseReportFiles(self, files):
        self.failures = []
        self.errors = []
        self.skips = []
        self.total = 0
        for report_file in files:
            with open(report_file) as report:
                ts, tr = xunitparser.parse(report)
                self.failures.extend(tr.failures)
                self.errors.extend(tr.errors)
                self.skips.extend(tr.skipped)
                self.total += tr.testsRun

        failures_count = len(self.failures)
        errors_count = len(self.errors)
        skips_count = len(self.skips)
        total = self.total

        count = failures_count + errors_count

        text = [self.name]
        text2 = ""

        if not count:
            results = SUCCESS
            if total:
                text += [
                    "%d %s" % (total, total == 1 and "test" or "tests"),
                    "passed"
                ]
            else:
                text += ["no tests", "run"]
        else:
            results = FAILURE
            text.append("Total %d test(s)" % total)
            if failures_count:
                text.append("%d %s" %
                            (failures_count, failures_count == 1 and "failure"
                             or "failures"))
            if errors_count:
                text.append(
                    "%d %s" %
                    (errors_count, errors_count == 1 and "error" or "errors"))
            text2 = "%d %s" % (count, (count == 1 and 'test' or 'tests'))

        if skips_count:
            text.append("%d %s" %
                        (skips_count, skips_count == 1 and "skip" or "skips"))

        self.results = results
        self.text = text
        self.text2 = [text2]
Example #16
def do_save(args):
    """ Save xunit results. """

    (testsuite, _) = xunitparser.parse(open(args.resultfile))

    for testcase in testsuite:
        testsuite_name = testcase.classname.split(".")[0]
        test_name = testcase.methodname
        test_result = xunit_result_get(testcase)

        logging.info("test %s.%s %s", testsuite_name, test_name, test_result)
        result.api.set_result(args.context, args.product, args.branch,
                              args.build, testsuite_name, test_name,
                              test_result, args.testkeys)
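
The xunit_result_get helper used above is not shown in the example. A plausible sketch based on the test-case attributes used throughout this page (hypothetical; the real helper may differ):

def xunit_result_get(testcase):
    # Map an xunitparser test case to a simple pass/fail/skip label.
    if testcase.skipped:
        return 'skip'
    return 'pass' if testcase.good else 'fail'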
Example #17
    def _parse_output(self, output_file):
        with open(output_file) as output_fd:
            _, tr = xunitparser.parse(output_fd)
            return {
                "total": tr.testsRun,
                "failure or error": len(tr.errors) + len(tr.failures),
                "score": float(tr.testsRun - (len(tr.errors) + len(tr.failures))) / tr.testsRun * 100,
                "output_file": output_file,
            }
Example #18
def get_nose_xml_report(path):
    """
    Get the report from the xml nosetests report
    """
    report = xunitparser.parse(open(path))[1]
    errors = len(report.errors)
    failures = len(report.failures)
    return {
        'total': report.testsRun,
        'errors': errors,
        'failures': failures,
        'successful': report.testsRun - errors - failures,
        'is_successful': report.wasSuccessful(),
    }
Example #19
def get_nose_xml_report(path):
    """
    Get the report from the xml nosetests report
    """
    report = xunitparser.parse(open(path))[1]
    errors = len(report.errors)
    failures = len(report.failures)
    return {
        'total': report.testsRun,
        'errors': errors,
        'failures': failures,
        'successful': report.testsRun - errors - failures,
        'is_successful': report.wasSuccessful(),
    }
Example #20
def do_save(args):
    """ Save xunit results. """

    (testsuite, _) = xunitparser.parse(open(args.resultfile))

    for testcase in testsuite:
        testsuite_name = testcase.classname.split(".")[0]
        test_name = testcase.methodname
        test_result = xunit_result_get(testcase)

        logging.info("test %s.%s %s", testsuite_name, test_name, test_result)
        result.api.set_result(args.context, args.product, args.branch,
                              args.build, testsuite_name, test_name,
                              test_result, args.testkeys)
Example #21
    def process(self, params, arg_data):
        for param in ['item', 'type', 'checkname', 'file']:
            if param not in params:
                raise CheckbDirectiveError('Mandatory parameter missing: %s' %
                                           param)
        item = params['item']
        itemtype = params['type']
        checkname = params['checkname']
        xunitfile = params['file']

        aggregation = params.get('aggregation', 'allpass')

        if aggregation not in self.aggregations:
            raise CheckbDirectiveError(
                "Aggregation '%s' is not one of: %s" %
                (aggregation, ', '.join(self.aggregations)))

        with open(xunitfile) as xmlfile:
            testsuite, testresult = xunitparser.parse(xmlfile)

        if aggregation == 'none':
            details = []
            for tc in testsuite:
                outcome = 'PASSED' if tc.good else 'FAILED'
                details.append(
                    CheckDetail(item=item,
                                checkname="%s.%s" % (checkname, tc.methodname),
                                report_type=itemtype,
                                outcome=outcome,
                                artifact=xunitfile))
            return export_YAML(details)

        elif aggregation == 'allpass':
            passed = len([tc for tc in testsuite if tc.good])
            failed = len([tc for tc in testsuite if not tc.good])
            final_outcome = 'PASSED' if failed == 0 else 'FAILED'
            note = CheckDetail.create_multi_item_summary(['PASSED'] * passed +
                                                         ['FAILED'] * failed)

            return export_YAML(
                CheckDetail(item=item,
                            report_type=itemtype,
                            outcome=final_outcome,
                            checkname=checkname,
                            note=note,
                            artifact=xunitfile))

        else:
            assert False, "This should never happen, aggregation is %r" % aggregation
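
For reference, a hypothetical parameter set for the directive above; every value is illustrative and not taken from the original project:

params = {
    'item': 'mypackage-1.0-1.fc30',  # hypothetical item under test
    'type': 'koji_build',            # hypothetical item type
    'checkname': 'mycheck',
    'file': 'results.xml',
    'aggregation': 'allpass',        # or 'none' for one detail per test case
}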
Example #22
    def processReport(self, suitFile):
        ts, tr = parse(suitFile)
        suit = self.testSuit(ts, tr)
        self.listOfTestSuits.append(suit)
        for tc in ts:
            if tc.result.lower() in ('failure', 'error'):
                self.listOfFailedTests.append(tc)
                suit.failedTests.append(tc)
                self.failedTests += 1
            elif tc.skipped:
                self.listOfSkippedTests.append(tc)
                suit.skippedTests.append(tc)
                self.skippedTests += 1
            elif tc.success:
                self.listOfPassedTests.append(tc)
                suit.passedTests.append(tc)
                self.passedTests += 1
Example #23
def report_results(xml_filename):
    xunitparser = None
    try:
        import xunitparser
    except ImportError as e:
        sys.stderr.write("xunitparser module not available, results report not available\n")
        sys.stderr.write("Import error was: %s\n" % repr(e))

    if xunitparser is not None:
        ts, tr = xunitparser.parse(open(xml_filename))
        report = {'skipped': len(tr.skipped), 'failed': len(tr.failures), 'errored': len(tr.errors), 'all': tr.testsRun}

        print('Test Report:', report)
        if report['failed'] or report['errored'] or not report['all']:
            return 1

        return 0
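
Since report_results returns a shell-style status code, a natural call site might be (illustrative):

import sys
sys.exit(report_results('nosetests.xml'))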
Example #24
def get_test_stats(junit_xml='reports/junit/junit.xml'  # type: str
                   ):
    # type: (...) -> TestStats
    """
    read the junit test file and extract the success percentage
    :param junit_xml: the junit xml file path
    :return: the success percentage (an int)
    """
    ts, tr = xunitparser.parse(open(junit_xml))
    skipped = len(tr.skipped)
    runned = tr.testsRun - skipped
    failed = len(tr.failures)
    errors = len(tr.errors)
    success = runned - failed

    success_percentage = round(success * 100 / (runned + errors))

    return TestStats(success_percentage, success, runned, skipped, errors)
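
TestStats itself is not shown in this example. Assuming it is a plain namedtuple whose fields match the positional arguments above, a compatible sketch (the real definition in the source project may differ):

from collections import namedtuple

# Hypothetical definition matching the call above.
TestStats = namedtuple('TestStats',
                       ['success_percentage', 'success', 'runned', 'skipped', 'errors'])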
Example #25
def main(xunit, origin):
    with open(xunit, "r") as fh:
        ts, tr = xunitparser.parse(fh)

    # Get test results for each module
    modules = {}
    other_testcases = []
    for tc in ts:
        if tc.classname.startswith("weboob."):
            other_testcases.append(repr(tc))
            continue
        module = tc.classname.split(".")[0]
        # In the following, we consider
        # bad > skipped > good
        # and only update a module's status according to this order
        if tc.good:
            if tc.skipped:
                # Set to skipped only if previous test was good
                if module not in modules or modules[module] == "good":
                    modules[module] = "skipped"
            else:
                # Set to good only if no previous result
                if module not in modules:
                    modules[module] = "good"
        else:
            # Always set to bad on failed test
            modules[module] = "bad"
    # Aggregate results by test result rather than by module
    results = {
        "good": [],
        "bad": [],
        "skipped": []
    }
    for module in modules:
        results[modules[module]].append(module)
    return {
        "origin": origin,
        "modules": results,
        "others": other_testcases
    }
Example #26
    def getParsedTCResultInfo(self):
        try:
            self.__logger.debug(
                "=== Test Results Folder : %s===" %
                self.__xunitOutFolder)
            test_suites = []
            if self.__xunitOutFolder:
                if os.path.isdir(self.__xunitOutFolder):
                    for item in os.listdir(self.__xunitOutFolder):
                        if os.path.isfile(self.__xunitOutFolder + "/" + item) and item.startswith("test") and item.endswith("xml"):
                            test_suites.append(item)
                for report in test_suites:
                    self.__logger.debug(
                        "==== Retrieving Test Results Information for Test Suite:%s ====" %
                        str(report))
                    with open(self.__xunitOutFolder + "/" + report) as f:
                        self.__ts, self.__tr = parse(f)
                    for tc in self.__ts:
                        if tc and (tc.result.lower() in ['failure', 'error']):
                            if tc.classname and tc.methodname:
                                key = tc.classname + "." + tc.methodname
                                self.__parsedTCResultDict[key] = [tc.result,
                                                                  MiscHandler.compressString(tc.message),
                                                                  MiscHandler.compressString(tc.trace)]
            if not self.__parsedTCResultDict:
                self.__logger.debug(
                    "\n======No Failed or Error Cases under : %s====" %
                    str(self.__xunitOutFolder))
                return Codes.FAILED
            self.__logger.debug(
                "==== Total Failed and Error Cases:%s ====" %
                len(self.__parsedTCResultDict))
            return self.__parsedTCResultDict
        except Exception as e:
            self.__logger.debug(
                "\nParsing Xunit Test Output Failed : %s" %
                Codes.GetDetailExceptionInfo(e))
            return Codes.FAILED
Example #27
def get_test_results(result_dir, only_failed):
    """
    Returns the list of the testcases in the given directory
    :param result_dir: The directory where the junit xml-s are sitting
    :param only_failed: Collect only failed results
    :return: the list of the failed test_cases
    """
    logging.debug("get_test_results(%s, %s)", result_dir, only_failed)

    result = []
    for root, dirs, files in os.walk(result_dir):
        for filename in files:
            logging.debug("Checking file: %s/%s", root, filename)
            if filename.endswith(".xml"):
                test_suite, test_result = xunitparser.parse(
                    open(os.path.join(root, filename)))
                for test_case in test_suite:
                    if not test_case.good or not only_failed:
                        logging.info("Test case data to process: %s %s",
                                     test_case.classname, test_case.methodname)
                        test_case.suite = test_suite
                        result.append(test_case)
    return result
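
A possible invocation of the collector above; the directory name is illustrative:

failed_cases = get_test_results('artifacts/results', only_failed=True)
for case in failed_cases:
    print(case.classname, case.methodname, case.result)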
Example #28
def report_results(xml_filename):
    xunitparser = None
    try:
        import xunitparser
    except ImportError as e:
        sys.stderr.write(
            "xunitparser module not available, results report not available\n")
        sys.stderr.write("Import error was: %s\n" % repr(e))

    if xunitparser is not None:
        ts, tr = xunitparser.parse(open(xml_filename))
        report = {
            'skipped': len(tr.skipped),
            'failed': len(tr.failures),
            'errored': len(tr.errors),
            'all': tr.testsRun
        }

        print('Test Report:', report)
        if report['failed'] or report['errored'] or not report['all']:
            return 1

        return 0
Example #29
def main(artifact_dir: str, slack_hook: str, slack_channels: list, pipeline_type: str, job_name: str, job_url: str):
    filename = os.path.join(artifact_dir, JUNIT_TEST_RESULTS_FILE)
    _, tr = xunitparser.parse(open(filename))

    message = "*{}* BDD tests run.\n*{}* failures found.".format(tr.testsRun, len(tr.failures))
    # Add test report url
    message += "\n\nGo here for full test report: {}\n\n".format(job_url)

    if len(tr.failures) > 0:
        message += "\nIt failed on the following tests:\n"
        for failure in tr.failures:
            test_info = "{}".format(failure[0])
            # cuts the test_ part and the class name since it's generated by the runner
            test_info = test_info.split('test_')[1]
            test_info = test_info.split(" ")[0]
            # Split the namespace and the test name
            test_info = test_info.split('__')
            test_module = test_info[0]
            test_name = test_info[1]

            message += "*{} ({})*\n".format(test_name, test_module)

    job_status = (len(tr.failures) <= 0)
    send_slack_message(slack_hook, slack_channels, pipeline_type, "*Test Results for {}:*".format(job_name), job_status, message)
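
As an illustrative walk-through of the name parsing above, assuming runner-generated labels of the form 'test_<module>__<name> (SomeClass)':

info = "test_mymodule__my_case (tests.Generated)"  # hypothetical label
info = info.split('test_')[1]    # -> "mymodule__my_case (tests.Generated)"
info = info.split(' ')[0]        # -> "mymodule__my_case"
module, name = info.split('__')  # -> ("mymodule", "my_case")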
Example #30
    def get_xunit_test_suite(self):
        with open(self.xunit_report) as f:
            ts, tr = xunitparser.parse(f)
            return ts, tr
Example #31
def buildTestReport(templateEnv):
    unstableTests = []
    testResults = {}

    try:
        with open('UnstableTests.txt') as unstableFile:
            for line in unstableFile:
                unstableTests.append(line.strip())
    except:
        print("Was not able to open list of unstable tests")

    for kind, directory in [('base', './MasterUnitTests/'), ('test', './LatestUnitTests/')]:
        print("Scanning directory %s" % directory)
        for xunitFile in glob.iglob(directory + '*/nosetests-*.xml'):
            print("Opening file %s" % xunitFile)
            with open(xunitFile) as xf:
                ts, tr = xunitparser.parse(xf)
                for tc in ts:
                    testName = '%s:%s' % (tc.classname, tc.methodname)
                    if testName in testResults:
                        testResults[testName].update({kind: tc.result})
                    else:
                        testResults[testName] = {kind: tc.result}

    failed = False
    errorConditions = ['error', 'failure']

    newFailures = []
    unstableChanges = []
    okChanges = []
    added = []
    deleted = []

    for testName, testResult in sorted(testResults.items()):
        oldStatus = testResult.get('base', None)
        newStatus = testResult.get('test', None)
        if oldStatus and newStatus and testName in unstableTests:
            if oldStatus != newStatus:
                unstableChanges.append({'name': testName, 'new': newStatus, 'old': oldStatus})
        elif oldStatus and newStatus:
            if oldStatus != newStatus:
                if newStatus in errorConditions:
                    failed = True
                    newFailures.append({'name': testName, 'new': newStatus, 'old': oldStatus})
                else:
                    okChanges.append({'name': testName, 'new': newStatus, 'old': oldStatus})
        elif newStatus:
            added.append({'name': testName, 'new': newStatus, 'old': oldStatus})
            if newStatus in errorConditions:
                failed = True
        elif oldStatus:
            deleted.append({'name': testName, 'new': newStatus, 'old': oldStatus})

    changed = newFailures or added or deleted or unstableChanges or okChanges
    stableChanged = newFailures or added or deleted or okChanges

    unitTestSummaryTemplate = templateEnv.get_template(unitTestSummaryFile)
    unitTestSummaryHTML = unitTestSummaryTemplate.render({'newFailures': newFailures,
                                                          'added': added,
                                                          'deleted': deleted,
                                                          'unstableChanges': unstableChanges,
                                                          'okChanges': okChanges,
                                                          'errorConditions': errorConditions,
                                                          })

    unitTestSummary = {'newFailures': len(newFailures), 'added': len(added), 'deleted': len(deleted),
                       'okChanges': len(okChanges), 'unstableChanges': len(unstableChanges)}
    print("Unit Test summary %s" % unitTestSummary)
    return failed, unitTestSummaryHTML, unitTestSummary
Example #32
unstableTests = []
testResults = {}  # used below; defined earlier in the full script

try:
    with open('code/test/etc/UnstableTests.txt') as unstableFile:
        for line in unstableFile:
            unstableTests.append(line.strip())
except:
    print("Was not able to open list of unstable tests")

# Parse all the various nose xunit test reports looking for changes

for kind, directory in [('base', './MasterUnitTests/'), ('test', './LatestUnitTests/')]:
    for xunitFile in glob.iglob(directory + '*/nosetests-*.xml'):

        ts, tr = xunitparser.parse(open(xunitFile))
        for tc in ts:
            testName = '%s:%s' % (tc.classname, tc.methodname)
            if testName in testResults:
                testResults[testName].update({kind: tc.result})
            else:
                testResults[testName] = {kind: tc.result}

# Generate a Github report of any changes found

issueID = None

if 'ghprbPullId' in os.environ:
    issueID = os.environ['ghprbPullId']
    mode = 'PR'
elif 'TargetIssueID' in os.environ:
Example #33
            no = 1
            result_colors = {
                'success': 'lime',
                'failure': 'red',
                'error': 'yellow',
                'skipped': 'grey',
            }
            if self.__xunitOutFolder:
                if os.path.isdir(self.__xunitOutFolder):
                    for item in os.listdir(self.__xunitOutFolder):
                        if os.path.isfile(self.__xunitOutFolder + "/" + item) and item.startswith("test") and item.endswith("xml"):
                            test_suites.append(item)
                for report in test_suites:
                    with open(self.__xunitOutFolder + "/" + report) as f:
                        self.__ts, self.__tr = parse(f)
                    suite_file = report.split('.xml')[0]
                    suite_file_path = self.__tcRunPath + "/" + suite_file
                    os.mkdir(suite_file_path)
                    if self.deCompress(suite_file_path + ".zip", suite_file_path) == Codes.SUCCESS:
                        for root, dirs, files in os.walk(suite_file_path):
                            if 'runinfo.txt' in files:
                                run_info = suite_file_path + "/" + "runinfo.txt"
                                break
                            else:
                                return Codes.FAILED
                    pattern = 'TestCaseName:.*Result'
                    run_times = self.grepStartEndTimes(pattern, run_info)
                    # if run_times == Codes.FAILED:
                    #     print "\nFailed to get the start and end times from runinfo.txt"
                    #     return Codes.FAILED
Example #34
pb = PlayBook(playbook='run_tests.yml', inventory=inv,
              stats=stats, runner_callbacks=runner_cb, callbacks=playbook_cb,
              extra_vars=extra_vars,
              remote_user=remote_user,
              private_key_file=private_key_file)
pb.run()


os.system('find %s -name *tar.gz -exec tar -C %s -xzf {} \;' % (build_dir, build_dir))

failed = []
skipped = []
executed = 0
tests_durations = {}
for report in junit_reports(build_dir):
    ts, tr = xunitparser.parse(open(report))
    skipped += filter(lambda tc: tc.skipped, ts)
    failed += filter(lambda tc: not tc.good, ts)
    executed += len([tc for tc in ts])
    name = ts.name[ts.name.rindex('.') + 1:]
    tests_durations[name] = tr.time.total_seconds()

balancer = new_balancer()
balancer.update_stats(tests_durations)

print "Tests run: %s\tFailures: %s\tSkipped: %s" % (executed, len(failed), len(skipped))

if failed:
    print "\nFailed tests:"
    for tc in failed:
        print "  " + tc.methodname + "(" + tc.classname + ")"
Example #35
    def load_xunit_results(self):
        if self.xunitFile is not None and os.path.isfile(self.xunitFile):
            if xunitparser is not None:
                with open(self.xunitFile, 'r') as xunit:
                    self.test_suite, self.test_result = xunitparser.parse(xunit)
Example #36
unstableTests = []
testResults = {}  # containers used below; defined earlier in the full script

try:
    with open('code/test/etc/UnstableTests.txt') as unstableFile:
        for line in unstableFile:
            unstableTests.append(line.strip())
except:
    print("Was not able to open list of unstable tests")

# Parse all the various nose xunit test reports looking for changes
filePattern = '*/nosetests-*.xml'
if len(sys.argv) == 2:
    filePattern = "*/%s-*.xml" % sys.argv[1]
for kind, directory in [('base', './MasterUnitTests/'),
                        ('test', './LatestUnitTests/')]:
    for xunitFile in glob.iglob(directory + filePattern):

        ts, tr = xunitparser.parse(open(xunitFile))
        for tc in ts:
            testName = '%s:%s' % (tc.classname, tc.methodname)
            if testName in testResults:
                testResults[testName].update({kind: tc.result})
            else:
                testResults[testName] = {kind: tc.result}

# Generate a Github report of any changes found

issueID, mode = None, None

if 'ghprbPullId' in os.environ:
    issueID = os.environ['ghprbPullId']
    mode = 'PR'
elif 'TargetIssueID' in os.environ:
Example #37
def buildTestReport(templateEnv):
    print("Evaluating base/test unit tests report files")
    unstableTests = []
    testResults = {}

    try:
        with open('UnstableTests.txt') as unstableFile:
            for line in unstableFile:
                unstableTests.append(line.strip())
    except:
        print("Was not able to open list of unstable tests")

    for kind, directory in [('base', './MasterUnitTests/'),
                            ('test', './LatestUnitTests/')]:
        print("Scanning directory %s" % directory)
        for xunitFile in glob.iglob(directory + '*/nosetests-*.xml'):
            print("Opening file %s" % xunitFile)
            with open(xunitFile) as xf:
                ts, tr = xunitparser.parse(xf)
                for tc in ts:
                    testName = '%s:%s' % (tc.classname, tc.methodname)
                    if testName in testResults:
                        testResults[testName].update({kind: tc.result})
                    else:
                        testResults[testName] = {kind: tc.result}

    failed = False
    errorConditions = ['error', 'failure']

    newFailures = []
    unstableChanges = []
    okChanges = []
    added = []
    deleted = []

    for testName, testResult in sorted(testResults.items()):
        oldStatus = testResult.get('base', None)
        newStatus = testResult.get('test', None)
        if oldStatus and newStatus and testName in unstableTests:
            if oldStatus != newStatus:
                unstableChanges.append({
                    'name': testName,
                    'new': newStatus,
                    'old': oldStatus
                })
        elif oldStatus and newStatus:
            if oldStatus != newStatus:
                if newStatus in errorConditions:
                    failed = True
                    newFailures.append({
                        'name': testName,
                        'new': newStatus,
                        'old': oldStatus
                    })
                else:
                    okChanges.append({
                        'name': testName,
                        'new': newStatus,
                        'old': oldStatus
                    })
        elif newStatus:
            added.append({
                'name': testName,
                'new': newStatus,
                'old': oldStatus
            })
            if newStatus in errorConditions:
                failed = True
        elif oldStatus:
            deleted.append({
                'name': testName,
                'new': newStatus,
                'old': oldStatus
            })

    unitTestSummaryTemplate = templateEnv.get_template(unitTestSummaryFile)
    unitTestSummaryHTML = unitTestSummaryTemplate.render({
        'newFailures': newFailures,
        'added': added,
        'deleted': deleted,
        'unstableChanges': unstableChanges,
        'okChanges': okChanges,
        'errorConditions': errorConditions,
    })

    unitTestSummary = {
        'newFailures': len(newFailures),
        'added': len(added),
        'deleted': len(deleted),
        'okChanges': len(okChanges),
        'unstableChanges': len(unstableChanges)
    }
    print("Unit Test summary %s" % unitTestSummary)
    return failed, unitTestSummaryHTML, unitTestSummary
Example #38
import xunitparser


ts, tr = xunitparser.parse(open("report.xml"))

for tc in ts:
    print(tc.classname, tc.methodname)
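
A slightly fuller sketch of the same minimal usage, exercising the attributes that recur throughout these examples (tc.result, tc.good, tr.testsRun, tr.wasSuccessful()); the report path is illustrative:

import xunitparser

with open("report.xml") as fh:
    ts, tr = xunitparser.parse(fh)

for tc in ts:
    # result is 'success', 'failure', 'error' or 'skipped';
    # good is True for successful or skipped test cases.
    print(tc.classname, tc.methodname, tc.result, tc.good)

print("ran %d tests, success=%s" % (tr.testsRun, tr.wasSuccessful()))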

Example #39
    def __get_reports__(self):
        files = [f for f in os.listdir(self.path) if os.path.isfile(os.path.join(self.path, f))]
        return [xunitparser.parse(open(os.path.join(self.path, f)))[0] for f in files if re.match(".*xml$", f)]
Example #40
def buildUnitTestReport(templateEnv, pyName="Python2"):
    """
    Builds the python2/python3 unit test report
    :param templateEnv: string with the name of the jinja template
    :param pyName: string with either a Python2 or Python3 value
    :return:
    """
    if pyName not in ("Python2", "Python3"):
        print("Actually, you passed an invalid python name argument!")
        raise RuntimeError()

    print("Evaluating base/test {} unit tests report files".format(pyName))
    unstableTests = []
    testResults = {}

    try:
        with open('UnstableTests.txt') as unstableFile:
            for line in unstableFile:
                unstableTests.append(line.strip())
    except:
        print("Was not able to open list of unstable tests")

    filePattern = '*/nosetests-*.xml' if pyName == "Python2" else '*/nosetestspy3-*.xml'
    for kind, directory in [('base', './MasterUnitTests/'),
                            ('test', './LatestUnitTests/')]:
        print("Scanning directory %s" % directory)
        for xunitFile in glob.iglob(directory + filePattern):
            print("Opening file %s" % xunitFile)
            with open(xunitFile) as xf:
                ts, tr = xunitparser.parse(xf)
                for tc in ts:
                    testName = '%s:%s' % (tc.classname, tc.methodname)
                    if testName in testResults:
                        testResults[testName].update({kind: tc.result})
                    else:
                        testResults[testName] = {kind: tc.result}
    if not testResults:
        print("No unit test results found!")
        raise RuntimeError()

    failed = False
    errorConditions = ['error', 'failure']

    newFailures = []
    unstableChanges = []
    okChanges = []
    added = []
    deleted = []

    for testName, testResult in sorted(testResults.items()):
        oldStatus = testResult.get('base', None)
        newStatus = testResult.get('test', None)
        if oldStatus and newStatus and testName in unstableTests:
            if oldStatus != newStatus:
                unstableChanges.append({
                    'name': testName,
                    'new': newStatus,
                    'old': oldStatus
                })
        elif oldStatus and newStatus:
            if oldStatus != newStatus:
                if newStatus in errorConditions:
                    failed = True
                    newFailures.append({
                        'name': testName,
                        'new': newStatus,
                        'old': oldStatus
                    })
                else:
                    okChanges.append({
                        'name': testName,
                        'new': newStatus,
                        'old': oldStatus
                    })
        elif newStatus:
            added.append({
                'name': testName,
                'new': newStatus,
                'old': oldStatus
            })
            if newStatus in errorConditions:
                failed = True
        elif oldStatus:
            deleted.append({
                'name': testName,
                'new': newStatus,
                'old': oldStatus
            })

    unitTestSummaryTemplate = templateEnv.get_template(unitTestSummaryFile)
    unitTestSummaryHTML = unitTestSummaryTemplate.render({
        'whichPython': pyName,
        'newFailures': newFailures,
        'added': added,
        'deleted': deleted,
        'unstableChanges': unstableChanges,
        'okChanges': okChanges,
        'errorConditions': errorConditions,
    })

    unitTestSummary = {
        'newFailures': len(newFailures),
        'added': len(added),
        'deleted': len(deleted),
        'okChanges': len(okChanges),
        'unstableChanges': len(unstableChanges)
    }
    print("{} Unit Test summary {}".format(pyName, unitTestSummary))
    return failed, unitTestSummaryHTML, unitTestSummary
Example #41
#!/usr/bin/env python
import sys
import re
import xunitparser
import pprint

ts, tr = xunitparser.parse(open(sys.argv[1]))

for tc in ts:
    if not tc.good:
        filename, lineno = '?', '?'
        if tc.trace:
            for line in tc.trace.split('\n'):
                if tc.classname in line:
                    filename, lineno = re.match(r'.*\((.*):(.*)\)', line).group(1, 2)
                    break

        print('"%(filename)s", line %(lineno)s: %(message)s' % {
            'filename': filename,
            'lineno': lineno,
            'message': tc.message
        })
Example #42
from __future__ import print_function

import sys
import os
import os.path as path
import xunitparser

directory = sys.argv[1]
failure_count = 0
test_count = 0

for filename in os.listdir(directory):
    if filename.endswith('.xml') and not filename.endswith('TestSuites.xml'):
        with open(path.join(directory, filename)) as f:
            suite, tr = xunitparser.parse(f)
            test_count += len(list(suite))

            failures = [testcase for testcase in suite if not testcase.good]

            for testcase in failures:
                print('%s: Class %s, method %s' % (testcase.result.upper(), testcase.classname, testcase.methodname))
                print(testcase.trace)

            failure_count += len(failures)


print(test_count, "tests were run")

if failure_count:
    print(failure_count, 'TESTS FAILED')
Example #43
    def setUp(self):
        with open(self.FILENAME) as f:
            # the lib already does some sanity checks;
            # passing this is already a good test in itself
            self.ts, self.tr = parse(f)
Example #44
    def add_report_file(self, reportFile):
        suite, results = xunitparser.parse(reportFile)
        self._columnSuites[self._currentColumn].append(suite)
Example #45
colors = {
	'red': u'\u001b[31m',
	'green': u'\u001b[32m',
	'yellow': u'\u001b[33m',
	'reset': u'\u001b[0m',
}
characters = {
	'ellipsis': u'\u2026',
	'checkmark': u'\u2714',
	'exclamation': u'\u2757',
	'cancel': u'\u2717',
}

ts, tr = xunitparser.parse(open(sys.argv[1]))

summary = {
	'good': 0,
	'skip': 0,
	'fail': 0,
	'total': 0,
}

print("------------------------------")
print("")
for tc in ts:
	summary['total'] += 1
	if tc.success:
		result = colors['green'] + characters['checkmark']
		summary['good'] += 1
Example #46
for project_directory in os.walk(directory).next()[1]:
    tests_path = path.join(directory, project_directory, 'build/test-results/test')

    if not path.isdir(tests_path):
        # print('Ignoring %s as it does not exist' % tests_path)
        continue

    print('Processing %s' % tests_path)

    test_count = 0
    failure_count = 0

    for entry in os.walk(tests_path).next()[2]:
        if entry.endswith('.xml') and not entry.endswith('TestSuites.xml'):
            with open(path.join(tests_path, entry)) as f:
                suite, tr = xunitparser.parse(f)

                failures = [testcase for testcase in suite if not testcase.good]

                for testcase in failures:
                    print(
                        '%s: Class %s, method %s' % (testcase.result.upper(), testcase.classname, testcase.methodname))
                    print(testcase.trace)

                test_count += len(list(suite))
                failure_count += len(failures)
        # else:
        #     print('Ignoring %s' % entry)

    print('Found %d tests with %d failures' % (test_count, failure_count))
Example #47
        devices_info.json()['devices'][i]['serial']),
    print >> fp, '  NOS: {}'.format(devices_info.json()['devices'][i]['nos']),
    print >> fp, '  Software: {}'.format(
        devices_info.json()['devices'][i]['sw'])
print >> fp, ''

xml_files = [f for f in listdir(dst_xml_dir) if isfile(join(dst_xml_dir, f))]
xml_files.sort()

# print xml_files
print >> fp, '<<< Test Result >>>'

result = {'pass': 0, 'error': 0, 'failure': 0, 'check': 0}

for xml_file in xml_files:
    ts, tr = xunitparser.parse(open(dst_xml_dir + xml_file))

    if len(tr.errors) == 0 and len(tr.failures) == 0:
        test_result = 'PASS'
        result['pass'] = result['pass'] + 1
    elif len(tr.errors) != 0:
        test_result = 'ERROR'
        result['error'] = result['error'] + 1
    elif len(tr.failures) != 0:
        test_result = 'FAILURE'
        result['failure'] = result['failure'] + 1
    else:
        test_result = 'CHECK'
        result['check'] = result['check'] + 1

    for tc in ts:
Example #48
    def load_xunit_results(self):
        if self.xunitFile is not None and os.path.isfile(self.xunitFile):
            if xunitparser is not None:
                with open(self.xunitFile, 'r') as xunit:
                    self.test_suite, self.test_result = xunitparser.parse(xunit)
Example #49
    except Exception, e:
        print 'Error %s' % e
        return None
    else:
        if len(output.splitlines()) > 1:
            return None

        print output
        return output.strip()

output = file('disable.txt', 'w')

for fname in os.listdir('.'):
    if fname.endswith('.xml'):
        try:
            ts, tr = xunitparser.parse(open(fname))
            for test in ts._tests:
                if not test.success:
                    test_file = '/'.join(test.classname.split('.')[:-1]) + '.py'
                    test_method = test.methodname

                    if '/' not in test_file:
                        full_file = get_full_path_for_file(test_file)
                        if full_file:
                            test_file = full_file

                    output.write('%s,%s\n' % (test_file, test_method))
        except Exception, e:
            print 'Failed %s %s' % (fname, e)
        else:
            print 'Processed %s' % fname
Example #50
    def setUp(self):
        with open(os.path.join('tests', self.FILENAME)) as f:
            # the lib already does some sanity checks;
            # passing this is already a good test in itself
            self.ts, self.tr = parse(f)
Example #51
    def run_repo_suite(self):
        subprocess.check_call(["nosetests", "--with-xunit", "--with-duvetcover", "--duvet-skip"])
        ts, tr = xunitparser.parse(open("nosetests.xml"))
        return sum(1 for tc in ts if tc.good)