Example #1
def write_output(target: str, results: list, min_cvss: int) -> None:
    """Write scan results in junitxml format"""

    suite = TestSuite(f"{target}")

    no_vulns: list = [
        {
            "Results": "No vulnerabilities."
        },
        {
            "Results": f"No vulnerabilities >= the min CVSS score {min_cvss}."
        },
    ]

    for result in results:
        if result not in no_vulns:
            test_case = TestCase(result["Vulnerable Library"])
            test_case.name = (result["Vulnerable Library"] + " - " +
                              result["Vulnerability"] + " - " + "CVSS " +
                              str(result["CVSS"]))
            test_case.result = [Failure(result)]
        else:
            test_case = TestCase("No vulnerabilities")
            test_case.result = result

        suite.add_testcase(test_case)

    xml = JUnitXml()
    xml.add_testsuite(suite)
    xml.write("test-output.xml")
Example #2
def test_write_noarg(self):
    suite1 = TestSuite()
    suite1.name = 'suite1'
    case1 = TestCase()
    case1.name = 'case1'
    suite1.add_testcase(case1)
    result = JUnitXml()
    result.add_testsuite(suite1)
    with self.assertRaises(JUnitXmlError):
        result.write()
Example #3
def main():
    args = parse_args()

    github_token = ''
    gh = None
    if args.github:
        github_token = os.environ['GH_TOKEN']
        gh = Github(github_token)

    if args.status and args.sha is not None and args.repo and gh:
        set_status(gh, args.repo, args.sha)
        sys.exit(0)

    if not args.commits:
        sys.exit(1)

    suite = TestSuite("Compliance")
    docs = {}
    for Test in ComplianceTest.__subclasses__():
        t = Test(suite, args.commits)
        t.run()
        suite.add_testcase(t.case)
        docs[t.case.name] = t._doc

    xml = JUnitXml()
    xml.add_testsuite(suite)
    xml.update_statistics()
    xml.write('compliance.xml')

    if args.github:
        repo = gh.get_repo(args.repo)
        pr = repo.get_pull(int(args.pull_request))
        commit = repo.get_commit(args.sha)

        comment = "Found the following issues, please fix and resubmit:\n\n"
        comment_count = 0
        print("Processing results...")
        for case in suite:
            if case.result and case.result.type != 'skipped':
                comment_count += 1
                comment += ("## {}\n".format(case.result.message))
                comment += "\n"
                if case.name not in ['Gitlint', 'Identity/Emails', 'License']:
                    comment += "```\n"
                comment += ("{}\n".format(case.result._elem.text))
                if case.name not in ['Gitlint', 'Identity/Emails', 'License']:
                    comment += "```\n"

                commit.create_status('failure',
                                     docs[case.name],
                                     'Verification failed',
                                     '{}'.format(case.name))
Example #4
def generate_junitxml_merged_report(test_results_dir):
    """
    Merge all junitxml generated reports in a single one.
    :param test_results_dir: output dir containing the junitxml reports to merge.
    """
    merged_xml = JUnitXml()
    for dirpath, _, files in os.walk(test_results_dir):
        for file in files:
            if file.endswith("results.xml"):
                merged_xml += JUnitXml.fromfile(os.path.join(dirpath, file))

    merged_xml.write("{0}/test_report.xml".format(test_results_dir), pretty=True)
Example #5
def gen_results_summary(results_dir,
                        output_fn=None,
                        merge_fn=None,
                        verbose=False,
                        print_section=False,
                        results_file='results.xml'):
    """Scan a results directory and generate a summary file"""
    reports = []
    combined = JUnitXml()
    nr_files = 0
    out_f = sys.stdout

    for filename in get_results(results_dir, results_file):
        reports.append(JUnitXml.fromfile(filename))

    if len(reports) == 0:
        return 0

    if output_fn is not None:
        out_f = open(output_fn, "w")

    props = copy.deepcopy(reports[0].child(Properties))

    ltm = check_for_ltm(results_dir, props)

    print_header(out_f, props)

    sort_by = lambda ts: parse_timestamp(ts.timestamp)
    if ltm:
        sort_by = lambda ts: ts.hostname

    if total_tests(reports) < 30:
        verbose = True

    for testsuite in sorted(reports, key=sort_by):
        print_summary(out_f, testsuite, verbose, print_section)
        combined.add_testsuite(testsuite)
        nr_files += 1

    out_f.write('Totals: %d tests, %d skipped, %d failures, %d errors, %ds\n'
                % sum_testsuites(reports))

    print_trailer(out_f, props)

    if merge_fn is not None:
        combined.update_statistics()
        combined.write(merge_fn + '.new')
        if os.path.exists(merge_fn):
            os.rename(merge_fn, merge_fn + '.bak')
        os.rename(merge_fn + '.new', merge_fn)

    return nr_files
Example #6
def test_read_written_xml(self):
    suite1 = TestSuite()
    suite1.name = "suite1"
    case1 = TestCase()
    case1.name = "用例1"
    suite1.add_testcase(case1)
    result = JUnitXml()
    result.add_testsuite(suite1)
    result.write(self.tmp)
    xml = JUnitXml.fromfile(self.tmp)
    suite = next(iter(xml))
    case = next(iter(suite))
    self.assertEqual(case.name, "用例1")
Example #7
def test_write_nonascii(self):
    suite1 = TestSuite()
    suite1.name = "suite1"
    case1 = TestCase()
    case1.name = "用例1"
    suite1.add_testcase(case1)
    result = JUnitXml()
    result.add_testsuite(suite1)
    result.write(self.tmp)
    with open(self.tmp, encoding="utf-8") as f:
        text = f.read()
    self.assertIn("suite1", text)
    self.assertIn("用例1", text)
Example #8
def test_write_pretty(self):
    suite1 = TestSuite()
    suite1.name = 'suite1'
    case1 = TestCase()
    case1.name = '用例1'
    suite1.add_testcase(case1)
    result = JUnitXml()
    result.add_testsuite(suite1)
    result.write(self.tmp, pretty=True)
    xml = JUnitXml.fromfile(self.tmp)
    suite = next(iter(xml))
    case = next(iter(suite))
    self.assertEqual(case.name, '用例1')
Example #9
def test_write(self):
    suite1 = TestSuite()
    suite1.name = 'suite1'
    case1 = TestCase()
    case1.name = 'case1'
    suite1.add_testcase(case1)
    result = JUnitXml()
    result.add_testsuite(suite1)
    result.write(self.tmp)
    with open(self.tmp) as f:
        text = f.read()
    self.assertIn('suite1', text)
    self.assertIn('case1', text)
Example #10
def test_write_nonascii(self):
    suite1 = TestSuite()
    suite1.name = 'suite1'
    case1 = TestCase()
    case1.name = '用例1'
    suite1.add_testcase(case1)
    result = JUnitXml()
    result.add_testsuite(suite1)
    result.write(self.tmp)
    with open(self.tmp, encoding='utf-8') as f:
        text = f.read()
    self.assertIn('suite1', text)
    self.assertIn('用例1', text)
Example #11
def write_output(target, results) -> None:
    """Write scan results in junitxml format"""

    test_case = TestCase(f"{target}")
    test_case.name = f"{target}"
    if results["Results"] != ["No SSL/TLS Violations found."]:
        test_case.result = [Failure(results)]
    else:
        test_case.result = results

    suite = TestSuite("SSLChecker")
    suite.add_testcase(test_case)

    xml = JUnitXml()
    xml.add_testsuite(suite)
    xml.write("test-output.xml")
Example #12
def merge_xml_files(test_file_pattern):
    xml_data = JUnitXml()
    staging_dir = os.environ['BUILD_ARTIFACTSTAGINGDIRECTORY']

    for test_file in glob.glob(test_file_pattern):
        xml_data += JUnitXml.fromfile(test_file)
        # Move file to harvest dir to save state and not publish the same test twice
        shutil.move(
            test_file,
            os.path.join(staging_dir, "harvest", os.path.basename(test_file)))

    # Populate the aggregate counters before checking them; a freshly
    # built JUnitXml may not have its "tests" attribute set yet.
    xml_data.update_statistics()
    if xml_data.tests > 0:
        # Merge all files into a single file for cleaner output
        output_file_name = f"test-results-{os.environ['SCENARIONAME']}-{os.environ['DISTRONAME']}.xml"
        xml_data.write(os.path.join(staging_dir, output_file_name))
    else:
        logger.info(f"No test files found for pattern: {test_file_pattern}")
Example #13
    def build(self) -> None:
        self.mainsuite = TestSuite("Drive")

        self.process_xml(self.folder / "final.xml")

        for idx in (2, 1):
            # First add the results from the reruns (suffixed with "2")
            # then the first runs, to add successes before failures.
            for results in Path(self.folder).glob(f"**/*.{idx}.xml"):
                self.process_xml(results)

        print("End of processing")
        print_suite(self.mainsuite)

        xml = JUnitXml()
        xml.add_testsuite(self.mainsuite)
        xml.write(self.folder / self.output)
Example #14
    def build(self) -> None:
        test_suite = os.getenv("TEST_SUITE", "Project")
        self.mainsuite = TestSuite(test_suite)

        # Aggregate all reports in reverse order:
        # this is important for projects using "rerun" mechanism and where
        # reports are numbered so that report-2.xml should be processed
        # before report-1.xml in order to add successes before failures.
        for report in sorted(self.folder.glob("**/*.xml"), reverse=True):
            # Skip the final report, if present
            if report.name == self.output:
                continue
            self.process_xml(report)

        print("End of processing")
        print_suite(self.mainsuite)

        xml = JUnitXml()
        xml.add_testsuite(self.mainsuite)
        xml.write(self.folder / self.output)
Example #15
def write_output(target: str, results: list) -> None:
    """Write scan results in junitxml format"""

    suite = TestSuite(f"{target}")

    for result in results:
        if result != {"Results": "No vulnerabilities."}:
            test_case = TestCase(result["Vulnerable Library"])
            test_case.name = (result["Vulnerable Library"] + " - " +
                              result["Vulnerability"] + " - " + "CVSS " +
                              str(result["CVSS"]))
            test_case.result = [Failure(result)]
        else:
            test_case = TestCase("No vulnerabilities")
            test_case.result = result

        suite.add_testcase(test_case)

    xml = JUnitXml()
    xml.add_testsuite(suite)
    xml.write('test-output.xml')
Example #16
def parse_junit(junit_dir):
    test_suite = TestSuite("Combined TestSuite")
    for junit_xml in glob.glob(os.path.join(junit_dir, "junit_*.xml")):
        if "junit_runner.xml" not in junit_xml:
            parsed = JUnitXml.fromfile(junit_xml)
            for testcase in parsed:
                if isinstance(testcase, TestSuite) or isinstance(
                        testcase.result, Skipped):
                    continue
                test_suite.add_testcase(testcase)
        os.remove(junit_xml)

    xml = JUnitXml()
    xml.add_testsuite(test_suite)
    # Update the counters before writing so the combined file carries them too.
    xml.update_statistics()
    xml.write(os.path.join(junit_dir, "junit_combined.xml"))

    test_failure_rate = 0
    if xml.tests != 0:
        # e.g. 2 failures + 1 error out of 20 tests -> ceil(300 / 20) = 15%
        test_failure_rate = int(
            math.ceil(((xml.failures + xml.errors) * 100) / xml.tests))

    return utils.generate_payload(CANARY_TEST_FAILURE_RATE, test_failure_rate)
Example #17
def create_xunit_results(suite_name, test_cases, test_run_metadata):
    """Create an xUnit result file for the test suite's executed test cases.

    Args:
        suite_name: the test suite name
        test_cases: the test cases objects
        test_run_metadata: test run meta information in dict

    Returns: None
    """
    _file = suite_name.split("/")[-1].split(".")[0]
    run_dir = test_run_metadata["log-dir"]
    run_id = test_run_metadata["run-id"]
    xml_file = f"{run_dir}/xunit.xml"
    ceph_version = test_run_metadata["ceph-version"]
    ansible_version = test_run_metadata["ceph-ansible-version"]
    distribution = test_run_metadata["distro"]
    build = test_run_metadata["build"]
    test_run_id = f"RHCS-{build}-{_file}-{run_id}".replace(".", "-")
    test_group_id = (
        f"ceph-build: {ceph_version} "
        f"ansible-build: {ansible_version} OS distro: {distribution}")
    log.info(f"Creating xUnit {_file} for test run-id {test_run_id}")

    suite = TestSuite(_file)
    for k, v in test_run_metadata.items():
        suite.add_property(k, f" {v}" if v else " --NA--")

    for tc in test_cases:
        test_name = tc["name"]
        pol_ids = tc.get("polarion-id")
        test_status = tc["status"]
        elapsed_time = tc.get("duration")

        if pol_ids:
            _ids = pol_ids.split(",")
            for _id in _ids:
                suite.add_testcase(
                    generate_test_case(
                        test_name,
                        elapsed_time,
                        test_status,
                        polarion_id=_id,
                    ))
        else:
            suite.add_testcase(
                generate_test_case(
                    test_name,
                    elapsed_time,
                    test_status,
                ))

    suite.update_statistics()

    xml = JUnitXml()
    props = Properties()
    props.append(Property(name="polarion-project-id", value="CEPH"))
    props.append(Property(name="polarion-testrun-id", value=test_run_id))
    props.append(Property(name="polarion-group-id", value=test_group_id))
    xml.append(props)
    xml.add_testsuite(suite)
    xml.write(xml_file, pretty=True)

    log.info(f"xUnit result file created: {xml_file}")
Example #18
from junitparser import TestCase, TestSuite, JUnitXml, Skipped, Error

# Create cases
case1 = TestCase('case1')
case1.result = Skipped()
case2 = TestCase('case2')
case2.result = Error('Example error message', 'the_error_type')

# Create suite and add cases
suite = TestSuite('suite1')
suite.add_property('build', '55')
suite.add_testcase(case1)
suite.add_testcase(case2)
suite.remove_testcase(case2)

# Add suite to JunitXml
xml = JUnitXml()
xml.add_testsuite(suite)
xml.write('C:/Users/RAG/Desktop/venky-python/junit.xml')
Example #19
def _main(args):
    # The "real" main(), which is wrapped to catch exceptions and report them
    # to GitHub. Returns the number of test failures.

    init_logs(args.loglevel)

    if args.list:
        for testcase in ComplianceTest.__subclasses__():
            print(testcase._name)
        return 0

    if args.status:
        set_pending()
        return 0

    if not args.commits:
        err("No commit range given")

    # Load saved test results from an earlier run, if requested
    if args.previous_run:
        if not os.path.exists(args.previous_run):
            # This probably means that an earlier pass had an internal error
            # (the script is currently run multiple times by the ci-pipelines
            # repo). Since that earlier pass might've posted an error to
            # GitHub, avoid generating a GitHub comment here, by avoiding
            # sys.exit() (which gets caught in main()).
            print("error: '{}' not found".format(args.previous_run),
                  file=sys.stderr)
            return 1

        logging.info("Loading previous results from " + args.previous_run)
        for loaded_suite in JUnitXml.fromfile(args.previous_run):
            suite = loaded_suite
            break
    else:
        suite = TestSuite("Compliance")

    for testcase in ComplianceTest.__subclasses__():
        test = testcase(suite, args.commits)
        if args.module:
            if test._name not in args.module:
                continue
        elif test._name in args.exclude_module:
            print("Skipping " + test._name)
            continue

        try:
            test.run()
        except EndTest:
            pass

        suite.add_testcase(test.case)

    xml = JUnitXml()
    xml.add_testsuite(suite)
    xml.update_statistics()
    xml.write(args.output, pretty=True)

    failed_cases = []

    # TODO maybe: move all the github-related code to a different .py
    # file to draw a better line between developer code versus
    # infrastructure-specific code, in other words keep this file
    # 100% testable and maintainable by non-admin developers.
    if args.github:
        n_fails = report_test_results_to_github(suite)
    else:
        for case in suite:
            if case.result:
                if case.result.type == 'skipped':
                    logging.warning("Skipped %s, %s", case.name, case.result.message)
                else:
                    failed_cases.append(case)
            else:
                # Some checks like codeowners can produce no .result
                logging.info("No JUnit result for %s", case.name)

        n_fails = len(failed_cases)
Example #20
def main(args=None):

    if args is None:
        args = sys.argv[1:]

    parser = argparse.ArgumentParser()
    parser.add_argument("--url", required=True)
    parser.add_argument("--apikey",
                        required=True,
                        default="cdeb2184-cb23-40a1-bdfd-d0fe2715547a")
    parser.add_argument("--port", type=int, default=4723)
    parsed_args = parser.parse_args(args)
    client_site_url = parsed_args.url
    if not client_site_url.endswith("/"):
        client_site_url = client_site_url + "/"
    apikey = parsed_args.apikey
    port = parsed_args.port
    s = socket.socket()
    try:
        s.bind(('localhost', port))
    except socket.error as err:
        if err.errno == 98:
            #Create Test Cases
            case1 = TestCase('Test1')
            case1.name = 'Test for get_resources'
            case1.result = Failure(
                'Test failed. Can not connect because port is actually used',
                err)
            #Create Test Suite
            suite = TestSuite('Suite1')
            suite.name = 'Test suite 1'
            suite.add_testcase(case1)
            #Add info into JunitXml
            xml = JUnitXml()
            xml.add_testsuite(suite)
            xml.write('junit_test.xml')
            sys.exit(
                "Port {port} is already in use.\n"
                "Is there another instance of {process} already running?\n"
                "To run multiple instances of {process} at once use the "
                "--port <num> option.".format(port=port, process=sys.argv[0]))
        else:
            raise
    try:
        response = requests.get(client_site_url,
                                headers=dict(Authorization=apikey))
    except requests.exceptions.RequestException as err:
        #Create Test Cases
        case1 = TestCase('Test1')
        case1.name = 'Test the connection to client_site_url'
        case1.result = Failure(
            'Test failed. Cannot connect to the client_site_url', err)
        #Create Test Suite
        suite = TestSuite('Suite1')
        suite.name = 'Test suite 1'
        suite.add_testcase(case1)
        #Add info into JunitXml
        xml = JUnitXml()
        xml.add_testsuite(suite)
        xml.write('junit_test.xml')
        sys.exit(
            "The client could not connect with the client site due to {error}".
            format(error=err))
    success, response = get_resources_to_check(client_site_url, apikey)
    data = response.json()
    if success:
        #Create Test Cases
        case1 = TestCase('Test1')
        case1.name = 'Test for get_resources'
        case1.result = Skipped(
            'Test passed successfully with 50 resources obtained')
    else:
        #Create Test Cases
        if not response.ok:
            case1 = TestCase('Test1')
            case1.name = 'Test for get_resources'
            case1.result = Failure(
                'Client could not get the list with code error {0} and reason {1}'
                .format(response.status_code,
                        response.reason), 'failure_of_connection')
        else:
            case1 = TestCase('Test1')
            case1.name = 'Test for get_resources'
            case1.result = Error(
                'Client could not get the list correctly, it only have got {0} resources'
                .format(len(data)), 'error_list')
    resource_id = data[0]
    success, response = get_url_for_id(client_site_url, apikey, resource_id)
    if success:
        #Create Test Cases
        case2 = TestCase('Test2')
        case2.name = 'Test for get_url_for_resource_id'
        case2.result = Skipped(
            'Test passed successfully with the url obtained correctly')
    else:
        #Create Test Cases
        if not response.ok:
            case2 = TestCase('Test2')
            case2.name = 'Test for get_url_for_resource_id'
            case2.result = Failure(
                'Client could not get the url for the resource with code error {0} and reason {1}'
                .format(response.status_code,
                        response.reason), 'failure_of_connection')
        else:
            case2 = TestCase('Test2')
            case2.name = 'Test for get_url_for_resource_id'
            case2.result = Error('Client could not get the url correctly',
                                 'the_error_type')
    #Create Test Suite
    suite = TestSuite('Suite1')
    suite.name = 'Test suite 1'
    suite.add_testcase(case1)
    suite.add_testcase(case2)
    #Add info into JunitXml
    xml = JUnitXml()
    xml.add_testsuite(suite)
    xml.write('junit_test.xml')
Example #21
from junitparser import JUnitXml, Element, Attr, TestCase
import argparse
import glob
import os


class ClassNameTestCase(TestCase):
    classname = Attr('classname')


parser = argparse.ArgumentParser()
parser.add_argument("search_path")
parser.add_argument("output_name")
args = parser.parse_args()

xml = JUnitXml()
for file in glob.glob(args.search_path + "/**/*.xml", recursive=True):
    xml += JUnitXml.fromfile(file)

for suite in xml:
    for case in suite:
        classname_case = ClassNameTestCase.fromelem(case)
        if classname_case.name == 'test_all':
            classname_case.name = classname_case.classname
xml.write(args.output_name)
Example #22
    if proc1_found_proc2 and proc2_found_proc1:
        discovery_case.result = Success()
    else:
        discovery_case.result = Failure(message='discovery error')
except Exception:
    if proc1.poll() is None:
        proc1.kill()
        proc1.communicate()
    if proc2.poll() is None:
        proc2.kill()
        proc2.communicate()
    # Set case results
    program_starting_case.result = Failure(message='program crash error')
    discovery_case.result = Failure(message='discovery error')

# Create suite and add cases
suite = TestSuite('testsuite')
#suite.add_property('build', '55')
suite.add_testcase(program_starting_case)
suite.add_testcase(discovery_case)

# Add suite to JunitXml
xml = JUnitXml()
xml.add_testsuite(suite)

test_reports_dir = os.path.join('tests', 'test-reports')
if not os.path.exists(test_reports_dir):
    os.makedirs(test_reports_dir)

xml.write(os.path.join(test_reports_dir, 'result.xml'), pretty=True)
Example #23
        owd: str = os.getcwd()
        build_dir: str = os.path.dirname(build_script_path)
        logger.info(f'Changing to build_dir[{build_dir}]')
        os.chdir(build_dir)
        start = datetime.utcnow()
        logger.info(f'Running Build for Notebook[{notebook_name}]')
        try:
            run_command([f'bash build.sh {ARTIFACT_HTML_DIR}'])
        except BuildError:
            raise BuildError(f'Unable to execute notebook[{notebook_name}]')

        delta = datetime.utcnow() - start
        logger.info(f'Changing back to old working dir[{owd}]')
        os.chdir(owd)
        test_case = TestCase(f'{notebook_name} Test')

        TEST_CASES.append(test_case)

    test_suite = TestSuite('Notebooks Test Suite')
    for case in TEST_CASES:
        test_suite.add_testcase(case)
    test_output_path: str = os.path.join(TEST_OUTPUT_DIR, 'results.xml')
    xml = JUnitXml()
    xml.add_testsuite(test_suite)
    xml.write(test_output_path)

if __name__ == '__main__':
    main()

Example #24
def __generate_report(self, test_file_path):
    xml_junit = JUnitXml()
    xml_junit.add_testsuite(self.__test_suite)
    xml_junit.write(filepath=test_file_path, pretty=True)
Example #25
import argparse
import csv
import pathlib

from junitparser import TestCase, TestSuite, JUnitXml, Skipped, Failure


def convert_csv_to_junit(csv_filename, junit_filename):
    suite = TestSuite('Gadgetron Integration')
    with open(csv_filename) as csv_file:
        statsreader = csv.DictReader(csv_file)
        for row in statsreader:
            case = TestCase(name=row['test'], time=row['processing_time'])
            if row['status'] != "Passed":
                case.result = [Failure()]
            suite.add_testcase(case)
        xml = JUnitXml()
        xml.add_testsuite(suite)
        xml.write(str(junit_filename))


def main():
    parser = argparse.ArgumentParser(
        description='Converts Gadgetron stats to jUNIT xml',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('-i',
                        '--input',
                        type=pathlib.Path,
                        help='Input CSV file')
    parser.add_argument('-o',
                        '--output',
                        type=pathlib.Path,
                        help='Output junit xml')
    args = parser.parse_args()
Example #26
def main():
    """
    Main function

    :return:
    """

    args = parse_args()

    init_logs(args.loglevel)

    if args.list:
        for testcase in ComplianceTest.__subclasses__():
            test = testcase(None, "")
            print("{}".format(test._name))
        sys.exit(0)

    if args.status and args.sha is not None and args.repo:
        set_status(args.repo, args.sha)
        sys.exit(0)

    if not args.commits:
        print("No commit range given.")
        sys.exit(1)

    if args.previous_run and os.path.exists(args.previous_run) and args.module:
        junit_xml = JUnitXml.fromfile(args.previous_run)
        logging.info("Loaded previous results from %s", args.previous_run)
        for loaded_suite in junit_xml:
            suite = loaded_suite
            break

    else:
        suite = TestSuite("Compliance")

    docs = {}
    for testcase in ComplianceTest.__subclasses__():
        test = testcase(None, "")
        docs[test._name] = test._doc

    for testcase in ComplianceTest.__subclasses__():
        test = testcase(suite, args.commits)
        if args.module:
            if test._name in args.module:
                test.run()
                suite.add_testcase(test.case)
        else:
            if test._name in args.exclude_module:
                print("Skipping {}".format(test._name))
                continue
            test.run()
            suite.add_testcase(test.case)

    xml = JUnitXml()
    xml.add_testsuite(suite)
    xml.update_statistics()
    xml.write(args.output)

    failed_cases = []

    # TODO maybe: move all the github-related code to a different .py
    # file to draw a better line between developer code versus
    # infrastructure-specific code, in other words keep this file
    # 100% testable and maintainable by non-admin developers.
    if args.github and 'GH_TOKEN' in os.environ:
        errors = report_to_github(args.repo, args.pull_request, args.sha,
                                  suite, docs)
    else:
        for case in suite:
            if case.result:
                if case.result.type == 'skipped':
                    logging.warning("Skipped %s, %s", case.name,
                                    case.result.message)
                else:
                    failed_cases.append(case)
            else:
                # Some checks like codeowners can produce no .result
                logging.info("No JUnit result for %s", case.name)

        errors = len(failed_cases)
Example #27
def _main(args):
    # The "real" main(), which is wrapped to catch exceptions and report them
    # to GitHub. Returns the number of test failures.

    # The absolute path of the top-level git directory. Initialize it here so
    # that issues running Git can be reported to GitHub.
    global GIT_TOP
    GIT_TOP = git("rev-parse", "--show-toplevel")

    # The commit range passed in --commit, e.g. "HEAD~3"
    global COMMIT_RANGE
    COMMIT_RANGE = args.commits

    init_logs(args.loglevel)

    if args.list:
        for testcase in ComplianceTest.__subclasses__():
            print(testcase.name)
        return 0

    # Load saved test results from an earlier run, if requested
    if args.previous_run:
        if not os.path.exists(args.previous_run):
            # This probably means that an earlier pass had an internal error
            # (the script is currently run multiple times by the ci-pipelines
            # repo). Since that earlier pass might've posted an error to
            # GitHub, avoid generating a GitHub comment here, by avoiding
            # sys.exit() (which gets caught in main()).
            print("error: '{}' not found".format(args.previous_run),
                  file=sys.stderr)
            return 1

        logging.info("Loading previous results from " + args.previous_run)
        for loaded_suite in JUnitXml.fromfile(args.previous_run):
            suite = loaded_suite
            break
    else:
        suite = TestSuite("Compliance")

    for testcase in ComplianceTest.__subclasses__():
        # "Modules" and "testcases" are the same thing. Better flags would have
        # been --tests and --exclude-tests or the like, but it's awkward to
        # change now.

        if args.module and testcase.name not in args.module:
            continue

        if testcase.name in args.exclude_module:
            print("Skipping " + testcase.name)
            continue

        test = testcase()
        try:
            print(
                f"Running {test.name:16} tests in "
                f"{GIT_TOP if test.path_hint == '<git-top>' else test.path_hint} ..."
            )
            test.run()
        except EndTest:
            pass

        suite.add_testcase(test.case)

    xml = JUnitXml()
    xml.add_testsuite(suite)
    xml.update_statistics()
    xml.write(args.output, pretty=True)

    failed_cases = []
    name2doc = {
        testcase.name: testcase.doc
        for testcase in ComplianceTest.__subclasses__()
    }

    for case in suite:
        if case.result:
            if case.result.type == 'skipped':
                logging.warning("Skipped %s, %s", case.name,
                                case.result.message)
            else:
                failed_cases.append(case)
        else:
            # Some checks like codeowners can produce no .result
            logging.info("No JUnit result for %s", case.name)
Example #28
    Returns: None
    """
    # str.strip() removes characters, not a suffix; split the extension off instead
    _file = suite_name.split("/")[-1].split(".")[0]
    xml_file = f"{run_dir}/xunit.xml"

    log.info(f"Creating xUnit result file for test suite: {_file}")

    suite = TestSuite(_file)
    for k, v in test_run_metadata.items():
        suite.add_property(k, v if v else "--NA--")

    for tc in test_cases:
        case = TestCase(tc["name"])
        elapsed = tc.get("duration")
        if isinstance(elapsed, timedelta):
            case.time = elapsed.total_seconds()
        else:
            case.time = 0.0

        if tc["status"] != "Pass":
            case.result = Failure("test failed")
        suite.add_testcase(case)

    suite.update_statistics()

    xml = JUnitXml()
    xml.add_testsuite(suite)
    xml.write(xml_file, pretty=True)

    log.info(f"xUnit result file created: {xml_file}")