Example #1
def write_output(target: str, results: list, min_cvss: int) -> None:
    """Write scan results in junitxml format"""

    suite = TestSuite(f"{target}")

    no_vulns: List = [
        {
            "Results": "No vulnerabilities."
        },
        {
            "Results": f"No vulnerabilities >= the min CVSS score {min_cvss}."
        },
    ]

    for result in results:
        if result not in no_vulns:
            test_case = TestCase(result["Vulnerable Library"])
            test_case.name = (result["Vulnerable Library"] + " - " +
                              result["Vulnerability"] + " - " + "CVSS " +
                              str(result["CVSS"]))
            test_case.result = [Failure(result)]
        else:
            test_case = TestCase("No vulnerabilities")
            test_case.result = result

        suite.add_testcase(test_case)

    xml = JUnitXml()
    xml.add_testsuite(suite)
    xml.write("test-output.xml")
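The snippet above assumes junitparser imports along these lines (plus typing.List); a minimal sketch of those imports and of how the generated report can be read back afterwards:

from typing import List
from junitparser import JUnitXml, TestSuite, TestCase, Failure

# Read the file written by write_output() back in and walk its contents.
report = JUnitXml.fromfile("test-output.xml")
for suite in report:
    for case in suite:
        print(case.name, case.result)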
Example #2
 def test_add_suite(self):
     suite1 = TestSuite("suite1")
     suite2 = TestSuite("suite2")
     result = JUnitXml()
     result.add_testsuite(suite1)
     result.add_testsuite(suite2)
     self.assertEqual(len(result), 2)
Example #3
def add_junit_failure(xml: junitparser.JUnitXml, test: Path, message: str,
                      starttime: datetime.datetime):
    t = junitparser.TestCase(name=test.name)
    t.result = junitparser.Failure(message=str(message))
    t.time = (datetime.datetime.utcnow() - starttime).total_seconds()
    suite = junitparser.TestSuite(name=test.name)
    suite.add_testcase(t)
    xml.add_testsuite(suite)
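A hypothetical call to add_junit_failure(); the test path and the message are made-up values used only for illustration:

import datetime
from pathlib import Path
import junitparser

xml = junitparser.JUnitXml()
start = datetime.datetime.utcnow()
# "tst_example" is a hypothetical test binary name.
add_junit_failure(xml, Path("tst_example"), "command timed out", start)
xml.write("failures.xml")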
Example #4
 def test_iadd_same_suite(self):
     result1 = JUnitXml()
     suite1 = TestSuite()
     result1.add_testsuite(suite1)
     result2 = JUnitXml()
     suite2 = TestSuite()
     result2.add_testsuite(suite2)
     result1 += result2
     self.assertEqual(len(result1), 1)
Example #5
 def test_xml_statistics(self):
     result1 = JUnitXml()
     suite1 = TestSuite()
     result1.add_testsuite(suite1)
     result2 = JUnitXml()
     suite2 = TestSuite()
     result2.add_testsuite(suite2)
     result3 = result1 + result2
     result3.update_statistics()
Example #6
 def test_add(self):
     result1 = JUnitXml()
     suite1 = TestSuite("suite1")
     result1.add_testsuite(suite1)
     result2 = JUnitXml()
     suite2 = TestSuite("suite2")
     result2.add_testsuite(suite2)
     result3 = result1 + result2
     self.assertEqual(len(result3), 2)
Example #7
 def test_write_noarg(self):
     suite1 = TestSuite()
     suite1.name = 'suite1'
     case1 = TestCase()
     case1.name = 'case1'
     suite1.add_testcase(case1)
     result = JUnitXml()
     result.add_testsuite(suite1)
     with self.assertRaises(JUnitXmlError):
         result.write()
Example #8
def compare_artifacts(dir1, dir2, run_name1, run_name2):
    """Compare artifacts from dir1 with di2 and store results in out_dir"""

    logger.info("Comparing artifacts from %s with %s", dir1, dir2)
    sub_dirs_1 = get_sub_dirs(dir1)

    over_all_pass = True
    aggregates = ["mean", "max", "min"]
    header = ["run_name1", "run_name2", "test_suite", "metric", "run1", "run2",
              "percentage_diff", "expected_diff", "result", "message"]
    rows = [header]

    reporter = JUnitXml()
    for sub_dir1 in sub_dirs_1:
        with Timer("Comparison test suite {} execution time".format(sub_dir1)) as t:
            comp_ts = CompareTestSuite(sub_dir1, run_name1 + " and " + run_name2, t)

            metrics_file1, metrics_file2 = get_log_file(dir1, sub_dir1), get_log_file(dir2, sub_dir1)
            if not (metrics_file1 and metrics_file2):
                msg = "Metrics monitoring logs are not captured for {} in either " \
                      "of the runs.".format(sub_dir1)
                logger.info(msg)
                rows.append([run_name1, run_name2, sub_dir1, "metrics_log_file_availability",
                             "NA", "NA", "NA", "NA", "pass", msg])
                comp_ts.add_test_case("metrics_log_file_availability", msg, "skip")
                continue

            metrics_from_file1 = pd.read_csv(metrics_file1)
            metrics_from_file2 = pd.read_csv(metrics_file2)
            metrics, diff_percents = taurus_reader.get_compare_metric_list(dir1, sub_dir1)

            for col, diff_percent in zip(metrics, diff_percents):
                for agg_func in aggregates:
                    name = "{}_{}".format(agg_func, str(col))

                    val1 = get_aggregate_val(metrics_from_file1, agg_func, col)
                    val2 = get_aggregate_val(metrics_from_file2, agg_func, col)

                    diff, pass_fail, msg = compare_values(val1, val2, diff_percent, run_name1, run_name2)

                    if over_all_pass:
                        over_all_pass = pass_fail == "pass"

                    result_row = [run_name1, run_name2, sub_dir1, name, val1, val2,
                                  diff, diff_percent, pass_fail, msg]
                    rows.append(result_row)
                    test_name = "{}: diff_percent < {}".format(name, diff_percent)
                    comp_ts.add_test_case(test_name, msg, pass_fail)

            comp_ts.ts.time = t.diff()
            comp_ts.ts.update_statistics()
            reporter.add_testsuite(comp_ts.ts)

    dataframe = pd.DataFrame(rows[1:], columns=rows[0])
    return reporter, dataframe
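CompareTestSuite is not defined in this excerpt. A minimal sketch of a wrapper exposing the interface the loop relies on (constructor, add_test_case(), and a .ts junitparser suite); the class name comes from the code above, everything else is an assumption:

from junitparser import TestSuite, TestCase, Failure, Skipped

class CompareTestSuite:
    def __init__(self, name, description, timer):
        self.ts = TestSuite(name)      # underlying junitparser suite
        self.description = description
        self.timer = timer

    def add_test_case(self, name, message, status):
        case = TestCase(name)
        if status == "fail":
            case.result = [Failure(message)]
        elif status == "skip":
            case.result = [Skipped(message)]
        # "pass" cases carry no result element
        self.ts.add_testcase(case)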
Example #9
def run_subdir(qemu: boot_cheribsd.CheriBSDInstance, subdir: Path,
               xml: junitparser.JUnitXml, successful_tests: list,
               failed_tests: list, build_dir: Path):
    tests = []
    for root, dirs, files in os.walk(str(subdir), topdown=True):
        for name in files:
            if not name.startswith("tst_") or name.endswith(".core"):
                continue
            tests.append(Path(root, name))
        # Ignore .moc and .obj directories:
        dirs[:] = [d for d in dirs if not d.startswith(".")]
    # Ensure that we run the tests in a reproducible order
    for f in sorted(tests):
        starttime = datetime.datetime.utcnow()
        try:
            # TODO: -o /path/to/file -junitxml
            qemu.checked_run(
                "rm -f /build/test.xml && {} -o /build/test.xml,junitxml -o -,txt -v1"
                .format(f),
                timeout=10)
            endtime = datetime.datetime.utcnow()
            successful_tests.append(f)
            qemu.checked_run("fsync /build/test.xml")
            test_xml = build_dir / "test.xml"
            qt_test = junitparser.JUnitXml.fromfile(str(test_xml))
            if not isinstance(qt_test, junitparser.TestSuite):
                raise ValueError(
                    "Got unexpected parse result loading JUnit Xml: " +
                    qt_test.tostring())
            if qt_test.name.lower() != f.name:
                raise ValueError(
                    "Got unexpected test suite name: '{}' instead of '{}'".
                    format(qt_test.name, f.name))
            if not qt_test.time:
                qt_test.time = (endtime - starttime).total_seconds()
            boot_cheribsd.info("Results for ", f.name, ": ", qt_test)
            xml.add_testsuite(qt_test)
        except Exception as e:
            if isinstance(e, boot_cheribsd.CheriBSDCommandFailed):
                boot_cheribsd.failure("Failed to run ",
                                      f.name,
                                      ": ",
                                      str(e),
                                      exit=False)
            else:
                boot_cheribsd.failure("Error loading JUnit result for",
                                      f.name,
                                      ": ",
                                      str(e),
                                      exit=False)
            failed_tests.append(f)
            add_junit_failure(xml, f, str(e), starttime)
            # Kill the process that timed out:
            qemu.sendintr()
            qemu.expect_prompt(timeout=60)
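JUnitXml.fromfile() returns a TestSuite when the parsed file has a single <testsuite> root (as the Qt test output does here) and a JUnitXml when the root is <testsuites>, which is why the code above checks the type. A small sketch that accepts either form instead of raising:

import junitparser

def load_single_suite(path: str) -> junitparser.TestSuite:
    parsed = junitparser.JUnitXml.fromfile(path)
    if isinstance(parsed, junitparser.TestSuite):
        return parsed
    suites = list(parsed)              # a JUnitXml iterates over its test suites
    if len(suites) != 1:
        raise ValueError("expected exactly one <testsuite> in " + path)
    return suites[0]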
Example #10
def main():
    args = parse_args()


    github_token = ''
    gh = None
    if args.github:
        github_token = os.environ['GH_TOKEN']
        gh = Github(github_token)

    if args.status and args.sha is not None and args.repo and gh:
        set_status(gh, args.repo, args.sha)
        sys.exit(0)

    if not args.commits:
        sys.exit(1)

    suite = TestSuite("Compliance")
    docs = {}
    for Test in ComplianceTest.__subclasses__():
        t = Test(suite, args.commits)
        t.run()
        suite.add_testcase(t.case)
        docs[t.case.name] = t._doc

    xml = JUnitXml()
    xml.add_testsuite(suite)
    xml.update_statistics()
    xml.write('compliance.xml')

    if args.github:
        repo = gh.get_repo(args.repo)
        pr = repo.get_pull(int(args.pull_request))
        commit = repo.get_commit(args.sha)

        comment = "Found the following issues, please fix and resubmit:\n\n"
        comment_count = 0
        print("Processing results...")
        for case in suite:
            if case.result and case.result.type != 'skipped':
                comment_count += 1
                comment += ("## {}\n".format(case.result.message))
                comment += "\n"
                if case.name not in ['Gitlint', 'Identity/Emails', 'License']:
                    comment += "```\n"
                comment += ("{}\n".format(case.result._elem.text))
                if case.name not in ['Gitlint', 'Identity/Emails', 'License']:
                    comment += "```\n"

                commit.create_status('failure',
                                     docs[case.name],
                                     'Verification failed',
                                     '{}'.format(case.name))
Example #11
def gen_results_summary(results_dir,
                        output_fn=None,
                        merge_fn=None,
                        verbose=False,
                        print_section=False,
                        results_file='results.xml'):
    """Scan a results directory and generate a summary file"""
    reports = []
    combined = JUnitXml()
    nr_files = 0
    out_f = sys.stdout

    for filename in get_results(results_dir, results_file):
        reports.append(JUnitXml.fromfile(filename))

    if len(reports) == 0:
        return 0

    if output_fn is not None:
        out_f = open(output_fn, "w")

    props = copy.deepcopy(reports[0].child(Properties))

    ltm = check_for_ltm(results_dir, props)

    print_header(out_f, props)

    sort_by = lambda ts: parse_timestamp(ts.timestamp)
    if ltm:
        sort_by = lambda ts: ts.hostname

    if total_tests(reports) < 30:
        verbose = True

    for testsuite in sorted(reports, key=sort_by):
        print_summary(out_f, testsuite, verbose, print_section)
        combined.add_testsuite(testsuite)
        nr_files += 1

    out_f.write('Totals: %d tests, %d skipped, %d failures, %d errors, %ds\n' \
                % sum_testsuites(reports))

    print_trailer(out_f, props)

    if merge_fn is not None:
        combined.update_statistics()
        combined.write(merge_fn + '.new')
        if os.path.exists(merge_fn):
            os.rename(merge_fn, merge_fn + '.bak')
        os.rename(merge_fn + '.new', merge_fn)

    return nr_files
Example #12
 def test_write_nonascii(self):
     suite1 = TestSuite()
     suite1.name = "suite1"
     case1 = TestCase()
     case1.name = "用例1"
     suite1.add_testcase(case1)
     result = JUnitXml()
     result.add_testsuite(suite1)
     result.write(self.tmp)
     with open(self.tmp, encoding="utf-8") as f:
         text = f.read()
     self.assertIn("suite1", text)
     self.assertIn("用例1", text)
Example #13
 def test_write_pretty(self):
     suite1 = TestSuite()
     suite1.name = 'suite1'
     case1 = TestCase()
     case1.name = '用例1'
     suite1.add_testcase(case1)
     result = JUnitXml()
     result.add_testsuite(suite1)
     result.write(self.tmp, pretty=True)
     xml = JUnitXml.fromfile(self.tmp)
     suite = next(iter(xml))
     case = next(iter(suite))
     self.assertEqual(case.name, '用例1')
Example #14
 def test_write_nonascii(self):
     suite1 = TestSuite()
     suite1.name = 'suite1'
     case1 = TestCase()
     case1.name = '用例1'
     suite1.add_testcase(case1)
     result = JUnitXml()
     result.add_testsuite(suite1)
     result.write(self.tmp)
     with open(self.tmp, encoding='utf-8') as f:
         text = f.read()
     self.assertIn('suite1', text)
     self.assertIn('用例1', text)
Example #15
 def test_read_written_xml(self):
     suite1 = TestSuite()
     suite1.name = "suite1"
     case1 = TestCase()
     case1.name = "用例1"
     suite1.add_testcase(case1)
     result = JUnitXml()
     result.add_testsuite(suite1)
     result.write(self.tmp)
     xml = JUnitXml.fromfile(self.tmp)
     suite = next(iter(xml))
     case = next(iter(suite))
     self.assertEqual(case.name, "用例1")
Example #16
 def test_write(self):
     suite1 = TestSuite()
     suite1.name = 'suite1'
     case1 = TestCase()
     case1.name = 'case1'
     suite1.add_testcase(case1)
     result = JUnitXml()
     result.add_testsuite(suite1)
     result.write(self.tmp)
     with open(self.tmp) as f:
         text = f.read()
     self.assertIn('suite1', text)
     self.assertIn('case1', text)
Example #17
 def test_construct_xml(self):
     suite1 = TestSuite()
     suite1.name = 'suite1'
     case1 = TestCase()
     case1.name = 'case1'
     suite1.add_testcase(case1)
     result = JUnitXml()
     result.add_testsuite(suite1)
     self.assertEqual(result._elem.tag, 'testsuites')
     suite = result._elem.findall('testsuite')
     self.assertEqual(len(suite), 1)
     self.assertEqual(suite[0].attrib['name'], 'suite1')
     case = suite[0].findall('testcase')
     self.assertEqual(len(case), 1)
     self.assertEqual(case[0].attrib['name'], 'case1')
Example #18
 def test_construct_xml(self):
     suite1 = TestSuite()
     suite1.name = "suite1"
     case1 = TestCase()
     case1.name = "case1"
     suite1.add_testcase(case1)
     result = JUnitXml()
     result.add_testsuite(suite1)
     self.assertEqual(result._elem.tag, "testsuites")
     suite = result._elem.findall("testsuite")
     self.assertEqual(len(suite), 1)
     self.assertEqual(suite[0].attrib["name"], "suite1")
     case = suite[0].findall("testcase")
     self.assertEqual(len(case), 1)
     self.assertEqual(case[0].attrib["name"], "case1")
Example #19
def write_output(target, results) -> None:
    """Write scan results in junitxml format"""

    test_case = TestCase(f"{target}")
    test_case.name = f"{target}"
    if results["Results"] != ["No SSL/TLS Violations found."]:
        test_case.result = [Failure(results)]
    else:
        test_case.result = results

    suite = TestSuite("SSLChecker")
    suite.add_testcase(test_case)

    xml = JUnitXml()
    xml.add_testsuite(suite)
    xml.write("test-output.xml")
Example #20
    def build(self) -> None:
        self.mainsuite = TestSuite("Drive")

        self.process_xml(self.folder / "final.xml")

        for idx in (2, 1):
            # First add the results from the reruns (suffixed with "2")
            # then the first runs, to add successes before failures.
            for results in Path(self.folder).glob(f"**/*.{idx}.xml"):
                self.process_xml(results)

        print("End of processing")
        print_suite(self.mainsuite)

        xml = JUnitXml()
        xml.add_testsuite(self.mainsuite)
        xml.write(self.folder / self.output)
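process_xml() and print_suite() are not shown in this excerpt. A plausible sketch of process_xml(), assuming its job is to fold every test case of one report into self.mainsuite and to ignore missing files:

from pathlib import Path
from junitparser import JUnitXml, TestSuite

def process_xml(self, path: Path) -> None:
    """Sketch only: merge one report's test cases into self.mainsuite."""
    if not path.exists():
        return
    report = JUnitXml.fromfile(str(path))
    suites = [report] if isinstance(report, TestSuite) else list(report)
    for suite in suites:
        for case in suite:
            self.mainsuite.add_testcase(case)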
Example #21
def test_is_compliant_suite_returns_true_WHEN_no_failures_AND_no_errors_in_JUnitXML(
):
    # Create cases
    case1 = TestCase('case1')
    case2 = TestCase('case2')
    case2.result = [Skipped()]
    # Create suite and add cases
    suite = TestSuite('suite1')
    suite.add_property('build', '55')
    suite.add_testcase(case1)
    suite.add_testcase(case2)

    # Add suite to JunitXml
    xml = JUnitXml()
    xml.add_testsuite(suite)

    (control_result, message) = is_compliant_suite(xml)
    assert control_result is True
    assert message == "All tests passed"
Example #22
def test_is_compliant_suite_returns_false_WHEN_errors_in_JUnitXML():
    # Create cases
    case1 = TestCase('case1')
    case1.result = [Error()]
    case2 = TestCase('case2')

    # Create suite and add cases
    suite = TestSuite('suite1')
    suite.add_property('build', '55')
    suite.add_testcase(case1)
    suite.add_testcase(case2)

    # Add suite to JunitXml
    xml = JUnitXml()
    xml.add_testsuite(suite)

    (control_result, message) = is_compliant_suite(xml)
    assert control_result is False
    assert message == "Tests contain errors"
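is_compliant_suite() itself is not shown in these two tests. A minimal sketch consistent with them, checking the result entries of every test case; the "Tests contain failures" branch is an assumption, since only the error case is exercised above:

from junitparser import JUnitXml, Error, Failure

def is_compliant_suite(xml: JUnitXml):
    """Return (True, message) when no test case failed or errored."""
    for suite in xml:
        for case in suite:
            for entry in case.result:          # result is a list in junitparser 2.x
                if isinstance(entry, Error):
                    return False, "Tests contain errors"
                if isinstance(entry, Failure):
                    return False, "Tests contain failures"
    return True, "All tests passed"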
Example #23
    def build(self) -> None:
        test_suite = os.getenv("TEST_SUITE", "Project")
        self.mainsuite = TestSuite(test_suite)

        # Aggregate all reports in reverse order:
        # this is important for projects using "rerun" mechanism and where
        # reports are numbered so that report-2.xml should be processed
        # before report-1.xml in order to add successes before failures.
        for report in sorted(self.folder.glob("**/*.xml"), reverse=True):
            # Skip the final report, if present
            if report.name == self.output:
                continue
            self.process_xml(report)

        print("End of processing")
        print_suite(self.mainsuite)

        xml = JUnitXml()
        xml.add_testsuite(self.mainsuite)
        xml.write(self.folder / self.output)
Example #24
def write_output(target: str, results: list) -> None:
    """ Write scan results in junitxml format """

    suite = TestSuite(f"{target}")

    for result in results:
        if result != {"Results": "No vulnerabilities."}:
            test_case = TestCase(result["Vulnerable Library"])
            test_case.name = (result["Vulnerable Library"] + " - "\
                 + result["Vulnerability"] + " - "\
                 + "CVSS " + str(result["CVSS"]))
            test_case.result = [Failure(result)]
        else:
            test_case = TestCase("No vulnerabilities")
            test_case.result = result

        suite.add_testcase(test_case)

    xml = JUnitXml()
    xml.add_testsuite(suite)
    xml.write('test-output.xml')
Example #25
def parse_junit(junit_dir):
    test_suite = TestSuite("Combined TestSuite")
    for junit_xml in glob.glob(os.path.join(junit_dir, "junit_*.xml")):
        if "junit_runner.xml" not in junit_xml:
            parsed = JUnitXml.fromfile(junit_xml)
            for testcase in parsed:
                if isinstance(testcase, TestSuite) or isinstance(
                        testcase.result, Skipped):
                    continue
                test_suite.add_testcase(testcase)
        os.remove(junit_xml)

    xml = JUnitXml()
    xml.add_testsuite(test_suite)
    xml.write(os.path.join(junit_dir, "junit_combined.xml"))
    xml.update_statistics()

    test_failure_rate = 0
    if xml.tests != 0:
        test_failure_rate = int(
            math.ceil(((xml.failures + xml.errors) * 100) / xml.tests))

    return utils.generate_payload(CANARY_TEST_FAILURE_RATE, test_failure_rate)
Example #26
def _main(args):
    # The "real" main(), which is wrapped to catch exceptions and report them
    # to GitHub. Returns the number of test failures.

    # The absolute path of the top-level git directory. Initialize it here so
    # that issues running Git can be reported to GitHub.
    global GIT_TOP
    GIT_TOP = git("rev-parse", "--show-toplevel")

    # The commit range passed in --commit, e.g. "HEAD~3"
    global COMMIT_RANGE
    COMMIT_RANGE = args.commits

    init_logs(args.loglevel)

    if args.list:
        for testcase in ComplianceTest.__subclasses__():
            print(testcase.name)
        return 0

    # Load saved test results from an earlier run, if requested
    if args.previous_run:
        if not os.path.exists(args.previous_run):
            # This probably means that an earlier pass had an internal error
            # (the script is currently run multiple times by the ci-pipelines
            # repo). Since that earlier pass might've posted an error to
            # GitHub, avoid generating a GitHub comment here, by avoiding
            # sys.exit() (which gets caught in main()).
            print("error: '{}' not found".format(args.previous_run),
                  file=sys.stderr)
            return 1

        logging.info("Loading previous results from " + args.previous_run)
        for loaded_suite in JUnitXml.fromfile(args.previous_run):
            suite = loaded_suite
            break
    else:
        suite = TestSuite("Compliance")

    for testcase in ComplianceTest.__subclasses__():
        # "Modules" and "testcases" are the same thing. Better flags would have
        # been --tests and --exclude-tests or the like, but it's awkward to
        # change now.

        if args.module and testcase.name not in args.module:
            continue

        if testcase.name in args.exclude_module:
            print("Skipping " + testcase.name)
            continue

        test = testcase()
        try:
            print(
                f"Running {test.name:16} tests in "
                f"{GIT_TOP if test.path_hint == '<git-top>' else test.path_hint} ..."
            )
            test.run()
        except EndTest:
            pass

        suite.add_testcase(test.case)

    xml = JUnitXml()
    xml.add_testsuite(suite)
    xml.update_statistics()
    xml.write(args.output, pretty=True)

    failed_cases = []
    name2doc = {
        testcase.name: testcase.doc
        for testcase in ComplianceTest.__subclasses__()
    }

    for case in suite:
        if case.result:
            if case.result.type == 'skipped':
                logging.warning("Skipped %s, %s", case.name,
                                case.result.message)
            else:
                failed_cases.append(case)
        else:
            # Some checks like codeowners can produce no .result
            logging.info("No JUnit result for %s", case.name)
Example #27
def main():
    """
    Main function

    :return:
    """

    args = parse_args()

    init_logs(args.loglevel)

    if args.list:
        for testcase in ComplianceTest.__subclasses__():
            test = testcase(None, "")
            print("{}".format(test._name))
        sys.exit(0)

    if args.status and args.sha is not None and args.repo:
        set_status(args.repo, args.sha)
        sys.exit(0)

    if not args.commits:
        print("No commit range given.")
        sys.exit(1)

    if args.previous_run and os.path.exists(args.previous_run) and args.module:
        junit_xml = JUnitXml.fromfile(args.previous_run)
        logging.info("Loaded previous results from %s", args.previous_run)
        for loaded_suite in junit_xml:
            suite = loaded_suite
            break

    else:
        suite = TestSuite("Compliance")

    docs = {}
    for testcase in ComplianceTest.__subclasses__():
        test = testcase(None, "")
        docs[test._name] = test._doc

    for testcase in ComplianceTest.__subclasses__():
        test = testcase(suite, args.commits)
        if args.module:
            if test._name in args.module:
                test.run()
                suite.add_testcase(test.case)
        else:
            if test._name in args.exclude_module:
                print("Skipping {}".format(test._name))
                continue
            test.run()
            suite.add_testcase(test.case)

    xml = JUnitXml()
    xml.add_testsuite(suite)
    xml.update_statistics()
    xml.write(args.output)

    failed_cases = []

    # TODO maybe: move all the github-related code to a different .py
    # file to draw a better line between developer code versus
    # infrastructure-specific code, in other words keep this file
    # 100% testable and maintainable by non-admin developers.
    if args.github and 'GH_TOKEN' in os.environ:
        errors = report_to_github(args.repo, args.pull_request, args.sha,
                                  suite, docs)
    else:
        for case in suite:
            if case.result:
                if case.result.type == 'skipped':
                    logging.warning("Skipped %s, %s", case.name,
                                    case.result.message)
                else:
                    failed_cases.append(case)
            else:
                # Some checks like codeowners can produce no .result
                logging.info("No JUnit result for %s", case.name)

        errors = len(failed_cases)
Example #28
def create_xunit_results(suite_name, test_cases, test_run_metadata):
    """Create an xUnit result file for the test suite's executed test cases.

    Args:
        suite_name: the test suite name
        test_cases: the test cases objects
        test_run_metadata: test run meta information in dict

    Returns: None
    """
    _file = suite_name.split("/")[-1].split(".")[0]
    run_dir = test_run_metadata["log-dir"]
    run_id = test_run_metadata["run-id"]
    xml_file = f"{run_dir}/xunit.xml"
    ceph_version = test_run_metadata["ceph-version"]
    ansible_version = test_run_metadata["ceph-ansible-version"]
    distribution = test_run_metadata["distro"]
    build = test_run_metadata["build"]
    test_run_id = f"RHCS-{build}-{_file}-{run_id}".replace(".", "-")
    test_group_id = (
        f"ceph-build: {ceph_version} "
        f"ansible-build: {ansible_version} OS distro: {distribution}")
    log.info(f"Creating xUnit {_file} for test run-id {test_run_id}")

    suite = TestSuite(_file)
    for k, v in test_run_metadata.items():
        suite.add_property(k, f" {v}" if v else " --NA--")

    for tc in test_cases:
        test_name = tc["name"]
        pol_ids = tc.get("polarion-id")
        test_status = tc["status"]
        elapsed_time = tc.get("duration")

        if pol_ids:
            _ids = pol_ids.split(",")
            for _id in _ids:
                suite.add_testcase(
                    generate_test_case(
                        test_name,
                        elapsed_time,
                        test_status,
                        polarion_id=_id,
                    ))
        else:
            suite.add_testcase(
                generate_test_case(
                    test_name,
                    elapsed_time,
                    test_status,
                ))

    suite.update_statistics()

    xml = JUnitXml()
    props = Properties()
    props.append(Property(name="polarion-project-id", value="CEPH"))
    props.append(Property(name="polarion-testrun-id", value=test_run_id))
    props.append(Property(name="polarion-group-id", value=test_group_id))
    xml.append(props)
    xml.add_testsuite(suite)
    xml.write(xml_file, pretty=True)

    log.info(f"xUnit result file created: {xml_file}")
Example #29
from junitparser import TestCase, TestSuite, JUnitXml, Skipped, Error

# Create cases
case1 = TestCase('case1')
case1.result = Skipped()
case2 = TestCase('case2')
case2.result = Error('Example error message', 'the_error_type')

# Create suite and add cases
suite = TestSuite('suite1')
suite.add_property('build', '55')
suite.add_testcase(case1)
suite.add_testcase(case2)
suite.remove_testcase(case2)

# Add suite to JunitXml
xml = JUnitXml()
xml.add_testsuite(suite)
xml.write('C:/Users/RAG/Desktop/venky-python/junit.xml')
Example #30
def main(args=None):

    if args is None:
        args = sys.argv[1:]

    parser = argparse.ArgumentParser()
    parser.add_argument("--url", required=True)
    parser.add_argument("--apikey",
                        required=True,
                        default="cdeb2184-cb23-40a1-bdfd-d0fe2715547a")
    parser.add_argument("--port", type=int, default=4723)
    parsed_args = parser.parse_args(args)
    client_site_url = parsed_args.url
    if not client_site_url.endswith("/"):
        client_site_url = client_site_url + "/"
    apikey = parsed_args.apikey
    port = parsed_args.port
    s = socket.socket()
    try:
        s.bind(('localhost', port))
    except socket.error as err:
        if err.errno == 98:
            #Create Test Cases
            case1 = TestCase('Test1')
            case1.name = 'Test for get_resources'
            case1.result = Failure(
                'Test failed. Can not connect because port is actually used',
                err)
            #Create Test Suite
            suite = TestSuite('Suite1')
            suite.name = 'Test suite 1'
            suite.add_testcase(case1)
            #Add info into JunitXml
            xml = JUnitXml()
            xml.add_testsuite(suite)
            xml.write('junit_test.xml')
            sys.exit(
                "Port {port} is already in use.\n"
                "Is there another instance of {process} already running?\n"
                "To run multiple instances of {process} at once use the "
                "--port <num> option.".format(port=port, process=sys.argv[0]))
        else:
            raise
    try:
        response = requests.get(client_site_url,
                                headers=dict(Authorization=apikey))
    except requests.exceptions.RequestException as err:
        #Create Test Cases
        case1 = TestCase('Test1')
        case1.name = 'Test the connection to client_site_url'
        case1.result = Failure(
            'Test failed. Cannot connect to the client_site_url', err)
        #Create Test Suite
        suite = TestSuite('Suite1')
        suite.name = 'Test suite 1'
        suite.add_testcase(case1)
        #Add info into JunitXml
        xml = JUnitXml()
        xml.add_testsuite(suite)
        xml.write('junit_test.xml')
        sys.exit(
            "The client could not connect with the client site due to {error}".
            format(error=err))
    success, response = get_resources_to_check(client_site_url, apikey)
    data = response.json()
    if success:
        #Create Test Cases
        case1 = TestCase('Test1')
        case1.name = 'Test for get_resources'
        case1.result = Skipped(
            'Test passed successfully with 50 resources obtained')
    else:
        #Create Test Cases
        if not response.ok:
            case1 = TestCase('Test1')
            case1.name = 'Test for get_resources'
            case1.result = Failure(
                'Client could not get the list with code error {0} and reason {1}'
                .format(response.status_code,
                        response.reason), 'failure_of_connection')
        else:
            case1 = TestCase('Test1')
            case1.name = 'Test for get_resources'
            case1.result = Error(
                'Client could not get the list correctly, it only have got {0} resources'
                .format(len(data)), 'error_list')
    resource_id = data[0]
    success, response = get_url_for_id(client_site_url, apikey, resource_id)
    if success:
        #Create Test Cases
        case2 = TestCase('Test2')
        case2.name = 'Test for get_url_for_resource_id'
        case2.result = Skipped(
            'Test passed successfully with the url obtained correctly')
    else:
        #Create Test Cases
        if not response.ok:
            case2 = TestCase('Test2')
            case2.name = 'Test for get_url_for_resource_id'
            case2.result = Failure(
                'Client could not get the url for the resource with code error {0} and reason {1}'
                .format(response.status_code,
                        response.reason), 'failure_of_connection')
        else:
            case2 = TestCase('Test2')
            case2.name = 'Test for get_url_for_resource_id'
            case2.result = Error('Client could not get the url correctly',
                                 'the_error_type')
    #Create Test Suite
    suite = TestSuite('Suite1')
    suite.name = 'Test suite 1'
    suite.add_testcase(case1)
    suite.add_testcase(case2)
    #Add info into JunitXml
    xml = JUnitXml()
    xml.add_testsuite(suite)
    xml.write('junit_test.xml')