def run(self):
        """Scan files newly added in the commit range with scancode-toolkit and
        flag license/copyright problems.

        Outcome is recorded on ``self.case.result``:
        * ``Skipped`` when scancode-toolkit is not installed or fails to run.
        * ``Failure`` with a per-file report when license/copyright issues
          are found.
        """
        self.prepare()

        scancode = "/opt/scancode-toolkit/scancode"
        if not os.path.exists(scancode):
            self.case.result = Skipped("scancode-toolkit not installed", "skipped")
            return

        os.makedirs("scancode-files", exist_ok=True)
        # Only files *added* (--diff-filter=A) in the commit range are scanned.
        new_files = sh.git("diff", "--name-only", "--diff-filter=A", self.commit_range, **sh_special_args)

        if not new_files:
            return

        # Copy each new file into scancode-files/, preserving its relative path.
        for newf in new_files:
            f = str(newf).rstrip()
            os.makedirs(os.path.join('scancode-files', os.path.dirname(f)), exist_ok=True)
            copyfile(f, os.path.join("scancode-files", f))

        try:
            cmd = [scancode, '--verbose', '--copyright', '--license', '--license-diag', '--info',
                    '--classify', '--summary', '--json', 'scancode.json', 'scancode-files/']

            logging.info(" ".join(cmd))

            # Fix: pass the argument list directly instead of joining it into a
            # shell string with shell=True — this keeps file names containing
            # spaces/metacharacters intact and removes the shell-injection surface.
            subprocess.check_output(cmd, stderr=subprocess.STDOUT)

        except subprocess.CalledProcessError as e:
            logging.error(e.output)
            self.case.result = Skipped("Exception when running scancode", "skipped")
            return

        report = ""
        with open('scancode.json', 'r') as json_fp:
            scancode_results = json.load(json_fp)
            for file in scancode_results['files']:
                if file['type'] == 'directory':
                    continue

                original_fp = str(file['path']).replace('scancode-files/', '')
                licenses = file['licenses']
                # Only source/script files are checked; CMake and .yaml are exempt.
                if (file['is_script'] or file['is_source']) and (file['programming_language'] not in ['CMake']) and (file['extension'] not in ['.yaml']):
                    if not licenses:
                        report += ("* {} missing license.\n".format(original_fp))
                    else:
                        for l in licenses:
                            if l['key'] != "apache-2.0":
                                report += ("* {} is not apache-2.0 licensed: {}\n".format(original_fp, l['key']))
                            if l['category'] != 'Permissive':
                                report += ("* {} has non-permissive license: {}\n".format(original_fp, l['key']))

                    if not file['copyrights']:
                        report += ("* {} missing copyright.\n".format(original_fp))

        if report != "":
            self.case.result = Failure("License/Copyright issues", "failure")
            self.case.result._elem.text = report
# Example #2
class Test_TestCase(unittest.TestCase):
    """Unit tests for junitparser's TestCase element."""

    def test_case_fromstring(self):
        """A testcase parsed from XML exposes its result and output streams."""
        text = """<testcase name="testname">
        <failure message="failure message" type="FailureType"/>
        <system-out>System out</system-out>
        <system-err>System err</system-err>
        </testcase>"""
        parsed = TestCase.fromstring(text)
        self.assertEqual(parsed.name, "testname")
        self.assertIsInstance(parsed.result[0], Failure)
        self.assertEqual(parsed.system_out, "System out")
        self.assertEqual(parsed.system_err, "System err")

    def test_illegal_xml_multi_results(self):
        """Parse a testcase carrying two result children."""
        text = """<testcase name="testname">
        <failure message="failure message" type="FailureType"/>
        <skipped message="skipped message" type="FailureType"/>
        </testcase>
        """
        parsed = TestCase.fromstring(text)
        # NOTE(review): assertRaises without a callable returns an unused
        # context manager, so this line asserts nothing — confirm intent.
        self.assertRaises(JUnitXmlError)

    def test_case_attributes(self):
        """Attributes and a Skipped result round-trip through the element."""
        tc = TestCase()
        tc.name = "testname"
        tc.classname = "testclassname"
        tc.time = 15.123
        tc.result = [Skipped()]
        tc.result[0].text = "woah skipped"
        self.assertEqual(tc.name, "testname")
        self.assertEqual(tc.classname, "testclassname")
        self.assertEqual(tc.time, 15.123)
        self.assertIsInstance(tc.result[0], Skipped)
        self.assertEqual(tc.result[0].text, "woah skipped")
    def run(self):
        """Verify that every file added in the commit range is covered by an
        entry in the repository's CODEOWNERS file.

        Sets ``self.case.result`` to ``Skipped`` when no CODEOWNERS file
        exists, or to ``Error`` listing the uncovered files.
        """
        self.prepare()
        git_root = sh.git("rev-parse", "--show-toplevel").strip()
        codeowners = os.path.join(git_root, "CODEOWNERS")
        if not os.path.exists(codeowners):
            self.case.result = Skipped(
                "CODEOWNERS not available in this repo.", "skipped")
            return

        # Only files *added* (--diff-filter=A) in the commit range matter here.
        commit = sh.git("diff", "--name-only", "--diff-filter=A",
                        self.commit_range, **sh_special_args)
        new_files = commit.split("\n")
        # (The original also ran `git ls-files` into an unused set; that dead
        # lookup has been removed.)
        if new_files:
            owned = self.parse_codeowners(git_root, codeowners)
            # Empty strings come from the trailing newline of git's output.
            new_not_owned = [f for f in new_files if f and f not in owned]

            if new_not_owned:
                self.case.result = Error("CODEOWNERS Issues", "failure")
                self.case.result._elem.text = "New files added that are not covered in CODEOWNERS:\n\n"
                self.case.result._elem.text += "\n".join(new_not_owned)
                # Fix: the message said "CODEWONERS" — corrected spelling.
                self.case.result._elem.text += "\n\nPlease add one or more entries in the CODEOWNERS file to cover those files"
# Example #4
    def skip(self, msg):
        """Mark the current test case as skipped with message *msg* and abort.

        Internally raises ``EndTest``, so callers do not need a ``return``
        after invoking ``skip()``. Any failure text already recorded on the
        case is appended to the message, since failures produced before a
        skip usually point at problems in the test code itself.
        """
        prior = self.case.result
        if prior:
            msg = "{}\n\nFailures before skip: {}".format(msg, prior._elem.text)

        self.case.result = Skipped(msg, "skipped")

        raise EndTest
# Example #5
 def test_add_case(self):
     """update_statistics() tallies one case of each outcome kind."""
     suite = TestSuite()
     # One passing case (no result) plus one case per result type.
     for outcome in (None, Failure(), Error(), Skipped()):
         tc = TestCase()
         if outcome is not None:
             tc.result = outcome
         suite.add_testcase(tc)
     suite.update_statistics()
     self.assertEqual(suite.tests, 4)
     self.assertEqual(suite.failures, 1)
     self.assertEqual(suite.errors, 1)
     self.assertEqual(suite.skipped, 1)
# Example #6
def test_is_compliant_suite_returns_true_WHEN_no_failures_AND_no_errors_in_JUnitXML():
    """A suite containing only passing/skipped cases is reported compliant."""
    # One passing case and one skipped case.
    skipped_case = TestCase('case2')
    skipped_case.result = [Skipped()]

    suite = TestSuite('suite1')
    suite.add_property('build', '55')
    suite.add_testcase(TestCase('case1'))
    suite.add_testcase(skipped_case)

    # Wrap the suite in a JUnitXml document and check compliance.
    xml = JUnitXml()
    xml.add_testsuite(suite)

    control_result, message = is_compliant_suite(xml)
    assert control_result is True
    assert message == "All tests passed"
# Example #7
    def __exit__(self, type, value, traceback):
        """Collect results from the scenario's xunit.xml (if present) into
        ``self.ts``, then attach the suite to ``self.junit_xml``.

        If no xunit.xml exists in the artifacts directory, a single Skipped
        case named after the scenario is recorded instead.
        """
        xunit_file = os.path.join(self.artifacts_dir, "xunit.xml")
        tests, failures, skipped, errors = 0, 0, 0, 0
        if os.path.exists(xunit_file):
            xml = JUnitXml.fromfile(xunit_file)
            for i, suite in enumerate(xml):
                for case in suite:
                    # Prefix each case name with its suite index.
                    name = "scenario_{}: {}".format(i, case.name)
                    result = case.result
                    # NOTE(review): Error and Failure are swapped here — an
                    # Error result is counted as a failure and converted to
                    # Failure, and vice versa. The counters are consistent
                    # with the converted result, so this looks deliberate,
                    # but confirm against the consumer of this report.
                    if isinstance(result, Error):
                        failures += 1
                        result = Failure(result.message, result.type)
                    elif isinstance(result, Failure):
                        errors += 1
                        result = Error(result.message, result.type)
                    elif isinstance(result, Skipped):
                        skipped += 1
                    else:
                        # No result element means the case passed; only
                        # passing cases are counted in `tests` here.
                        tests += 1

                    tc = TestCase(name)
                    tc.result = result
                    self.ts.add_testcase(tc)
        else:
            # No xunit.xml produced: record the whole scenario as skipped.
            tc = TestCase(self.name)
            tc.result = Skipped()
            self.ts.add_testcase(tc)

        # Suite-level metadata; update_statistics() may recompute the counts
        # set just above from the attached cases.
        self.ts.hostname = self.env_name
        self.ts.timestamp = self.timer.start
        self.ts.time = self.timer.diff()
        self.ts.tests = tests
        self.ts.failures = failures
        self.ts.skipped = skipped
        self.ts.errors = errors
        self.ts.update_statistics()
        self.junit_xml.add_testsuite(self.ts)
 def run_tests(self):
     """Run all registered tests in order, retrying failures.

     Each test is attempted up to ``test.retry`` times with a 10 s pause
     between attempts. When a test with ``raise_on_error`` exhausts its
     retries, all remaining tests are recorded as Skipped instead of run.
     One TestCase per test is appended to the suite.
     """
     load_dotenv()
     # Name of the breaking test that caused the rest to be skipped (if any).
     skip_due_to = None
     for test in self.__tests:
         tc = TestCase(test.name, classname=os.environ['SCENARIONAME'])
         if skip_due_to is not None:
             # A previous breaking test failed: do not run, just record skip.
             tc.result = [
                 Skipped(
                     message=f"Skipped due to failing test: {skip_due_to}")
             ]
         else:
             attempt = 1
             while attempt <= test.retry:
                 # ##[group]/##[endgroup] fold the attempt's log output
                 # (Azure DevOps / GitHub Actions log grouping syntax).
                 print(
                     f"##[group][{test.name}] - Attempts ({attempt}/{test.retry})"
                 )
                 tc = self.run_test_and_get_tc(test.name, test.func)
                 if isinstance(tc.result, Failure):
                     attempt += 1
                     if attempt > test.retry and test.raise_on_error:
                         # Retries exhausted on a breaking test: skip the rest.
                         self.log.warning(
                             f"Breaking test case failed: {test.name}; Skipping remaining tests"
                         )
                         skip_due_to = test.name
                     else:
                         self.log.warning(
                             f"(Attempt {attempt-1}/Total {test.retry}) Test {test.name} failed"
                         )
                         if attempt <= test.retry:
                             self.log.warning("retrying in 10 secs")
                             time.sleep(10)
                     print("##[endgroup]")
                 else:
                     # Success (or non-Failure result): stop retrying.
                     print("##[endgroup]")
                     break
         self.__test_suite.add_testcase(tc)
    def run(self):
        """Run Zephyr's checkpatch.pl over the diff of the commit range and
        record a Failure when checkpatch reports errors.

        Sets ``self.case.result`` to ``Skipped`` when the checkpatch script
        cannot be found.
        """
        self.prepare()
        # Default to Zephyr's checkpatch if ZEPHYR_BASE is set
        checkpatch = os.path.join(self.zephyr_base or self.repo_path,
                                  'scripts', 'checkpatch.pl')
        if not os.path.exists(checkpatch):
            self.case.result = Skipped("checkpatch script not found",
                                       "skipped")
            # Fix: without this return the code below would still try to run
            # the missing script.
            return

        diff = subprocess.Popen(('git', 'diff', '%s' % (self.commit_range)),
                                stdout=subprocess.PIPE)
        try:
            # Fix: shell=True combined with an argument *sequence* passes only
            # the first element to the shell and silently drops the flags and
            # the '-' (read-from-stdin) argument; run the script directly.
            subprocess.check_output(
                (checkpatch, '--mailback', '--no-tree', '-'),
                stdin=diff.stdout,
                stderr=subprocess.STDOUT)

        except subprocess.CalledProcessError as ex:
            # checkpatch exits non-zero when it finds problems; only report a
            # Failure when actual errors (not just warnings) are present.
            match = re.search("([1-9][0-9]*) errors,",
                              ex.output.decode('utf8'))
            if match:
                self.case.result = Failure("Checkpatch issues", "failure")
                self.case.result._elem.text = ex.output.decode('utf8')
        finally:
            # Release the pipe and reap the git process.
            diff.stdout.close()
            diff.wait()
from junitparser import TestCase, TestSuite, JUnitXml, Skipped, Error

# Build two example cases: one skipped, one errored.
skipped_case = TestCase('case1')
skipped_case.result = Skipped()
error_case = TestCase('case2')
error_case.result = Error('Example error message', 'the_error_type')

# Assemble a suite with a property; the error case is removed again to
# demonstrate remove_testcase().
suite = TestSuite('suite1')
suite.add_property('build', '55')
for tc in (skipped_case, error_case):
    suite.add_testcase(tc)
suite.remove_testcase(error_case)

# Wrap the suite in a JUnitXml document and write it out.
xml = JUnitXml()
xml.add_testsuite(suite)
xml.write('C:/Users/RAG/Desktop/venky-python/junit.xml')
    def run(self):
        """Scan files newly added in the commit range with scancode-toolkit
        (JSON + HTML output) and flag license/copyright problems.

        Outcome is recorded on ``self.case.result``:
        * ``Skipped`` when scancode-toolkit is not installed.
        * ``Error`` when scancode itself fails to run.
        * ``Failure`` with a per-file report when issues are found.
        """
        self.prepare()

        scancode = "/opt/scancode-toolkit/scancode"
        if not os.path.exists(scancode):
            self.case.result = Skipped("scancode-toolkit not installed",
                                       "skipped")
            return

        os.makedirs("scancode-files", exist_ok=True)
        # Only files *added* (--diff-filter=A) in the commit range are scanned.
        new_files = sh.git("diff", "--name-only", "--diff-filter=A",
                           self.commit_range, **sh_special_args)

        if not new_files:
            return

        # Copy each new file into scancode-files/, preserving its path.
        for newf in new_files:
            file = str(newf).rstrip()
            os.makedirs(os.path.join('scancode-files', os.path.dirname(file)),
                        exist_ok=True)
            copy = os.path.join("scancode-files", file)
            copyfile(file, copy)

        try:
            cmd = [
                scancode, '--verbose', '--copyright', '--license',
                '--license-diag', '--info', '--classify', '--summary',
                '--html', 'scancode.html', '--json', 'scancode.json',
                'scancode-files/'
            ]

            logging.info(" ".join(cmd))

            # Fix: pass the argument list directly instead of joining it into
            # a shell string with shell=True — keeps file names with spaces or
            # metacharacters intact and avoids the shell-injection surface.
            subprocess.check_output(cmd, stderr=subprocess.STDOUT)

        except subprocess.CalledProcessError as ex:
            logging.error(ex.output)
            self.case.result = Error("Exception when running scancode",
                                     "error")
            return

        report = ""

        # File kinds exempt from the license check.
        whitelist_extensions = ['.yaml', '.html']
        whitelist_languages = ['CMake', 'HTML']
        with open('scancode.json', 'r') as json_fp:
            scancode_results = json.load(json_fp)
            for file in scancode_results['files']:
                if file['type'] == 'directory':
                    continue

                original_fp = str(file['path']).replace('scancode-files/', '')
                licenses = file['licenses']
                # Only source/script files outside the whitelists are checked.
                if (file['is_script'] or file['is_source']) and (
                        file['programming_language'] not in whitelist_languages
                ) and (file['extension'] not in whitelist_extensions):
                    if not file['licenses']:
                        report += (
                            "* {} missing license.\n".format(original_fp))
                    else:
                        for lic in licenses:
                            if lic['key'] != "apache-2.0":
                                report += (
                                    "* {} is not apache-2.0 licensed: {}\n".
                                    format(original_fp, lic['key']))
                            if lic['category'] != 'Permissive':
                                report += (
                                    "* {} has non-permissive license: {}\n".
                                    format(original_fp, lic['key']))
                            if lic['key'] == 'unknown-spdx':
                                report += (
                                    "* {} has unknown SPDX: {}\n".format(
                                        original_fp, lic['key']))

                    if not file['copyrights']:
                        report += (
                            "* {} missing copyright.\n".format(original_fp))

        if report != "":
            self.case.result = Failure("License/Copyright issues", "failure")
            preamble = "In most cases you do not need to do anything here, especially if the files reported below are going into ext/ and if license was approved for inclusion into ext/ already. Fix any missing license/copyright issues. The license exception if a JFYI for the maintainers and can be overriden when merging the pull request.\n"
            self.case.result._elem.text = preamble + report
# Example #12
 def test_result_eq(self):
     """Results compare equal only when their messages match."""
     # TODO: Weird, need to think of a better API
     same_a, same_b = Failure('A'), Failure('A')
     self.assertEqual(same_a, same_b)
     self.assertNotEqual(Skipped('B'), Skipped('A'))
     self.assertNotEqual(Error('C'), Error('B'))
    def run(self):
        """Parse the full Zephyr Kconfig tree and report issues.

        Checks performed:
        * Kconfig files must parse without errors.
        * No references to undefined Kconfig symbols (strict mode).
        * The top-level menu must not exceed ``max_top_items`` visible items.

        Sets ``self.case.result`` to Skipped (not a Zephyr tree), Error
        (Kconfig sources missing), or Failure (a check failed).
        """
        self.prepare()

        if not self.zephyr_base:
            self.case.result = Skipped("Not a Zephyr tree", "skipped")
            return

        # Put the Kconfiglib path first to make sure no local Kconfiglib version is
        # used
        kconfig_path = os.path.join(self.zephyr_base, "scripts", "kconfig")
        if not os.path.exists(kconfig_path):
            self.case.result = Error("Can't find Kconfig", "error")
            return

        # The import below only works after the path insertion, so order matters.
        sys.path.insert(0, kconfig_path)
        import kconfiglib

        # Look up Kconfig files relative to ZEPHYR_BASE
        os.environ["srctree"] = self.zephyr_base

        # Parse the entire Kconfig tree, to make sure we see all symbols
        os.environ["SOC_DIR"] = "soc/"
        os.environ["ARCH_DIR"] = "arch/"
        os.environ["BOARD_DIR"] = "boards/*/*"
        os.environ["ARCH"] = "*"
        os.environ["PROJECT_BINARY_DIR"] = tempfile.gettempdir()
        os.environ['GENERATED_DTS_BOARD_CONF'] = "dummy"

        # For multi repo support: ensure the sourced Kconfig.modules exists,
        # even if empty.
        open(os.path.join(tempfile.gettempdir(), "Kconfig.modules"),
             'a').close()

        # Enable strict Kconfig mode in Kconfiglib, which assumes there's just a
        # single Kconfig tree and warns for all references to undefined symbols
        os.environ["KCONFIG_STRICT"] = "y"

        try:
            kconf = kconfiglib.Kconfig()
        except kconfiglib.KconfigError as e:
            self.case.result = Failure("error while parsing Kconfig files",
                                       "failure")
            self.case.result._elem.text = str(e)
            return

        #
        # Look for undefined symbols
        #

        undef_ref_warnings = [
            warning for warning in kconf.warnings
            if "undefined symbol" in warning
        ]

        # Generating multiple JUnit <failure>s would be neater, but Shippable only
        # seems to display the first one
        if undef_ref_warnings:
            self.case.result = Failure("undefined Kconfig symbols", "failure")
            self.case.result._elem.text = "\n\n\n".join(undef_ref_warnings)
            return

        #
        # Check for stuff being added to the top-level menu
        #

        max_top_items = 50

        n_top_items = 0
        node = kconf.top_node.list
        while node:
            # Only count items with prompts. Other items will never be
            # shown in the menuconfig (outside show-all mode).
            if node.prompt:
                n_top_items += 1
            node = node.next

        if n_top_items > max_top_items:
            self.case.result = Failure("new entries in top menu", "failure")
            self.case.result._elem.text = """
Expected no more than {} potentially visible items (items with prompts) in the
top-level Kconfig menu, found {} items. If you're deliberately adding new
entries, then bump the 'max_top_items' variable in {}.
""".format(max_top_items, n_top_items, __file__)
# Example #14
 def test_result_eq(self):
     """Results compare equal only when their messages match."""
     # TODO: Weird, need to think of a better API
     left, right = Failure("A"), Failure("A")
     self.assertEqual(left, right)
     self.assertNotEqual(Skipped("B"), Skipped("A"))
     self.assertNotEqual(Error("C"), Error("B"))
def main(args=None):
    """Smoke-test a client site's resource API and write junit_test.xml.

    Parses --url/--apikey/--port, checks that the local port is free and the
    site is reachable, then exercises get_resources_to_check() and
    get_url_for_id(), recording one TestCase per check into a JUnit report.

    :param args: argument list to parse; defaults to sys.argv[1:].
    """
    if args is None:
        args = sys.argv[1:]

    parser = argparse.ArgumentParser()
    parser.add_argument("--url", required=True)
    # NOTE(review): the default apikey is pointless with required=True, and
    # a hard-coded key in source is a credential-leak risk — confirm.
    parser.add_argument("--apikey",
                        required=True,
                        default="cdeb2184-cb23-40a1-bdfd-d0fe2715547a")
    parser.add_argument("--port", type=int, default=4723)
    parsed_args = parser.parse_args(args)
    client_site_url = parsed_args.url
    if not client_site_url.endswith("/"):
        client_site_url = client_site_url + "/"
    apikey = parsed_args.apikey
    port = parsed_args.port
    # Probe whether the port is free by trying to bind it.
    s = socket.socket()
    try:
        s.bind(('localhost', port))
    except socket.error as err:
        # 98 is EADDRINUSE on Linux — not portable; errno.EADDRINUSE would be.
        if err.errno == 98:
            #Create Test Cases
            case1 = TestCase('Test1')
            case1.name = 'Test for get_resources'
            # NOTE(review): the second argument (result type) is given the
            # exception object, not a string — confirm junitparser accepts it.
            case1.result = Failure(
                'Test failed. Can not connect because port is actually used',
                err)
            #Create Test Suite
            suite = TestSuite('Suite1')
            suite.name = 'Test suite 1'
            suite.add_testcase(case1)
            #Add info into JunitXml
            xml = JUnitXml()
            xml.add_testsuite(suite)
            xml.write('junit_test.xml')
            sys.exit(
                "Port {port} is already in use.\n"
                "Is there another instance of {process} already running?\n"
                "To run multiple instances of {process} at once use the "
                "--port <num> option.".format(port=port, process=sys.argv[0]))
        else:
            raise
    # Reachability check against the client site.
    try:
        response = requests.get(client_site_url,
                                headers=dict(Authorization=apikey))
    except requests.exceptions.RequestException as err:
        #Create Test Cases
        case1 = TestCase('Test1')
        case1.name = 'Test the connection to client_site_url'
        case1.result = Failure(
            'Test failed. Cannot connect to the client_site_url', err)
        #Create Test Suite
        suite = TestSuite('Suite1')
        suite.name = 'Test suite 1'
        suite.add_testcase(case1)
        #Add info into JunitXml
        xml = JUnitXml()
        xml.add_testsuite(suite)
        xml.write('junit_test.xml')
        sys.exit(
            "The client could not connect with the client site due to {error}".
            format(error=err))
    success, response = get_resources_to_check(client_site_url, apikey)
    data = response.json()
    if success:
        #Create Test Cases
        case1 = TestCase('Test1')
        case1.name = 'Test for get_resources'
        # NOTE(review): Skipped is used here to record a *passing* test —
        # confirm this is the intended reporting convention.
        case1.result = Skipped(
            'Test passed successfully with 50 resources obtained')
    else:
        #Create Test Cases
        if not response.ok:
            case1 = TestCase('Test1')
            case1.name = 'Test for get_resources'
            case1.result = Failure(
                'Client could not get the list with code error {0} and reason {1}'
                .format(response.status_code,
                        response.reason), 'failure_of_connection')
        else:
            case1 = TestCase('Test1')
            case1.name = 'Test for get_resources'
            case1.result = Error(
                'Client could not get the list correctly, it only have got {0} resources'
                .format(len(data)), 'error_list')
    # NOTE(review): this indexes data even when the previous check failed —
    # an empty list would raise IndexError here; confirm intended.
    resource_id = data[0]
    success, response = get_url_for_id(client_site_url, apikey, resource_id)
    if success:
        #Create Test Cases
        case2 = TestCase('Test2')
        case2.name = 'Test for get_url_for_resource_id'
        case2.result = Skipped(
            'Test passed successfully with the url obtained correctly')
    else:
        #Create Test Cases
        if not response.ok:
            case2 = TestCase('Test2')
            case2.name = 'Test for get_url_for_resource_id'
            case2.result = Failure(
                'Client could not get the url for the resource with code error {0} and reason {1}'
                .format(response.status_code,
                        response.reason), 'failure_of_connection')
        else:
            case2 = TestCase('Test2')
            case2.name = 'Test for get_url_for_resource_id'
            case2.result = Error('Client could not get the url correctly',
                                 'the_error_type')
    #Create Test Suite
    suite = TestSuite('Suite1')
    suite.name = 'Test suite 1'
    suite.add_testcase(case1)
    suite.add_testcase(case2)
    #Add info into JunitXml
    xml = JUnitXml()
    xml.add_testsuite(suite)
    xml.write('junit_test.xml')
# Example #16
    def test_case_attributes(self):
        """Attributes and a Skipped result round-trip through the element."""
        tc = TestCase()
        tc.name = "testname"
        tc.classname = "testclassname"
        tc.time = 15.123
        tc.result = [Skipped()]
        tc.result[0].text = "woah skipped"
        for attr, expected in (("name", "testname"),
                               ("classname", "testclassname"),
                               ("time", 15.123)):
            self.assertEqual(getattr(tc, attr), expected)
        self.assertIsInstance(tc.result[0], Skipped)
        self.assertEqual(tc.result[0].text, "woah skipped")

    def test_case_init_with_attributes(self):
        """Constructor arguments map to name, classname and time."""
        tc = TestCase("testname", "testclassname", 15.123)
        tc.result = [Skipped()]
        self.assertEqual(tc.name, "testname")
        self.assertEqual(tc.classname, "testclassname")
        self.assertEqual(tc.time, 15.123)
        self.assertIsInstance(tc.result[0], Skipped)

    def test_case_output(self):
        """system_out / system_err can be set and later overwritten.

        NOTE(review): this method is shadowed by the identically named
        definition below, so it never actually runs. Its original final line,
        ``assertIsInstance(case.result, Skipped)``, asserted a Skipped result
        on a case that was never assigned one — a stale assertion that would
        fail if this method were ever executed; it has been removed.
        """
        case = TestCase()
        case.system_err = "error message"
        case.system_out = "out message"
        self.assertEqual(case.system_err, "error message")
        self.assertEqual(case.system_out, "out message")
        case.system_err = "error2"
        case.system_out = "out2"
        self.assertEqual(case.system_err, "error2")
        self.assertEqual(case.system_out, "out2")

    def test_case_output(self):
        """Output streams accept an initial value and a later overwrite."""
        tc = TestCase()
        for err_text, out_text in (("error message", "out message"),
                                   ("error2", "out2")):
            tc.system_err = err_text
            tc.system_out = out_text
            self.assertEqual(tc.system_err, err_text)
            self.assertEqual(tc.system_out, out_text)

    def test_set_multiple_results(self):
        """Assigning a second result replaces the first one."""
        tc = TestCase()
        tc.result = Skipped()
        tc.result = Failure()
        self.assertIsInstance(tc.result, Failure)

    def test_monkypatch(self):
        """A new Attr descriptor can be patched onto TestCase at runtime."""
        TestCase.id = Attr("id")
        patched = TestCase()
        patched.id = "100"
        self.assertEqual(patched.id, "100")

    def test_equal(self):
        """Two cases carrying the same name compare equal."""
        left = TestCase()
        left.name = "test1"
        right = TestCase()
        right.name = "test1"
        self.assertEqual(left, right)