Example #1
    def run(self):
        self.prepare()
        git_root = sh.git("rev-parse", "--show-toplevel").strip()
        codeowners = os.path.join(git_root, "CODEOWNERS")
        if not os.path.exists(codeowners):
            self.case.result = Skipped(
                "CODEOWNERS not available in this repo.", "skipped")
            return

        commit = sh.git("diff", "--name-only", "--diff-filter=A",
                        self.commit_range, **sh_special_args)
        new_files = commit.split("\n")
        files_in_tree = sh.git("ls-files", **sh_special_args).split("\n")
        git = set(files_in_tree)
        if new_files:
            owned = self.parse_codeowners(git_root, codeowners)
            new_not_owned = []
            for f in new_files:
                if not f:
                    continue
                if f not in owned:
                    new_not_owned.append(f)

            if new_not_owned:
                self.case.result = Error("CODEOWNERS Issues", "failure")
                self.case.result._elem.text = "New files added that are not covered in CODEOWNERS:\n\n"
                self.case.result._elem.text += "\n".join(new_not_owned)
                self.case.result._elem.text += "\n\nPlease add one or more entries in the CODEWONERS file to cover those files"
Example #2
    def main(self, results_path, test_threshold, output_path, extra=''):
        result = ""
        filename = os.path.basename(results_path)
        with open(results_path, 'r') as output:
            for line in output.readlines():
                result += line

        test = self.test_factory.getTest(result)
        results, params = test.parse_output(result)
        test_type = list(test.type.keys())[0]

        testcase = TestCase(test_type)

        if test.failed(results, test_threshold):
            testcase.result = Error(test.fail_msg, test_type)

        suite = TestSuite(test_type + extra)
        for name, value in results.items():
            suite.add_property(name, value)

        for name, value in params.items():
            suite.add_property(name, value)

        testcase.system_out = result
        suite.add_testcase(testcase)

        if self.formatter is None:
            raise ValueError("No Formatter given !")

        junit = self.formatter
        junit.add_testsuite(suite)

        if output_path[-1] != '/':
            output_path += '/'
        junit.write(output_path + 'junit-' + filename + '-' + extra + self.format)
Example #3
    def run(self):
        self.prepare()

        if os.path.exists(DOCS_WARNING_FILE) and os.path.getsize(DOCS_WARNING_FILE) > 0:
            with open(DOCS_WARNING_FILE, "rb") as f:
                log = f.read()

                self.case.result = Error("Documentation Issues", "failure")
                self.case.result._elem.text = log.decode('utf8')
Example #4
def main():
    for artifact_path in find_artifacts(ARTIFACT_DEST_DIR):
        logger.info(
            f'Found Artifact in path[{artifact_path}]. Building Artifact')
        notebook_name: str = os.path.basename(artifact_path).rsplit('.', 1)[0]
        extraction_path: str = tempfile.mkdtemp(prefix=notebook_name)
        build_script_path: str = None
        with tarfile.open(artifact_path, "r:gz") as tar:
            for member in tar.getmembers():
                if member.isdir():
                    dir_path: str = os.path.join(extraction_path, member.path)
                    os.makedirs(dir_path)

                elif member.isfile():
                    filepath: str = os.path.join(extraction_path, member.path)
                    with open(filepath, 'wb') as stream:
                        stream.write(tar.extractfile(member).read())

                    if os.path.basename(member.path) == 'build.sh':
                        build_script_path = filepath

                else:
                    raise NotImplementedError

        owd: str = os.getcwd()
        build_dir: str = os.path.dirname(build_script_path)
        logger.info(f'Changing to build_dir[{build_dir}]')
        os.chdir(build_dir)
        BUILD_STATE[notebook_name] = {'stdout': [], 'stderr': []}
        start = datetime.utcnow()
        for return_code, comm in run_command(['bash', 'build.sh']):
            if return_code > 0:
                logger.error(comm)
                BUILD_STATE[notebook_name]['exit-code'] = return_code
                BUILD_STATE[notebook_name]['stderr'].append(comm)

            else:
                BUILD_STATE[notebook_name]['exit-code'] = return_code
                BUILD_STATE[notebook_name]['stdout'].append(comm)
                logger.info(comm)

        delta = datetime.utcnow() - start
        logger.info(f'Changing back to old working dir[{owd}]')
        os.chdir(owd)
        test_case = TestCase(f'{notebook_name} Test')
        if BUILD_STATE[notebook_name]['exit-code'] > 0:
            test_case.result = Error(
                '\n'.join(BUILD_STATE[notebook_name]['stderr']),
                str(BUILD_STATE[notebook_name]['exit-code']))

        TEST_CASES.append(test_case)

    test_suite = TestSuite('Notebooks Test Suite')
    for case in TEST_CASES:
        test_suite.add_testcase(case)
Example #5
    def error(self, msg):
        """
        Signals a problem with running the test, with message 'msg'.

        Raises an exception internally, so you do not need to put a 'return'
        after error().

        Any failures generated prior to the error() are included automatically
        in the message. Usually, any failures would indicate problems with the
        test code.
        """
        if self.case.result:
            msg += "\n\nFailures before error: " + self.case.result._elem.text

        self.case.result = Error(msg, "error")

        raise EndTest
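A minimal usage sketch for error(), assuming a hypothetical check subclass in the same style as the run() methods above (self.case is a junitparser TestCase, and EndTest is the exception error() raises):

    def run(self):
        self.prepare()
        # 'some/required/file' is illustrative only.
        if not os.path.exists("some/required/file"):
            # error() raises EndTest internally, so no 'return' is needed here.
            self.error("required input file is missing")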
Example #6
    def test_add_case(self):
        suite = TestSuite()
        case1 = TestCase()
        case2 = TestCase()
        case2.result = Failure()
        case3 = TestCase()
        case3.result = Error()
        case4 = TestCase()
        case4.result = Skipped()
        suite.add_testcase(case1)
        suite.add_testcase(case2)
        suite.add_testcase(case3)
        suite.add_testcase(case4)
        suite.update_statistics()
        self.assertEqual(suite.tests, 4)
        self.assertEqual(suite.failures, 1)
        self.assertEqual(suite.errors, 1)
        self.assertEqual(suite.skipped, 1)
Example #7
def test_is_compliant_suite_returns_false_WHEN_errors_in_JUnitXML():
    # Create cases
    case1 = TestCase('case1')
    case1.result = [Error()]
    case2 = TestCase('case2')

    # Create suite and add cases
    suite = TestSuite('suite1')
    suite.add_property('build', '55')
    suite.add_testcase(case1)
    suite.add_testcase(case2)

    # Add suite to JunitXml
    xml = JUnitXml()
    xml.add_testsuite(suite)

    (control_result, message) = is_compliant_suite(xml)
    assert control_result is False
    assert message == "Tests contain errors"
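The test above only exercises the error path of is_compliant_suite. A hypothetical implementation consistent with those assertions (assuming junitparser 2.x, where case.result is a list) might look like the sketch below; the real function may well differ:

from junitparser import Error, JUnitXml

def is_compliant_suite(xml: JUnitXml):
    # Fail the compliance check if any case recorded an Error result.
    for suite in xml:
        for case in suite:
            if any(isinstance(result, Error) for result in case.result):
                return False, "Tests contain errors"
    # The success message is an assumption; the test does not constrain it.
    return True, "Tests are compliant"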
Example #8
    def __exit__(self, type, value, traceback):
        xunit_file = os.path.join(self.artifacts_dir, "xunit.xml")
        tests, failures, skipped, errors = 0, 0, 0, 0
        if os.path.exists(xunit_file):
            xml = JUnitXml.fromfile(xunit_file)
            for i, suite in enumerate(xml):
                for case in suite:
                    name = "scenario_{}: {}".format(i, case.name)
                    result = case.result
                    # Swap Error and Failure results; the counters below track
                    # the post-swap result types.
                    if isinstance(result, Error):
                        failures += 1
                        result = Failure(result.message, result.type)
                    elif isinstance(result, Failure):
                        errors += 1
                        result = Error(result.message, result.type)
                    elif isinstance(result, Skipped):
                        skipped += 1
                    else:
                        tests += 1

                    tc = TestCase(name)
                    tc.result = result
                    self.ts.add_testcase(tc)
        else:
            tc = TestCase(self.name)
            tc.result = Skipped()
            self.ts.add_testcase(tc)

        self.ts.hostname = self.env_name
        self.ts.timestamp = self.timer.start
        self.ts.time = self.timer.diff()
        self.ts.tests = tests
        self.ts.failures = failures
        self.ts.skipped = skipped
        self.ts.errors = errors
        self.ts.update_statistics()
        self.junit_xml.add_testsuite(self.ts)
Example #9
    def run(self):
        self.prepare()

        scancode = "/opt/scancode-toolkit/scancode"
        if not os.path.exists(scancode):
            self.case.result = Skipped("scancode-toolkit not installed",
                                       "skipped")
            return

        os.makedirs("scancode-files", exist_ok=True)
        new_files = sh.git("diff", "--name-only", "--diff-filter=A",
                           self.commit_range, **sh_special_args)

        if not new_files:
            return

        for newf in new_files:
            file = str(newf).rstrip()
            os.makedirs(os.path.join('scancode-files', os.path.dirname(file)),
                        exist_ok=True)
            copy = os.path.join("scancode-files", file)
            copyfile(file, copy)

        try:
            cmd = [
                scancode, '--verbose', '--copyright', '--license',
                '--license-diag', '--info', '--classify', '--summary',
                '--html', 'scancode.html', '--json', 'scancode.json',
                'scancode-files/'
            ]

            cmd_str = " ".join(cmd)
            logging.info(cmd_str)

            subprocess.check_output(cmd_str,
                                    stderr=subprocess.STDOUT,
                                    shell=True)

        except subprocess.CalledProcessError as ex:
            logging.error(ex.output)
            self.case.result = Error("Exception when running scancode",
                                     "error")
            return

        report = ""

        whitelist_extensions = ['.yaml', '.html']
        whitelist_languages = ['CMake', 'HTML']
        with open('scancode.json', 'r') as json_fp:
            scancode_results = json.load(json_fp)
            for file in scancode_results['files']:
                if file['type'] == 'directory':
                    continue

                original_fp = str(file['path']).replace('scancode-files/', '')
                licenses = file['licenses']
                if (file['is_script'] or file['is_source']) and (
                        file['programming_language'] not in whitelist_languages
                ) and (file['extension'] not in whitelist_extensions):
                    if not file['licenses']:
                        report += (
                            "* {} missing license.\n".format(original_fp))
                    else:
                        for lic in licenses:
                            if lic['key'] != "apache-2.0":
                                report += (
                                    "* {} is not apache-2.0 licensed: {}\n".
                                    format(original_fp, lic['key']))
                            if lic['category'] != 'Permissive':
                                report += (
                                    "* {} has non-permissive license: {}\n".
                                    format(original_fp, lic['key']))
                            if lic['key'] == 'unknown-spdx':
                                report += (
                                    "* {} has unknown SPDX: {}\n".format(
                                        original_fp, lic['key']))

                    if not file['copyrights']:
                        report += (
                            "* {} missing copyright.\n".format(original_fp))

        if report != "":
            self.case.result = Failure("License/Copyright issues", "failure")
            preamble = "In most cases you do not need to do anything here, especially if the files reported below are going into ext/ and if license was approved for inclusion into ext/ already. Fix any missing license/copyright issues. The license exception if a JFYI for the maintainers and can be overriden when merging the pull request.\n"
            self.case.result._elem.text = preamble + report
Example #10
    def run(self):
        self.prepare()

        if not self.zephyr_base:
            self.case.result = Skipped("Not a Zephyr tree", "skipped")
            return

        # Put the Kconfiglib path first to make sure no local Kconfiglib version is
        # used
        kconfig_path = os.path.join(self.zephyr_base, "scripts", "kconfig")
        if not os.path.exists(kconfig_path):
            self.case.result = Error("Can't find Kconfig", "error")
            return

        sys.path.insert(0, kconfig_path)
        import kconfiglib

        # Look up Kconfig files relative to ZEPHYR_BASE
        os.environ["srctree"] = self.zephyr_base

        # Parse the entire Kconfig tree, to make sure we see all symbols
        os.environ["SOC_DIR"] = "soc/"
        os.environ["ARCH_DIR"] = "arch/"
        os.environ["BOARD_DIR"] = "boards/*/*"
        os.environ["ARCH"] = "*"
        os.environ["PROJECT_BINARY_DIR"] = tempfile.gettempdir()
        os.environ['GENERATED_DTS_BOARD_CONF'] = "dummy"

        # For multi repo support
        open(os.path.join(tempfile.gettempdir(), "Kconfig.modules"),
             'a').close()

        # Enable strict Kconfig mode in Kconfiglib, which assumes there's just a
        # single Kconfig tree and warns for all references to undefined symbols
        os.environ["KCONFIG_STRICT"] = "y"

        try:
            kconf = kconfiglib.Kconfig()
        except kconfiglib.KconfigError as e:
            self.case.result = Failure("error while parsing Kconfig files",
                                       "failure")
            self.case.result._elem.text = str(e)
            return

        #
        # Look for undefined symbols
        #

        undef_ref_warnings = [
            warning for warning in kconf.warnings
            if "undefined symbol" in warning
        ]

        # Generating multiple JUnit <failure>s would be neater, but Shippable only
        # seems to display the first one
        if undef_ref_warnings:
            self.case.result = Failure("undefined Kconfig symbols", "failure")
            self.case.result._elem.text = "\n\n\n".join(undef_ref_warnings)
            return

        #
        # Check for stuff being added to the top-level menu
        #

        max_top_items = 50

        n_top_items = 0
        node = kconf.top_node.list
        while node:
            # Only count items with prompts. Other items will never be
            # shown in the menuconfig (outside show-all mode).
            if node.prompt:
                n_top_items += 1
            node = node.next

        if n_top_items > max_top_items:
            self.case.result = Failure("new entries in top menu", "failure")
            self.case.result._elem.text = """
Expected no more than {} potentially visible items (items with prompts) in the
top-level Kconfig menu, found {} items. If you're deliberately adding new
entries, then bump the 'max_top_items' variable in {}.
""".format(max_top_items, n_top_items, __file__)
Example #11
    def test_result_eq(self):
        # TODO: Weird, need to think of a better API
        self.assertEqual(Failure("A"), Failure("A"))
        self.assertNotEqual(Skipped("B"), Skipped("A"))
        self.assertNotEqual(Error("C"), Error("B"))
Example #12
from junitparser import TestCase, TestSuite, JUnitXml, Skipped, Error

# Create cases
case1 = TestCase('case1')
case1.result = Skipped()
case2 = TestCase('case2')
case2.result = Error('Example error message', 'the_error_type')

# Create suite and add cases
suite = TestSuite('suite1')
suite.add_property('build', '55')
suite.add_testcase(case1)
suite.add_testcase(case2)
suite.remove_testcase(case2)

# Add suite to JunitXml
xml = JUnitXml()
xml.add_testsuite(suite)
xml.write('C:/Users/RAG/Desktop/venky-python/junit.xml')
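As a follow-up, the report written above can be read back with JUnitXml.fromfile; a minimal sketch (the relative 'junit.xml' path is illustrative):

from junitparser import JUnitXml

# Read the report back and print per-suite statistics.
xml = JUnitXml.fromfile('junit.xml')
for suite in xml:
    suite.update_statistics()
    print(suite.name, suite.tests, suite.failures, suite.errors, suite.skipped)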
Example #13
def main(args=None):

    if args is None:
        args = sys.argv[1:]

    parser = argparse.ArgumentParser()
    parser.add_argument("--url", required=True)
    parser.add_argument("--apikey",
                        required=True,
                        default="cdeb2184-cb23-40a1-bdfd-d0fe2715547a")
    parser.add_argument("--port", type=int, default=4723)
    parsed_args = parser.parse_args(args)
    client_site_url = parsed_args.url
    if not client_site_url.endswith("/"):
        client_site_url = client_site_url + "/"
    apikey = parsed_args.apikey
    port = parsed_args.port
    s = socket.socket()
    try:
        s.bind(('localhost', port))
    except socket.error as err:
        if err.errno == 98:
            #Create Test Cases
            case1 = TestCase('Test1')
            case1.name = 'Test for get_resources'
            case1.result = Failure(
                'Test failed. Cannot connect because the port is already in use: {}'.format(err),
                'socket_error')
            #Create Test Suite
            suite = TestSuite('Suite1')
            suite.name = 'Test suite 1'
            suite.add_testcase(case1)
            #Add info into JunitXml
            xml = JUnitXml()
            xml.add_testsuite(suite)
            xml.write('junit_test.xml')
            sys.exit(
                "Port {port} is already in use.\n"
                "Is there another instance of {process} already running?\n"
                "To run multiple instances of {process} at once use the "
                "--port <num> option.".format(port=port, process=sys.argv[0]))
        else:
            raise
    try:
        response = requests.get(client_site_url,
                                headers=dict(Authorization=apikey))
    except requests.exceptions.RequestException as err:
        #Create Test Cases
        case1 = TestCase('Test1')
        case1.name = 'Test the connection to client_site_url'
        case1.result = Failure(
            'Test failed. Cannot connect to the client_site_url: {}'.format(err),
            'connection_error')
        #Create Test Suite
        suite = TestSuite('Suite1')
        suite.name = 'Test suite 1'
        suite.add_testcase(case1)
        #Add info into JunitXml
        xml = JUnitXml()
        xml.add_testsuite(suite)
        xml.write('junit_test.xml')
        sys.exit(
            "The client could not connect with the client site due to {error}".
            format(error=err))
    success, response = get_resources_to_check(client_site_url, apikey)
    data = response.json()
    if success:
        #Create Test Cases
        case1 = TestCase('Test1')
        case1.name = 'Test for get_resources'
        case1.result = Skipped(
            'Test passed successfully with 50 resources obtained')
    else:
        #Create Test Cases
        if not response.ok:
            case1 = TestCase('Test1')
            case1.name = 'Test for get_resources'
            case1.result = Failure(
                'Client could not get the list with error code {0} and reason {1}'
                .format(response.status_code,
                        response.reason), 'failure_of_connection')
        else:
            case1 = TestCase('Test1')
            case1.name = 'Test for get_resources'
            case1.result = Error(
                'Client could not get the list correctly; it only got {0} resources'
                .format(len(data)), 'error_list')
    resource_id = data[0]
    success, response = get_url_for_id(client_site_url, apikey, resource_id)
    if success:
        #Create Test Cases
        case2 = TestCase('Test2')
        case2.name = 'Test for get_url_for_resource_id'
        case2.result = Skipped(
            'Test passed successfully with the url obtained correctly')
    else:
        #Create Test Cases
        if not response.ok:
            case2 = TestCase('Test2')
            case2.name = 'Test for get_url_for_resource_id'
            case2.result = Failure(
                'Client could not get the url for the resource with error code {0} and reason {1}'
                .format(response.status_code,
                        response.reason), 'failure_of_connection')
        else:
            case2 = TestCase('Test2')
            case2.name = 'Test for get_url_for_resource_id'
            case2.result = Error('Client could not get the url correctly',
                                 'the_error_type')
    #Create Test Suite
    suite = TestSuite('Suite1')
    suite.name = 'Test suite 1'
    suite.add_testcase(case1)
    suite.add_testcase(case2)
    #Add info into JunitXml
    xml = JUnitXml()
    xml.add_testsuite(suite)
    xml.write('junit_test.xml')
Example #14
    def test_result_eq(self):
        # TODO: Weird, need to think of a better API
        self.assertEqual(Failure('A'), Failure('A'))
        self.assertNotEqual(Skipped('B'), Skipped('A'))
        self.assertNotEqual(Error('C'), Error('B'))