Example #1
    def error(self, msg):
        """
        Signals a problem with running the test, with message 'msg'.

        Raises an exception internally, so you do not need to put a 'return'
        after error().

        Any failures generated prior to the error() are included automatically
        in the message. Usually, any failures would indicate problems with the
        test code.
        """
        if self.case.result:
            msg += "\n\nFailures before error: " + self.case.result._elem.text

        self.case.result = Error(msg, "error")

        raise EndTest
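A minimal sketch of how this helper is meant to be consumed, assuming a harness that catches `EndTest`; the `run_check` function below is hypothetical, and only `EndTest` and `error()` come from the example above:

class EndTest(Exception):
    """Raised by error() so a check can abort without an explicit return."""

def run_check(check):
    # Hypothetical harness: because error() raises EndTest, the check can
    # bail out from any call depth; the result was already stored on the case.
    try:
        check.run()
    except EndTest:
        pass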
Example #2
def test_add_case(self):
    suite = TestSuite()
    case1 = TestCase()
    case2 = TestCase()
    case2.result = Failure()
    case3 = TestCase()
    case3.result = Error()
    case4 = TestCase()
    case4.result = Skipped()
    suite.add_testcase(case1)
    suite.add_testcase(case2)
    suite.add_testcase(case3)
    suite.add_testcase(case4)
    suite.update_statistics()
    self.assertEqual(suite.tests, 4)
    self.assertEqual(suite.failures, 1)
    self.assertEqual(suite.errors, 1)
    self.assertEqual(suite.skipped, 1)
Example #3
def test_is_compliant_suite_returns_false_WHEN_errors_in_JUnitXML():
    # Create cases
    case1 = TestCase('case1')
    case1.result = [Error()]
    case2 = TestCase('case2')

    # Create suite and add cases
    suite = TestSuite('suite1')
    suite.add_property('build', '55')
    suite.add_testcase(case1)
    suite.add_testcase(case2)

    # Add suite to JunitXml
    xml = JUnitXml()
    xml.add_testsuite(suite)

    (control_result, message) = is_compliant_suite(xml)
    assert control_result is False
    assert message == "Tests contain errors"
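The `is_compliant_suite` implementation is not shown in the source; a minimal sketch consistent with this test, assuming `case.result` is list-valued as in the assignment above and that the passing message is free-form, might look like:

from junitparser import Error

def is_compliant_suite(xml):
    # Non-compliant as soon as any case carries an Error result.
    for suite in xml:
        for case in suite:
            if any(isinstance(r, Error) for r in case.result):
                return False, "Tests contain errors"
    return True, "Tests are compliant"  # passing message is an assumption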
Example #4
    def __exit__(self, exc_type, exc_value, exc_tb):
        xunit_file = os.path.join(self.artifacts_dir, "xunit.xml")
        tests, failures, skipped, errors = 0, 0, 0, 0
        if os.path.exists(xunit_file):
            xml = JUnitXml.fromfile(xunit_file)
            for i, suite in enumerate(xml):
                for case in suite:
                    name = "scenario_{}: {}".format(i, case.name)
                    result = case.result
                    # Note: as written this swaps the categories, re-recording
                    # Error results as failures and Failure results as errors.
                    if isinstance(result, Error):
                        failures += 1
                        result = Failure(result.message, result.type)
                    elif isinstance(result, Failure):
                        errors += 1
                        result = Error(result.message, result.type)
                    elif isinstance(result, Skipped):
                        skipped += 1
                    else:
                        tests += 1

                    tc = TestCase(name)
                    tc.result = result
                    self.ts.add_testcase(tc)
        else:
            tc = TestCase(self.name)
            tc.result = Skipped()
            self.ts.add_testcase(tc)

        self.ts.hostname = self.env_name
        self.ts.timestamp = self.timer.start
        self.ts.time = self.timer.diff()
        self.ts.tests = tests
        self.ts.failures = failures
        self.ts.skipped = skipped
        self.ts.errors = errors
        self.ts.update_statistics()
        self.junit_xml.add_testsuite(self.ts)
Example #5
    def run(self):
        self.prepare()

        scancode = "/opt/scancode-toolkit/scancode"
        if not os.path.exists(scancode):
            self.case.result = Skipped("scancode-toolkit not installed",
                                       "skipped")
            return

        os.makedirs("scancode-files", exist_ok=True)
        new_files = sh.git("diff", "--name-only", "--diff-filter=A",
                           self.commit_range, **sh_special_args)

        if not new_files:
            return

        for newf in new_files:
            file = str(newf).rstrip()
            os.makedirs(os.path.join('scancode-files', os.path.dirname(file)),
                        exist_ok=True)
            copy = os.path.join("scancode-files", file)
            copyfile(file, copy)

        try:
            cmd = [
                scancode, '--verbose', '--copyright', '--license',
                '--license-diag', '--info', '--classify', '--summary',
                '--html', 'scancode.html', '--json', 'scancode.json',
                'scancode-files/'
            ]

            cmd_str = " ".join(cmd)
            logging.info(cmd_str)

            subprocess.check_output(cmd_str,
                                    stderr=subprocess.STDOUT,
                                    shell=True)

        except subprocess.CalledProcessError as ex:
            logging.error(ex.output)
            self.case.result = Error("Exception when running scancode",
                                     "error")
            return

        report = ""

        whitelist_extensions = ['.yaml', '.html']
        whitelist_languages = ['CMake', 'HTML']
        with open('scancode.json', 'r') as json_fp:
            scancode_results = json.load(json_fp)
            for file in scancode_results['files']:
                if file['type'] == 'directory':
                    continue

                original_fp = str(file['path']).replace('scancode-files/', '')
                licenses = file['licenses']
                if (file['is_script'] or file['is_source']) and (
                        file['programming_language'] not in whitelist_languages
                ) and (file['extension'] not in whitelist_extensions):
                    if not file['licenses']:
                        report += (
                            "* {} missing license.\n".format(original_fp))
                    else:
                        for lic in licenses:
                            if lic['key'] != "apache-2.0":
                                report += (
                                    "* {} is not apache-2.0 licensed: {}\n".
                                    format(original_fp, lic['key']))
                            if lic['category'] != 'Permissive':
                                report += (
                                    "* {} has non-permissive license: {}\n".
                                    format(original_fp, lic['key']))
                            if lic['key'] == 'unknown-spdx':
                                report += (
                                    "* {} has unknown SPDX: {}\n".format(
                                        original_fp, lic['key']))

                    if not file['copyrights']:
                        report += (
                            "* {} missing copyright.\n".format(original_fp))

        if report != "":
            self.case.result = Failure("License/Copyright issues", "failure")
            preamble = ("In most cases you do not need to do anything here, "
                        "especially if the files reported below are going "
                        "into ext/ and if the license was approved for "
                        "inclusion into ext/ already. Fix any missing "
                        "license/copyright issues. The license exception is "
                        "just an FYI for the maintainers and can be "
                        "overridden when merging the pull request.\n")
            self.case.result._elem.text = preamble + report
Example #6
    def run(self):
        self.prepare()

        if not self.zephyr_base:
            self.case.result = Skipped("Not a Zephyr tree", "skipped")
            return

        # Put the Kconfiglib path first to make sure no local Kconfiglib version is
        # used
        kconfig_path = os.path.join(self.zephyr_base, "scripts", "kconfig")
        if not os.path.exists(kconfig_path):
            self.case.result = Error("Can't find Kconfig", "error")
            return

        sys.path.insert(0, kconfig_path)
        import kconfiglib

        # Look up Kconfig files relative to ZEPHYR_BASE
        os.environ["srctree"] = self.zephyr_base

        # Parse the entire Kconfig tree, to make sure we see all symbols
        os.environ["SOC_DIR"] = "soc/"
        os.environ["ARCH_DIR"] = "arch/"
        os.environ["BOARD_DIR"] = "boards/*/*"
        os.environ["ARCH"] = "*"
        os.environ["PROJECT_BINARY_DIR"] = tempfile.gettempdir()
        os.environ['GENERATED_DTS_BOARD_CONF'] = "dummy"

        # For multi repo support
        open(os.path.join(tempfile.gettempdir(), "Kconfig.modules"),
             'a').close()

        # Enable strict Kconfig mode in Kconfiglib, which assumes there's just a
        # single Kconfig tree and warns for all references to undefined symbols
        os.environ["KCONFIG_STRICT"] = "y"

        try:
            kconf = kconfiglib.Kconfig()
        except kconfiglib.KconfigError as e:
            self.case.result = Failure("error while parsing Kconfig files",
                                       "failure")
            self.case.result._elem.text = str(e)
            return

        #
        # Look for undefined symbols
        #

        undef_ref_warnings = [
            warning for warning in kconf.warnings
            if "undefined symbol" in warning
        ]

        # Generating multiple JUnit <failure>s would be neater, but Shippable only
        # seems to display the first one
        if undef_ref_warnings:
            self.case.result = Failure("undefined Kconfig symbols", "failure")
            self.case.result._elem.text = "\n\n\n".join(undef_ref_warnings)
            return

        #
        # Check for stuff being added to the top-level menu
        #

        max_top_items = 50

        n_top_items = 0
        node = kconf.top_node.list
        while node:
            # Only count items with prompts. Other items will never be
            # shown in the menuconfig (outside show-all mode).
            if node.prompt:
                n_top_items += 1
            node = node.next

        if n_top_items > max_top_items:
            self.case.result = Failure("new entries in top menu", "failure")
            self.case.result._elem.text = """
Expected no more than {} potentially visible items (items with prompts) in the
top-level Kconfig menu, found {} items. If you're deliberately adding new
entries, then bump the 'max_top_items' variable in {}.
""".format(max_top_items, n_top_items, __file__)
Example #7
def test_result_eq(self):
    # TODO: Weird, need to think of a better API
    self.assertEqual(Failure("A"), Failure("A"))
    self.assertNotEqual(Skipped("B"), Skipped("A"))
    self.assertNotEqual(Error("C"), Error("B"))
Example #8
from junitparser import TestCase, TestSuite, JUnitXml, Skipped, Error

# Create cases
case1 = TestCase('case1')
case1.result = Skipped()
case2 = TestCase('case2')
case2.result = Error('Example error message', 'the_error_type')

# Create suite and add cases
suite = TestSuite('suite1')
suite.add_property('build', '55')
suite.add_testcase(case1)
suite.add_testcase(case2)
suite.remove_testcase(case2)

# Add suite to JunitXml
xml = JUnitXml()
xml.add_testsuite(suite)
xml.write('C:/Users/RAG/Desktop/venky-python/junit.xml')
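Not part of the original snippet, but the report written above can be read back and inspected with `JUnitXml.fromfile`, the same API used in Example #4:

xml = JUnitXml.fromfile('C:/Users/RAG/Desktop/venky-python/junit.xml')
for suite in xml:
    print(suite.name)
    for case in suite:
        # case.result is None for a plain pass, otherwise Skipped/Failure/Error
        print(case.name, type(case.result).__name__)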
Example #9
import argparse
import errno
import socket
import sys

import requests
from junitparser import (TestCase, TestSuite, JUnitXml, Skipped, Error,
                         Failure)

# get_resources_to_check() and get_url_for_id() are helper functions
# defined elsewhere in the original source.


def main(args=None):
    if args is None:
        args = sys.argv[1:]

    parser = argparse.ArgumentParser()
    parser.add_argument("--url", required=True)
    parser.add_argument("--apikey",
                        required=True,
                        default="cdeb2184-cb23-40a1-bdfd-d0fe2715547a")
    parser.add_argument("--port", type=int, default=4723)
    parsed_args = parser.parse_args(args)
    client_site_url = parsed_args.url
    if not client_site_url.endswith("/"):
        client_site_url = client_site_url + "/"
    apikey = parsed_args.apikey
    port = parsed_args.port
    s = socket.socket()
    try:
        s.bind(('localhost', port))
    except socket.error as err:
        if err.errno == errno.EADDRINUSE:  # typically 98 on Linux
            #Create Test Cases
            case1 = TestCase('Test1')
            case1.name = 'Test for get_resources'
            case1.result = Failure(
                'Test failed. Cannot connect because the port is already '
                'in use: {}'.format(err), 'failure')
            #Create Test Suite
            suite = TestSuite('Suite1')
            suite.name = 'Test suite 1'
            suite.add_testcase(case1)
            #Add info into JunitXml
            xml = JUnitXml()
            xml.add_testsuite(suite)
            xml.write('junit_test.xml')
            sys.exit(
                "Port {port} is already in use.\n"
                "Is there another instance of {process} already running?\n"
                "To run multiple instances of {process} at once use the "
                "--port <num> option.".format(port=port, process=sys.argv[0]))
        else:
            raise
    try:
        response = requests.get(client_site_url,
                                headers=dict(Authorization=apikey))
    except requests.exceptions.RequestException as err:
        #Create Test Cases
        case1 = TestCase('Test1')
        case1.name = 'Test the connection to client_site_url'
        case1.result = Failure(
            'Test failed. Cannot connect to the client_site_url: '
            '{}'.format(err), 'failure')
        #Create Test Suite
        suite = TestSuite('Suite1')
        suite.name = 'Test suite 1'
        suite.add_testcase(case1)
        #Add info into JunitXml
        xml = JUnitXml()
        xml.add_testsuite(suite)
        xml.write('junit_test.xml')
        sys.exit(
            "The client could not connect with the client site due to {error}".
            format(error=err))
    success, response = get_resources_to_check(client_site_url, apikey)
    data = response.json()
    if success:
        #Create Test Cases
        case1 = TestCase('Test1')
        case1.name = 'Test for get_resources'
        # Note: a passing check is recorded here as a Skipped result; with
        # junitparser, a case left with no result would count as a pass.
        case1.result = Skipped(
            'Test passed successfully with 50 resources obtained')
    else:
        #Create Test Cases
        if not response.ok:
            case1 = TestCase('Test1')
            case1.name = 'Test for get_resources'
            case1.result = Failure(
                'Client could not get the list: error code {0}, reason {1}'
                .format(response.status_code,
                        response.reason), 'failure_of_connection')
        else:
            case1 = TestCase('Test1')
            case1.name = 'Test for get_resources'
            case1.result = Error(
                'Client could not get the list correctly; it only got '
                '{0} resources'.format(len(data)), 'error_list')
    # Note: execution continues here even when the check above failed.
    resource_id = data[0]
    success, response = get_url_for_id(client_site_url, apikey, resource_id)
    if success:
        #Create Test Cases
        case2 = TestCase('Test2')
        case2.name = 'Test for get_url_for_resource_id'
        case2.result = Skipped(
            'Test passed successfully with the url obtained correctly')
    else:
        #Create Test Cases
        if not response.ok:
            case2 = TestCase('Test2')
            case2.name = 'Test for get_url_for_resource_id'
            case2.result = Failure(
                'Client could not get the url for the resource: error code '
                '{0}, reason {1}'.format(response.status_code,
                                         response.reason),
                'failure_of_connection')
        else:
            case2 = TestCase('Test2')
            case2.name = 'Test for get_url_for_resource_id'
            case2.result = Error('Client could not get the url correctly',
                                 'the_error_type')
    #Create Test Suite
    suite = TestSuite('Suite1')
    suite.name = 'Test suite 1'
    suite.add_testcase(case1)
    suite.add_testcase(case2)
    #Add info into JunitXml
    xml = JUnitXml()
    xml.add_testsuite(suite)
    xml.write('junit_test.xml')
Example #10
def test_result_eq(self):
    # TODO: Weird, need to think of a better API
    self.assertEqual(Failure('A'), Failure('A'))
    self.assertNotEqual(Skipped('B'), Skipped('A'))
    self.assertNotEqual(Error('C'), Error('B'))