Example #1
    def test(self):
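        # hsw/byt/ivb do not expose GLES 3.1 by default; the override lets the gles31 module run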
        if "hsw" in self.o.hardware or "byt" in self.o.hardware or "ivb" in self.o.hardware:
            self.env["MESA_GLES_VERSION_OVERRIDE"] = "3.1"
        t = bs.DeqpTester()
        all_results = bs.DeqpTrie()

        if not self.version:
            self.version = bs.mesa_version()
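        # the 13.0/17.0 stable branches of mesa do not support glk; skip entirely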
        if "glk" in self.o.hardware:
            if "13.0" in self.version or "17.0" in self.version:
                return

        modules = ["gles2", "egl"]
        if self.supports_gles_3():
            modules += ["gles3"]
        if self.supports_gles_31():
            modules += ["gles31"]

        for module in modules:
            binary = self.pm.build_root() + "/opt/deqp/modules/" + module + "/deqp-" + module
            results = t.test(binary,
                             DeqpLister(binary),
                             [],
                             self.env)
            all_results.merge(results)

        config = bs.get_conf_file(self.o.hardware, self.o.arch, project=self.pm.current_project())
        t.generate_results(all_results, bs.ConfigFilter(config, self.o))
Example #2
    def test(self):
        cts_blacklist = bs.CtsTestList().tests()
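        # mirror each "<suite>-CTS" blacklist entry under its "dEQP-GL<suite>" name,
        # so the blacklist matches either naming scheme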
        for suite in ["ES32", "ES31", "ES3", "ES2"]:
            if suite + "-CTS" in cts_blacklist._trie:
                cts_blacklist._trie["dEQP-GL" + suite] = cts_blacklist._trie[suite + "-CTS"]
        if "hsw" in self.o.hardware or "byt" in self.o.hardware or "ivb" in self.o.hardware:
            self.env["MESA_GLES_VERSION_OVERRIDE"] = "3.1"
        t = bs.DeqpTester()
        all_results = bs.DeqpTrie()

        if not self.version:
            self.version = bs.mesa_version()
        if "glk" in self.o.hardware:
            if "13.0" in self.version or "17.0" in self.version:
                return

        modules = ["gles2", "gles3", "egl"]
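        # only these platforms run the gles31 module; hsw/byt/ivb rely on the version override above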
        for hardware in ["skl", "bdw", "bsw", "hsw", "byt", "ivb"]:
            if hardware in self.o.hardware:
                modules += ["gles31"]
                break

        for module in modules:
            binary = self.pm.build_root() + "/opt/deqp/modules/" + module + "/deqp-" + module
            results = t.test(binary,
                             DeqpLister(binary, cts_blacklist),
                             [],
                             self.env)
            all_results.merge(results)

        config = bs.get_conf_file(self.o.hardware, self.o.arch, project=self.pm.current_project())
        t.generate_results(all_results, bs.ConfigFilter(config, self.o))
Example #3
    def test(self):
        mv = bs.mesa_version()
        if "17.2" in mv or "17.1" in mv:
            return
        t = bs.DeqpTester()
        results = t.test(self.pm.build_root() + "/bin/es/modules/glcts",
                         GLESCTSList(),
                         env={"MESA_GLES_VERSION_OVERRIDE": "3.2"})
        o = bs.Options()
        config = bs.get_conf_file(self.o.hardware,
                                  self.o.arch,
                                  project=self.pm.current_project())
        t.generate_results(results, bs.ConfigFilter(config, o))
Example #4
    def test(self):
        mv = bs.mesa_version()
        if "17.2" in mv or "17.1" in mv:
            print("NOTICE: GLCTS will NOT be run since the system has Mesa version <17.3")
            return
        t = bs.DeqpTester()
        results = t.test(self.pm.build_root() + "/bin/gl/modules/glcts",
                         GLCTSList(),
                         env = {"MESA_GL_VERSION_OVERRIDE" : "4.6",
                                "MESA_GLSL_VERSION_OVERRIDE" : "460"})

        o = bs.Options()
        config = bs.get_conf_file(self.o.hardware, self.o.arch, project=self.pm.current_project())
        t.generate_results(results, bs.ConfigFilter(config, o))
Example #5
    def test(self):
        t = bs.DeqpTester(runtime=30)
        all_results = bs.DeqpTrie()

        if not self.version:
            self.version = bs.mesa_version()

        modules = ["gles2", "gles3", "gles31"]

        for module in modules:
            binary = os.path.join(self.pm.build_root(), "opt/deqp/modules",
                                  module, "deqp-" + module)
            results = t.test(binary, DeqpRuntimeLister(binary), [], self.env)
            all_results.merge(results)

        config = bs.get_conf_file(self.o.hardware,
                                  self.o.arch,
                                  project=self.pm.current_project())
        t.generate_results(all_results, bs.ConfigFilter(config, self.o))
Example #6
    def test(self):
        pm = bs.ProjectMap()
        global_opts = bs.Options()
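        # select the Vulkan ICD manifest matching the build architecture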
        if global_opts.arch == "m64":
            icd_name = "intel_icd.x86_64.json"
        elif global_opts.arch == "m32":
            icd_name = "intel_icd.i686.json"
        env = {"VK_ICD_FILENAMES": pm.build_root() +
               "/share/vulkan/icd.d/" + icd_name,
               "ANV_ABORT_ON_DEVICE_LOSS": "true"}
        tester = bs.DeqpTester()
        binary = pm.build_root() + "/opt/deqp/modules/vulkan/deqp-vk"
        results = tester.test(binary,
                              VulkanTestList(), ["--deqp-surface-type=fbo"],
                              env=env)
        o = bs.Options()
        config = bs.get_conf_file(o.hardware,
                                  o.arch,
                                  project=pm.current_project())
        tester.generate_results(results, bs.ConfigFilter(config, o))
Example #7
    def test(self):
        t = bs.DeqpTester()
        version = bs.mesa_version()
        if "bxt" in self.o.hardware:
            if "13.0" in version:
                # unsupported platforms
                return
        if "glk" in self.o.hardware:
            if "17.0" in version:
                # unsupported platforms
                return

        results = t.test(self.pm.build_root() + "/bin/es/cts/glcts",
                         bs.CtsTestList(),
                         [],
                         self.env)

        o = bs.Options()
        config = bs.get_conf_file(self.o.hardware, self.o.arch, project=self.pm.current_project())
        t.generate_results(results, bs.ConfigFilter(config, o))
Example #8
    def test(self):
        o = bs.Options()
        pm = bs.ProjectMap()

        if not self.version:
            self.version = bs.mesa_version()

        conf_file = bs.get_conf_file(o.hardware, o.arch, "cts-test")

        # invoke piglit
        self.env["PIGLIT_CTS_GL_BIN"] = self.build_root + "/bin/gl/cts/glcts"
        out_dir = self.build_root + "/test/" + o.hardware

        include_tests = []
        if o.retest_path:
            testlist = bs.TestLister(o.retest_path + "/test/")
            include_tests = testlist.RetestIncludes(project="cts-test")
            if not include_tests:
                # we were supposed to retest failures, but there were none
                return

        # this test is flaky in glcts.  It passes enough for
        # submission, but as per Ken, no developer will want to look
        # at it to figure out why the test is flaky.
        extra_excludes = ["packed_depth_stencil.packed_depth_stencil_copyteximage"]

        suite_names = []
        # disable gl cts on stable versions of mesa, which do not
        # support the feature set.
        if "13.0" in self.version or "12.0" in self.version:
            return
        suite_names.append("cts_gl")
        # as per Ian, only run gl45
        extra_excludes += ["gl30-cts",
                           "gl31-cts",
                           "gl32-cts",
                           "gl33-cts",
                           "gl40-cts",
                           "gl41-cts",
                           "gl42-cts",
                           "gl43-cts",
                           "gl44-cts"]
        if "hsw" in o.hardware:
            # flaky cts_gl tests
            extra_excludes += ["shader_image_load_store.multiple-uniforms",
                               "shader_image_size.basic-nonms-fs",
                               "shader_image_size.advanced-nonms-fs",
                               "texture_gather.gather-tesselation-shader",
                               "vertex_attrib_binding.basic-inputl-case1",
                               "gpu_shader_fp64.named_uniform_blocks",
                               # gpu hang
                               "gl45-cts.tessellation_shader.vertex_spacing",
                               "gl45-cts.tessellation_shader.vertex_ordering",
                               "gl45-cts.tessellation_shader.tessellation_control_to_tessellation_evaluation.gl_maxpatchvertices_position_pointsize"]

        exclude_tests = []
        for a in extra_excludes:
            exclude_tests += ["--exclude-tests", a]
        if not suite_names:
            # for master, on old hardware, this component will not
            # test anything.  The gles tests are instead targeted with
            # the gles32 cts, in the glescts-test component
            return
        cmd = [self.build_root + "/bin/piglit",
               "run",
               #"-p", "gbm",
               "-b", "junit",
               "--config", conf_file,
               "-c",
               "--exclude-tests", "esext-cts",
               "--junit_suffix", "." + o.hardware + o.arch] + \
               exclude_tests + \
               include_tests + suite_names + [out_dir]

        bs.run_batch_command(cmd, env=self.env,
                             expected_return_code=None,
                             streamedOutput=True)
        single_out_dir = self.build_root + "/../test"
        if not os.path.exists(single_out_dir):
            os.makedirs(single_out_dir)

        if os.path.exists(out_dir + "/results.xml"):
            # Uniquely name all test files in one directory, for
            # jenkins
            filename_components = ["/piglit-cts",
                                   o.hardware,
                                   o.arch]
            if o.shard != "0":
                # only put the shard suffix on for non-zero shards.
                # Having _0 suffix interferes with bisection.
                filename_components.append(o.shard)

            revisions = bs.RepoSet().branch_missing_revisions()
            print "INFO: filtering tests from " + out_dir + "/results.xml"
            self.filter_tests(revisions,
                              out_dir + "/results.xml",
                              single_out_dir + "_".join(filename_components) + ".xml")

            # create a copy of the test xml in the source root, where
            # jenkins can access it.
            cmd = ["cp", "-a", "-n",
                   self.build_root + "/../test", pm.source_root()]
            bs.run_batch_command(cmd)
            bs.Export().export_tests()
        else:
            print "ERROR: no results at " + out_dir + "/results.xml"

        bs.check_gpu_hang()
Example #9
def post_process_results(xml):
    t = ET.parse(xml)
    o = bs.Options()
    conf = None
    try:
        conf = bs.get_conf_file(o.hardware, o.arch, project="crucible-test")
    except bs.NoConfigFile:
        pass
    if conf:
        # key=name, value=status
        expected_status = {}
        c = ConfigParser.SafeConfigParser(allow_no_value=True)
        c.read(conf)
        for section in c.sections():
            for (test, _) in c.items(section):
                if test in expected_status:
                    raise Exception("test has multiple entries: " + test)
                expected_status[test] = section
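        # adjust each testcase's junit status to agree with its expected status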
        for atest in t.findall(".//testcase"):
            if atest.attrib["name"] not in expected_status:
                continue
            if atest.attrib["status"] == "lost":
                atest.attrib["status"] = "crash"

            expected = expected_status[atest.attrib["name"]]
            if expected == "expected-failures":
                # change fail to pass
                if atest.attrib["status"] == "fail":
                    for ftag in atest.findall("failure"):
                        atest.remove(ftag)
                    so = ET.Element("system-out")
                    so.text = "Passing test as an expected failure"
                    atest.append(so)
                elif atest.attrib["status"] == "crash":
                    atest.append(ET.Element("failure"))
                    so = ET.Element("system-out")
                    so.text = "ERROR: this test crashed when it expected failure"
                    atest.append(so)
                elif atest.attrib["status"] == "pass":
                    atest.append(ET.Element("failure"))
                    so = ET.Element("system-out")
                    so.text = "ERROR: this test passed when it expected failure"
                    atest.append(so)
                elif atest.attrib["status"] == "skip":
                    atest.append(ET.Element("failure"))
                    so = ET.Element("system-out")
                    so.text = "ERROR: this test skipped when it expected failure"
                    atest.append(so)
                else:
                    raise Exception("test has unknown status: " +
                                    atest.attrib["name"] + " " +
                                    atest.attrib["status"])
            elif expected == "expected-crashes":
                # change error to pass
                if atest.attrib["status"] == "crash":
                    for ftag in atest.findall("error"):
                        atest.remove(ftag)
                    so = ET.Element("system-out")
                    so.text = "Passing test as an expected crash"
                    atest.append(so)
                elif atest.attrib["status"] == "fail":
                    atest.append(ET.Element("failure"))
                    so = ET.Element("system-out")
                    so.text = "ERROR: this test failed when it expected crash"
                    atest.append(so)
                elif atest.attrib["status"] == "pass":
                    atest.append(ET.Element("failure"))
                    so = ET.Element("system-out")
                    so.text = "ERROR: this test passed when it expected crash"
                    atest.append(so)
                elif atest.attrib["status"] == "skip":
                    atest.append(ET.Element("failure"))
                    so = ET.Element("system-out")
                    so.text = "ERROR: this test skipped when it expected crash"
                    atest.append(so)
                else:
                    raise Exception("test has unknown status: " +
                                    atest.attrib["name"] + " " +
                                    atest.attrib["status"])

    for atest in t.findall(".//testcase"):
        atest.attrib["name"] = atest.attrib["name"] + "." + o.hardware + o.arch
    t.write(xml)
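The expectations file parsed above is plain INI: each section name is an expected status, and each key is a test name (with allow_no_value=True the value may be omitted; Example #14 below also reads it as the commit that last changed the expectation). A minimal sketch of such a file, with hypothetical test names and commit ids:

[expected-failures]
func.miptree.aspect-color.view-2d.levels02 = 1a2b3c
[expected-crashes]
func.ssbo.interleave = 4d5e6f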
Example #10
    def test(self):
        o = bs.Options()
        pm = bs.ProjectMap()

        if not self.version:
            self.version = bs.mesa_version()

        conf_file = bs.get_conf_file(o.hardware, o.arch, "cts-test")

        # invoke piglit
        self.env["PIGLIT_CTS_GL_BIN"] = self.build_root + "/bin/gl/cts/glcts"
        out_dir = self.build_root + "/test/" + o.hardware

        include_tests = []
        if o.retest_path:
            testlist = bs.TestLister(o.retest_path + "/test/")
            include_tests = testlist.RetestIncludes(project="cts-test")
            if not include_tests:
                # we were supposed to retest failures, but there were none
                return

        # this test is flaky in glcts.  It passes enough for
        # submission, but as per Ken, no developer will want to look
        # at it to figure out why the test is flaky.
        extra_excludes = [
            "packed_depth_stencil.packed_depth_stencil_copyteximage"
        ]

        suite_names = []
        # disable gl cts on stable versions of mesa, which do not
        # support the feature set.
        if "13.0" in self.version:
            return
        if "17.0" in self.version and "glk" in o.hardware:
            # glk not supported by stable mesa
            return
        suite_names.append("cts_gl")
        # as per Ian, only run gl45
        extra_excludes += [
            "gl30-cts", "gl31-cts", "gl32-cts", "gl33-cts", "gl40-cts",
            "gl41-cts", "gl42-cts", "gl43-cts", "gl44-cts"
        ]
        if "hsw" in o.hardware:
            # flaky cts_gl tests
            extra_excludes += [
                "shader_image_load_store.multiple-uniforms",
                "shader_image_size.basic-nonms-fs",
                "shader_image_size.advanced-nonms-fs",
                "texture_gather.gather-tesselation-shader",
                "vertex_attrib_binding.basic-inputl-case1",
                "gpu_shader_fp64.named_uniform_blocks",
                # gpu hang
                "gl45-cts.tessellation_shader.vertex_spacing",
                "gl45-cts.tessellation_shader.vertex_ordering",
                "gl45-cts.tessellation_shader.tessellation_control_to_tessellation_evaluation.gl_maxpatchvertices_position_pointsize"
            ]

        exclude_tests = []
        for a in extra_excludes:
            exclude_tests += ["--exclude-tests", a]
        if not suite_names:
            # for master, on old hardware, this component will not
            # test anything.  The gles tests are instead targeted with
            # the gles32 cts, in the glescts-test component
            return
        cmd = [self.build_root + "/bin/piglit",
               "run",
               #"-p", "gbm",
               "-b", "junit",
               "--config", conf_file,
               "-c",
               "--exclude-tests", "esext-cts",
               "--junit_suffix", "." + o.hardware + o.arch] + \
               exclude_tests + \
               include_tests + suite_names + [out_dir]

        bs.run_batch_command(cmd,
                             env=self.env,
                             expected_return_code=None,
                             streamedOutput=True)
        single_out_dir = self.build_root + "/../test"
        if not os.path.exists(single_out_dir):
            os.makedirs(single_out_dir)

        if os.path.exists(out_dir + "/results.xml"):
            # Uniquely name all test files in one directory, for
            # jenkins
            filename_components = ["/piglit-cts", o.hardware, o.arch]
            if o.shard != "0":
                # only put the shard suffix on for non-zero shards.
                # Having _0 suffix interferes with bisection.
                filename_components.append(o.shard)

            revisions = bs.RepoSet().branch_missing_revisions()
            print "INFO: filtering tests from " + out_dir + "/results.xml"
            self.filter_tests(
                revisions, out_dir + "/results.xml",
                single_out_dir + "_".join(filename_components) + ".xml")

            # create a copy of the test xml in the source root, where
            # jenkins can access it.
            cmd = [
                "cp", "-a", "-n", self.build_root + "/../test",
                pm.source_root()
            ]
            bs.run_batch_command(cmd)
        else:
            print "ERROR: no results at " + out_dir + "/results.xml"

        bs.check_gpu_hang()
        bs.Export().export_tests()
Example #11
def post_process_results(xml):
    t = ET.parse(xml)
    o = bs.Options()
    conf = None
    try:
        conf = bs.get_conf_file(o.hardware, o.arch, project="crucible-test")
    except bs.NoConfigFile:
        pass
    if conf:
        # key=name, value=status
        expected_status = {}
        c = ConfigParser.SafeConfigParser(allow_no_value=True)
        c.read(conf)
        for section in c.sections():
            for (test, _) in c.items(section):
                if test in expected_status:
                    raise Exception("test has multiple entries: " + test)
                expected_status[test] = section
        for atest in t.findall(".//testcase"):
            if atest.attrib["name"] not in expected_status:
                continue
            if atest.attrib["status"] == "lost":
                atest.attrib["status"] = "crash"

            expected = expected_status[atest.attrib["name"]]
            if expected == "expected-failures":
                # change fail to pass
                if atest.attrib["status"] == "fail":
                    for ftag in atest.findall("failure"):
                        atest.remove(ftag)
                    so = ET.Element("system-out")
                    so.text = "Passing test as an expected failure"
                    atest.append(so)
                elif atest.attrib["status"] == "crash":
                    atest.append(ET.Element("failure"))
                    so = ET.Element("system-out")
                    so.text = "ERROR: this test crashed when it expected failure"
                    atest.append(so)
                elif atest.attrib["status"] == "pass":
                    atest.append(ET.Element("failure"))
                    so = ET.Element("system-out")
                    so.text = "ERROR: this test passed when it expected failure"
                    atest.append(so)
                else:
                    raise Exception("test has unknown status: " + atest.attrib["name"]
                                    + " " + atest.attrib["status"])
            elif expected == "expected-crashes":
                # change error to pass
                if atest.attrib["status"] == "crash":
                    for ftag in atest.findall("error"):
                        atest.remove(ftag)
                    so = ET.Element("system-out")
                    so.text = "Passing test as an expected crash"
                    atest.append(so)
                elif atest.attrib["status"] == "fail":
                    atest.append(ET.Element("failure"))
                    so = ET.Element("system-out")
                    so.text = "ERROR: this test failed when it expected crash"
                    atest.append(so)
                elif atest.attrib["status"] == "pass":
                    atest.append(ET.Element("failure"))
                    so = ET.Element("system-out")
                    so.text = "ERROR: this test passed when it expected crash"
                    atest.append(so)
                else:
                    raise Exception("test has unknown status: " + atest.attrib["name"]
                                    + " " + atest.attrib["status"])

    for atest in t.findall(".//testcase"):
        atest.attrib["name"] = atest.attrib["name"] + "." + o.hardware + o.arch
    t.write(xml)
Example #12
    def test(self):
        # todo: now that there is more than one component that needs
        # to call mesa_version, it should be moved to a more sharable
        # location
        o = bs.Options()
        pm = bs.ProjectMap()
        conf_file = bs.get_conf_file(o.hardware, o.arch, "cts-test")

        savedir = os.getcwd()
        cts_dir = self.build_root + "/bin/cts"
        os.chdir(cts_dir)

        # invoke piglit
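        # (piglit is expected to pick up the glcts binary from PIGLIT_CTS_BIN)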
        self.env["PIGLIT_CTS_BIN"] = cts_dir + "/glcts"
        out_dir = self.build_root + "/test/" + o.hardware

        include_tests = []
        if o.retest_path:
            testlist = bs.TestLister(o.retest_path + "/test/")
            for atest in testlist.Tests(project="cts-test"):
                test_name_good_chars = re.sub('[_ !:=]', ".", atest.test_name)
                # drop the spec
                test_name = ".".join(test_name_good_chars.split(".")[1:])
                include_tests = include_tests + ["--include-tests", test_name]
            if not include_tests:
                # we were supposed to retest failures, but there were none
                return

        extra_excludes = []
        if ("ilk" in o.hardware or "g33" in o.hardware
            or "g45" in o.hardware or "g965" in o.hardware):
            extra_excludes = extra_excludes + ["--exclude-tests", "es3-cts"]
        cmd = [self.build_root + "/bin/piglit",
               "run",
               #"-p", "gbm",
               "-b", "junit",
               "--config", conf_file,
               "-c",
               "--exclude-tests", "es31-cts",
               "--exclude-tests", "esext-cts",
               "--junit_suffix", "." + o.hardware + o.arch] + \
               extra_excludes + \
               include_tests + ["cts", out_dir]

        bs.run_batch_command(cmd, env=self.env,
                             expected_return_code=None,
                             streamedOutput=True)
        os.chdir(savedir)
        single_out_dir = self.build_root + "/../test"
        if not os.path.exists(single_out_dir):
            os.makedirs(single_out_dir)

        if os.path.exists(out_dir + "/results.xml"):
            # Uniquely name all test files in one directory, for
            # jenkins
            filename_components = ["/piglit-cts",
                                   o.hardware,
                                   o.arch]
            if o.shard != "0":
                # only put the shard suffix on for non-zero shards.
                # Having _0 suffix interferes with bisection.
                filename_components.append(o.shard)

            revisions = bs.RepoSet().branch_missing_revisions()
            print "INFO: filtering tests from " + out_dir + "/results.xml"
            self.filter_tests(revisions,
                              out_dir + "/results.xml",
                              single_out_dir + "_".join(filename_components) + ".xml")

            # create a copy of the test xml in the source root, where
            # jenkins can access it.
            cmd = ["cp", "-a", "-n",
                   self.build_root + "/../test", pm.source_root()]
            bs.run_batch_command(cmd)
            bs.Export().export_tests()
        else:
            print "ERROR: no results at " + out_dir + "/results.xml"

        bs.PiglitTester().check_gpu_hang()
Example #13
    def test(self):
        o = bs.Options()
        pm = bs.ProjectMap()

        mesa_version = bs.PiglitTester().mesa_version()
        if o.hardware == "bxt" or o.hardware == "kbl":
            if "11.0" in mesa_version:
                print "WARNING: bxt/kbl not supported by stable mesa"
                return

        conf_file = bs.get_conf_file(o.hardware, o.arch, "cts-test")

        savedir = os.getcwd()
        cts_dir = self.build_root + "/bin/cts"
        os.chdir(cts_dir)

        # invoke piglit
        self.env["PIGLIT_CTS_BIN"] = cts_dir + "/glcts"
        out_dir = self.build_root + "/test/" + o.hardware

        include_tests = []
        if o.retest_path:
            testlist = bs.TestLister(o.retest_path + "/test/")
            include_tests = testlist.RetestIncludes(project="cts-test")
            if not include_tests:
                # we were supposed to retest failures, but there were none
                return

        extra_excludes = []
        if ("ilk" in o.hardware or "g33" in o.hardware
            or "g45" in o.hardware or "g965" in o.hardware):
            extra_excludes += ["--exclude-tests", "es3-cts",
                               "--exclude-tests", "es31-cts"]

        if ("snb" in o.hardware or
            "ivb" in o.hardware or
            "byt" in o.hardware or
            "hsw" in o.hardware):
            extra_excludes += ["--exclude-tests", "es31-cts"]

        if "11.1" in mesa_version or "11.0" in mesa_version:
            extra_excludes += ["--exclude-tests", "es31-cts"]

        cmd = [self.build_root + "/bin/piglit",
               "run",
               #"-p", "gbm",
               "-b", "junit",
               "--config", conf_file,
               "-c",
               "--exclude-tests", "esext-cts",
               "--junit_suffix", "." + o.hardware + o.arch] + \
               extra_excludes + \
               include_tests + ["cts", out_dir]

        bs.run_batch_command(cmd, env=self.env,
                             expected_return_code=None,
                             streamedOutput=True)
        os.chdir(savedir)
        single_out_dir = self.build_root + "/../test"
        if not os.path.exists(single_out_dir):
            os.makedirs(single_out_dir)

        if os.path.exists(out_dir + "/results.xml"):
            # Uniquely name all test files in one directory, for
            # jenkins
            filename_components = ["/piglit-cts",
                                   o.hardware,
                                   o.arch]
            if o.shard != "0":
                # only put the shard suffix on for non-zero shards.
                # Having _0 suffix interferes with bisection.
                filename_components.append(o.shard)

            revisions = bs.RepoSet().branch_missing_revisions()
            print "INFO: filtering tests from " + out_dir + "/results.xml"
            self.filter_tests(revisions,
                              out_dir + "/results.xml",
                              single_out_dir + "_".join(filename_components) + ".xml")

            # create a copy of the test xml in the source root, where
            # jenkins can access it.
            cmd = ["cp", "-a", "-n",
                   self.build_root + "/../test", pm.source_root()]
            bs.run_batch_command(cmd)
            bs.Export().export_tests()
        else:
            print "ERROR: no results at " + out_dir + "/results.xml"

        bs.PiglitTester().check_gpu_hang()
Example #14
def post_process_results(xml):
    t = ET.parse(xml)
    o = bs.Options()
    conf = None
    long_revisions = bs.RepoSet().branch_missing_revisions()
    missing_revisions = [a_rev[:6] for a_rev in long_revisions]
    try:
        conf = bs.get_conf_file(o.hardware, o.arch, project="crucible-test")
    except bs.NoConfigFile:
        pass
    if conf:
        # key=name, value=status
        expected_status = {}
        changed_commit = {}
        c = ConfigParser.SafeConfigParser(allow_no_value=True)
        c.read(conf)
        for section in c.sections():
            for (test, commit) in c.items(section):
                if test in expected_status:
                    raise Exception("test has multiple entries: " + test)
                expected_status[test] = section
                changed_commit[test] = commit
        for atest in t.findall(".//testcase"):
            test_name = atest.attrib["name"]
            if atest.attrib["status"] == "lost":
                atest.attrib["status"] = "crash"
            if test_name not in expected_status:
                continue

            expected = expected_status[atest.attrib["name"]]
            test_is_stale = False
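            # an expectation set by a commit that is missing from this build
            # cannot be trusted; mark such results skipped below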
            for missing_commit in missing_revisions:
                if missing_commit in changed_commit[test_name]:
                    test_is_stale = True
                    # change stale test status to skip
                    for ftag in atest.findall("failure"):
                        atest.remove(ftag)
                    for ftag in atest.findall("error"):
                        atest.remove(ftag)
                    atest.append(ET.Element("skipped"))
                    so = ET.Element("system-out")
                    so.text = "WARN: the results of this were changed by " + changed_commit[
                        test_name]
                    so.text += ", which is missing from this build."
                    atest.append(so)
                    break
            if test_is_stale:
                continue

            if expected == "expected-failures":
                # change fail to pass
                if atest.attrib["status"] == "fail":
                    for ftag in atest.findall("failure"):
                        atest.remove(ftag)
                    so = ET.Element("system-out")
                    so.text = "Passing test as an expected failure"
                    atest.append(so)
                elif atest.attrib["status"] == "crash":
                    atest.append(ET.Element("failure"))
                    so = ET.Element("system-out")
                    so.text = "ERROR: this test crashed when it expected failure"
                    atest.append(so)
                elif atest.attrib["status"] == "pass":
                    atest.append(ET.Element("failure"))
                    so = ET.Element("system-out")
                    so.text = "ERROR: this test passed when it expected failure"
                    atest.append(so)
                elif atest.attrib["status"] == "skip":
                    atest.append(ET.Element("failure"))
                    so = ET.Element("system-out")
                    so.text = "ERROR: this test skipped when it expected failure"
                    atest.append(so)
                else:
                    raise Exception("test has unknown status: " +
                                    atest.attrib["name"] + " " +
                                    atest.attrib["status"])
            elif expected == "expected-crashes":
                # change error to pass
                if atest.attrib["status"] == "crash":
                    for ftag in atest.findall("error"):
                        atest.remove(ftag)
                    so = ET.Element("system-out")
                    so.text = "Passing test as an expected crash"
                    atest.append(so)
                elif atest.attrib["status"] == "fail":
                    atest.append(ET.Element("failure"))
                    so = ET.Element("system-out")
                    so.text = "ERROR: this test failed when it expected crash"
                    atest.append(so)
                elif atest.attrib["status"] == "pass":
                    atest.append(ET.Element("failure"))
                    so = ET.Element("system-out")
                    so.text = "ERROR: this test passed when it expected crash"
                    atest.append(so)
                elif atest.attrib["status"] == "skip":
                    atest.append(ET.Element("failure"))
                    so = ET.Element("system-out")
                    so.text = "ERROR: this test skipped when it expected crash"
                    atest.append(so)
                else:
                    raise Exception("test has unknown status: " +
                                    atest.attrib["name"] + " " +
                                    atest.attrib["status"])

    for atest in t.findall(".//testcase"):
        atest.attrib["name"] = atest.attrib["name"] + "." + o.hardware + o.arch
    t.write(xml)
Example #15
    def test(self):
        # todo: now that there is more than one component that needs
        # to call mesa_version, it should be moved to a more sharable
        # location
        mesa_version = bs.PiglitTester().mesa_version()
        if "10.5" in mesa_version or "10.6" in mesa_version:
            print "WARNING: deqp not supported on 10.6 and earlier."
            return
        
        o = bs.Options()
        pm = bs.ProjectMap()
        src_dir = pm.project_source_dir(pm.current_project())
        savedir = os.getcwd()

        deqp_options = ["./deqp-gles2",
                        "--deqp-surface-type=fbo",
                        "--deqp-log-images=disable",
                        "--deqp-surface-width=256",
                        "--deqp-surface-height=256"]

        expectations_dir = None
        # identify platform
        if "byt" in o.hardware:
            expectations_dir = src_dir + "/chromiumos-autotest/graphics_dEQP/expectations/baytrail"
        elif "bdw" in o.hardware:
            expectations_dir = pm.project_build_dir(pm.current_project()) + "/bdw_expectations"
        elif "hsw" in o.hardware:
            expectations_dir = pm.project_build_dir(pm.current_project()) + "/hsw_expectations"
        elif "ivb" in o.hardware:
            expectations_dir = src_dir + "/chromiumos-autotest/graphics_dEQP/expectations/ivybridge"
        elif "snb" in o.hardware:
            expectations_dir = src_dir + "/chromiumos-autotest/graphics_dEQP/expectations/sandybridge"
        elif "bsw" in o.hardware:
            expectations_dir = pm.project_build_dir(pm.current_project()) + "/bsw_expectations"

        conf_file = bs.get_conf_file(o.hardware, o.arch, "deqp-test")

        for module in ["gles2", "gles3"]:
            skip = DeqpTrie()
            # for each skip list, parse into skip trie
            if expectations_dir and os.path.exists(expectations_dir):
                for askipfile in os.listdir(expectations_dir):
                    if module not in askipfile.lower():
                        continue
                    skip.add_txt(expectations_dir + "/" + askipfile)
            else:
                skip._trie["empty"] = None

            # create test trie
            os.chdir(self.build_root + "/opt/deqp/modules/" + module)
            # generate list of tests
            bs.run_batch_command(["./deqp-" + module] + deqp_options + ["--deqp-runmode=xml-caselist"],
                                 env=self.env)
            outfile = "dEQP-" + module.upper() + "-cases.xml"
            assert os.path.exists(outfile)
            testlist = DeqpTrie()
            testlist.add_xml(outfile)

            # filter skip trie from testlist trie
            testlist.filter(skip)

            # filter intermittent tests
            # TODO(janesma) : write bug
            skips = ["functional.fragment_ops.interaction.basic_shader",
                     "functional.shaders.random.basic_expression.combined",
                     "functional.shaders.random.conditionals.combined",
                     # fails intermittently on at least bdw and hsw
                     "functional.flush_finish.flush",
                     "functional.flush_finish.finish",
                     "functional.flush_finish.finish_wait"]
            
            if "snb" in o.hardware:
                skips = skips + ["functional.shaders.random.texture.vertex.45",
                                 "functional.shaders.random.texture.vertex.1",
                                 "functional.shaders.random.texture.vertex.34"]
                
            intermittent = DeqpTrie()
            for skip in skips:
                intermittent.add_line("dEQP-" + module.upper() + "." + skip)
            testlist.filter(intermittent)

            # generate testlist file
            caselist_fn = module + "-cases.txt"
            caselist = open(caselist_fn, "w")
            testlist.write_caselist(caselist)
            caselist.close()
            self.shard_caselist(caselist_fn, o.shard)

        os.chdir(savedir)

        # invoke piglit
        self.env["PIGLIT_DEQP_GLES2_BIN"] = self.build_root + "/opt/deqp/modules/gles2/deqp-gles2"
        self.env["PIGLIT_DEQP_GLES2_EXTRA_ARGS"] =  ("--deqp-surface-type=fbo "
                                                     "--deqp-log-images=disable "
                                                     '--deqp-surface-width=256 '
                                                     '--deqp-surface-height=256 '
                                                     "--deqp-caselist-file=" +
                                                     self.build_root + "/opt/deqp/modules/gles2/gles2-cases.txt")
        self.env["PIGLIT_DEQP_GLES3_EXE"] = self.build_root + "/opt/deqp/modules/gles3/deqp-gles3"
        self.env["PIGLIT_DEQP_GLES3_EXTRA_ARGS"] = ("--deqp-surface-type=fbo "
                                                    "--deqp-log-images=disable "
                                                    '--deqp-surface-width=256 '
                                                    '--deqp-surface-height=256 '
                                                    "--deqp-caselist-file=" +
                                                    self.build_root + "/opt/deqp/modules/gles3/gles3-cases.txt")
        out_dir = self.build_root + "/test/" + o.hardware

        include_tests = []
        if o.retest_path:
            testlist = bs.TestLister(o.retest_path + "/test/")
            for atest in testlist.Tests(project="deqp-test"):
                test_name_good_chars = re.sub('[_ !:=]', ".", atest.test_name)
                # drop the spec
                test_name = ".".join(test_name_good_chars.split(".")[1:])
                include_tests = include_tests + ["--include-tests", test_name]
            if not include_tests:
                # we were supposed to retest failures, but there were none
                return
            
        cmd = [self.build_root + "/bin/piglit",
               "run",
               "-p", "gbm",
               "-b", "junit",
               "--config", conf_file,
               "-c",
               "--junit_suffix", "." + o.hardware + o.arch] + \
            include_tests + \
            ["deqp_gles2", "deqp_gles3", out_dir ]
        
        bs.run_batch_command(cmd, env=self.env,
                             expected_return_code=None,
                             streamedOutput=True)

        single_out_dir = self.build_root + "/../test"
        if not os.path.exists(single_out_dir):
            os.makedirs(single_out_dir)

        if os.path.exists(out_dir + "/results.xml"):
            # Uniquely name all test files in one directory, for
            # jenkins
            filename_components = ["/piglit-deqp",
                                   o.hardware,
                                   o.arch]
            if o.shard != "0":
                # only put the shard suffix on for non-zero shards.
                # Having _0 suffix interferes with bisection.
                filename_components.append(o.shard)

            revisions = bs.RepoSet().branch_missing_revisions()
            print "INFO: filtering tests from " + out_dir + "/results.xml"
            self.filter_tests(revisions,
                              out_dir + "/results.xml",
                              single_out_dir + "_".join(filename_components) + ".xml")

            # create a copy of the test xml in the source root, where
            # jenkins can access it.
            cmd = ["cp", "-a", "-n",
                   self.build_root + "/../test", pm.source_root()]
            bs.run_batch_command(cmd)
            bs.Export().export_tests()
        else:
            print "ERROR: no results at " + out_dir + "/results.xml"

        bs.PiglitTester().check_gpu_hang()