Example 1
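Checks out the revisions recorded with a result set, retests the reproduced failures, tags each failure with the blamed revision, and optionally mails the resulting config diff as a patch.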
# check out the revisions recorded with the result set, then re-read
# the specification (presumably to reflect the actual checked-out state)
_revspec = bs.RevisionSpecification.from_xml_file(
    os.path.join(os.path.abspath(args.result_path), 'revisions.xml'))
_revspec.checkout()
_revspec = bs.RevisionSpecification()

# use the full sha for the blame, so it can be looked up in a map when
# processing the config file
blame[1] = str(repos.repo(blame[0]).git.rev_parse(blame[1]))

if not bs.retest_failures(args.result_path, retest_dir):
    print("ERROR: retest failed")
    sys.exit(-1)

# make sure there is enough time for the test files to sync to nfs
time.sleep(20)
reproduced_failures = bs.TestLister(retest_dir + "/test/")

print("Found failures:")
reproduced_failures.Print()

for a_fail in reproduced_failures.Tests():
    a_fail.bisected_revision = " ".join(blame)
    a_fail.UpdateConf()

if args.to:
    patch_text = git.Repo().git.diff()
    print(patch_text)
    msg = MIMEText(patch_text)
    msg["Subject"] = "[PATCH] mesa jenkins updates due to " + args.blame_revision
    msg["From"] = "Do Not Reply <*****@*****.**>"
    msg["To"] = args.to
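The excerpt ends before the message is sent; a minimal sketch of the missing send step using the standard library (the SMTP relay host is an assumption, not from the source):

import smtplib

# hypothetical send step; "localhost" is an assumed relay
s = smtplib.SMTP("localhost")
s.sendmail(msg["From"], [msg["To"]], msg.as_string())
s.quit()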
Example 2
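Rebuilds and retests the piglit-gpu-all dependency graph through Jenkins, then, as in Example 1, updates the config for each reproduced failure and optionally mails the diff.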
j = bs.Jenkins(_revspec, bisect_dir)
o = bs.Options(["bisect_all.py"])
o.result_path = bisect_dir
o.retest_path = args.result_path
depGraph = bs.DependencyGraph(["piglit-gpu-all"], o)

print("Retesting mesa to: " + bisect_dir)
try:
    j.build_all(depGraph, print_summary=False)
except bs.BuildFailure:
    print("ERROR: some builds failed")

# make sure there is enough time for the test files to sync to nfs
time.sleep(20)
reproduced_failures = bs.TestLister(bisect_dir + "/test/")

print("Found failures:")
reproduced_failures.Print()

for a_fail in reproduced_failures.Tests():
    a_fail.bisected_revision = " ".join(blame)
    a_fail.UpdateConf()

if args.to:
    patch_text = git.Repo().git.diff()
    print(patch_text)
    msg = MIMEText(patch_text)
    msg["Subject"] = "[PATCH] mesa jenkins updates due to " + args.blame_revision
    msg["From"] = "Do Not Reply <*****@*****.**>"
    msg["To"] = args.to
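The fixed time.sleep(20) above assumes the NFS sync always completes within 20 seconds; Example 6 below polls for the directory instead. A hypothetical helper (not part of bs) that factors out that polling pattern:

import os
import time

def wait_for_dir(path, attempts=10, interval=10):
    # poll for the directory instead of sleeping a fixed interval
    for _ in range(attempts):
        if os.path.exists(path):
            return True
        time.sleep(interval)
    return False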
Example 3
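Clears stale test directories, rebuilds and retests; if failures remain, checks out an older mesa commit to determine which failures are mesa regressions.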
bs.rmtree(bisect_dir + "/piglit-test")
bs.rmtree(bisect_dir + "/deqp-test")
bs.rmtree(bisect_dir + "/cts-test")

j = bs.Jenkins(_revspec, bisect_dir)
o = bs.Options(["bisect_all.py"])
o.result_path = bisect_dir
o.retest_path = args.result_path
depGraph = bs.DependencyGraph(["piglit-gpu-all"], o)

print("Retesting mesa to: " + bisect_dir)
j.build_all(depGraph, print_summary=False)

# make sure there is enough time for the test files to sync to nfs
time.sleep(40)
new_failures = bs.TestLister(bisect_dir + "/test/")

if not new_failures.Tests():
    print("All tests fixed")
    sys.exit(0)

#test_arg = make_test_list(new_failures)
print("Found failures:")
new_failures.Print()

# build the old mesa to see which failures were mesa regressions
revspec = bs.RevisionSpecification(
    from_cmd_line=["mesa=" + mesa_commits[-1].hexsha])
revspec.checkout()
revspec = bs.RevisionSpecification()
hashstr = revspec.to_cmd_line_param().replace(" ", "_")
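The from_cmd_line usage above shows the parameter format ("mesa=<sha>"), so hashstr is just that specification made filename-safe; a worked example (shas invented for illustration):

# to_cmd_line_param() presumably yields space-separated "project=sha"
# pairs, e.g. "mesa=0f3ec51 piglit=b8e4e8e", so hashstr becomes
# "mesa=0f3ec51_piglit=b8e4e8e"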
Example 4
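A test method that runs the crucible Vulkan suite with per-hardware exclude lists for flaky or hanging tests, then exports the junit results.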
    def test(self):
        pm = bs.ProjectMap()
        build_root = pm.build_root()
        global_opts = bs.Options()
        if global_opts.arch == "m64":
            icd_name = "intel_icd.x86_64.json"
        elif global_opts.arch == "m32":
            icd_name = "intel_icd.i686.json"
        else:
            # fail loudly on an unexpected arch rather than hit a
            # NameError when icd_name is used below
            raise RuntimeError("unsupported arch: " + global_opts.arch)
        env = {
            "LD_LIBRARY_PATH": build_root + "/lib",
            "VK_ICD_FILENAMES": build_root + "/share/vulkan/icd.d/" + icd_name,
            "ANV_ABORT_ON_DEVICE_LOSS": "true"
        }
        o = bs.Options()
        o.update_env(env)
        out_dir = build_root + "/../test"
        if not path.exists(out_dir):
            os.makedirs(out_dir)
        out_xml = out_dir + "/piglit-crucible_" + o.hardware + "_" + o.arch + ".xml"
        include_tests = []
        if o.retest_path:
            include_tests = bs.TestLister(
                o.retest_path + "/test/").RetestIncludes("crucible-test")

        # flaky
        excludes = ["!func.query.timestamp", "!func.ssbo.interleve"]
        parallelism = []

        if "hsw" in o.hardware:
            # issue 4
            excludes += [
                "!func.copy.copy-buffer.large",
                "!func.interleaved-cmd-buffers.end1*",
                "!func.miptree.d32-sfloat.aspect-depth.view*",
                "!func.miptree.r8g8b8a8-unorm.aspect-color.view*",
                "!func.miptree.s8-uint.aspect-stencil*",
                "!func.renderpass.clear.color08", "!func.ssbo.interleve"
            ]
        if "ivb" in o.hardware:
            # issue 5
            excludes += [
                "!func.depthstencil*",
                "!func.miptree.r8g8b8a8-unorm.aspect-color.view*",
                "!func.miptree.s8-uint.aspect-stencil*",
                "!func.miptree.d32-sfloat.aspect-depth.view*",
                "!stress.lots-of-surface-state.fs.static"
            ]
            parallelism = ['-j', '1']

        if "byt" in o.hardware:
            # issue 6
            excludes += [
                "!func.miptree.d32-sfloat.aspect-depth.view-3d.levels0*",
                "!func.depthstencil*", "!func.miptree.s8-uint.aspect-stencil*",
                "!stress.lots-of-surface-state.fs.static"
            ]
            parallelism = ['-j', '1']

        if "bsw" in o.hardware:
            excludes += ["!func.event.cmd_buffer"]  # intermittent fail/crash

        if "bxt" in o.hardware:
            excludes += [
                "!func.miptree.s8-uint.aspect-stencil*",
                "!stress.lots-of-surface-state.fs.static"
            ]

        bs.run_batch_command(
            [build_root + "/bin/crucible", "run", "--fork", "--log-pids",
             "--junit-xml=" + out_xml] + parallelism + include_tests + excludes,
            env=env,
            expected_return_code=None)
        post_process_results(out_xml)
        bs.run_batch_command(["cp", "-a", "-n", out_dir, pm.source_root()])

        bs.check_gpu_hang()
        bs.Export().export_tests()
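The per-hardware if chains above could equally be table-driven; a hypothetical refactor sketch (function and table names invented, covering only the bsw/bxt branches):

# hypothetical refactor, not from the source
KNOWN_EXCLUDES = {
    "bsw": ["!func.event.cmd_buffer"],  # intermittent fail/crash
    "bxt": ["!func.miptree.s8-uint.aspect-stencil*",
            "!stress.lots-of-surface-state.fs.static"],
}

def hardware_excludes(hardware):
    excludes = ["!func.query.timestamp", "!func.ssbo.interleve"]  # flaky
    for hw, tests in KNOWN_EXCLUDES.items():
        if hw in hardware:
            excludes += tests
    return excludes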
Example 5
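A test method that runs the GL CTS through piglit, skipping suites that stable mesa branches do not support and excluding known-flaky tests, then filters and copies the results for Jenkins.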
    def test(self):
        o = bs.Options()
        pm = bs.ProjectMap()

        if not self.version:
            self.version = bs.mesa_version()

        conf_file = bs.get_conf_file(o.hardware, o.arch, "cts-test")

        # invoke piglit
        self.env["PIGLIT_CTS_GL_BIN"] = self.build_root + "/bin/gl/cts/glcts"
        out_dir = self.build_root + "/test/" + o.hardware

        include_tests = []
        if o.retest_path:
            testlist = bs.TestLister(o.retest_path + "/test/")
            include_tests = testlist.RetestIncludes(project="cts-test")
            if not include_tests:
                # we were supposed to retest failures, but there were none
                return

        # This test is flaky in glcts.  It passes often enough for
        # submission, but per Ken, no developer will want to dig into
        # why it is flaky.
        extra_excludes = [
            "packed_depth_stencil.packed_depth_stencil_copyteximage"
        ]

        suite_names = []
        # disable gl cts on stable versions of mesa, which do not
        # support the feature set.
        if "13.0" in self.version:
            return
        if "17.0" in self.version and "glk" in o.hardware:
            # glk not supported by stable mesa
            return
        suite_names.append("cts_gl")
        # as per Ian, only run gl45
        extra_excludes += [
            "gl30-cts", "gl31-cts", "gl32-cts", "gl33-cts", "gl40-cts",
            "gl41-cts", "gl42-cts", "gl43-cts", "gl44-cts"
        ]
        if "hsw" in o.hardware:
            # flaky cts_gl tests
            extra_excludes += [
                "shader_image_load_store.multiple-uniforms",
                "shader_image_size.basic-nonms-fs",
                "shader_image_size.advanced-nonms-fs",
                "texture_gather.gather-tesselation-shader",
                "vertex_attrib_binding.basic-inputl-case1",
                "gpu_shader_fp64.named_uniform_blocks",
                # gpu hang
                "gl45-cts.tessellation_shader.vertex_spacing",
                "gl45-cts.tessellation_shader.vertex_ordering",
                "gl45-cts.tessellation_shader.tessellation_control_to_tessellation_evaluation.gl_maxpatchvertices_position_pointsize"
            ]

        exclude_tests = []
        for a in extra_excludes:
            exclude_tests += ["--exclude-tests", a]
        if not suite_names:
            # for master, on old hardware, this component will not
            # test anything.  The gles tests are instead targeted with
            # the gles32 cts, in the glescts-test component
            return
        cmd = [self.build_root + "/bin/piglit",
               "run",
               #"-p", "gbm",
               "-b", "junit",
               "--config", conf_file,
               "-c",
               "--exclude-tests", "esext-cts",
               "--junit_suffix", "." + o.hardware + o.arch] + \
               exclude_tests + \
               include_tests + suite_names + [out_dir]

        bs.run_batch_command(cmd,
                             env=self.env,
                             expected_return_code=None,
                             streamedOutput=True)
        single_out_dir = self.build_root + "/../test"
        if not os.path.exists(single_out_dir):
            os.makedirs(single_out_dir)

        if os.path.exists(out_dir + "/results.xml"):
            # Uniquely name all test files in one directory, for
            # jenkins
            filename_components = ["/piglit-cts", o.hardware, o.arch]
            if o.shard != "0":
                # only put the shard suffix on for non-zero shards.
                # Having _0 suffix interferes with bisection.
                filename_components.append(o.shard)

            revisions = bs.RepoSet().branch_missing_revisions()
            print("INFO: filtering tests from " + out_dir + "/results.xml")
            self.filter_tests(
                revisions, out_dir + "/results.xml",
                single_out_dir + "_".join(filename_components) + ".xml")

            # create a copy of the test xml in the source root, where
            # jenkins can access it.
            cmd = [
                "cp", "-a", "-n", self.build_root + "/../test",
                pm.source_root()
            ]
            bs.run_batch_command(cmd)
        else:
            print("ERROR: no results at " + out_dir + "/results.xml")

        bs.check_gpu_hang()
        bs.Export().export_tests()
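For reference, the filename components above join with underscores: with hardware "hsw", arch "m64", and shard "2" (values invented for illustration), the target becomes single_out_dir + "/piglit-cts_hsw_m64_2.xml"; shard "0" drops the suffix so bisection sees a stable filename.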
Example 6
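Collates per-component junit files into one directory, optionally bundles them into a results tarball usable with piglit summary, and writes a pass/fail summary. The two tar-add retry blocks below are identical; a hypothetical helper (not in the source) could factor them out:

import time

def tar_add_with_retry(tar, filename, attempts=5, delay=10):
    # tar.add() can fail transiently when a file has not yet
    # appeared on NFS; retry a few times before giving up
    for _ in range(attempts):
        try:
            tar.add(filename)
            return True
        except Exception:
            print("WARN: failed to add file: " + filename)
            time.sleep(delay)
    return False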
def collate_tests(result_path, out_test_dir, make_tar=False):
    src_test_dir = result_path + "/test"
    print("collecting tests from " + src_test_dir)
    i = 0
    while i < 10 and not os.path.exists(src_test_dir):
        i += 1
        print("sleeping, waiting for test directory: " + src_test_dir)
        time.sleep(10)
    if not os.path.exists(src_test_dir):
        print("no test directory found: " + src_test_dir)
        return

    cmd = ["cp", "-a", "-n", src_test_dir, out_test_dir]
    bs.run_batch_command(cmd)

    # Junit files must have a recent time stamp or else Jenkins will
    # not parse them.
    for a_file in os.listdir(out_test_dir + "/test"):
        os.utime(out_test_dir + "/test/" + a_file, None)

    revisions_path = bs.ProjectMap().source_root() + "/revisions.txt"
    with open(revisions_path, "w") as revs:
        revs.write(create_revision_table())

    if not make_tar:
        return

    # else generate a results.tgz that can be used with piglit summary
    save_dir = os.getcwd()
    os.chdir("/tmp/")
    tar = tarfile.open(out_test_dir + "/test/results.tar", "w:")
    shards = {}
    for a_file in os.listdir(out_test_dir + "/test"):
        if "piglit" not in a_file:
            continue
        if ":" in a_file:
            shard_base_name = "_".join(a_file.split("_")[:-1])
            shards.setdefault(shard_base_name, []).append(a_file)
            continue
        t = et.parse(out_test_dir + "/test/" + a_file)
        r = t.getroot()
        strip_passes(r)
        t.write(a_file)
        # occasionally, t.write() finishes, but the file is not available
        t = None
        for _ in range(0, 5):
            try:
                tar.add(a_file)
                break
            except Exception:
                print("WARN: failed to add file: " + a_file)
                time.sleep(10)
        os.unlink(a_file)
    for (shard, files) in shards.items():
        t = et.parse(out_test_dir + "/test/" + files[0])
        r = t.getroot()
        strip_passes(r)
        suite = r.find("testsuite")
        for other_shard in files[1:]:
            st = et.parse(out_test_dir + "/test/" + other_shard)
            sr = st.getroot()
            strip_passes(sr)
            for a_suite in sr.findall("testsuite"):
                for a_test in a_suite.findall("testcase"):
                    suite.append(a_test)
        shard_file = shard + ".xml"
        t.write(shard_file)
        # occasionally, t.write() finishes, but the file is not available
        t = None
        for _ in range(0, 5):
            try:
                tar.add(shard_file)
                break
            except Exception:
                print("WARN: failed to add file: " + shard_file)
                time.sleep(10)
        os.unlink(shard_file)

    if os.path.exists(out_test_dir + "/test/logs"):
        # use a separate variable here: save_dir still holds the
        # directory to restore once the tar is written
        logs_cwd = os.getcwd()
        os.chdir(out_test_dir + "/test")
        tar.add("logs")
        os.chdir(logs_cwd)

    tar.close()
    bs.run_batch_command(["xz", "-9", out_test_dir + "/test/results.tar"])
    os.chdir(save_dir)

    tl = bs.TestLister(out_test_dir + "/test")
    tests = tl.Tests()
    if tests:
        with open("test_summary.txt", "w") as fh:
            for atest in tests:
                atest.PrettyPrint(fh)
            fh.flush()
            # end users report that sometimes the summary is empty
            os.fsync(fh.fileno())
        time.sleep(10)

        tc = bs.TestLister(out_test_dir + "/test", include_passes=True)
        all_tests = len(tc.Tests())

        tf = bs.TestLister(out_test_dir + "/test")
        failed_tests = len(tf.Tests())
        passed_tests = all_tests - failed_tests
        if all_tests:
            # guard the division and use float math so the '.2f'
            # format produces a real percentage
            percentage = format(passed_tests * 100.0 / all_tests, '.2f')
            with open("test_summary.txt", "a") as fh:
                fh.write("""

        Tests passed: {} / {} ({}%)
                """.format(passed_tests, all_tests, percentage))