示例#1
0
def create_revision_table():
    """Return an ASCII table (one row per project in the RepoSet) showing
    each project's short sha and the first line of its checked-out commit.

    :returns: the rendered table as a multi-line string.
    """
    repo_set = bs.RepoSet()

    table_repo = []
    table_message = []
    table_sha = []

    for project in repo_set.projects():
        repo = repo_set.repo(project)
        commit = repo.commit()
        # First line of the commit message only, to keep rows one line tall.
        message = commit.message.splitlines()[0]
        # Abbreviated sha, equivalent to `git rev-parse --short`.
        sha = repo.git.rev_parse(commit.hexsha, short=True)

        table_repo.append(project)
        table_sha.append(sha)
        table_message.append(message)

    x = PrettyTable()
    x.add_column("Project", table_repo)
    x.add_column("sha", table_sha)
    x.add_column("Message", table_message)

    x.align["Project"] = "l"
    # BUG FIX: the column is named "sha", not "Commit" -- the old key was
    # silently ignored and the sha column kept the default alignment.
    x.align["sha"] = "c"
    x.align["Message"] = "l"

    return str(x)
示例#2
0
def main():
    """Fetch the repo set and check out the requested branch/commits,
    retrying up to five times to ride out transient git failures."""
    parser = argparse.ArgumentParser(description="checks out branches and commits")
    parser.add_argument('--branch', type=str, default="mesa_master",
                        help="The branch to base the checkout on. (default: %(default)s)")
    parser.add_argument('commits', metavar='commits', type=str, nargs='*',
                        help='commits to check out, in repo=sha format')
    args = parser.parse_args()

    repos = bs.RepoSet()
    repos.clone()
    for attempt in range(1, 6):
        # Re-fetch before every attempt: a new push may be what we're
        # waiting for.
        repos.fetch()
        try:
            print("Checking out specified commit (try {}/5)".format(attempt))
            bs.BuildSpecification().checkout(args.branch, args.commits)
        except git.GitCommandError:
            print("Unable to checkout specified commit, retrying in 5s..")
            time.sleep(5)
        else:
            # Checkout succeeded; nothing more to do.
            return
    raise Exception("ERROR: Unable to checkout specified commit.")
示例#3
0
def test_repo_status():
    """A freshly-fetched RepoStatus reports no changed branches; corrupting
    one tracked sha makes poll() flag the owning branch."""
    repo_set = bs.RepoSet()
    repo_set.fetch()
    status = bs.RepoStatus()
    assert status.poll() == []
    # Invalidate the recorded mesa sha so the branch looks out of date.
    status._branches[0]._project_branches["mesa"].sha = "bogus"
    assert status.poll() == ["mesa_master"]
示例#4
0
def main():
    """Parse branch/commit arguments, sync the repo set, and check out
    the requested state."""
    parser = argparse.ArgumentParser(
        description="checks out branches and commits")
    parser.add_argument(
        '--branch',
        type=str,
        default="mesa_master",
        help="The branch to base the checkout on. (default: %(default)s)")
    parser.add_argument(
        'commits',
        metavar='commits',
        type=str,
        nargs='*',
        help='commits to check out, in repo=sha format')
    args = parser.parse_args()

    repo_set = bs.RepoSet()
    repo_set.clone()
    repo_set.fetch()
    bs.BuildSpecification().checkout(args.branch, args.commits)
示例#5
0
# Decide where retest results land: an explicit --dir wins, otherwise a
# timestamped subdirectory under the build master's results area.
results_dir = spec_xml.find("build_master").attrib["results_dir"]
retest_dir = args.dir
if retest_dir == "":
    retest_dir = results_dir + "/update/" + datetime.datetime.now().isoformat()

# NOTE(review): blame appears to be a mutable [project, sha] pair and
# rev_hash a project->sha map for the build under retest -- confirm
# against the caller that assembles them.
if rev_hash[blame[0]] == blame[1]:
    # rsync to save build if the blame is the same as the build
    src_dir = "/".join(dirnames[:-1]) + "/"
    dest_dir = bs.convert_rsync_path(retest_dir)
    cmd = ["rsync", "-rlptD", "--exclude", "/*test/", src_dir, dest_dir]
    bs.run_batch_command(cmd)
else:
    rev_hash[blame[0]] = blame[1]

# retest the set of failed tests on the specified blame revision
repos = bs.RepoSet()
_revspec = bs.RevisionSpecification.from_xml_file(
    os.path.join(os.path.abspath(args.result_path), 'revisions.xml'))
_revspec.checkout()
# Re-read the specification so it reflects the state actually checked out.
_revspec = bs.RevisionSpecification()

# use the full sha for the blame, so it can be looked up in a map when
# processing the config file
blame[1] = str(repos.repo(blame[0]).git.rev_parse(blame[1]))

if not bs.retest_failures(args.result_path, retest_dir):
    print "ERROR: retest failed"
    sys.exit(-1)

# make sure there is enough time for the test files to sync to nfs
time.sleep(20)
示例#6
0
    def test(self):
        """Run the OpenGL CTS through piglit for the current hardware/arch
        and publish uniquely-named junit results where jenkins can read
        them.  Returns early (running nothing) when the mesa version or
        hardware cannot support the suite, or when a retest run has no
        failures to repeat."""
        o = bs.Options()
        pm = bs.ProjectMap()

        # Fall back to the built mesa's version if the caller did not pin one.
        if not self.version:
            self.version = bs.mesa_version()

        conf_file = bs.get_conf_file(o.hardware, o.arch, "cts-test")

        # invoke piglit
        self.env["PIGLIT_CTS_GL_BIN"] = self.build_root + "/bin/gl/cts/glcts"
        out_dir = self.build_root + "/test/" + o.hardware

        # When retesting a previous run, restrict the suite to the tests
        # that failed there; if none did, there is nothing to run.
        include_tests = []
        if o.retest_path:
            testlist = bs.TestLister(o.retest_path + "/test/")
            include_tests = testlist.RetestIncludes(project="cts-test")
            if not include_tests:
                # we were supposed to retest failures, but there were none
                return

        # this test is flaky in glcts.  It passes enough for
        # submission, but as per Ken, no developer will want to look
        # at it to figure out why the test is flaky.
        extra_excludes = [
            "packed_depth_stencil.packed_depth_stencil_copyteximage"
        ]

        suite_names = []
        # disable gl cts on stable versions of mesa, which do not
        # support the feature set.
        if "13.0" in self.version:
            return
        if "17.0" in self.version and "glk" in o.hardware:
            # glk not supported by stable mesa
            return
        suite_names.append("cts_gl")
        # as per Ian, only run gl45
        extra_excludes += [
            "gl30-cts", "gl31-cts", "gl32-cts", "gl33-cts", "gl40-cts",
            "gl41-cts", "gl42-cts", "gl43-cts", "gl44-cts"
        ]
        if "hsw" in o.hardware:
            # flaky cts_gl tests
            extra_excludes += [
                "shader_image_load_store.multiple-uniforms",
                "shader_image_size.basic-nonms-fs",
                "shader_image_size.advanced-nonms-fs",
                "texture_gather.gather-tesselation-shader",
                "vertex_attrib_binding.basic-inputl-case1",
                "gpu_shader_fp64.named_uniform_blocks",
                # gpu hang
                "gl45-cts.tessellation_shader.vertex_spacing",
                "gl45-cts.tessellation_shader.vertex_ordering",
                "gl45-cts.tessellation_shader.tessellation_control_to_tessellation_evaluation.gl_maxpatchvertices_position_pointsize"
            ]

        # Expand each exclude into the "--exclude-tests NAME" pair piglit
        # expects on its command line.
        exclude_tests = []
        for a in extra_excludes:
            exclude_tests += ["--exclude-tests", a]
        if not suite_names:
            # for master, on old hardware, this component will not
            # test anything.  The gles tests are instead targeted with
            # the gles32 cts, in the glescts-test component
            return
        cmd = [self.build_root + "/bin/piglit",
               "run",
               #"-p", "gbm",
               "-b", "junit",
               "--config", conf_file,
               "-c",
               "--exclude-tests", "esext-cts",
               "--junit_suffix", "." + o.hardware + o.arch] + \
               exclude_tests + \
               include_tests + suite_names + [out_dir]

        # Stream output and tolerate any exit code: piglit returns non-zero
        # when tests fail, which is still a usable result.
        bs.run_batch_command(cmd,
                             env=self.env,
                             expected_return_code=None,
                             streamedOutput=True)
        single_out_dir = self.build_root + "/../test"
        if not os.path.exists(single_out_dir):
            os.makedirs(single_out_dir)

        if os.path.exists(out_dir + "/results.xml"):
            # Uniquely name all test files in one directory, for
            # jenkins
            filename_components = ["/piglit-cts", o.hardware, o.arch]
            if o.shard != "0":
                # only put the shard suffix on for non-zero shards.
                # Having _0 suffix interferes with bisection.
                filename_components.append(o.shard)

            revisions = bs.RepoSet().branch_missing_revisions()
            print "INFO: filtering tests from " + out_dir + "/results.xml"
            self.filter_tests(
                revisions, out_dir + "/results.xml",
                single_out_dir + "_".join(filename_components) + ".xml")

            # create a copy of the test xml in the source root, where
            # jenkins can access it.
            cmd = [
                "cp", "-a", "-n", self.build_root + "/../test",
                pm.source_root()
            ]
            bs.run_batch_command(cmd)
        else:
            print "ERROR: no results at " + out_dir + "/results.xml"

        bs.check_gpu_hang()
        bs.Export().export_tests()
示例#7
0
#!/usr/bin/python
# Standalone script: check out a branch and an optional set of per-repo
# commits described on the command line.

import argparse
import os
import sys
# Make the sibling build_support package importable when run as a script.
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(sys.argv[0])), "..")))
import build_support as bs

parser = argparse.ArgumentParser(description="checks out branches and commits")
parser.add_argument('--branch', type=str, default="mesa_master",
                    help="The branch to base the checkout on. (default: %(default)s)")
parser.add_argument('commits', metavar='commits', type=str, nargs='*',
                    help='commits to check out, in repo=sha format')
args = parser.parse_args()

# Clone any missing repos, bring them up to date, then check out the
# requested branch plus commit overrides.
repos = bs.RepoSet(clone=True)
repos.fetch()
bs.BuildSpecification().checkout(args.branch, args.commits)


示例#8
0
def main():
    """Entry point for local builds: parse options, optionally fetch and
    check out a branch, then walk the project dependency graph invoking
    each project's build.py with the translated option list."""
    parser = argparse.ArgumentParser(description='Build projects locally.')

    # TODO: provide a pull action to update the repos
    parser.add_argument('--action', type=str, default=["build"],
                        choices=CsvChoice('fetch', 'build', 'clean', 'test'),
                        action=CsvAction,
                        help="Action to recurse with. 'build', 'clean' "\
                        "or 'test'. (default: %(default)s)")

    parser.add_argument('--project',
                        dest='project',
                        type=str,
                        default="mesa",
                        help='project to build. (default: %(default)s)')
    parser.add_argument('--arch',
                        dest='arch',
                        type=str,
                        default='m64',
                        choices=['m64', 'm32'],
                        help='arch to build. (default: %(default)s)')
    parser.add_argument('--config',
                        type=str,
                        default="release",
                        choices=['release', 'debug'],
                        help="Release or Debug build. (default: %(default)s)")

    parser.add_argument('--type', type=str, default="developer",
                        choices=['developer', 'percheckin',
                                 'daily', 'release'],
                        help="category of tests to run. "\
                        "(default: %(default)s)")

    parser.add_argument('--branch', type=str, default="none",
                        help="Branch specification to build.  "\
                        "See build_specification.xml/branches")
    parser.add_argument(
        '--env',
        type=str,
        default="",
        help="If specified, overrides environment variable settings"
        "EG: 'LIBGL_DEBUG=1 INTEL_DEBUG=perf'")
    parser.add_argument('--hardware',
                        type=str,
                        default='builder',
                        help="The hardware to be targeted for test "
                        "('builder', 'snbgt1', 'ivb', 'hsw', 'bdw'). "
                        "(default: %(default)s)")

    args = parser.parse_args()
    project = args.project

    if "fetch" in args.action:
        # fetch not supported by build.py scripts, which will parse argv
        bs.RepoSet().fetch()
    branch = args.branch
    if (branch != "none"):
        bs.BuildSpecification().checkout(branch)

    # some build_local params are not handled by the Options, which is
    # used by other modules
    o = bs.Options(["bogus"])
    vdict = vars(args)
    # Strip the local-only params (and the already-handled fetch action)
    # before forwarding the rest to Options.
    del vdict["project"]
    del vdict["branch"]
    if "fetch" in vdict["action"]:
        vdict["action"].remove("fetch")
    o.__dict__.update(vdict)
    # The per-project build.py scripts re-parse sys.argv, so rewrite it to
    # carry the translated option list.
    sys.argv = ["bogus"] + o.to_list()

    if "clean" in args.action:
        bs.rmtree(bs.ProjectMap().build_root())

    # Build projects in dependency order: run every ready build, mark it
    # complete, and repeat until the graph is exhausted.
    graph = bs.DependencyGraph(project, o)
    ready = graph.ready_builds()
    pm = bs.ProjectMap()
    while ready:
        for bi in ready:
            graph.build_complete(bi)
            proj_build_dir = pm.project_build_dir(bi.project)
            script = proj_build_dir + "/build.py"
            if os.path.exists(script):
                bs.run_batch_command([sys.executable, script] + o.to_list())
        ready = graph.ready_builds()
示例#9
0
def _append_note(atest, text):
    """Attach a <system-out> element carrying *text* to a testcase."""
    so = ET.Element("system-out")
    so.text = text
    atest.append(so)


def _force_failure(atest, text):
    """Mark a testcase as failed, with *text* explaining why."""
    atest.append(ET.Element("failure"))
    _append_note(atest, text)


# Past-tense verbs for reporting a status that contradicts the configured
# expectation.  Also serves as the set of statuses we know how to handle.
_STATUS_VERB = {"crash": "crashed", "fail": "failed",
                "pass": "passed", "skip": "skipped"}


def post_process_results(xml):
    """Rewrite the junit results file *xml* in place.

    Applies the crucible-test config for the current platform: expected
    failures/crashes are flipped to passes, unexpected statuses are turned
    into failures, tests whose expectation was set by a commit missing
    from this build are neutralized to skips, and every test name gets a
    hardware/arch suffix so results from different machines can share one
    jenkins directory.
    """
    t = ET.parse(xml)
    o = bs.Options()
    conf = None
    long_revisions = bs.RepoSet().branch_missing_revisions()
    # Config files reference abbreviated (6-char) shas.
    missing_revisions = [a_rev[:6] for a_rev in long_revisions]
    try:
        conf = bs.get_conf_file(o.hardware, o.arch, project="crucible-test")
    except bs.NoConfigFile:
        # No expectations for this platform; only the name suffix applies.
        pass
    if conf:
        # key=name, value=status section ("expected-failures" etc.)
        expected_status = {}
        # key=name, value=commit that last changed the expectation
        changed_commit = {}
        c = ConfigParser.SafeConfigParser(allow_no_value=True)
        c.read(conf)
        for section in c.sections():
            for (test, commit) in c.items(section):
                if test in expected_status:
                    raise Exception("test has multiple entries: " + test)
                expected_status[test] = section
                changed_commit[test] = commit
        for atest in t.findall(".//testcase"):
            test_name = atest.attrib["name"]
            # crucible reports hung tests as "lost"; treat them as crashes.
            if atest.attrib["status"] == "lost":
                atest.attrib["status"] = "crash"
            if test_name not in expected_status:
                continue

            expected = expected_status[test_name]

            # If the commit that set this expectation is missing from the
            # build, the expectation may not apply: neutralize the result.
            test_is_stale = False
            for missing_commit in missing_revisions:
                if missing_commit in changed_commit[test_name]:
                    test_is_stale = True
                    # change stale test status to skip
                    for ftag in atest.findall("failure"):
                        atest.remove(ftag)
                    for ftag in atest.findall("error"):
                        atest.remove(ftag)
                    atest.append(ET.Element("skipped"))
                    _append_note(
                        atest,
                        "WARN: the results of this were changed by " +
                        changed_commit[test_name] +
                        ", which is missing from this build.")
                    break
            if test_is_stale:
                continue

            status = atest.attrib["status"]
            if expected == "expected-failures":
                if status == "fail":
                    # change fail to pass
                    for ftag in atest.findall("failure"):
                        atest.remove(ftag)
                    _append_note(atest, "Passing test as an expected failure")
                elif status in _STATUS_VERB:
                    _force_failure(
                        atest, "ERROR: this test " + _STATUS_VERB[status] +
                        " when it expected failure")
                else:
                    raise Exception("test has unknown status: " +
                                    atest.attrib["name"] + " " + status)
            elif expected == "expected-crashes":
                if status == "crash":
                    # change error to pass
                    for ftag in atest.findall("error"):
                        atest.remove(ftag)
                    _append_note(atest, "Passing test as an expected crash")
                elif status in _STATUS_VERB:
                    _force_failure(
                        atest, "ERROR: this test " + _STATUS_VERB[status] +
                        " when it expected crash")
                else:
                    raise Exception("test has unknown status: " +
                                    atest.attrib["name"] + " " + status)

    # Suffix every test with the platform so files from different machines
    # can coexist in one jenkins results directory.
    for atest in t.findall(".//testcase"):
        atest.attrib["name"] = atest.attrib["name"] + "." + o.hardware + o.arch
    t.write(xml)