# Re-run previously failed tests against a "blame" revision, reusing the
# original build when possible.
# NOTE(review): this chunk is mid-script; retest_dir/results_dir, rev_hash,
# blame, dirnames and args are established earlier in the file — confirm
# against the full source.
retest_dir = args.dir
if retest_dir == "":
    # default: a timestamped directory under the master results area
    retest_dir = results_dir + "/update/" + datetime.datetime.now().isoformat()

if rev_hash[blame[0]] == blame[1]:
    # rsync to save build if the blame is the same as the build
    src_dir = "/".join(dirnames[:-1]) + "/"
    dest_dir = bs.convert_rsync_path(retest_dir)
    # exclude test results; only the built artifacts are copied
    cmd = ["rsync", "-rlptD", "--exclude", "/*test/", src_dir, dest_dir]
    bs.run_batch_command(cmd)
else:
    # blame revision differs from the build: record it so the retest below
    # checks out the blamed commit for that project
    rev_hash[blame[0]] = blame[1]

# retest the set of failed tests on the specified blame revision
repos = bs.RepoSet()
_revspec = bs.RevisionSpecification(from_cmd_line=[k + "=" + v for k, v in rev_hash.items()])
_revspec.checkout()
_revspec = bs.RevisionSpecification()

# use the full sha for the blame, so it can be looked up in a map when
# processing the config file
blame[1] = str(repos.repo(blame[0]).git.rev_parse(blame[1]))

if not bs.retest_failures(args.result_path, retest_dir):
    print "ERROR: retest failed"
    sys.exit(-1)

# make sure there is enough time for the test files to sync to nfs
time.sleep(20)
reproduced_failures = bs.TestLister(retest_dir + "/test/")
# Variant of the blame-retest flow that restores the revision set from the
# result directory's revisions.xml rather than from command-line parameters.
# NOTE(review): mid-script chunk; rev_hash, blame, dirnames, retest_dir and
# args come from earlier in the file — confirm against the full source.
if rev_hash[blame[0]] == blame[1]:
    # rsync to save build if the blame is the same as the build
    src_dir = "/".join(dirnames[:-1]) + "/"
    dest_dir = bs.convert_rsync_path(retest_dir)
    # exclude test results; only the built artifacts are copied
    cmd = ["rsync", "-rlptD", "--exclude", "/*test/", src_dir, dest_dir]
    bs.run_batch_command(cmd)
else:
    # blame revision differs from the build: record it for the retest below
    rev_hash[blame[0]] = blame[1]

# retest the set of failed tests on the specified blame revision
repos = bs.RepoSet()
_revspec = bs.RevisionSpecification.from_xml_file(
    os.path.join(os.path.abspath(args.result_path), 'revisions.xml'))
_revspec.checkout()
_revspec = bs.RevisionSpecification()

# use the full sha for the blame, so it can be looked up in a map when
# processing the config file
blame[1] = str(repos.repo(blame[0]).git.rev_parse(blame[1]))

if not bs.retest_failures(args.result_path, retest_dir):
    print "ERROR: retest failed"
    sys.exit(-1)

# make sure there is enough time for the test files to sync to nfs
time.sleep(20)
reproduced_failures = bs.TestLister(retest_dir + "/test/")
print "Found failures:"
reproduced_failures.Print()
# get revisions from out directory test_dir = os.path.abspath(args.result_path + "/test") if not os.path.exists(test_dir): print "ERROR: no tests in --result_path: " + test_dir sys.exit(-1) dirnames = os.path.abspath(test_dir).split("/") hash_dir = dirnames[5] revs = hash_dir.split("_") pm = bs.ProjectMap() spec_xml = pm.build_spec() results_dir = spec_xml.find("build_master").attrib["results_dir"] repos = bs.RepoSet() _revspec = bs.RevisionSpecification(from_cmd_line=revs) _revspec.checkout() def make_test_list(testlister): _test_list = [] for atest in testlister.Tests(): test_name_good_chars = re.sub('[_ !:=]', ".", atest.test_name) _test_list.append(test_name_good_chars + ".all_platforms") return "--piglit_test=" + ",".join(_test_list) good_revisions = {} # accept either a single rev or a revision hash for arev in args.good_rev.split(): if "=" not in arev:
def test_revspec():
    """Smoke test: a default RevisionSpecification can be built and checked out."""
    spec = bs.RevisionSpecification()
    spec.checkout()
# get revisions from out directory test_dir = os.path.abspath(args.result_path + "/test") if not os.path.exists(test_dir): print "ERROR: no tests in --result_path: " + test_dir sys.exit(-1) dirnames = os.path.abspath(test_dir).split("/") hash_dir = dirnames[5] revs = hash_dir.split("_") pm = bs.ProjectMap() spec_xml = pm.build_spec() results_dir = spec_xml.find("build_master").attrib["results_dir"] repos = bs.RepoSet() _revspec = bs.RevisionSpecification(from_cmd_line=revs) _revspec.checkout() proj_rev = args.good_rev.split("=") proj = proj_rev[0] good_rev = proj_rev[1] proj_repo = repos.repo(proj) print proj + " revisions under bisection:" found = False commits = [] for commit in proj_repo.iter_commits(max_count=5000): commits.append(commit) print commit.hexsha if good_rev in commit.hexsha: found = True
def main():
    """Build and run a single piglit test on jenkins.

    Parses --branch/--revision/--test, derives arch/hardware from the test
    name suffix (e.g. "ilkm32"), checks out the requested revisions, builds
    the piglit-test project and its dependencies on jenkins, then collates
    the test results into the local output directory.
    """
    # reuse the options from the gasket
    o = bs.Options([sys.argv[0]])
    description = "builds a component on jenkins"
    parser = argparse.ArgumentParser(description=description,
                                     parents=[o._parser],
                                     conflict_handler="resolve")
    parser.add_argument('--branch', type=str, default="mesa_master",
                        help="Branch specification to build. "
                        "See build_specification.xml/branches")
    parser.add_argument('--revision', type=str, default="",
                        help="specific set of revisions to build.")
    parser.add_argument('--test', type=str, default=None,
                        help="Name of test to execute. Arch/hardware suffix "
                        "will override those options")
    args = parser.parse_args()
    branch = args.branch
    revision = args.revision
    test = args.test

    # some build_local params are not handled by the Options, which is
    # used by other modules. This code strips out incompatible args
    o = bs.Options(["bogus"])
    vdict = vars(args)
    del vdict["branch"]
    del vdict["revision"]
    del vdict["test"]

    # override hardware/arch with suffix if available
    if not test:
        print "ERROR: --test argument required"
        sys.exit(-1)

    # the last dot-separated component may encode hardware+arch, e.g. "bdwm64"
    test_suffix = test.split(".")[-1]
    if test_suffix[-3:] in ["m32", "m64"]:
        vdict["arch"] = test_suffix[-3:]
        vdict["hardware"] = test_suffix[:-3]
    else:
        if vdict["hardware"] == "builder":
            # can't run tests on a builder
            vdict["hardware"] = "bdwgt2"
        # set the suffix in the way that piglit-test expects, eg "ilkm32"
        test = test + "." + vdict["hardware"] + vdict["arch"]

    o.__dict__.update(vdict)
    sys.argv = ["bogus"] + o.to_list()

    # check out the branch, refined by any manually-specfied revisions
    bspec = bs.BuildSpecification()
    bspec.checkout(branch)
    if (revision):
        revspec = bs.RevisionSpecification.from_cmd_line_param(
            revision.split())
        revspec.checkout()
    # re-read the specification so it reflects what is actually checked out
    revspec = bs.RevisionSpecification()
    print "Building revision: " + revspec.to_cmd_line_param()

    # create a result_path that is unique for this set of builds
    spec_xml = bs.ProjectMap().build_spec()
    results_dir = spec_xml.find("build_master").attrib["results_dir"]
    result_path = "/".join([
        results_dir,
        branch,
        revspec.to_cmd_line_param().replace(" ", "_"),
        "single_test"
    ])
    o.result_path = result_path

    # allow re-execution of tests (if different test was specified)
    bs.rmtree(result_path + "/test")

    depGraph = bs.DependencyGraph("piglit-test", o)
    bi = bs.ProjectInvoke(project="piglit-test", options=o)

    # remove the test build, because we want to build it directly
    depGraph.build_complete(bi)
    bi.set_info("status", "single-test-rebuild")

    jen = bs.Jenkins(result_path=result_path,
                     revspec=revspec)
    jen.build_all(depGraph)
    jen.build(bi, extra_arg="--piglit_test=" + test)
    jen.wait_for_build()
    # NOTE(review): presumably allows results to finish syncing — confirm
    time.sleep(10)

    pm = bs.ProjectMap()
    out_test_dir = pm.output_dir()
    if os.path.exists(out_test_dir):
        bs.rmtree(out_test_dir)
    os.makedirs(out_test_dir)
    collate_tests(result_path, out_test_dir)
"(default: %(default)s)") args = parser.parse_args(sys.argv[1:]) if not args.series_name: print "ERROR: --series_name required" sys.exit(-1) repos = bs.RepoSet() repos.fetch() found = False for project in ["mesa", "piglit", "waffle", "drm", "crucible"]: try: revspec = bs.RevisionSpecification(revisions={project: args.start_rev}) revspec.checkout() except: print args.start_rev + " not found in " + project continue print args.start_rev + " found in " + project found = True break if not found: sys.exit(-1) repo = repos.repo(project) try:
def main(): # reuse the options from the gasket o = bs.Options([sys.argv[0]]) description = "builds a component on jenkins" parser = argparse.ArgumentParser(description=description, parents=[o._parser], conflict_handler="resolve") parser.add_argument('--project', dest='project', type=str, default="", help='Project to build. Default project is specified '\ 'for the branch in build_specification.xml') parser.add_argument('--branch', type=str, default="mesa_master", help="Branch specification to build. "\ "See build_specification.xml/branches") parser.add_argument('--revision', type=str, default="", help="specific set of revisions to build.") parser.add_argument('--rebuild', type=str, default="false", choices=['true', 'false'], help="specific set of revisions to build." "(default: %(default)s)") parser.add_argument("--tar", help="generate tar for email notification", action="store_true") parser.add_argument('--results_subdir', type=str, default="", help="Subdirectory under results_dir to place results." " Use this to prevent conflicts when running" "multiple concurrent tests on the same branch.") args = parser.parse_args() projects = [] if args.project: projects = args.project.split(",") branch = args.branch revision = args.revision rebuild = args.rebuild results_subdir = args.results_subdir or branch # some build_local params are not handled by the Options, which is # used by other modules. 
This code strips out incompatible args o = bs.Options(["bogus"]) vdict = vars(args) del vdict["project"] del vdict["branch"] del vdict["revision"] del vdict["rebuild"] o.__dict__.update(vdict) sys.argv = ["bogus"] + o.to_list() bspec = bs.BuildSpecification() pm = bs.ProjectMap() bs.rmtree(pm.source_root() + "/test_summary.txt") bs.rmtree(pm.source_root() + "results/test/results.tgz") # start with the specified branch, then layer any revision spec on # top of it bspec.checkout(branch) revspec = None if (revision): revspec = bs.RevisionSpecification.from_cmd_line_param( revision.split()) revspec.checkout() revspec = bs.RevisionSpecification() print "Building revision: " + revspec.to_cmd_line_param() hashstr = revspec.to_cmd_line_param().replace(" ", "_") # create a result_path that is unique for this set of builds spec_xml = pm.build_spec() results_dir = spec_xml.find("build_master").attrib["results_dir"] result_path = "/".join([results_dir, results_subdir, hashstr, o.type]) o.result_path = result_path if rebuild == "true" and os.path.exists(result_path): print "Removing existing results." 
mvdir = os.path.normpath(result_path + "/../" + datetime.datetime.now().isoformat()) os.rename(result_path, mvdir) if not projects: branchspec = bspec.branch_specification(branch) projects = [branchspec.project] # use a global, so signal handler can abort builds when scheduler # is interrupted global jen jen = bs.Jenkins(result_path=result_path, revspec=revspec) depGraph = bs.DependencyGraph(projects, o) out_test_dir = pm.output_dir() if os.path.exists(out_test_dir): bs.rmtree(out_test_dir) os.makedirs(out_test_dir) # to collate all logs in the scheduler out_log_dir = pm.output_dir() if os.path.exists(out_log_dir): bs.rmtree(out_log_dir) os.makedirs(out_log_dir) # Add a revisions.xml file if not os.path.exists(result_path): os.makedirs(result_path) revspec.to_elementtree().write(os.path.join(result_path, 'revisions.xml')) # use a global, so signal handler can abort builds when scheduler # is interrupted try: jen.build_all(depGraph, branch=branch) finally: collate_tests(result_path, out_test_dir, make_tar=args.tar)
if not bs.retest_failures(args.result_path, bisect_dir): print "ERROR: retest failed" # make sure there is enough time for the test files to sync to nfs time.sleep(20) new_failures = bs.TestLister(bisect_dir + "/test/") if not new_failures.Tests(): print "All tests fixed" sys.exit(0) print "Found failures:" new_failures.Print() revspec = bs.RevisionSpecification(revisions={proj: commits[-1].hexsha}) revspec.checkout() revspec = bs.RevisionSpecification() hashstr = revspec.to_cmd_line_param().replace(" ", "_") old_out_dir = "/".join([bisect_dir, hashstr]) print "Building old mesa to: " + old_out_dir bs.retest_failures(bisect_dir, old_out_dir) time.sleep(20) tl = bs.TestLister(old_out_dir + "/test/") print "old failures:" tl.Print() print "failures due to " + proj + ":" proj_failures = new_failures.TestsNotIn(tl) for a_test in proj_failures:
# get revisions from out directory test_dir = os.path.abspath(args.result_path + "/test") if not os.path.exists(test_dir): print "ERROR: no tests in --result_path: " + test_dir sys.exit(-1) dirnames = os.path.abspath(test_dir).split("/") hash_dir = dirnames[5] revs = hash_dir.split("_") spec_xml = bs.ProjectMap().build_spec() results_dir = spec_xml.find("build_master").attrib["results_dir"] repos = bs.RepoSet() _revspec = bs.RevisionSpecification(from_cmd_line=revs) _revspec.checkout() def make_test_list(testlister): _test_list = [] for atest in testlister.Tests(): test_name_good_chars = re.sub('[_ !:=]', ".", atest.test_name) _test_list.append(test_name_good_chars + ".all_platforms") return "--piglit_test=" + ",".join(_test_list) good_revisions = {} # accept either a single rev or a revision hash for arev in args.good_rev.split(): if "=" not in arev:
def main():
    """Build a mesa revision for performance testing on jenkins.

    The --revision argument accepts either a single mesa revision, a
    "start:end" range (in which case the midpoint commit is built, to
    bisect-fill a perf plot), or nothing (a random commit newer than the
    17.0 branchpoint is chosen).
    """
    # reuse the options from the gasket
    o = bs.Options([sys.argv[0]])
    description = "builds a component on jenkins"
    parser = argparse.ArgumentParser(description=description,
                                     parents=[o._parser],
                                     conflict_handler="resolve")
    parser.add_argument('--project', dest='project', type=str, default="",
                        help='Project to build. Default project is specified '
                        'for the branch in build_specification.xml')
    parser.add_argument('--revision', type=str, default="",
                        help="mesa revision to test.")
    args = parser.parse_args()
    projects = []
    if args.project:
        projects = args.project.split(",")
    revision = args.revision

    bspec = bs.BuildSpecification()
    bspec.checkout("mesa_perf")
    mesa_repo = git.Repo(bs.ProjectMap().project_source_dir("mesa"))

    if ":" in revision:
        # range form "start:end": pick the midpoint commit between them
        (start_rev, end_rev) = revision.split(":")
        if not end_rev:
            # user selected the last point in a plot.  Build current master
            revision = "mesa=" + mesa_repo.git.rev_parse("HEAD", short=True)
        elif not start_rev:
            print "ERROR: user-generated perf builds cannot add older data points to the plot"
            sys.exit(-1)
        else:
            # walk history from end_rev back to start_rev, collecting shas
            commits = []
            start_commit = mesa_repo.commit(start_rev)
            found = False
            for commit in mesa_repo.iter_commits(end_rev, max_count=8000):
                if commit == start_commit:
                    found = True
                    break
                commits.append(commit.hexsha)
            if not found:
                print "ERROR: " + start_rev + " not found in history of " + end_rev
                sys.exit(-1)
            # midpoint of the range (Python 2 integer division)
            revision = "mesa=" + commits[len(commits)/2]

    # some build_local params are not handled by the Options, which is
    # used by other modules.  This code strips out incompatible args
    o = bs.Options(["bogus"])
    vdict = vars(args)
    del vdict["project"]
    del vdict["revision"]
    o.__dict__.update(vdict)
    sys.argv = ["bogus"] + o.to_list()

    pm = bs.ProjectMap()
    bs.rmtree(pm.source_root() + "/test_summary.txt")

    # checkout the desired revision on top of recent revisions
    if not revision:
        # randomly select a commit post 11.2
        branch_commit = mesa_repo.tags["17.0-branchpoint"].commit.hexsha
        commits = []
        for commit in mesa_repo.iter_commits('origin/master', max_count=8000):
            if commit.hexsha == branch_commit:
                break
            commits.append(commit.hexsha)
        revision = "mesa=" + str(commits[int(random.random() * len(commits))])

    revspec = bs.RevisionSpecification(from_cmd_line=revision.split())
    revspec.checkout()
    # re-read the specification so it reflects what is actually checked out
    revspec = bs.RevisionSpecification()
    hashstr = "mesa=" + revspec.revision("mesa")
    print "Building revision: " + hashstr

    # create a result_path that is unique for this set of builds
    spec_xml = pm.build_spec()
    results_dir = spec_xml.find("build_master").attrib["results_dir"]
    result_path = "/".join([results_dir, "perf", hashstr])
    o.result_path = result_path

    if not projects:
        projects = ["perf-all"]

    # use a global, so signal handler can abort builds when scheduler
    # is interrupted
    global jen
    jen = bs.Jenkins(result_path=result_path,
                     revspec=revspec)

    depGraph = bs.DependencyGraph(projects, o)
    # force a rebuild of everything except the perf harness itself
    for i in depGraph.all_builds():
        if i.project != "mesa-perf":
            i.set_info("status", "rebuild")

    try:
        jen.build_all(depGraph, branch="mesa_master")
    except Exception as e:
        print "ERROR: encountered failure: " + str(e)
        raise