def delete_old_pre_release_builds():
    """Delete all but the 3 newest pre-release builds from the S3 prerel dir.

    Groups every key under sumatrapdf/prerel/ by the numeric build version
    embedded in its name, then deletes the files of every version except the
    three highest ones.
    """
    s3Dir = "sumatrapdf/prerel/"
    keys = s3.list(s3Dir)
    files_by_ver = {}
    for k in keys:
        # key names look like: sumatrapdf/prerel/SumatraPDF-prerelease-4819.pdb.zip
        # NOTE: regex fixed from r'...-(\d+)*' to r'...-(\d+)' — with the
        # trailing '*' the group was optional, so findall could yield '' and
        # int('') would raise ValueError.
        ver = re.findall(
            r'sumatrapdf/prerel/SumatraPDF-prerelease-(\d+)', k.name)
        ver = int(ver[0])
        val = files_by_ver.get(ver, [])
        val.append(k.name)
        files_by_ver[ver] = val
    # dict.keys() is a view object in Python 3 and has no .sort();
    # sorted() works on both Python 2 and 3.
    versions = sorted(files_by_ver.keys())
    # everything except the 3 most recent versions
    todelete = versions[:-3]
    for vertodelete in todelete:
        for f in files_by_ver[vertodelete]:
            # print("Deleting %s" % f)
            s3.delete(f)
def delete_old_pre_release_builds():
    """Delete all but the 3 newest pre-release builds from the S3 prerel dir.

    Groups every key under sumatrapdf/prerel/ by the numeric build version
    embedded in its name, then deletes the files of every version except the
    three highest ones.
    """
    s3Dir = "sumatrapdf/prerel/"
    keys = s3.list(s3Dir)
    files_by_ver = {}
    for k in keys:
        # key names look like: sumatrapdf/prerel/SumatraPDF-prerelease-4819.pdb.zip
        # NOTE: regex fixed from r'...-(\d+)*' to r'...-(\d+)' — with the
        # trailing '*' the group was optional, so findall could yield '' and
        # int('') would raise ValueError.
        ver = re.findall(r'sumatrapdf/prerel/SumatraPDF-prerelease-(\d+)', k.name)
        ver = int(ver[0])
        val = files_by_ver.get(ver, [])
        val.append(k.name)
        files_by_ver[ver] = val
    # dict.keys() is a view object in Python 3 and has no .sort();
    # sorted() works on both Python 2 and 3.
    versions = sorted(files_by_ver.keys())
    # everything except the 3 most recent versions
    todelete = versions[:-3]
    for vertodelete in todelete:
        for f in files_by_ver[vertodelete]:
            # print("Deleting %s" % f)
            s3.delete(f)
def has_already_been_built(ver):
    """Return True if build artifacts for revision *ver* already exist on S3.

    A build is considered done when either its analyze.html or its
    release_build_log.txt is present under sumatrapdf/buildbot/<ver>/.
    """
    s3_dir = "sumatrapdf/buildbot/"
    markers = (
        s3_dir + ver + "/analyze.html",
        s3_dir + ver + "/release_build_log.txt",
    )
    return any(k.name in markers for k in s3.list(s3_dir))
def test_s3():
    """Exercise the S3Job Spark job: read+write, then count-only with the
    --countOnly flag both after and before --readUrl.

    The AWS credential conf list was previously duplicated verbatim three
    times; it is now built by a single helper so the three invocations cannot
    drift apart.
    """
    linecount_path = os.path.join(THIS_DIR, 'resources', 'linecount.txt')
    s3.upload_file(linecount_path)

    def _s3_job_args():
        # AWS credentials are forwarded to the driver via Mesos env-var confs.
        return ["--conf",
                "spark.mesos.driverEnv.AWS_ACCESS_KEY_ID={}".format(
                    os.environ["AWS_ACCESS_KEY_ID"]),
                "--conf",
                "spark.mesos.driverEnv.AWS_SECRET_ACCESS_KEY={}".format(
                    os.environ["AWS_SECRET_ACCESS_KEY"]),
                "--class", "S3Job"]

    # 1) read the uploaded file and write the result back to S3
    app_args = "--readUrl {} --writeUrl {}".format(
        s3.s3n_url('linecount.txt'), s3.s3n_url("linecount-out"))
    utils.run_tests(app_url=_scala_test_jar_url(),
                    app_args=app_args,
                    expected_output="Read 3 lines",
                    app_name="/spark",
                    args=_s3_job_args())
    # the write phase must have produced at least one output object
    assert len(list(s3.list("linecount-out"))) > 0

    # 2) count only, flag after the url
    app_args = "--readUrl {} --countOnly".format(s3.s3n_url('linecount.txt'))
    utils.run_tests(app_url=_scala_test_jar_url(),
                    app_args=app_args,
                    expected_output="Read 3 lines",
                    app_name="/spark",
                    args=_s3_job_args())

    # 3) count only, flag before the url (argument-order robustness)
    app_args = "--countOnly --readUrl {}".format(s3.s3n_url('linecount.txt'))
    utils.run_tests(app_url=_scala_test_jar_url(),
                    app_args=app_args,
                    expected_output="Read 3 lines",
                    app_name="/spark",
                    args=_s3_job_args())
def test_s3():
    """Upload a fixture file and verify the S3Job Spark job reads it and
    writes output back to S3."""
    linecount_path = os.path.join(THIS_DIR, 'resources', 'linecount.txt')
    s3.upload_file(linecount_path)

    # input url followed by output url, as positional app arguments
    app_args = "{} {}".format(s3.s3n_url('linecount.txt'),
                              s3.s3n_url("linecount-out"))

    # AWS credentials are forwarded to the driver via Mesos env-var confs
    conf_args = []
    conf_args += ["--conf",
                  "spark.mesos.driverEnv.AWS_ACCESS_KEY_ID={}".format(
                      os.environ["AWS_ACCESS_KEY_ID"])]
    conf_args += ["--conf",
                  "spark.mesos.driverEnv.AWS_SECRET_ACCESS_KEY={}".format(
                      os.environ["AWS_SECRET_ACCESS_KEY"])]
    conf_args += ["--class", "S3Job"]

    jar_url = _upload_file(os.environ["SCALA_TEST_JAR_PATH"])
    utils.run_tests(jar_url, app_args, "", conf_args)

    # the job must have produced at least one output object
    assert len(list(s3.list("linecount-out"))) > 0
def build_index_html(stats_for_ver, checkin_comment_for_ver):
    """Build the buildbot results index page as a single HTML string.

    stats_for_ver: callable mapping a version string to a stats object
        (attributes used: analyze_*_warnings_count, rel_failed,
        rel_sumatrapdf_exe_size, rel_installer_exe_size).
    checkin_comment_for_ver: callable mapping a version string to the
        checkin comment bytes for that revision.
    Returns the complete HTML document (one table row per build, newest
    first, capped at 512 rows).
    """
    s3_dir = "sumatrapdf/buildbot/"
    html = '<html><head><meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>%s</head><body>\n' % g_index_html_css
    html += "<p>SumatraPDF buildbot results:</p>\n"
    names = [n.name for n in s3.list(s3_dir)]
    # filter out top-level files like index.html and sizes.js
    # (per-build keys split into exactly 4 path components)
    names = [n[len(s3_dir):] for n in names if len(n.split("/")) == 4]
    # newest builds first; the first path component is the numeric version
    names.sort(reverse=True, key=lambda name: int(name.split("/")[0]))
    html += '<table id="table-5"><tr>' + th("svn") + th("/analyze")
    html += th("build") + th("tests") + th("SumatraPDF.exe")
    html += th("Installer.exe") + th("efi") + th("checkin comment") + '</tr>\n'
    files_by_ver = group_by_ver(names)
    # render at most 512 most-recent builds
    for arr in files_by_ver[:512]:
        (ver, files) = arr
        if "stats.txt" not in files:
            print("stats.txt missing in %s (%s)" % (ver, str(files)))
        assert ("stats.txt" in files)
        try:
            stats = stats_for_ver(ver)
        except:
            # dump context before re-raising so failures are diagnosable
            print("names: %s" % str(names))
            print("ver: %s" % str(ver))
            print("files: %s" % str(files))
            raise
        total_warnings = stats.analyze_sumatra_warnings_count + stats.analyze_mupdf_warnings_count + stats.analyze_ext_warnings_count
        if int(
            ver
        ) >= g_first_analyze_build and total_warnings > 0 and not stats.rel_failed:
            assert ("analyze.html" in files)
        s3_ver_url = "http://kjkpub.s3.amazonaws.com/" + s3_dir + ver + "/"
        html += " <tr>\n"
        # build number: link to the source revision
        src_url = "https://code.google.com/p/sumatrapdf/source/detail?r=" + ver
        html += td(a(src_url, ver), 4) + "\n"
        # /analyze warnings count (only for builds new enough to have it)
        if int(ver) >= g_first_analyze_build and total_warnings > 0:
            url = s3_ver_url + "analyze.html"
            s = "%d %d %d" % (stats.analyze_sumatra_warnings_count,
                              stats.analyze_mupdf_warnings_count,
                              stats.analyze_ext_warnings_count)
            html += td(a(url, s), 4)
        else:
            html += td("", 4)
        # release build status
        if stats.rel_failed:
            url = s3_ver_url + "release_build_log.txt"
            s = '<b>' + a(url, "fail") + '</b>'
        else:
            # NOTE(review): '<font color="green"<b>' looks like malformed
            # HTML — a '>' seems to be missing after the color attribute.
            # Left untouched here; confirm and fix separately.
            s = '<font color="green"<b>ok!</b></font>'
        html += td(s, 4) + "\n"
        # tests status
        if "tests_error.txt" in files:
            url = s3_ver_url + "tests_error.txt"
            s = '<b>' + a(url, "fail") + '</b>'
        else:
            # NOTE(review): same suspected malformed <font> tag as above.
            s = '<font color="green"<b>ok!</b></font>'
        html += td(s, 4) + "\n"
        # SumatraPDF.exe, Installer.exe size (with diff vs previous good build)
        if stats.rel_failed:
            html += td("", 4) + "\n" + td("", 4) + "\n"
        else:
            prev_stats = stats_for_previous_successful_build(
                ver, stats_for_ver)
            if None == prev_stats:
                # no earlier successful build: show absolute sizes only
                num_s = formatInt(stats.rel_sumatrapdf_exe_size)
                html += td(num_s, 4) + "\n"
                num_s = formatInt(stats.rel_installer_exe_size)
                html += td(num_s, 4) + "\n"
            else:
                # show size plus the delta against the previous good build
                s = size_diff_html(stats.rel_sumatrapdf_exe_size -
                                   prev_stats.rel_sumatrapdf_exe_size)
                num_s = formatInt(stats.rel_sumatrapdf_exe_size)
                s = num_s + s
                html += td(s, 4) + "\n"
                s = size_diff_html(stats.rel_installer_exe_size -
                                   prev_stats.rel_installer_exe_size)
                num_s = formatInt(stats.rel_installer_exe_size)
                s = num_s + s
                html += td(s, 4) + "\n"
        # efi diff
        if "efi_diff.txt" in files:
            url = s3_ver_url + "efi_diff.txt"
            html += td(a(url, "diff"), 4)
        else:
            # NOTE(review): other empty cells use td("", 4); this one omits
            # the second argument — possibly unintentional. Left as-is.
            html += td("")
        # checkin comment (trimmed; "..." links back to the full revision)
        (comment, trimmed) = util.trim_str(checkin_comment_for_ver(ver))
        comment = comment.decode('utf-8')
        comment = cgi.escape(comment)
        if trimmed:
            comment += a(src_url, "...")
        html += td(comment, 4) + "\n"
        html += " </tr>\n"
    html += "</table>"
    html += "</body></html>\n"
    return html
def get_s3_files():
    """Return the names of all S3 keys under sumatrapdf/buildbot/.

    The listing is fetched once and cached in the module-level g_s3_files;
    subsequent calls return the cached list.
    """
    global g_s3_files
    # PEP 8: compare to None with `is`, not `==`
    if g_s3_files is None:
        files = s3.list("sumatrapdf/buildbot/")
        g_s3_files = [f.name for f in files]
    return g_s3_files
def build_index_html(stats_for_ver, checkin_comment_for_ver):
    """Build the buildbot results index page as a single HTML string.

    stats_for_ver: callable mapping a version string to a stats object
        (attributes used: analyze_*_warnings_count, rel_failed,
        rel_sumatrapdf_exe_size, rel_installer_exe_size).
    checkin_comment_for_ver: callable mapping a version string to the
        checkin comment bytes for that revision.
    Returns the complete HTML document (one table row per build, newest
    first, capped at 512 rows).
    """
    s3_dir = "sumatrapdf/buildbot/"
    html = '<html><head><meta http-equiv="Content-Type" content="text/html; charset=utf-8"/>%s</head><body>\n' % g_index_html_css
    html += "<p>SumatraPDF buildbot results:</p>\n"
    names = [n.name for n in s3.list(s3_dir)]
    # filter out top-level files like index.html and sizes.js
    # (per-build keys split into exactly 4 path components)
    names = [n[len(s3_dir):] for n in names if len(n.split("/")) == 4]
    # newest builds first; the first path component is the numeric version
    names.sort(reverse=True, key=lambda name: int(name.split("/")[0]))
    html += '<table id="table-5"><tr>' + th("svn") + th("/analyze")
    html += th("build") + th("tests") + th("SumatraPDF.exe")
    html += th("Installer.exe") + th("efi") + th("checkin comment") + '</tr>\n'
    files_by_ver = group_by_ver(names)
    # render at most 512 most-recent builds
    for arr in files_by_ver[:512]:
        (ver, files) = arr
        if "stats.txt" not in files:
            print("stats.txt missing in %s (%s)" % (ver, str(files)))
        assert("stats.txt" in files)
        try:
            stats = stats_for_ver(ver)
        except:
            # dump context before re-raising so failures are diagnosable
            print("names: %s" % str(names))
            print("ver: %s" % str(ver))
            print("files: %s" % str(files))
            raise
        total_warnings = stats.analyze_sumatra_warnings_count + \
            stats.analyze_mupdf_warnings_count + \
            stats.analyze_ext_warnings_count
        if int(ver) >= g_first_analyze_build and total_warnings > 0 and not stats.rel_failed:
            assert("analyze.html" in files)
        s3_ver_url = "http://kjkpub.s3.amazonaws.com/" + s3_dir + ver + "/"
        html += " <tr>\n"
        # build number: link to the source revision
        src_url = "https://code.google.com/p/sumatrapdf/source/detail?r=" + ver
        html += td(a(src_url, ver), 4) + "\n"
        # /analyze warnings count (only for builds new enough to have it)
        if int(ver) >= g_first_analyze_build and total_warnings > 0:
            url = s3_ver_url + "analyze.html"
            s = "%d %d %d" % (stats.analyze_sumatra_warnings_count,
                              stats.analyze_mupdf_warnings_count,
                              stats.analyze_ext_warnings_count)
            html += td(a(url, s), 4)
        else:
            html += td("", 4)
        # release build status
        if stats.rel_failed:
            url = s3_ver_url + "release_build_log.txt"
            s = '<b>' + a(url, "fail") + '</b>'
        else:
            # NOTE(review): '<font color="green"<b>' looks like malformed
            # HTML — a '>' seems to be missing after the color attribute.
            # Left untouched here; confirm and fix separately.
            s = '<font color="green"<b>ok!</b></font>'
        html += td(s, 4) + "\n"
        # tests status
        if "tests_error.txt" in files:
            url = s3_ver_url + "tests_error.txt"
            s = '<b>' + a(url, "fail") + '</b>'
        else:
            # NOTE(review): same suspected malformed <font> tag as above.
            s = '<font color="green"<b>ok!</b></font>'
        html += td(s, 4) + "\n"
        # SumatraPDF.exe, Installer.exe size (with diff vs previous good build)
        if stats.rel_failed:
            html += td("", 4) + "\n" + td("", 4) + "\n"
        else:
            prev_stats = stats_for_previous_successful_build(
                ver, stats_for_ver)
            if None == prev_stats:
                # no earlier successful build: show absolute sizes only
                num_s = formatInt(stats.rel_sumatrapdf_exe_size)
                html += td(num_s, 4) + "\n"
                num_s = formatInt(stats.rel_installer_exe_size)
                html += td(num_s, 4) + "\n"
            else:
                # show size plus the delta against the previous good build
                s = size_diff_html(
                    stats.rel_sumatrapdf_exe_size - prev_stats.rel_sumatrapdf_exe_size)
                num_s = formatInt(stats.rel_sumatrapdf_exe_size)
                s = num_s + s
                html += td(s, 4) + "\n"
                s = size_diff_html(
                    stats.rel_installer_exe_size - prev_stats.rel_installer_exe_size)
                num_s = formatInt(stats.rel_installer_exe_size)
                s = num_s + s
                html += td(s, 4) + "\n"
        # efi diff
        if "efi_diff.txt" in files:
            url = s3_ver_url + "efi_diff.txt"
            html += td(a(url, "diff"), 4)
        else:
            # NOTE(review): other empty cells use td("", 4); this one omits
            # the second argument — possibly unintentional. Left as-is.
            html += td("")
        # checkin comment (trimmed; "..." links back to the full revision)
        (comment, trimmed) = util.trim_str(checkin_comment_for_ver(ver))
        comment = comment.decode('utf-8')
        comment = cgi.escape(comment)
        if trimmed:
            comment += a(src_url, "...")
        html += td(comment, 4) + "\n"
        html += " </tr>\n"
    html += "</table>"
    html += "</body></html>\n"
    return html
def GET(self, request, response):
    """Handle a GET request: when a 'list' query parameter is present,
    set response.data to the JSON-encoded S3 listing of the request path
    (leading '/' stripped), then return the response.

    NOTE(review): the 'list' parameter is read from response.params rather
    than request.params — looks suspicious; confirm against the framework.
    """
    list_type = response.params.get('list')
    # PEP 8 idiom: `is not None` instead of `not ... is None`
    if list_type is not None:
        response.data = dumps(s3.list(request.path[1:]))
    return response