def transform_video(inp_path, out_path, process_frame_fn, **kwargs):
    util.ensure_path_exists(out_path)
    with imageio.get_reader(inp_path) as reader:
        fps = reader.get_meta_data()['fps']
        with imageio.get_writer(out_path, fps=fps, codec='h264', **kwargs) as writer:
            for frame in util.progressbar(reader):
                writer.append_data(process_frame_fn(frame))

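# Illustrative usage sketch (added example, not part of the original code):
# `process_frame_fn` can be any function mapping one RGB frame array to another.
# The grayscale converter below is a hypothetical example built only on numpy.
def example_grayscale_frame(frame):
    # Average the color channels, then replicate back to 3 channels so the writer still gets RGB.
    gray = frame.mean(axis=-1, keepdims=True).astype(frame.dtype)
    return np.repeat(gray, 3, axis=-1)

# transform_video('input.mp4', 'output_gray.mp4', example_grayscale_frame)
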
def make_efficient_example(ex, further_expansion_factor=1, further_scale_up=1, dir_suffix=''):
    """Make example by storing the image in a cropped and resized version for efficient loading"""
    # Determine which area we will need from the image
    # This is a bit larger than the tight crop because of the geometric augmentations
    max_rotate = np.pi / 6
    padding_factor = 1 / 0.85
    scale_up_factor = 1 / 0.85 * further_scale_up
    scale_down_factor = 1 / 0.85
    shift_factor = 1.1
    base_dst_side = 256

    box_center = boxlib.center(ex.bbox)
    s, c = np.sin(max_rotate), np.cos(max_rotate)
    w, h = ex.bbox[2:]
    rot_bbox_side = max(c * w + s * h, c * h + s * w)
    rot_bbox = boxlib.box_around(box_center, rot_bbox_side)
    scale_factor = min(base_dst_side / np.max(ex.bbox[2:]) * scale_up_factor, 1)
    expansion_factor = (
        padding_factor * shift_factor * scale_down_factor * further_expansion_factor)
    expanded_bbox = boxlib.expand(rot_bbox, expansion_factor)
    expanded_bbox = boxlib.intersect(expanded_bbox, np.array([0, 0, 1000, 1000]))

    new_camera = copy.deepcopy(ex.camera)
    new_camera.intrinsic_matrix[:2, 2] -= expanded_bbox[:2]
    new_camera.scale_output(scale_factor)
    new_camera.undistort()

    new_im_relpath = ex.image_path.replace('h36m', f'h36m_downscaled{dir_suffix}')
    new_im_path = f'{paths.DATA_ROOT}/{new_im_relpath}'
    if not (util.is_file_newer(new_im_path, "2019-11-14T23:33:14")
            and improc.is_image_readable(new_im_path)):
        im = improc.imread_jpeg(ex.image_path)
        dst_shape = improc.rounded_int_tuple(scale_factor * expanded_bbox[[3, 2]])
        new_im = cameralib.reproject_image(im, ex.camera, new_camera, dst_shape)
        util.ensure_path_exists(new_im_path)
        imageio.imwrite(new_im_path, new_im)

    new_bbox_topleft = cameralib.reproject_image_points(ex.bbox[:2], ex.camera, new_camera)
    new_bbox = np.concatenate([new_bbox_topleft, ex.bbox[2:] * scale_factor])
    ex = ps3d.Pose3DExample(
        new_im_relpath, ex.world_coords, new_bbox, new_camera, activity_name=ex.activity_name)
    return ex

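# Worked example of the crop sizing above (added for illustration, not part of the original code):
# at the maximum augmentation rotation of pi/6 (30 degrees), c ~= 0.866 and s = 0.5. For a
# 200 x 100 tight box, the axis-aligned bound of the rotated box has side
#   max(0.866 * 200 + 0.5 * 100, 0.866 * 100 + 0.5 * 200) = max(223.2, 186.6) ~= 223,
# which is then expanded by padding_factor * shift_factor * scale_down_factor ~= 1.52, so the
# stored crop stays large enough for any rotation, shift and scale augmentation at training time.
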
def make_efficient_example(ex, root_muco, i_person):
    image_relpath = ex.image_path
    max_rotate = np.pi / 6
    padding_factor = 1 / 0.85
    scale_up_factor = 1 / 0.85
    scale_down_factor = 1 / 0.85
    shift_factor = 1.2
    base_dst_side = 256

    box_center = boxlib.center(ex.bbox)
    s = np.sin(max_rotate)
    c = np.cos(max_rotate)
    rot_bbox_size = (np.array([[c, s], [s, c]]) @ ex.bbox[2:, np.newaxis])[:, 0]
    side = np.max(rot_bbox_size)
    rot_bbox_size = np.array([side, side])
    rot_bbox = boxlib.box_around(box_center, rot_bbox_size)
    scale_factor = min(base_dst_side / np.max(ex.bbox[2:]) * scale_up_factor, 1)
    expansion_factor = padding_factor * shift_factor * scale_down_factor
    expanded_bbox = boxlib.expand(rot_bbox, expansion_factor)
    expanded_bbox = boxlib.intersect(expanded_bbox, boxlib.full_box([2048, 2048]))

    new_camera = ex.camera.copy()
    new_camera.intrinsic_matrix[:2, 2] -= expanded_bbox[:2]
    new_camera.scale_output(scale_factor)
    new_camera.undistort()
    dst_shape = improc.rounded_int_tuple(scale_factor * expanded_bbox[[3, 2]])

    new_im_path = f'{root_muco}_downscaled/{image_relpath[:-4]}_{i_person:01d}.jpg'
    if not util.is_file_newer(new_im_path, "2020-02-15T23:28:26"):
        im = improc.imread_jpeg(f'{root_muco}/{image_relpath}')
        new_im = cameralib.reproject_image(
            im, ex.camera, new_camera, dst_shape, antialias_factor=4)
        util.ensure_path_exists(new_im_path)
        imageio.imwrite(new_im_path, new_im, quality=95)

    new_bbox_topleft = cameralib.reproject_image_points(ex.bbox[:2], ex.camera, new_camera)
    new_bbox = np.concatenate([new_bbox_topleft, ex.bbox[2:] * scale_factor])

    if ex.mask is None:
        noext, ext = os.path.splitext(image_relpath[:-4])
        noext = noext.replace('unaugmented_set_001/', '')
        mask = improc.decode_mask(util.load_pickle(f'{root_muco}/masks/{noext}.pkl'))
    else:
        mask = ex.mask

    if mask is False:
        new_mask_encoded = None
    else:
        new_mask = cameralib.reproject_image(mask, ex.camera, new_camera, dst_shape)
        new_mask_encoded = improc.encode_mask(new_mask)

    return p3ds.Pose3DExample(
        os.path.relpath(new_im_path, paths.DATA_ROOT), ex.world_coords.astype(np.float32),
        new_bbox.astype(np.float32), new_camera, mask=new_mask_encoded,
        univ_coords=ex.univ_coords.astype(np.float32))

def main():
    verify_started_in_right_directory()
    # to avoid problems, we build a separate source tree, just for the buildbot
    src_path = os.path.join("..", "sumatrapdf_buildbot")
    ensure_path_exists(src_path)
    copy_secrets(src_path)
    os.chdir(src_path)
    get_cert_pwd()  # early exit if problems

    #build_version("6698", skip_release=True)
    #build_index_html()
    #build_sizes_json()
    #build_curr()

    buildbot_loop()

def download_pretrained_weights():
    import urllib.request
    import tarfile
    logging.info(f'Downloading ImageNet pretrained weights for {FLAGS.architecture}')
    filename = f'{FLAGS.architecture}_2017_04_14.tar.gz'
    target_path = f'{paths.DATA_ROOT}/pretrained/{FLAGS.architecture}_2017_04_14/{filename}'
    util.ensure_path_exists(target_path)
    urllib.request.urlretrieve(f'http://download.tensorflow.org/models/{filename}', target_path)
    with tarfile.open(target_path) as f:
        f.extractall(f'{paths.DATA_ROOT}/pretrained/{FLAGS.architecture}_2017_04_14')
    os.remove(target_path)

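# Note (added for illustration; the flag value is an assumption, not stated in the original code):
# the filename pattern matches the TF-Slim ImageNet checkpoints released on 2017-04-14, e.g. with
# FLAGS.architecture set to 'resnet_v2_50' the URL becomes
# http://download.tensorflow.org/models/resnet_v2_50_2017_04_14.tar.gz
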
def save_results(f):
    ordered_indices = np.argsort(f.image_path)
    util.ensure_path_exists(FLAGS.pred_path)
    logging.info(f'Saving predictions to {FLAGS.pred_path}')
    np.savez(
        FLAGS.pred_path,
        image_path=f.image_path[ordered_indices],
        coords3d_true=f.coords3d_true_orig_cam[ordered_indices],
        coords3d_pred=f.coords3d_pred_orig_cam[ordered_indices],
        coords3d_true_world=f.coords3d_true_world[ordered_indices],
        coords3d_pred_world=f.coords3d_pred_world[ordered_indices],
        activity_name=f.activity_name[ordered_indices],
        scene_name=f.scene_name[ordered_indices],
        joint_validity_mask=f.joint_validity_mask[ordered_indices],
    )

def make_efficient_example(ex, rect_id):
    """Make example by storing the image in a cropped and resized version for efficient loading"""
    # Determine which area we will need
    # For rotation, usual padding around box, scale (shrink) augmentation and shifting
    padding_factor = 1 / 0.85
    scale_up_factor = 1 / 0.85
    scale_down_factor = 1 / 0.85
    shift_factor = 1.1
    max_rotate = np.pi / 6
    rot_factor = np.sin(max_rotate) + np.cos(max_rotate)
    base_dst_side = 256

    scale_factor = min(base_dst_side / ex.bbox[3] * scale_up_factor, 1)
    hopeful_factor = 0.9
    expansion_factor = (
        rot_factor * padding_factor * shift_factor * scale_down_factor * hopeful_factor)
    expanded_bbox = boxlib.expand(boxlib.expand_to_square(ex.bbox), expansion_factor)
    imsize = improc.image_extents(ex.image_path)
    full_box = np.array([0, 0, imsize[0], imsize[1]])
    expanded_bbox = boxlib.intersect(expanded_bbox, full_box)

    old_camera = cameralib.Camera.create2D()
    new_camera = old_camera.copy()
    new_camera.shift_image(-expanded_bbox[:2])
    new_camera.scale_output(scale_factor)
    dst_shape = improc.rounded_int_tuple(scale_factor * expanded_bbox[[3, 2]])

    new_im_path = ex.image_path.replace('mpii', 'mpii_downscaled')
    without_ext, ext = os.path.splitext(new_im_path)
    new_im_path = f'{without_ext}_{rect_id:02d}{ext}'
    if not (util.is_file_newer(new_im_path, "2019-11-12T17:54:06")
            and improc.is_image_readable(new_im_path)):
        im = improc.imread_jpeg(ex.image_path)
        new_im = cameralib.reproject_image(im, old_camera, new_camera, dst_shape)
        util.ensure_path_exists(new_im_path)
        imageio.imwrite(new_im_path, new_im)

    new_bbox_topleft = cameralib.reproject_image_points(ex.bbox[:2], old_camera, new_camera)
    new_bbox = np.concatenate([new_bbox_topleft, ex.bbox[2:] * scale_factor])
    new_coords = cameralib.reproject_image_points(ex.coords, old_camera, new_camera)
    ex = Pose2DExample(os.path.relpath(new_im_path, paths.DATA_ROOT), new_coords, bbox=new_bbox)
    return ex

def save_results(f):
    default_path = f'results_{util.timestamp()}.npz'
    result_path = FLAGS.result_path if FLAGS.result_path else default_path
    if not os.path.isabs(result_path):
        result_path = os.path.join(FLAGS.logdir, result_path)

    ordered_indices = np.argsort(f.image_path)
    util.ensure_path_exists(result_path)
    logging.info(f'Saving results to {result_path}')
    np.savez(
        result_path,
        image_path=f.image_path[ordered_indices],
        coords3d_true=f.coords3d_true_orig_cam[ordered_indices],
        coords3d_pred=f.coords3d_pred_orig_cam[ordered_indices],
        coords3d_true_world=f.coords3d_true_world[ordered_indices],
        coords3d_pred_world=f.coords3d_pred_world[ordered_indices],
        activity_name=f.activity_name[ordered_indices],
        scene_name=f.scene_name[ordered_indices],
        joint_validity_mask=f.joint_validity_mask[ordered_indices],
    )

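# Illustrative read-back sketch (added example, not part of the original code): the results are
# stored as a plain numpy .npz archive, so they can be loaded and inspected with numpy alone.
# The path below is a placeholder.
# results = np.load('results_example.npz')
# print(results['image_path'].shape, results['coords3d_pred'].shape)
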
def main():
    global upload
    if len(args) != 0:
        usage()
    verify_started_in_right_directory()

    if build_prerelease:
        if svn_revision is None:
            run_cmd_throw("svn", "update")
            (out, err) = run_cmd_throw("svn", "info")
            ver = str(parse_svninfo_out(out))
        else:
            # allow to pass in an SVN revision, in case SVN itself isn't available
            ver = svn_revision
    else:
        ver = extract_sumatra_version(os.path.join("src", "Version.h"))
    log("Version: '%s'" % ver)

    filename_base = "SumatraPDF-%s" % ver
    if build_prerelease:
        filename_base = "SumatraPDF-prerelease-%s" % ver

    s3_dir = "sumatrapdf/rel"
    if build_prerelease:
        s3_dir = "sumatrapdf/prerel"
    if upload_tmp:
        upload = True
        s3_dir += "tmp"
    if upload:
        log("Will upload to s3 at %s" % s3_dir)

    s3_prefix = "%s/%s" % (s3_dir, filename_base)
    s3_exe = s3_prefix + ".exe"
    s3_installer = s3_prefix + "-install.exe"
    s3_pdb_zip = s3_prefix + ".pdb.zip"
    s3_exe_zip = s3_prefix + ".zip"

    s3_files = [s3_exe, s3_installer, s3_pdb_zip]
    if not build_prerelease:
        s3_files.append(s3_exe_zip)

    cert_pwd = None
    cert_path = os.path.join("scripts", "cert.pfx")
    if upload:
        map(ensure_s3_doesnt_exist, s3_files)
        if not os.path.exists(os.path.join("scripts", "cert.pfx")):
            print("scripts/cert.pfx missing")
            sys.exit(1)
        import awscreds
        cert_pwd = awscreds.certpwd

    obj_dir = "obj-rel"
    if target_platform == "X64":
        obj_dir += "64"
    if not testing and not build_test_installer and not build_rel_installer:
        shutil.rmtree(obj_dir, ignore_errors=True)
        shutil.rmtree(os.path.join("mupdf", "generated"), ignore_errors=True)

    config = "CFG=rel"
    if build_test_installer and not build_prerelease:
        obj_dir = "obj-dbg"
        config = "CFG=dbg"
    extcflags = ""
    if build_prerelease:
        extcflags = "EXTCFLAGS=-DSVN_PRE_RELEASE_VER=%s" % ver
    platform = "PLATFORM=%s" % (target_platform or "X86")

    run_cmd_throw("nmake", "-f", "makefile.msvc", config, extcflags, platform, "all_sumatrapdf")
    exe = os.path.join(obj_dir, "SumatraPDF.exe")
    if upload:
        sign(exe, cert_pwd)
        sign(os.path.join(obj_dir, "uninstall.exe"), cert_pwd)

    build_installer_data(obj_dir)
    run_cmd_throw("nmake", "-f", "makefile.msvc", "Installer", config, platform, extcflags)

    if build_test_installer or build_rel_installer:
        sys.exit(0)

    installer = os.path.join(obj_dir, "Installer.exe")
    if upload:
        sign(installer, cert_pwd)

    pdb_zip = os.path.join(obj_dir, "%s.pdb.zip" % filename_base)
    zip_file(pdb_zip, os.path.join(obj_dir, "libmupdf.pdb"))
    zip_file(pdb_zip, os.path.join(obj_dir, "Installer.pdb"), append=True)
    zip_file(pdb_zip, os.path.join(obj_dir, "SumatraPDF-no-MuPDF.pdb"), append=True)
    zip_file(pdb_zip, os.path.join(obj_dir, "SumatraPDF.pdb"), append=True)

    builds_dir = os.path.join("builds", ver)
    if os.path.exists(builds_dir):
        shutil.rmtree(builds_dir)
    os.makedirs(builds_dir)

    copy_to_dst_dir(exe, builds_dir)
    copy_to_dst_dir(installer, builds_dir)
    copy_to_dst_dir(pdb_zip, builds_dir)

    if not build_prerelease:
        exe_zip = os.path.join(obj_dir, "%s.zip" % filename_base)
        zip_file(exe_zip, exe, "SumatraPDF.exe", compress=True)
        ensure_path_exists(exe_zip)
        copy_to_dst_dir(exe_zip, builds_dir)

    if not upload:
        return

    if build_prerelease:
        jstxt = 'var sumLatestVer = %s;\n' % ver
        jstxt += 'var sumBuiltOn = "%s";\n' % time.strftime("%Y-%m-%d")
        jstxt += 'var sumLatestName = "%s";\n' % s3_exe.split("/")[-1]
        jstxt += 'var sumLatestExe = "http://kjkpub.s3.amazonaws.com/%s";\n' % s3_exe
        jstxt += 'var sumLatestPdb = "http://kjkpub.s3.amazonaws.com/%s";\n' % s3_pdb_zip
        jstxt += 'var sumLatestInstaller = "http://kjkpub.s3.amazonaws.com/%s";\n' % s3_installer

    s3UploadFilePublic(installer, s3_installer)
    s3UploadFilePublic(pdb_zip, s3_pdb_zip)
    s3UploadFilePublic(exe, s3_exe)

    if build_prerelease:
        s3UploadDataPublic(jstxt, "sumatrapdf/sumatralatest.js")
        txt = "%s\n" % ver
        s3UploadDataPublic(txt, "sumatrapdf/sumpdf-prerelease-latest.txt")
        deleteOldPreReleaseBuilds()
    else:
        s3UploadFilePublic(exe_zip, s3_exe_zip)

def make_efficient_example(ex, new_image_path, further_expansion_factor=1,
                           image_adjustments_3dhp=False, min_time=None):
    """Make example by storing the image in a cropped and resized version for efficient loading"""
    is3d = hasattr(ex, 'world_coords')
    w, h = (improc.image_extents(util.ensure_absolute_path(ex.image_path))
            if isinstance(ex.image_path, str)
            else (ex.image_path.shape[1], ex.image_path.shape[0]))
    full_box = boxlib.full_box(imsize=[w, h])

    if is3d:
        old_camera = ex.camera
        new_camera = ex.camera.copy()
        new_camera.turn_towards(target_image_point=boxlib.center(ex.bbox))
        new_camera.undistort()
    else:
        old_camera = cameralib.Camera.create2D()
        new_camera = old_camera.copy()

    reprojected_box = reproject_box(ex.bbox, old_camera, new_camera, method='side_midpoints')
    reprojected_full_box = reproject_box(full_box, old_camera, new_camera, method='corners')
    expanded_bbox = (
        get_expanded_crop_box(reprojected_box, reprojected_full_box, further_expansion_factor)
        if further_expansion_factor > 0 else reprojected_box)
    scale_factor = min(1.2, 256 / np.max(reprojected_box[2:]) * 1.5)
    new_camera.shift_image(-expanded_bbox[:2])
    new_camera.scale_output(scale_factor)

    reprojected_box = reproject_box(ex.bbox, old_camera, new_camera, method='side_midpoints')
    dst_shape = improc.rounded_int_tuple(scale_factor * expanded_bbox[[3, 2]])

    new_image_abspath = util.ensure_absolute_path(new_image_path)
    if not (util.is_file_newer(new_image_abspath, min_time)
            and improc.is_image_readable(new_image_abspath)):
        im = improc.imread_jpeg(ex.image_path) if isinstance(ex.image_path, str) else ex.image_path
        #host_im, cuda_im = get_memory(im.shape)
        im = np.power((im.astype(np.float32) / 255), 2.2)
        #cuda_im.upload(host_im)
        new_im = cameralib.reproject_image(
            im, old_camera, new_camera, dst_shape, antialias_factor=2, interp=cv2.INTER_CUBIC)
        new_im = np.clip(new_im, 0, 1)

        if image_adjustments_3dhp:
            # enhance the 3dhp images to reduce the green tint and increase brightness
            new_im = (new_im ** (1 / 2.2 * 0.67) * 255).astype(np.uint8)
            new_im = improc.white_balance(new_im, 110, 145)
        else:
            new_im = (new_im ** (1 / 2.2) * 255).astype(np.uint8)
        util.ensure_path_exists(new_image_abspath)
        imageio.imwrite(new_image_abspath, new_im, quality=95)
        assert improc.is_image_readable(new_image_abspath)

    new_ex = copy.deepcopy(ex)
    new_ex.bbox = reprojected_box
    new_ex.image_path = new_image_path
    if is3d:
        new_ex.camera = new_camera
    else:
        new_ex.coords = cameralib.reproject_image_points(new_ex.coords, old_camera, new_camera)

    if hasattr(ex, 'mask') and ex.mask is not None:
        if isinstance(ex.mask, str):
            mask = improc.imread_jpeg(util.ensure_absolute_path(ex.mask))
            host_mask, cuda_mask = get_memory(mask.shape)
            np.divide(mask.astype(np.float32), 255, out=host_mask)
            cuda_mask.upload(host_mask)
            mask_reproj = cameralib.reproject_image(
                cuda_mask, ex.camera, new_camera, dst_shape, antialias_factor=2).download()
            mask_reproj = 255 * (mask_reproj[..., 0] > 32 / 255).astype(np.uint8)
            new_ex.mask = get_connected_component_with_highest_iou(mask_reproj, reprojected_box)
        else:
            new_ex.mask = ex.mask
    return new_ex

def make_efficient_example(ex):
    image_relpath = ex.image_path
    max_rotate = np.pi / 6
    padding_factor = 1 / 0.85
    scale_up_factor = 1 / 0.85
    scale_down_factor = 1 / 0.85
    shift_factor = 1.2
    base_dst_side = 256

    box_center = boxlib.center(ex.bbox)
    s, c = np.sin(max_rotate), np.cos(max_rotate)
    w, h = ex.bbox[2:]
    rot_bbox_side = max(c * w + s * h, c * h + s * w)
    rot_bbox = boxlib.box_around(box_center, rot_bbox_side)
    scale_factor = min(base_dst_side / np.max(ex.bbox[2:]) * scale_up_factor, 1)
    expansion_factor = padding_factor * shift_factor * scale_down_factor
    expanded_bbox = boxlib.expand(rot_bbox, expansion_factor)
    expanded_bbox = boxlib.intersect(expanded_bbox, np.array([0, 0, 2048, 2048]))

    new_camera = ex.camera.copy()
    new_camera.intrinsic_matrix[:2, 2] -= expanded_bbox[:2]
    new_camera.scale_output(scale_factor)
    new_camera.undistort()
    dst_shape = improc.rounded_int_tuple(scale_factor * expanded_bbox[[3, 2]])

    new_im_relpath = ex.image_path.replace('3dhp', '3dhp_downscaled')
    new_im_path = os.path.join(paths.DATA_ROOT, new_im_relpath)
    if not (util.is_file_newer(new_im_path, "2019-11-14T23:32:07")
            and improc.is_image_readable(new_im_path)):
        im = improc.imread_jpeg(f'{paths.DATA_ROOT}/{image_relpath}')
        new_im = cameralib.reproject_image(im, ex.camera, new_camera, dst_shape)
        util.ensure_path_exists(new_im_path)
        imageio.imwrite(new_im_path, new_im)

    new_bbox_topleft = cameralib.reproject_image_points(ex.bbox[:2], ex.camera, new_camera)
    new_bbox = np.concatenate([new_bbox_topleft, ex.bbox[2:] * scale_factor])

    mask_rle_relpath = new_im_path.replace('Images', 'FGmaskImages').replace('.jpg', '.pkl')
    mask_rle_path = os.path.join(paths.DATA_ROOT, mask_rle_relpath)
    if util.is_file_newer(mask_rle_path, "2020-03-11T20:46:46"):
        mask_runlength = util.load_pickle(mask_rle_path)
    else:
        mask_relpath = ex.image_path.replace('Images', 'FGmaskImages').replace('.jpg', '.png')
        mask = imageio.imread(os.path.join(paths.DATA_ROOT, mask_relpath))
        mask_reproj = cameralib.reproject_image(mask, ex.camera, new_camera, dst_shape)
        mask_runlength = get_mask_with_highest_iou(mask_reproj, new_bbox)
        util.dump_pickle(mask_runlength, mask_rle_path)

    return p3ds.Pose3DExample(
        new_im_relpath, ex.world_coords, new_bbox, new_camera, mask=mask_runlength,
        univ_coords=ex.univ_coords)

def main():
    global upload
    if len(args) != 0:
        usage()
    verify_started_in_right_directory()

    if build_prerelease:
        if svn_revision is None:
            run_cmd_throw("svn", "update")
            (out, err) = run_cmd_throw("svn", "info")
            ver = str(parse_svninfo_out(out))
        else:
            # allow to pass in an SVN revision, in case SVN itself isn't available
            ver = svn_revision
    else:
        ver = extract_sumatra_version(os.path.join("src", "Version.h"))
    log("Version: '%s'" % ver)

    filename_base = "SumatraPDF-%s" % ver
    if build_prerelease:
        filename_base = "SumatraPDF-prerelease-%s" % ver

    s3_dir = "sumatrapdf/rel"
    if build_prerelease:
        s3_dir = "sumatrapdf/prerel"
    if upload_tmp:
        upload = True
        s3_dir += "tmp"
    if upload:
        log("Will upload to s3 at %s" % s3_dir)

    s3_prefix = "%s/%s" % (s3_dir, filename_base)
    s3_exe = s3_prefix + ".exe"
    s3_installer = s3_prefix + "-install.exe"
    s3_pdb_zip = s3_prefix + ".pdb.zip"
    s3_exe_zip = s3_prefix + ".zip"

    s3_files = [s3_exe, s3_installer, s3_pdb_zip]
    if not build_prerelease:
        s3_files.append(s3_exe_zip)

    cert_pwd = None
    cert_path = os.path.join("scripts", "cert.pfx")
    if upload:
        map(ensure_s3_doesnt_exist, s3_files)
        if not os.path.exists(os.path.join("scripts", "cert.pfx")):
            print("scripts/cert.pfx missing")
            sys.exit(1)
        import awscreds
        cert_pwd = awscreds.certpwd

    obj_dir = "obj-rel"
    if target_platform == "X64":
        obj_dir += "64"
    if not testing and not build_test_installer and not build_rel_installer:
        shutil.rmtree(obj_dir, ignore_errors=True)

    config = "CFG=rel"
    if build_test_installer and not build_prerelease:
        obj_dir = "obj-dbg"
        config = "CFG=dbg"
    extcflags = ""
    if build_prerelease:
        extcflags = "EXTCFLAGS=-DSVN_PRE_RELEASE_VER=%s" % ver
    platform = "PLATFORM=%s" % (target_platform or "X86")

    run_cmd_throw("nmake", "-f", "makefile.msvc", config, extcflags, platform, "all_sumatrapdf")
    exe = os.path.join(obj_dir, "SumatraPDF.exe")
    if upload:
        sign(exe, cert_pwd)
        sign(os.path.join(obj_dir, "uninstall.exe"), cert_pwd)

    build_installer_data(obj_dir)
    run_cmd_throw("nmake", "-f", "makefile.msvc", "Installer", config, platform, extcflags)

    if build_test_installer or build_rel_installer:
        sys.exit(0)

    installer = os.path.join(obj_dir, "Installer.exe")
    if upload:
        sign(installer, cert_pwd)

    pdb_zip = os.path.join(obj_dir, "%s.pdb.zip" % filename_base)
    zip_file(pdb_zip, os.path.join(obj_dir, "libmupdf.pdb"))
    zip_file(pdb_zip, os.path.join(obj_dir, "Installer.pdb"), append=True)
    zip_file(pdb_zip, os.path.join(obj_dir, "SumatraPDF-no-MuPDF.pdb"), append=True)
    zip_file(pdb_zip, os.path.join(obj_dir, "SumatraPDF.pdb"), append=True)

    builds_dir = os.path.join("builds", ver)
    if os.path.exists(builds_dir):
        shutil.rmtree(builds_dir)
    os.makedirs(builds_dir)

    copy_to_dst_dir(exe, builds_dir)
    copy_to_dst_dir(installer, builds_dir)
    copy_to_dst_dir(pdb_zip, builds_dir)

    if not build_prerelease:
        exe_zip = os.path.join(obj_dir, "%s.zip" % filename_base)
        zip_file(exe_zip, exe, "SumatraPDF.exe", compress=True)
        ensure_path_exists(exe_zip)
        copy_to_dst_dir(exe_zip, builds_dir)

    if not upload:
        return

    if build_prerelease:
        jstxt = 'var sumLatestVer = %s;\n' % ver
        jstxt += 'var sumBuiltOn = "%s";\n' % time.strftime("%Y-%m-%d")
        jstxt += 'var sumLatestName = "%s";\n' % s3_exe.split("/")[-1]
        jstxt += 'var sumLatestExe = "http://kjkpub.s3.amazonaws.com/%s";\n' % s3_exe
        jstxt += 'var sumLatestPdb = "http://kjkpub.s3.amazonaws.com/%s";\n' % s3_pdb_zip
        jstxt += 'var sumLatestInstaller = "http://kjkpub.s3.amazonaws.com/%s";\n' % s3_installer

    s3UploadFilePublic(installer, s3_installer)
    s3UploadFilePublic(pdb_zip, s3_pdb_zip)
    s3UploadFilePublic(exe, s3_exe)

    if build_prerelease:
        s3UploadDataPublic(jstxt, "sumatrapdf/sumatralatest.js")
        txt = "%s\n" % ver
        s3UploadDataPublic(txt, "sumatrapdf/sumpdf-prerelease-latest.txt")
        deleteOldPreReleaseBuilds()
    else:
        s3UploadFilePublic(exe_zip, s3_exe_zip)

def verify_build_ok(build_dir):
    for f in build_files:
        p = os.path.join(build_dir, f)
        ensure_path_exists(p)
        pdb = os.path.splitext(p)[0] + ".pdb"
        ensure_path_exists(pdb)