def get_registered_averaged(data_dict, crop_size):
    # Generate an average edge-intensity grayscale image from the registered
    # cross-polarization (XPL) crops: each crop is warped by its displacement,
    # reduced to its strongest Scharr edge responses, and accumulated
    target_size = (crop_size, crop_size)
    avg = np.zeros(target_size, dtype=float)
    chunk_bbox = get_bounding_box(2 * [crop_size])
    converted_bb = []

    for i, xpl_crop in enumerate(data_dict['crops']):
        shift = data_dict['deltas'][i]
        debug_log("getting contrib to avg from", i, "shift", shift)
        warp_matrix = get_translation_matrix(shift)[:2]
        borders = scharr(warp_image(xpl_crop, warp_matrix, target_size))
        borders[borders < np.percentile(borders, 90)] = 0
        avg += borders
        converted_bb.append(transform_points(chunk_bbox, warp_matrix))

    average_stretched = cvt.simple_grayscale_stretch(avg)
    common_box = adjust_bounding_box(chunk_bbox,
                                     intersect_bounding_boxes(converted_bb))
    #averaged_common = crop_to_bounding_box(average_stretched, common_box)
    averaged_common = set_zero_out_of_bounding_box(average_stretched, common_box)
    averaged_bbox = get_bounding_box(averaged_common.shape)
    averaged_shift = np.array(common_box[0][::-1])

    return averaged_common, averaged_bbox, averaged_shift

def get_registered_opaques(data_dict, crop_size):
    # Build a boolean mask of pixels that remain opaque (weak Scharr response)
    # across every registered cross-polarization crop
    target_size = (crop_size, crop_size)
    opaque = np.ones(target_size, dtype=bool)
    chunk_bbox = get_bounding_box(2 * [crop_size])
    converted_bb = []

    for i, xpl_crop in enumerate(data_dict['crops']):
        shift = data_dict['deltas'][i]
        debug_log("getting contrib to opaque from", i, "shift", shift)
        warp_matrix = get_translation_matrix(shift)[:2]
        grays = scharr(warp_image(xpl_crop, warp_matrix, target_size))
        opaques = (grays < 13)
        opaque &= opaques
        converted_bb.append(transform_points(chunk_bbox, warp_matrix))

    common_box = adjust_bounding_box(chunk_bbox,
                                     intersect_bounding_boxes(converted_bb))
    averaged_common = crop_to_bounding_box(opaque, common_box)
    #averaged_common = set_zero_out_of_bounding_box(average_stretched, common_box)
    # Note: the returned bounding box describes the full-size mask, while the
    # returned image is cropped to the common box
    averaged_bbox = get_bounding_box(opaque.shape)
    averaged_shift = np.array(common_box[0][::-1])

    return averaged_common, averaged_bbox, averaged_shift

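# --- Hedged sketch (assumption, not this project's actual helpers) ----------
# get_translation_matrix() and warp_image(), used by the two functions above,
# are defined elsewhere in the project. The underscore-prefixed versions below
# only illustrate what they plausibly do (a pure-translation affine warp via
# OpenCV); the (row, col) ordering of `shift` and `target_size` is an
# assumption.
import cv2
import numpy as np

def _sketch_get_translation_matrix(shift):
    # 3x3 homogeneous translation matrix; callers take [:2] to obtain the
    # 2x3 affine matrix expected by cv2.warpAffine
    dy, dx = shift
    return np.array([[1.0, 0.0, dx],
                     [0.0, 1.0, dy],
                     [0.0, 0.0, 1.0]])

def _sketch_warp_image(image, warp_matrix, target_size):
    # Warp `image` onto a canvas of target_size (rows, cols);
    # cv2.warpAffine takes dsize as (width, height), hence the reversal
    return cv2.warpAffine(image, warp_matrix, tuple(target_size[::-1]))
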
def image_list_grabber(kind, img_file, index, crop_box):
    # Get working area and original bounding box
    img = cvt.file_to_cv2(img_file)
    img_crop = cvt.color_to_gray(crop_to_bounding_box(img, crop_box))
    return (kind, index, img_crop, get_bounding_box(img.shape))

def crop_center_chunk(image, crop_size=1024):
    center = get_bounding_box(image.shape)[1] / 2
    center_crop_delta = np.r_[crop_size, crop_size] / 2
    corner_start = center - center_crop_delta
    corner_end = center + center_crop_delta
    return crop_to_bounding_box(image, [corner_start, corner_end])

def image_list_grabber(img_file, index, crop_box, use_borders=False):
    # Variant without the 'kind' tag; since it shares a name with the earlier
    # definition, this later one overrides it at module level
    img = cvt.file_to_cv2(img_file)
    img_crop = crop_to_bounding_box(img, crop_box)
    img_crop_tgt = (img_crop if not use_borders else get_borders(img_crop))
    return (index, img_crop_tgt, get_bounding_box(img.shape))

def get_center_crop_bbox(image, crop_size):
    center = get_bounding_box(image.shape)[1] / 2
    center_crop_delta = np.r_[crop_size, crop_size] / 2
    corner_start = center - center_crop_delta
    corner_end = center + center_crop_delta
    crop_box = [corner_start, corner_end]
    return crop_box

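# Note: crop_center_chunk() above computes this same centered box inline; it
# could equivalently be written as
#
#     def crop_center_chunk(image, crop_size=1024):
#         return crop_to_bounding_box(image, get_center_crop_bbox(image, crop_size))
#
# keeping a single source of truth for the center-crop geometry.
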
def gather_images_data(file_list, crop_size, max_threads, use_borders=False,
                       reference_crop_box_corner=None,
                       assume_zero_displacements=False):

    # Pick the first file as the non-moving reference
    ref_file = file_list[0]
    img_ref = cvt.file_to_cv2(ref_file)
    img_ref_bb = get_bounding_box(img_ref.shape)

    # If not given, determine reference crop box corner from the reference file
    img_ref_br = (img_ref_bb[1] if reference_crop_box_corner is None
                  else reference_crop_box_corner)
    crop_box = [(img_ref_br - crop_size)/2, (img_ref_br + crop_size)/2]
    img_crop = crop_to_bounding_box(img_ref, crop_box)
    img_ref_crop = (img_crop if not use_borders else get_borders(img_crop))

    # Read the file lists with few threads as each image is potentially huge
    job_args = ( (img_file, i, crop_box, use_borders)
                 for i, img_file in enumerate(file_list[1:]) )
    if max_threads > 1:
        pool = mp.Pool(processes=max_threads)
        jobs = [pool.apply_async(image_list_grabber, args) for args in job_args]
        pool.close()
        pool.join()
        grab_iter = ( job.get() for job in jobs )
    else:
        grab_iter = ( image_list_grabber(*args) for args in job_args )

    data_list = sorted(grab_iter)
    data_dict = {'crops': [img_ref_crop,] + [ elem[1] for elem in data_list ],
                 'bboxes': [img_ref_bb,] + [ elem[2] for elem in data_list ],
                 'targets': file_list}

    if not assume_zero_displacements:
        # Compute displacements w.r.t reference via the image crops just obtained
        job_args = ( (i, data_dict) for i in range(1, len(data_dict['crops'])) )
        if max_threads > 1:
            pool = mp.Pool(processes=max_threads)
            jobs = [ pool.apply_async(displacement_compute_worker, args)
                     for args in job_args ]
            pool.close()
            pool.join()
            disp_iter = ( job.get() for job in jobs )
        else:
            disp_iter = ( displacement_compute_worker(*args) for args in job_args )

        deltas_idx = dict(disp_iter)
        deltas = [delta for i, delta in sorted(deltas_idx.items())]
        deltas_ref = add_pairwise_displacements(deltas, 0)
    else:
        deltas_ref = [ np.r_[0, 0] ]*len(data_dict['crops'])

    data_dict['deltas'] = deltas_ref

    basename_ref = pth.basename(ref_file)
    debug_log("Displacements relative to", basename_ref, deltas_ref)

    return data_dict

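# --- Hedged sketch (assumption, not this project's actual helpers) ----------
# displacement_compute_worker() and add_pairwise_displacements(), used by
# gather_images_data() above, are defined elsewhere. Given that main() below
# registers images with get_phasecorr_peak(), the worker plausibly measures
# the shift between consecutive crops and the accumulator turns those pairwise
# shifts into shifts relative to the reference crop. Underscore names avoid
# shadowing the real helpers; only the reference_index == 0 case is sketched.
import numpy as np

def _sketch_displacement_compute_worker(index, data_dict):
    # Shift of crop `index` relative to the preceding crop, taken at the
    # phase-correlation peak (the 100 mirrors the precision argument in main)
    prev_crop = data_dict['crops'][index - 1]
    this_crop = data_dict['crops'][index]
    peak_loc, peak_val = get_phasecorr_peak(prev_crop, this_crop, 100)
    return (index, peak_loc)

def _sketch_add_pairwise_displacements(deltas, reference_index=0):
    # Prepend a zero shift for the reference crop and accumulate the
    # consecutive displacements so every entry is relative to the reference
    return [np.r_[0, 0]] + [d for d in np.cumsum(deltas, axis=0)]
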
def reference_grabber(kind, img_file, crop_size):
    # Get working area (center crop) and bounding box
    img_ref = cvt.file_to_cv2(img_file)
    img_ref_bb = get_bounding_box(img_ref.shape)
    crop_box = get_center_crop_bbox(img_ref, crop_size)
    img_ref_crop = cvt.color_to_gray(crop_to_bounding_box(img_ref, crop_box))
    return (kind, { 'crop-ref': img_ref_crop,
                    'bbox-ref': img_ref_bb,
                    'cropbox': crop_box })

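# --- Hedged usage sketch (assumption) ---------------------------------------
# reference_grabber() and the kind-tagged image_list_grabber() above appear to
# be meant for launching together in a multiprocessing pool and dispatching
# the results by their 'kind' tag. The names used below ('xpl'/'ppl', the file
# variables, crop_size) are hypothetical:
#
#     pool = mp.Pool(processes=max_threads)
#     jobs = [pool.apply_async(reference_grabber, ('xpl', xpl_ref_file, crop_size)),
#             pool.apply_async(reference_grabber, ('ppl', ppl_ref_file, crop_size))]
#     pool.close()
#     pool.join()
#     refs = dict(job.get() for job in jobs)
#     # refs['xpl']['crop-ref'], refs['xpl']['cropbox'], ...
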
def main():
    opt = process_command_line()
    print(opt)

    ensure_dir(opt.work_dir)
    band = opt.crop_size

    basename_ref = os.path.basename(opt.reference)
    img1 = cvt.file_to_cv2(opt.reference)
    img1_bb = get_bounding_box(img1.shape)
    img1_br = img1_bb[1]
    img1_crop_bb = [(img1_br - band) / 2, (img1_br + band) / 2]
    target_size = get_bounding_box_size(img1_bb)
    img1_crop = cvt.color_to_gray(crop_to_bounding_box(img1, img1_crop_bb))
    # Using crop chunks centered at each image's center impedes resolving the
    # image-to-image displacement
    #img1_crop = cvt.color_to_gray(crop_center_chunk(img1, band))

    all_bboxes = [ img1_bb.tolist(), ]
    pre_aligned_files = [ opt.reference, ]

    # trainImages
    targets = sorted(set(opt.files) - set([ opt.reference, ]))
    for img_file in targets:
        basename = os.path.basename(img_file)
        img2 = cvt.file_to_cv2(img_file)
        img2_crop = cvt.color_to_gray(crop_to_bounding_box(img2, img1_crop_bb))
        #img2_crop = cvt.color_to_gray(crop_center_chunk(img2, band))

        debug_log("Computing translation of", basename,
                  "relative to", basename_ref)
        peak_loc, peak_val = get_phasecorr_peak(img1_crop, img2_crop, 100)
        debug_log("Translation is", peak_loc, "value:", peak_val)

        warp_matrix = get_translation_matrix(peak_loc)[:2]
        img2_aligned = warp_image(img2, warp_matrix, target_size)
        img2_adj_bb = get_adjusted_bounding_box(img1_bb,
                                                get_bounding_box(img2.shape),
                                                warp_matrix)

        aligned_file = os.path.join(opt.work_dir, "pre-" + basename)
        cvt.cv2_to_file(img2_aligned, aligned_file)
        all_bboxes.append(img2_adj_bb)
        pre_aligned_files.append(aligned_file)
        debug_log("Alignment of", img_file, "done")

    common_box = intersect_bounding_boxes(all_bboxes)

    for fi_aligned in pre_aligned_files:
        debug_log("Cropping", fi_aligned, newline=False)
        aligned = cvt.file_to_cv2(fi_aligned)
        cropped = crop_to_bounding_box(aligned, common_box)
        cf_name = (("reg-" + basename_ref) if fi_aligned == opt.reference
                   else os.path.basename(fi_aligned).replace("pre-", "reg-"))
        cropped_file = os.path.join(opt.work_dir, cf_name)
        success = cvt.cv2_to_file(cropped, cropped_file)

        if success:
            center_crop = crop_center_chunk(cropped, 1024)
            center_crop_name = "crop-" + cf_name.replace(".png", ".jpg")
            center_crop_file = os.path.join(opt.work_dir, center_crop_name)
            cvt.cv2_to_file(center_crop, center_crop_file)

            if not opt.keep_uncropped and fi_aligned != opt.reference:
                os.remove(fi_aligned)

        result = "done" if success else "failed"
        print(result)

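# --- Hedged sketch (assumption, not this project's actual helper) -----------
# get_phasecorr_peak() is defined elsewhere; a plausible implementation on top
# of OpenCV's phase correlation is sketched below. The third argument passed
# in main() (100) may be an upsampling/precision factor and is accepted but
# unused here; the sign and axis convention of the returned shift would need
# to be checked against the project's get_translation_matrix().
import cv2
import numpy as np

def _sketch_get_phasecorr_peak(img_ref, img_tgt, precision=100):
    # cv2.phaseCorrelate wants same-sized single-channel float images and
    # returns a sub-pixel (dx, dy) shift plus the peak response
    (dx, dy), response = cv2.phaseCorrelate(np.float32(img_ref),
                                            np.float32(img_tgt))
    return np.r_[dy, dx], response
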