Example #1
0
def get_registered_opaques(data_dict, crop_size):
    """Build a combined opacity mask from all registered crops.

    Every crop in ``data_dict['crops']`` is warped by its recorded shift
    from ``data_dict['deltas']``, edge-filtered with Scharr, and
    thresholded; the per-crop masks are AND-ed so only pixels that are
    opaque in every crop survive.

    Returns a tuple ``(mask, mask_bbox, shift_of_common_box_origin)``.
    """
    target_size = (crop_size, crop_size)
    chunk_bbox = get_bounding_box(2 * [crop_size])

    combined_mask = np.ones(target_size, dtype=bool)
    warped_boxes = []
    for idx, crop in enumerate(data_dict['crops']):
        delta = data_dict['deltas'][idx]
        debug_log("getting contrib to opaque from", idx, "shift", delta)
        matrix = get_translation_matrix(delta)[:2]

        edges = scharr(warp_image(crop, matrix, target_size))
        # Low Scharr response (< 13) is taken as "opaque" for this crop
        combined_mask &= (edges < 13)

        warped_boxes.append(transform_points(chunk_bbox, matrix))

    common_box = adjust_bounding_box(
        chunk_bbox, intersect_bounding_boxes(warped_boxes))
    mask_common = crop_to_bounding_box(combined_mask, common_box)

    # NOTE(review): the bbox reflects the un-cropped mask here, whereas the
    # averaged variant derives it from the processed result — confirm intended.
    mask_bbox = get_bounding_box(combined_mask.shape)
    mask_shift = np.array(common_box[0][::-1])

    return mask_common, mask_bbox, mask_shift
Example #2
0
def get_registered_averaged(data_dict, crop_size):
    """Accumulate strong Scharr edges over all registered crops.

    Every crop in ``data_dict['crops']`` is warped into a common frame by
    its recorded shift, its Scharr edge response is kept only at or above
    the 90th percentile, and the surviving responses are summed.  The sum
    is contrast-stretched and zeroed outside the intersection of all the
    warped bounding boxes.

    Returns a tuple ``(image, image_bbox, shift_of_common_box_origin)``.
    """
    target_size = (crop_size, crop_size)
    chunk_bbox = get_bounding_box(2 * [crop_size])

    accumulator = np.zeros(target_size, dtype=float)
    warped_boxes = []
    for idx, crop in enumerate(data_dict['crops']):
        delta = data_dict['deltas'][idx]
        debug_log("getting contrib to avg from", idx, "shift", delta)
        matrix = get_translation_matrix(delta)[:2]

        edges = scharr(warp_image(crop, matrix, target_size))
        # Keep only the strongest decile of edge responses
        edges[edges < np.percentile(edges, 90)] = 0
        accumulator += edges

        warped_boxes.append(transform_points(chunk_bbox, matrix))

    stretched = cvt.simple_grayscale_stretch(accumulator)
    common_box = adjust_bounding_box(
        chunk_bbox, intersect_bounding_boxes(warped_boxes))
    averaged_common = set_zero_out_of_bounding_box(stretched, common_box)

    averaged_bbox = get_bounding_box(averaged_common.shape)
    averaged_shift = np.array(common_box[0][::-1])

    return averaged_common, averaged_bbox, averaged_shift
Example #3
0
def get_adjusted_bounding_box(ref_bb, target_bb, warp_matrix):
    """Warp ``target_bb`` by ``warp_matrix`` and adjust it against ``ref_bb``."""
    return adjust_bounding_box(ref_bb,
                               transform_points(target_bb, warp_matrix))
Example #4
0
def align_images(data, delta_xp, work_dir):
    """Align all images of all kinds in *data* into one common frame.

    Bounding boxes are first warped by each image's recorded delta (plus
    the extra inter-series offset *delta_xp* for non-'xpl' kinds) to size
    the target frame, then every image is aligned and cropped to the
    common intersection box by a worker pool.

    Returns True when every alignment job succeeded.
    """
    # Transform all bounding boxes to compute the target
    all_bboxes = []
    for kind in data.keys():
        for i, target_bbox in enumerate(data[kind]['bboxes']):

            base_shift = data[kind]['deltas'][i]
            # Non-'xpl' series carry the extra cross-series offset delta_xp
            shift = base_shift if (kind == 'xpl') else (base_shift + delta_xp)

            warp_matrix = get_translation_matrix(shift)
            translated_bbox = transform_points(target_bbox, warp_matrix)
            all_bboxes.append(translated_bbox)
            debug_log("Image", data[kind]['targets'][i], "to be displaced by:",
                      shift, "from bb", target_bbox, "to bb:", translated_bbox)

    # Intermediate target size encompasses all bounding boxes
    full_bbox = join_bounding_boxes(all_bboxes)
    padding_shift = -full_bbox[0]
    target_size = get_bounding_box_size(full_bbox)
    common_box = intersect_bounding_boxes(all_bboxes) + padding_shift
    debug_log("Common crop box for alignment is", common_box, "\nPadding is",
              padding_shift, "\nFull box size is", target_size)

    # Then, align all image files and crop them to the common box in parallel.
    # (The previous revision created an mp.Manager and a result queue that
    # were only referenced from commented-out worker code; both are gone.)
    pool = mp.Pool(processes=4)
    jobs = []
    for kind in data.keys():

        base_shifts = data[kind]['deltas']
        shifts = (base_shifts if kind == 'xpl'
                  else [(delta + delta_xp) for delta in base_shifts])

        for target, delta in zip(data[kind]['targets'], shifts):
            jobs.append(
                pool.apply_async(image_align,
                                 (target, delta, padding_shift, target_size,
                                  common_box, work_dir, False, None)))
    pool.close()
    pool.join()
    success = all(job.get() for job in jobs)

    return success
def align_images(data_dict, work_dir, first_image_is_absolute=False,
                 max_threads=4, make_center_chunk=True, make_jpeg=False):
    """Warp every target image by its recorded delta into a common frame.

    When *first_image_is_absolute* is true, the first image's bounding box
    defines the frame, no padding is applied, and the zero-delta reference
    image is symlinked instead of re-written.  Alignment jobs run in a
    worker pool when *max_threads* > 1, otherwise inline.

    Returns True when every alignment job succeeded.
    """
    # Transform all bounding boxes to compute the target frame
    all_bboxes = []
    for i, target_bbox in enumerate(data_dict['bboxes']):

        shift = data_dict['deltas'][i]
        warp_matrix = get_translation_matrix(shift)
        translated_bbox = transform_points(target_bbox, warp_matrix)
        all_bboxes.append(translated_bbox)

    if first_image_is_absolute:
        # First image's frame is authoritative: no padding, crop box = its bbox
        full_bbox = all_bboxes[0]
        padding_shift = np.r_[0, 0]
        common_box = full_bbox
    else:
        # Intermediate target size encompasses all bounding boxes
        full_bbox = join_bounding_boxes(all_bboxes)
        padding_shift = np.maximum([0, 0], -full_bbox[0])
        common_box = intersect_bounding_boxes(all_bboxes) + padding_shift

    target_size = get_bounding_box_size(full_bbox)

    # Finally, align image files and crop to common box
    job_args = []
    job_kwargs = {'make_jpeg': make_jpeg}
    for i, (srcimg, delta) in enumerate(zip(data_dict['targets'],
                                            data_dict['deltas'])):
        if 'tgtpaths' in data_dict:
            target_file = data_dict['tgtpaths'][srcimg]
            target_file_dir = pth.dirname(target_file)
            if not pth.exists(target_file_dir):
                # 0o755 (was 0755): modern octal literal, valid on
                # Python 2.6+ and required on Python 3
                os.makedirs(target_file_dir, 0o755)
        else:
            target_file = get_target_filename(srcimg, work_dir)

        if first_image_is_absolute and (delta == np.r_[0, 0]).all():
            # No copy of the reference image is created, but a symlink instead
            os.symlink(srcimg, target_file)

            if make_center_chunk:
                # Small center chunk comes from the reference chunk
                ref_chunk = data_dict['crops'][i]
                gen_center_small_chunk(ref_chunk, target_file, 1024)
        else:
            job_args.append((srcimg, target_file, delta, padding_shift,
                             target_size, common_box, make_center_chunk))

    if max_threads > 1:
        pool = mp.Pool(processes=max_threads)
        jobs = [pool.apply_async(image_align_worker, args, job_kwargs)
                for args in job_args]
        pool.close()
        pool.join()
        result_iter = (job.get() for job in jobs)
    else:
        result_iter = (image_align_worker(*args, **job_kwargs)
                       for args in job_args)
    success = all(result_iter)

    return success