Пример #1
0
def image_align(img_file,
                delta,
                padding_shift,
                target_size,
                crop_box,
                work_dir,
                is_reference,
                correct_shift_ref_crop=None):
    """Warp one image by its shift (plus the global padding offset for
    non-reference images), optionally refine the shift against a
    reference crop, and write the registered image to `work_dir`.

    On success a small 1024-pixel JPEG center crop is saved as well.
    Returns True when the registered image was written successfully.
    """
    basename = os.path.basename(img_file)
    registered_name = "reg-" + basename
    registered_path = os.path.join(work_dir, registered_name)

    # The reference keeps only its own delta; every other image also
    # absorbs the padding offset, reversed into (x, y) order.
    if is_reference:
        shift_total = delta
    else:
        shift_total = delta + padding_shift[::-1]
    debug_log("Shift to apply (with padding) to", basename + ":", shift_total)

    translation = get_translation_matrix(shift_total)[:2]
    source = cvt.file_to_cv2(img_file)
    pre_aligned = warp_image(source, translation, target_size)

    if correct_shift_ref_crop is None:
        aligned = pre_aligned
    else:
        # Refinement pass: compare a grayscale crop of this image against
        # the reference crop and apply the residual translation.
        corr_bbox, ref_crop = correct_shift_ref_crop
        own_crop = cvt.color_to_gray(
            crop_to_bounding_box(pre_aligned, corr_bbox))
        residual = compute_displacements_direct([ref_crop, own_crop], 0)[1]
        refine_matrix = get_translation_matrix(residual)[:2]
        aligned = warp_image(pre_aligned, refine_matrix, target_size)

    # Cropping to crop_box is currently disabled; the full frame is saved.
    final_image = aligned
    success = cvt.cv2_to_file(final_image, registered_path)

    if success:
        # Small JPEG center crop for quick visual inspection.
        small_bbox = get_center_crop_bbox(final_image, 1024)
        small = crop_to_bounding_box(final_image, small_bbox)
        small_name = "crop-" + registered_name.replace(".png", ".jpg")
        cvt.cv2_to_file(small, os.path.join(work_dir, small_name))

    result = "done" if success else "failed"
    debug_log("Alignment of", img_file, "into", registered_path, result)

    return success
Пример #2
0
def get_registered_averaged(data_dict, crop_size):
    """Accumulate thresholded Scharr edge responses from every
    registered crop into one contrast-stretched grayscale image, then
    zero everything outside the region common to all crops.

    Returns (averaged_image, its_bounding_box, shift_of_common_box).
    """
    target_size = (crop_size, crop_size)
    accumulator = np.zeros(target_size, dtype=float)
    chunk_bbox = get_bounding_box([crop_size, crop_size])

    transformed_boxes = []
    for index, crop in enumerate(data_dict['crops']):
        shift = data_dict['deltas'][index]
        debug_log("getting contrib to avg from", index, "shift", shift)
        matrix = get_translation_matrix(shift)[:2]

        # Keep only the strongest 10% of edge responses per crop.
        edges = scharr(warp_image(crop, matrix, target_size))
        edges[edges < np.percentile(edges, 90)] = 0
        accumulator += edges

        transformed_boxes.append(transform_points(chunk_bbox, matrix))

    stretched = cvt.simple_grayscale_stretch(accumulator)
    common_box = adjust_bounding_box(
        chunk_bbox, intersect_bounding_boxes(transformed_boxes))
    # Zero out pixels outside the shared area instead of cropping, so the
    # result keeps the full target size.
    common_only = set_zero_out_of_bounding_box(stretched, common_box)

    return (common_only,
            get_bounding_box(common_only.shape),
            np.array(common_box[0][::-1]))
Пример #3
0
def get_registered_opaques(data_dict, crop_size):
    """Intersect per-crop "opaque" masks (pixels with weak Scharr edge
    response) across all registered crops, cropped to the common region.

    Returns (opaque_mask_common, bbox_of_full_mask, shift_of_common_box).
    """
    target_size = (crop_size, crop_size)
    opaque_mask = np.ones(target_size, dtype=bool)
    chunk_bbox = get_bounding_box([crop_size, crop_size])

    transformed_boxes = []
    for index, crop in enumerate(data_dict['crops']):
        shift = data_dict['deltas'][index]
        debug_log("getting contrib to opaque from", index, "shift", shift)
        matrix = get_translation_matrix(shift)[:2]

        # A pixel stays opaque only if its edge response is weak (< 13)
        # in every single crop.
        response = scharr(warp_image(crop, matrix, target_size))
        opaque_mask &= (response < 13)

        transformed_boxes.append(transform_points(chunk_bbox, matrix))

    common_box = adjust_bounding_box(
        chunk_bbox, intersect_bounding_boxes(transformed_boxes))
    mask_common = crop_to_bounding_box(opaque_mask, common_box)

    # NOTE(review): the returned bbox covers the *uncropped* mask shape,
    # unlike get_registered_averaged which measures the returned image —
    # confirm this asymmetry is intended.
    return (mask_common,
            get_bounding_box(opaque_mask.shape),
            np.array(common_box[0][::-1]))
Пример #4
0
def translate_crop(image, shift, crop_box, target_size):
    """Translate `image` by `shift` onto a `target_size` canvas and
    return the region delimited by `crop_box`."""
    matrix = get_translation_matrix(shift)[:2]
    shifted = warp_image(image, matrix, target_size)
    return crop_to_bounding_box(shifted, crop_box)
Пример #5
0
def get_aggregated_crops(data_dict, crop_size, intersect=True, use_borders=True,
                         borders_param=(100, 300), ref_index=0):
    """Translate every stored crop into the reference frame, optionally
    reduce each to its borders, and aggregate the stack by product
    (intersection) or mean, cropped to the area shared by all crops.

    Returns (aggregated_uint8, its_bounding_box, shift_of_common_box).
    """
    # Target canvas matches the reference crop, in (width, height) order.
    target_size = data_dict['crops'][ref_index].shape[:2][::-1]
    chunk_bbox = get_bounding_box(2 * [crop_size])

    translated = []
    for shift, chunk in it.izip(data_dict['deltas'], data_dict['crops']):
        matrix = get_translation_matrix(shift)[:2]
        warped = warp_image(chunk, matrix, target_size)
        bbox = get_adjusted_bounding_box(chunk_bbox, chunk_bbox, matrix)
        translated.append((warped, bbox))

    if use_borders:
        to_add = [(reg.get_borders(chunk, *borders_param), bbox)
                  for chunk, bbox in translated]
    else:
        to_add = translated

    stack = np.stack(crop for crop, _ in to_add)
    common_box = intersect_bounding_boxes(target_size,
                                          [bbox for _, bbox in to_add])

    # Product of /255-normalized crops approximates intersection; the
    # mean blends all contributions instead.
    agg = (stack / 255).prod(axis=0) if intersect else stack.mean(axis=0)
    dyn_range = agg.max() - agg.min()
    # Stretch to [0, 255] only when there is a meaningful dynamic range.
    agg_stretched = (255 * (agg - agg.min()) / dyn_range
                     if dyn_range > 1 else agg)

    agg_common = crop_to_bounding_box(agg_stretched, common_box)

    return (agg_common.astype(np.uint8),
            get_bounding_box(agg_common.shape),
            np.array(common_box[0][::-1]))
Пример #6
0
def patch_body_corner_inmem(body, body_mat, corner_rot, corner_mat):
    """Blend a separately-captured corner image into the top-left region
    of the body image: rotate the corner into the body's frame, fine-
    register it via phase correlation, then mix the two with smooth
    weights over the blend area.

    Mutates and returns `body` with the blended corner written in place.
    NOTE(review): `body_mat` and `corner_mat` are also modified in place
    (their translation and perspective terms are zeroed below).
    """
    print "Body dim:", body.shape
    print "Corner dim:", corner_rot.shape

    corner_shape = corner_rot.shape[:2]
    c_h, c_w = corner_shape
    # Zero translation and perspective components of both homographies,
    # keeping only their rotation/linear parts.
    body_mat[2, :2] = 0
    body_mat[:2, 2] = 0
    corner_mat[:2, 2] = 0
    corner_mat[2, :2] = 0

    # Rotation taking the corner frame into the body frame.
    corner_body_rotation = np.dot(body_mat, np.linalg.inv(corner_mat))
    corner = cv2.warpPerspective(corner_rot, corner_body_rotation,
                                 tuple(corner_rot.shape[:2][::-1]))

    # Measure the black borders in the body's corner region to size the
    # overlap chunk used for registration (at most the corner's size).
    g_body_chunk = cvt.color_to_gray(body[:c_h, :c_w, :])
    bl_left = find_left_border(g_body_chunk, c_w)
    bl_top = find_top_border(g_body_chunk, c_h)
    blacks = np.r_[bl_top, bl_left]
    chunk_dim = np.minimum(2 * blacks, corner_shape)

    g_body_chunk = g_body_chunk[:chunk_dim[0], :chunk_dim[1]]
    g_corner_chunk = cvt.color_to_gray(corner[:chunk_dim[0], :chunk_dim[1], :])

    # Fine alignment: the phase-correlation peak gives the residual shift
    # of the corner chunk relative to the body chunk.
    corr_peak, peak_val = dsl.get_phasecorr_peak(g_body_chunk, g_corner_chunk)
    mat = dsl.get_translation_matrix(corr_peak)[:2]
    corner_trans = dsl.warp_image(corner[:chunk_dim[0], :chunk_dim[1], :], mat,
                                  tuple(g_body_chunk.shape[::-1]))

    # Blend over an area 1.5x the black borders (clamped to corner size).
    blend_area = np.minimum((1.5 * blacks).astype(int), corner_shape)

    body_blend_area = body[:blend_area[0], :blend_area[1], :]
    corner_blend_area = corner_trans[:blend_area[0], :blend_area[1], :]

    # Per-pixel blending weights from lp2d.CornerDirichlet — presumably a
    # smooth boundary-value falloff over the blend area; body weight is
    # the complement of the corner weight.
    weight_gen = lp2d.CornerDirichlet(blend_area[1], blend_area[0], bl_left,
                                      bl_top)
    weight_gen.set_boundaries([[1, 1, 0], [1, 1, 0]])
    solu, residuals = weight_gen.solve()
    corner_weight = weight_gen.get_solution()
    body_weight = 1 - corner_weight
    blend = np.uint8(corner_blend_area * corner_weight[:, :, np.newaxis] +
                     body_blend_area * body_weight[:, :, np.newaxis])
    body[:blend_area[0], :blend_area[1], :] = blend

    return body
Пример #7
0
def image_align(img_file, delta, target_size, work_dir):
    """Translate the image at `img_file` by `delta` onto a `target_size`
    canvas and save it under a "reg-" prefix in `work_dir`.

    On success a small 1024-pixel JPEG center crop is saved as well.
    Returns True when the aligned image was written successfully.
    """
    basename = os.path.basename(img_file)
    registered_name = "reg-" + basename
    registered_path = os.path.join(work_dir, registered_name)

    matrix = get_translation_matrix(delta)[:2]
    aligned = warp_image(cvt.file_to_cv2(img_file), matrix, target_size)
    success = cvt.cv2_to_file(aligned, registered_path)

    if success:
        # Small JPEG center crop for quick visual inspection.
        small_bbox = get_center_crop_bbox(aligned, 1024)
        small = crop_to_bounding_box(aligned, small_bbox)
        small_name = "crop-" + registered_name.replace(".png", ".jpg")
        cvt.cv2_to_file(small, os.path.join(work_dir, small_name))

    result = "done" if success else "failed"
    debug_log("Alignment of", img_file, "into", registered_path, result)

    return success
Пример #8
0
def align_images(data, delta_xp, work_dir):
    """Align every image listed in `data` by its accumulated shift
    (plus `delta_xp` for non-'xpl' kinds) and crop to the common box,
    running `image_align` jobs in a 4-process pool.

    Fixes over the previous revision: removed the unused
    `mp.Manager()`/queue pair (it spawned an idle manager process),
    the dead `ref_index`/`ref_align_index` locals, and the large block
    of commented-out worker-dispatch code.

    Returns True only if every alignment job succeeded.
    """
    # Transform all bounding boxes to compute the target canvas.
    all_bboxes = []
    for kind in data.keys():
        for i, target_bbox in enumerate(data[kind]['bboxes']):
            base_shift = data[kind]['deltas'][i]
            # Non-xpl images carry the extra cross-polarization offset.
            shift = base_shift if (kind == 'xpl') else (base_shift + delta_xp)

            warp_matrix = get_translation_matrix(shift)
            translated_bbox = transform_points(target_bbox, warp_matrix)
            all_bboxes.append(translated_bbox)
            debug_log("Image", data[kind]['targets'][i], "to be displaced by:",
                      shift, "from bb", target_bbox, "to bb:", translated_bbox)

    # Intermediate target size encompasses all bounding boxes; padding
    # moves the joined box's origin onto (0, 0).
    full_bbox = join_bounding_boxes(all_bboxes)
    padding_shift = -full_bbox[0]
    target_size = get_bounding_box_size(full_bbox)
    common_box = intersect_bounding_boxes(all_bboxes) + padding_shift
    debug_log("Common crop box for alignment is", common_box, "\nPadding is",
              padding_shift, "\nFull box size is", target_size)

    # Then, align all image files and crop to the common box in parallel.
    pool = mp.Pool(processes=4)
    jobs = []
    for kind in data.keys():
        base_shifts = data[kind]['deltas']
        if kind == 'xpl':
            shifts = base_shifts
        else:
            shifts = [(delta + delta_xp) for delta in base_shifts]

        for target, delta in zip(data[kind]['targets'], shifts):
            jobs.append(
                pool.apply_async(image_align,
                                 (target, delta, padding_shift, target_size,
                                  common_box, work_dir, False, None)))
    pool.close()
    pool.join()
    success = all(job.get() for job in jobs)

    return success
Пример #9
0
def main():
    """Register a sequence of images by chaining pairwise translations.

    Measures phase-correlation shifts between consecutive images,
    re-references all shifts to the largest image, warps each image
    accordingly, and finally crops every aligned image to the region
    common to all of them.
    """
    opt = process_command_line()
    print opt

    ensure_dir(opt.work_dir)

    targets = opt.files
    first_img = opt.files[0]
    basename_ref = os.path.basename(first_img)

    img1 = cvt.file_to_cv2(first_img)
    # Grayscale center chunk used for phase correlation.
    img1_crop = cvt.color_to_gray(crop_center_chunk(img1, opt.crop_size))
    img1_bb = get_bounding_box(img1.shape[:2])

    all_bboxes = [
        img1_bb,
    ]
    relative_displacements = []

    # Get pairwise relative displacements between consecutive images
    for img_file in targets[1:]:

        basename = os.path.basename(img_file)
        img2 = cvt.file_to_cv2(img_file)
        img2_crop = cvt.color_to_gray(crop_center_chunk(img2, opt.crop_size))

        debug_log("Computing translation of", basename, "relative to",
                  basename_ref)

        peak_loc, peak_val = get_phasecorr_peak(img1_crop, img2_crop, 100)
        debug_log("Translation is", peak_loc, "value:", peak_val)

        relative_displacements.append(peak_loc)
        all_bboxes.append(get_bounding_box(img2.shape[:2]))

        # The current image becomes the reference for the next pair.
        img1, img1_crop, basename_ref = img2, img2_crop, basename

    del img1, img2, img1_crop, img2_crop

    # Determine largest bounding box; its image is the global reference
    bboxes_area = np.array(
        [get_bounding_box_area(bbox) for bbox in all_bboxes])
    largest_area = np.argmax(bboxes_area)
    largest_bbox = all_bboxes[largest_area]
    target_size = get_bounding_box_size(largest_bbox)
    reference = targets[largest_area]
    basename_ref = os.path.basename(reference)

    print "disps:", relative_displacements
    debug_log("Largest area image is", reference, "({})".format(largest_area))

    # Propagate displacements along the pairwise chain
    pre_aligned_files = []
    for i, img_file in enumerate(targets):

        # Displacements are applied relative to largest bbox
        if i == largest_area:
            pre_aligned_files.append(reference)
            continue

        basename = os.path.basename(img_file)
        img = cvt.file_to_cv2(img_file)

        # displacement[i] = pos[i+1] - pos[i]; sum the chain from the
        # reference to this image, flipping sign for earlier images.
        if largest_area < i:
            disp_chain = range(largest_area, i)
            direction = 1
        else:
            disp_chain = range(i, largest_area)
            direction = -1

        total_displacement = direction * sum(relative_displacements[j]
                                             for j in disp_chain)
        debug_log("Displacement from", reference, "to", img_file, "is",
                  total_displacement)
        print "dir", direction, "; chain", disp_chain
        warp_matrix = get_translation_matrix(total_displacement)[:2]

        img_aligned = align_image(img, warp_matrix, target_size)
        aligned_file = os.path.join(opt.work_dir, "pre-" + basename)
        success = cvt.cv2_to_file(img_aligned, aligned_file)
        if success:
            pre_aligned_files.append(aligned_file)

        result = "done" if success else "failed"
        debug_log("Alignment of", img_file, "into", aligned_file, result)

    # Crop every pre-aligned image to the region shared by all of them.
    common_box = intersect_bounding_boxes(target_size, all_bboxes)

    for fi_aligned in pre_aligned_files:
        debug_log("Cropping", fi_aligned, newline=False)
        aligned = cvt.file_to_cv2(fi_aligned)
        cropped = crop_to_bounding_box(aligned, common_box)

        # The reference keeps its original basename; others swap the
        # "pre-" prefix for "reg-".
        cf_name = (("reg-" + basename_ref) if fi_aligned == reference else
                   os.path.basename(fi_aligned).replace("pre-", "reg-"))
        cropped_file = os.path.join(opt.work_dir, cf_name)
        success = cvt.cv2_to_file(cropped, cropped_file)

        if success:
            # Small JPEG center crop for quick visual inspection.
            center_crop = crop_center_chunk(cropped, 1024)
            center_crop_name = "crop-" + cf_name.replace(".png", ".jpg")
            center_crop_file = os.path.join(opt.work_dir, center_crop_name)
            cvt.cv2_to_file(center_crop, center_crop_file)

            # Drop the intermediate uncropped file unless asked to keep it.
            if not opt.keep_uncropped and fi_aligned != reference:
                os.remove(fi_aligned)

        result = "done" if success else "failed"
        print(result)
Пример #10
0
def main():
    """Register a set of images against an explicit reference image.

    Each image's shift relative to the reference is measured by phase
    correlation on a fixed crop of the reference frame; the image is
    warped accordingly, and all results are cropped to the region
    common to every aligned image.
    """
    opt = process_command_line()
    print opt

    ensure_dir(opt.work_dir)

    band = opt.crop_size

    basename_ref = os.path.basename(opt.reference)
    img1 = cvt.file_to_cv2(opt.reference)
    img1_bb = get_bounding_box(img1.shape)
    img1_br = img1_bb[1]
    # Central band-sized box of the reference, reused for every target.
    img1_crop_bb = [(img1_br - band) / 2, (img1_br + band) / 2]
    target_size = get_bounding_box_size(img1_bb)

    img1_crop = cvt.color_to_gray(crop_to_bounding_box(img1, img1_crop_bb))
    """
    Using crop chunks centered at each images' center impedes resolving
    the image-to-image displacement
    """
    #img1_crop = cvt.color_to_gray(crop_center_chunk(img1, band))

    all_bboxes = [
        img1_bb.tolist(),
    ]
    pre_aligned_files = [
        opt.reference,
    ]

    # trainImages: every input file except the reference itself.
    targets = sorted(set(opt.files) - set([
        opt.reference,
    ]))
    for img_file in targets:

        basename = os.path.basename(img_file)
        img2 = cvt.file_to_cv2(img_file)
        # Crop the *same* reference-frame box out of each target.
        img2_crop = cvt.color_to_gray(crop_to_bounding_box(img2, img1_crop_bb))
        #img2_crop = cvt.color_to_gray(crop_center_chunk(img2, band))

        debug_log("Computing translation of", basename, "relative to",
                  basename_ref)

        peak_loc, peak_val = get_phasecorr_peak(img1_crop, img2_crop, 100)
        debug_log("Translation is", peak_loc, "value:", peak_val)
        warp_matrix = get_translation_matrix(peak_loc)[:2]

        img2_aligned = warp_image(img2, warp_matrix, target_size)
        img2_adj_bb = get_adjusted_bounding_box(img1_bb,
                                                get_bounding_box(img2.shape),
                                                warp_matrix)
        aligned_file = os.path.join(opt.work_dir, "pre-" + basename)
        cvt.cv2_to_file(img2_aligned, aligned_file)
        all_bboxes.append(img2_adj_bb)
        pre_aligned_files.append(aligned_file)

        debug_log("Alignment of", img_file, "done")

    # Crop every pre-aligned image to the region shared by all of them.
    common_box = intersect_bounding_boxes(all_bboxes)

    for fi_aligned in pre_aligned_files:
        debug_log("Cropping", fi_aligned, newline=False)
        aligned = cvt.file_to_cv2(fi_aligned)
        cropped = crop_to_bounding_box(aligned, common_box)

        # The reference keeps its original basename; others swap the
        # "pre-" prefix for "reg-".
        cf_name = (("reg-" + basename_ref) if fi_aligned == opt.reference else
                   os.path.basename(fi_aligned).replace("pre-", "reg-"))
        cropped_file = os.path.join(opt.work_dir, cf_name)
        success = cvt.cv2_to_file(cropped, cropped_file)

        if success:
            # Small JPEG center crop for quick visual inspection.
            center_crop = crop_center_chunk(cropped, 1024)
            center_crop_name = "crop-" + cf_name.replace(".png", ".jpg")
            center_crop_file = os.path.join(opt.work_dir, center_crop_name)
            cvt.cv2_to_file(center_crop, center_crop_file)

            # Drop the intermediate uncropped file unless asked to keep it.
            if not opt.keep_uncropped and fi_aligned != opt.reference:
                os.remove(fi_aligned)

        result = "done" if success else "failed"
        print(result)
Пример #11
0
def align_images(data_dict, work_dir, first_image_is_absolute=False,
                 max_threads=4, make_center_chunk=True, make_jpeg=False):
    """Warp every image in `data_dict` by its shift and crop to the
    common region, dispatching the work to `image_align_worker` (in a
    process pool when `max_threads` > 1, otherwise inline).

    When `first_image_is_absolute`, the first image's frame is taken as
    the global frame (no padding); the zero-shift reference image is
    symlinked instead of re-encoded. Returns True only when every
    worker succeeded.
    """
    # Transform all bounding boxes to compute the target
    all_bboxes = []
    for i, target_bbox in enumerate(data_dict['bboxes']):

        shift = data_dict['deltas'][i]
        warp_matrix = get_translation_matrix(shift)
        translated_bbox = transform_points(target_bbox, warp_matrix)
        all_bboxes.append(translated_bbox)
        #debug_log("Image", data_dict['targets'][i], "to be displaced by:",
        #            shift, "from bb", target_bbox, "to bb:", translated_bbox)

    if first_image_is_absolute:
        # Global frame is the first image's own frame: no padding needed.
        full_bbox = all_bboxes[0]
        padding_shift = np.r_[0, 0]
        common_box = full_bbox
    else:
        #Intermediate target size encompasses all bounding boxes
        full_bbox = join_bounding_boxes(all_bboxes)
        padding_shift = np.maximum([0, 0], -full_bbox[0])
        common_box = intersect_bounding_boxes(all_bboxes) + padding_shift

    target_size = get_bounding_box_size(full_bbox)

    # Finally, align image files and crop to common box
    job_args = []
    job_kwargs = {'make_jpeg': make_jpeg}
    for i, (srcimg, delta) in enumerate(zip(data_dict['targets'],
                                            data_dict['deltas'])):
        # Explicit per-source output paths win over the default naming.
        if 'tgtpaths' in data_dict:
            target_file = data_dict['tgtpaths'][srcimg]
            target_file_dir = pth.dirname(target_file)
            if not pth.exists(target_file_dir):
                os.makedirs(target_file_dir, 0755)
        else:
            target_file = get_target_filename(srcimg, work_dir)

        if first_image_is_absolute and (delta == np.r_[0, 0]).all():
            # No copy of the reference image is created, but a symlink instead
            os.symlink(srcimg, target_file)

            if make_center_chunk:
                # Small center chunk comes from the reference chunk
                ref_chunk = data_dict['crops'][i]
                gen_center_small_chunk(ref_chunk, target_file, 1024)
        else:
            args = (srcimg, target_file, delta, padding_shift, target_size,
                    common_box, make_center_chunk)
            job_args.append(args)

    # Run the queued jobs, in parallel when allowed.
    if max_threads > 1:
        pool = mp.Pool(processes=max_threads)
        jobs = [ pool.apply_async(image_align_worker, args, job_kwargs) for args
                in job_args ]
        pool.close()
        pool.join()
        result_iter = ( job.get() for job in jobs )
    else:
        result_iter = ( image_align_worker(*args, **job_kwargs) for args in job_args )
    success = all(result_iter)

    return success