Example #1
def main():

    opt = process_command_line()
    print(opt)

    ensure_dir(opt.work_dir)
    basenames = [os.path.basename(fi) for fi in opt.files]

    # Initiate SIFT detector
    sift = cv2.xfeatures2d.SIFT_create(
        contrastThreshold=opt.contrast_threshold, sigma=opt.sigma)

    # queryImage
    img1_subg = cvt.color_to_gray(cvt.file_to_cv2(opt.files[0]))
    target_size = img1_subg.shape[::-1]

    # trainImage
    img2 = cvt.file_to_cv2(opt.files[1])
    img2_subg = cvt.color_to_gray(img2)
    """
    debug_render_bands(img1_subg,
                       os.path.join(opt.work_dir, "banded-" + basenames[0]),
                       strip_width=2000)
    debug_render_bands(img2_subg,
                       os.path.join(opt.work_dir, "banded-" + basenames[1]),
                       strip_width=2000)
    """

    img1_bands = get_image_bands(img1_subg, opt.strip_width)
    img2_bands = get_image_bands(img2_subg, opt.strip_width)

    # find the keypoints and descriptors
    debug_log("Gather features of images")
    #kp1, des1 = get_features_on_bands(img1_subg, img1_bands, sift, basenames[0])
    kp1, des1 = get_features_on_bands(img1_subg, img1_bands, sift)
    kp2, des2 = get_features_on_bands(img2_subg, img2_bands, sift)
    """ Debug keypoint shifting in bands
    file_out = "/dev/shm/{}-banded.jpg".format(basenames[0])
    draw_keypoints(img1_subg, kp1, 80, (0, 255, 0), file_out)
    """

    ref_package = (kp1, des1)
    target_package = (kp2, des2)

    warp_matrix = match_get_transf(ref_package, target_package,
                                   opt.min_matches)
    print("Warp matrix:")
    print(warp_matrix)
    success = (warp_matrix is not None)

    if success:
        img2_aligned = align_image(img2, warp_matrix, target_size)
        aligned_file = os.path.join(opt.work_dir, "reg-" + basenames[1])
        cvt.cv2_to_file(img2_aligned, aligned_file)

    result = "done" if success else "failed"
    debug_log("Alignment of", opt.files[1], result)
def get_rgb_average(images_file_list):

    avg = cvt.file_to_cv2(os.path.expanduser(
        images_file_list[0])).astype(float)
    for img_file in images_file_list[1:]:
        avg += cvt.file_to_cv2(os.path.expanduser(img_file))
    avg /= len(images_file_list)

    return avg.astype(np.uint8)
Example #3
def main():

    opt = process_command_line()
    print(opt)

    ensure_dir(opt.work_dir)
    basenames = [os.path.basename(fi) for fi in opt.files]

    # Initiate SIFT detector
    sift = cv2.xfeatures2d.SIFT_create(
        contrastThreshold=opt.contrast_threshold, sigma=opt.sigma)

    # queryImage
    #img1 = cvt.image_load_resize(opt.files[0], opt.reduction)
    img1 = cvt.file_to_cv2(opt.files[0])
    img1_subg = cvt.image_resize(cvt.color_to_gray(img1), opt.reduction)

    # trainImage
    #img2 = cvt.image_load_resize(opt.files[1], opt.reduction)
    img2 = cvt.file_to_cv2(opt.files[1])
    img2_subg = cvt.image_resize(cvt.color_to_gray(img2), opt.reduction)

    # find the keypoints and descriptors
    debug_log("Gather features of images")
    kp1, des1 = sift.detectAndCompute(img1_subg, None)
    kp2, des2 = sift.detectAndCompute(img2_subg, None)

    ref_package = (kp1, des1)
    target_package = (kp2, des2)
    """ Direct
    img2_aligned = match_and_align(ref_package, target_package,
                                   opt.min_matches)
    success = (img2_aligned is not None)
    if success:
        cvt.cv2_to_file(img1, os.path.join(opt.work_dir, basenames[0]))
        cvt.cv2_to_file(img2, os.path.join(opt.work_dir, basenames[1]))
        aligned_file = os.path.join(opt.work_dir, "reg-" + basenames[1])
        cvt.cv2_to_file(img2_aligned, aligned_file)
    """
    warp_matrix = match_get_transf(ref_package, target_package,
                                   opt.min_matches)
    print("Warp matrix:")
    print(warp_matrix)
    success = (warp_matrix is not None)

    if success:
        matrix_scaling = get_size_scaling_matrix(opt.reduction)
        scaled_wm = matrix_scaling * warp_matrix
        #scaled_wm = np.dot(matrix_scaling, warp_matrix)
        img2_aligned = align_image(img2, scaled_wm, img1.shape[:2][::-1])

        aligned_file = os.path.join(opt.work_dir, "reg-" + basenames[1])
        cvt.cv2_to_file(img2_aligned, aligned_file)

    result = "done" if success else "failed"
    debug_log("Alignment of", opt.files[1], result)
Example #4
def opaque_builder(kind, file_list, threshold=12):

    img_max = cvt.file_to_cv2(file_list[0]).max(axis=2)
    img_opaque = (img_max < threshold)

    for img_file in file_list:
        img_max = cvt.file_to_cv2(img_file).max(axis=2)
        img_opaque &= (img_max < threshold)

    return (kind, img_opaque)
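A small usage illustration for opaque_builder above (file names are hypothetical): it returns the kind tag together with a boolean mask that is True only where every listed image stays below the threshold in all channels.

kind, mask = opaque_builder("ppl", ["tile_00.png", "tile_01.png"], threshold=12)
print(kind, mask.shape, mask.sum())   # count of pixels dark in every image
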
Example #5
def main():

    opt = process_command_line()
    print(opt)

    avg = cvt.file_to_cv2(os.path.expanduser(opt.files[0])).astype(float)
    for img_file in opt.files[1:]:
        avg += cvt.file_to_cv2(os.path.expanduser(img_file))
    avg /= len(opt.files)

    success = cvt.cv2_to_file(avg.astype(np.uint8), opt.output)
    result = "done" if success else "failed"
    debug_log("Average image construction into", opt.output, result)
Example #6
def apply_displacements(target_dir, angle, displacements, work_dirs, kind,
                        do_register, blur, files_in):

    file_out = os.path.join(target_dir,
                            "full-{}.{}".format(angle, img_data_fmt))
    if not pth.isfile(file_out):
        kind_dir = ensure_dir(pth.join(work_dirs['temp'], kind))
        my_work_dirs = setup_directories(pth.join(kind_dir, str(angle)))

        if blur is not None:
            new_dir = blankfield_guard(files_in, kind, my_work_dirs['in'],
                                       blur)
        else:
            new_dir = pth.dirname(files_in[0])

        coords_fi = pth.join(new_dir, "tiles.txt")
        TileConfigurator().generate(displacements, coords_fi)
        success, img, _ = fiji_grid_fuse(my_work_dirs,
                                         coords_fi,
                                         file_out,
                                         do_crop=False)
        shutil.rmtree(my_work_dirs['in'])
    else:
        debug_log("Ensemble", file_out, "already exists.")
        img = None

    cropped_fpath = os.path.join(target_dir,
                                 "center-crop-{}.jpg".format(angle))
    if not pth.isfile(cropped_fpath):
        load_img = img if img is not None else file_to_cv2(file_out)
        cropped_img = crop_center_chunk(load_img, 1024)
        cv2_to_file(cropped_img, cropped_fpath)
    else:
        debug_log("Center-cropped chunk", cropped_fpath, "already exists.")

    reduced_fpath = os.path.join(target_dir, "full-{}.small.jpg".format(angle))
    if not pth.isfile(reduced_fpath):
        load_img = img if img is not None else file_to_cv2(file_out)
        reduced_img = image_resize(load_img, 30)
        cv2_to_file(reduced_img, reduced_fpath)
    else:
        debug_log("Reduced size image", reduced_fpath, "already exists.")

    if not do_register:
        pyramid_archive = pth.join(target_dir, "pyramid-{}.tar".format(angle))
        if not pth.isfile(pyramid_archive):
            make_pyramid(img if img is not None else file_out, pyramid_archive,
                         "{}-{}.dzi".format(kind, angle))
        else:
            debug_log("Pyramid archive", pyramid_archive, "already exists.")

    return file_out
def image_list_grabber(kind, img_file, index, crop_box):

    # Get working area and original bounding box
    img = cvt.file_to_cv2(img_file)
    img_crop = cvt.color_to_gray(crop_to_bounding_box(img, crop_box))

    return (kind, index, img_crop, get_bounding_box(img.shape))
def image_list_grabber(img_file, index, crop_box, use_borders=False):

    img = cvt.file_to_cv2(img_file)

    img_crop = crop_to_bounding_box(img, crop_box)
    img_crop_tgt = (img_crop if not use_borders else
                    get_borders(img_crop))

    return (index, img_crop_tgt, get_bounding_box(img.shape))
def main():

    image_file = sys.argv[1]
    bfield_file = sys.argv[2]

    image = file_to_cv2(image_file)
    bfield = file_to_cv2(bfield_file)

    bfield_dark = is_image_dark(bfield)
    print("Is bfield dark: {}".format(bfield_dark))

    corrected = (blankfield_linear_correct(image, bfield)
                 if bfield_dark else blankfield_dark_correct(image, bfield))

    img_out = os.path.abspath(
        os.path.splitext(os.path.basename(image_file))[0] + "-bfield.jpg")
    cv2_to_file(corrected, img_out, 99)
    print("corrected image written to: {}".format(img_out))
Example #10
def main():

    args = process_command_line()

    in_image = cvt.file_to_cv2(args['file-in'])
    border_h = args['border-horizontal']
    border_v = args['border-vertical']
    out_image = in_image[border_h:-border_h, border_v:-border_v, :]
    cvt.cv2_to_file(out_image, args['file-out'])
def main():

    opt = process_command_line()
    print(opt)

    targets = sorted(set(opt.files) - set([
        opt.reference,
    ]))

    image0 = cvt.file_to_cv2(opt.reference)

    for image_fi in targets:
        basename = os.path.basename(image_fi)
        debug_log("Matching", os.path.basename(opt.reference), "with",
                  basename)
        image = cvt.file_to_cv2(image_fi)
        output_file = os.path.join(opt.work_dir, "pre-" + basename)

        register_image_pair(image0, image, opt.reduction, output_file)
def gather_images_data(file_list, crop_size, max_threads, use_borders=False,
                       reference_crop_box_corner=None, assume_zero_displacements=False):

    # Pick the first file as the non-moving reference
    ref_file = file_list[0]
    img_ref = cvt.file_to_cv2(ref_file)
    img_ref_bb = get_bounding_box(img_ref.shape)
    # If not given, determine reference crop box corner from the reference file
    img_ref_br = (img_ref_bb[1] if reference_crop_box_corner is None else
                  reference_crop_box_corner)
    crop_box = [(img_ref_br - crop_size)//2, (img_ref_br + crop_size)//2]
    img_crop = crop_to_bounding_box(img_ref, crop_box)
    img_ref_crop = (img_crop if not use_borders else get_borders(img_crop))

    # Read the file lists with few threads as each image is potentially huge
    job_args = ( (img_file, i, crop_box, use_borders) for i, img_file
                in enumerate(file_list[1:]) )
    if max_threads > 1:
        pool = mp.Pool(processes=max_threads)
        jobs = [pool.apply_async(image_list_grabber, args) for args in job_args]
        pool.close()
        pool.join()
        grab_iter = ( job.get() for job in jobs )
    else:
        grab_iter = ( image_list_grabber(*args) for args in job_args )
    data_list = sorted(grab_iter)

    data_dict = {'crops': [img_ref_crop,] + [ elem[1] for elem in data_list ],
                 'bboxes': [img_ref_bb,] + [ elem[2] for elem in data_list ],
                 'targets': file_list}

    if not assume_zero_displacements:
        # Compute displacements w.r.t reference via the image crops just obtained
        job_args = ( (i, data_dict) for i in range(1, len(data_dict['crops'])) )
        if max_threads > 1:
            pool = mp.Pool(processes=max_threads)
            jobs = [ pool.apply_async(displacement_compute_worker, args)
                    for args in job_args ]
            pool.close()
            pool.join()
            disp_iter = ( job.get() for job in jobs )
        else:
            disp_iter = ( displacement_compute_worker(*args) for args in job_args )
        deltas_idx = dict(disp_iter)
        deltas = [delta for i, delta in sorted(deltas_idx.items())]
        deltas_ref = add_pairwise_displacements(deltas, 0)
    else:
        deltas_ref = [ np.r_[0, 0] ]*len(data_dict['crops'])

    data_dict['deltas'] = deltas_ref

    basename_ref = pth.basename(ref_file)
    debug_log("Displacements relative to", basename_ref, deltas_ref)

    return data_dict
def align_images(work_dir, ppl_files, xpl_files, warp_matrix):

    # Get reference bounding box
    for xpl_file in xpl_files:
        metadata_json = xpl_file.replace(".png", ".metadata.json")
        if os.path.isfile(metadata_json):
            xpl_h, xpl_w = json.load(open(metadata_json))
        else:
            xpl_image = cvt.file_to_cv2(xpl_file)
            xpl_h, xpl_w = xpl_image.shape[:2]
        break

    target_size = (xpl_w, xpl_h)

    # Finally, align image files and crop to common box
    for ppl_file in ppl_files:

        img = cvt.file_to_cv2(ppl_file)
        img_aligned = warp_image(img, warp_matrix, target_size)
        img_bbox = get_bounding_box(img.shape)
        new_bbox = transform_points(img_bbox, warp_matrix)

        print "Start bbox:", img_bbox[0], img_bbox[-1]
        print "New bbox:", new_bbox

        #cropped = crop_to_bounding_box(img_aligned, common_box)
        cropped = img_aligned

        basename = os.path.basename(ppl_file)
        cf_name = "reg-" + basename
        cropped_file = os.path.join(work_dir, cf_name)
        success = cvt.cv2_to_file(cropped, cropped_file)

        if success:
            center_crop = crop_center_chunk(cropped, 1024)
            center_crop_name = "crop-" + cf_name.replace(".png", ".jpg")
            center_crop_file = os.path.join(work_dir, center_crop_name)
            cvt.cv2_to_file(center_crop, center_crop_file)

        result = "done" if success else "failed"
        debug_log("Alignment of", ppl_file, "into", cropped_file, result)
def reference_grabber(kind, img_file, crop_size):

    # Get working area (center crop) and bounding box
    img_ref = cvt.file_to_cv2(img_file)
    img_ref_bb = get_bounding_box(img_ref.shape)
    crop_box = get_center_crop_bbox(img_ref, crop_size)
    img_ref_crop = cvt.color_to_gray(crop_to_bounding_box(img_ref, crop_box))

    return (kind, {
        'crop-ref': img_ref_crop,
        'bbox-ref': img_ref_bb,
        'cropbox': crop_box
    })
Example #15
def file_image_preprocess(source_file,
                          target_file,
                          reduction_percentage=100,
                          blankfield=None):

    if reduction_percentage == 100 and blankfield is None:
        shutil.copyfile(source_file, target_file)
    else:
        image = image_resize(file_to_cv2(source_file), reduction_percentage)
        corrected = (image if blankfield is None else
                     blankfield_linear_correct(image, blankfield))
        cv2_to_file(corrected, target_file, 99)

    return target_file
def image_align(img_file,
                delta,
                padding_shift,
                target_size,
                crop_box,
                work_dir,
                is_reference,
                correct_shift_ref_crop=None):

    basename = os.path.basename(img_file)
    cf_name = "reg-" + basename
    cropped_file = os.path.join(work_dir, cf_name)

    shift_total = (delta + padding_shift[::-1]) if not is_reference else delta
    debug_log("Shift to apply (with padding) to", basename + ":", shift_total)
    warp_matrix = get_translation_matrix(shift_total)[:2]
    img = cvt.file_to_cv2(img_file)
    img_pre_aligned = warp_image(img, warp_matrix, target_size)

    if correct_shift_ref_crop is None:
        img_aligned = img_pre_aligned
    else:
        align_corr_bbox, align_corr_crop = correct_shift_ref_crop
        this_crop = cvt.color_to_gray(
            crop_to_bounding_box(img_pre_aligned, align_corr_bbox))
        delta_pair = [align_corr_crop, this_crop]
        delta_align = compute_displacements_direct(delta_pair, 0)[1]
        warp_matrix = get_translation_matrix(delta_align)[:2]
        img_aligned = warp_image(img_pre_aligned, warp_matrix, target_size)

    #cropped = crop_to_bounding_box(img_aligned, crop_box)
    cropped = img_aligned
    success = cvt.cv2_to_file(cropped, cropped_file)

    if success:
        center_crop_box = get_center_crop_bbox(cropped, 1024)
        center_crop = crop_to_bounding_box(cropped, center_crop_box)
        center_crop_name = "crop-" + cf_name.replace(".png", ".jpg")
        center_crop_file = os.path.join(work_dir, center_crop_name)
        cvt.cv2_to_file(center_crop, center_crop_file)

    result = "done" if success else "failed"
    debug_log("Alignment of", img_file, "into", cropped_file, result)

    #center_crop_box = get_center_crop_bbox(cropped, 4096)
    #center_crop = crop_to_bounding_box(cropped, center_crop_box)
    #return success, (center_crop_box, cvt.color_to_gray(center_crop))
    return success
Example #17
def align_images(file_list, delta_xp, work_dir):

    body_sets = { kind: sorted(fi for fi in file_list
                               if kind in fi and "crop" not in fi)
                 for kind in image_kinds }

    # Get image target size
    xpl_image = cvt.file_to_cv2(body_sets['xpl'][0])
    target_size = xpl_image.shape[:2][::-1]

    # Then, align all image files (except the reference) and crop to common box
    pool = mp.Pool(processes=4)
    jobs = []
    for target in body_sets['ppl']:
        jobs.append(pool.apply_async(image_align,
                                    (target, delta_xp, target_size, work_dir)))
    pool.close()
    pool.join()
    success = all(job.get() for job in jobs)

    return success
Example #18
def make_pyramid(image_in, pyramid_archive_path, descriptor_name):

    temp_mount = tempfile.mkdtemp(dir='/tmp')

    with ArchiveMount(pyramid_archive_path, temp_mount) as pyramid_contain_dir:

        pyramid_descriptor = pth.join(pyramid_contain_dir, descriptor_name)
        creator = deepzoom.ImageCreator(tile_size=128,
                                        tile_overlap=2,
                                        tile_format="jpg",
                                        image_quality=0.95,
                                        resize_filter="bicubic")
        load_img = (image_in if isinstance(image_in, np.ndarray) else
                    file_to_cv2(image_in))
        creator.create(load_img, pyramid_descriptor)

    debug_log("Pyramid archive", pyramid_archive_path, "created.")

    #os.rmdir(temp_mount)
    shutil.rmtree(temp_mount)

    return True
Example #19
def image_align(img_file, delta, target_size, work_dir):

    basename = os.path.basename(img_file)
    cf_name = "reg-" + basename
    aligned_file = os.path.join(work_dir, cf_name)

    warp_matrix = get_translation_matrix(delta)[:2]
    img = cvt.file_to_cv2(img_file)
    img_aligned = warp_image(img, warp_matrix, target_size)
    success = cvt.cv2_to_file(img_aligned, aligned_file)

    if success:
        center_crop_box = get_center_crop_bbox(img_aligned, 1024)
        center_crop = crop_to_bounding_box(img_aligned, center_crop_box)
        center_crop_name = "crop-" + cf_name.replace(".png", ".jpg")
        center_crop_file = os.path.join(work_dir, center_crop_name)
        cvt.cv2_to_file(center_crop, center_crop_file)

    result = "done" if success else "failed"
    debug_log("Alignment of", img_file, "into", aligned_file, result)

    return success
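A minimal sketch of what the get_translation_matrix(delta)[:2] plus warp_image combination above amounts to, assuming warp_image wraps cv2.warpAffine (an assumption; the project's helpers are not shown):

import cv2
import numpy as np

def translate_image_sketch(img, delta, target_size):
    # Pure translation by (dx, dy); target_size is (width, height), as OpenCV expects
    dx, dy = delta
    warp_matrix = np.float32([[1, 0, dx],
                              [0, 1, dy]])
    return cv2.warpAffine(img, warp_matrix, tuple(target_size))
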
def image_align_worker(img_file, target_file, delta, padding_shift, target_size,
                       crop_box, make_center_chunk, make_jpeg=False):

    img = cvt.file_to_cv2(img_file)

    total_shift = delta + padding_shift[::-1]
    cropped = translate_crop(img, total_shift, crop_box, target_size)
    success = cvt.cv2_to_file(cropped, target_file)

    if success:
        registration_file = pth.splitext(target_file)[0] + ".json"
        registration_save(total_shift, crop_box, target_size, registration_file)

    if success and make_center_chunk:
        gen_center_small_chunk(cropped, target_file, 1024)

    if success and make_jpeg:
        reduced_fpath = pth.splitext(target_file)[0] + '.jpg'
        cvt.cv2_to_file(cropped, reduced_fpath)

    result = "done" if success else "failed"
    debug_log("Alignment of", img_file, "into", target_file, result)

    return success
Example #21
def assemble_twostep(directories, files, out_file_name, corner_dim_min=[2, 3]):

    n_rows, n_cols, row_cells, img_type, _, _, _ = parse_image_grid_list(files)

    body_dir = ensure_dir(os.path.join(directories['out'], "body"))
    debug_log("Assembling of body into", body_dir, "started.")
    body_work_dir = setup_directories(body_dir)
    body_simple_file = os.path.join(body_dir, "body-straight.png")

    (success, tiles_file, body_matrix, body_simple,
     pre_crop_file) = fiji_grid_stitch(body_work_dir,
                                       files,
                                       body_simple_file,
                                       True,
                                       keep_uncropped=True,
                                       pre_copy_files=False,
                                       threshold_reg=0.5,
                                       threshold_maxavg=10,
                                       threshold_abs=50)
    if not success:
        debug_log("Could not assemble body")
        return False

    c_h, c_w = body_simple.shape[:2]
    grey_chunk = color_to_gray(body_simple[:c_h // 2, :c_w // 2, :])
    left_black = find_left_border(grey_chunk, c_w // 2)
    top_black = find_top_border(grey_chunk, c_h // 2)
    pre_corner = body_simple[:top_black, :left_black]
    corner_is_black = (pre_corner.max() == 0)
    debug_log("Corner void size:", left_black, "x", top_black, corner_is_black)

    if (left_black == 0 and top_black == 0) or (not corner_is_black):
        debug_log("Apparently no corner stitching is necessary!")
        shutil.move(body_simple_file, out_file_name)
        return True

    dim_h, dim_w = file_to_cv2(files[0]).shape[:2]
    corner_dim = [
        max(corner_dim_min[0], 1 + 2 * int(ceil(float(top_black) / dim_h))),
        max(corner_dim_min[1], 1 + 2 * int(ceil(float(left_black) / dim_w)))
    ]

    debug_log("Corner dimensions: {}".format(corner_dim))
    corner_dir = ensure_dir(os.path.join(directories['out'], "corner"))
    corner_work_dir = setup_directories(corner_dir)
    corner_file = os.path.join(corner_dir, "corner.png")
    corner_input_files = [
        cell for row in row_cells[:corner_dim[0]]
        for cell in row[:corner_dim[1]]
    ]
    debug_log("Assembling of corner into", corner_dir, "started.")
    success, _, corner_matrix, corner, _ = fiji_grid_stitch(
        corner_work_dir,
        corner_input_files,
        corner_file,
        True,
        pre_copy_files=False)
    if not success:
        debug_log("Could not assemble corner")
        return False

    debug_log("Generating Laplacian blending weights for corner")
    body = patch_body_corner_inmem(body_simple, body_matrix, corner,
                                   corner_matrix)

    debug_log("Saving blended image to", out_file_name)
    success = cv2_to_file(body, out_file_name)
    result_name = "done" if success else "failed"
    debug_log("Saving blended image", out_file_name, result_name)

    return success
Example #22
def assemble_multigrid(directories,
                       files,
                       out_file_name,
                       pre_resize=100,
                       base_cell_size=4,
                       max_threads=cpu_count(),
                       keep_uncropped=False,
                       blankfield_file=None):

    n_rows, n_cols, row_cells, img_type, _, _, _ = parse_image_grid_list(files)
    pool = Pool(processes=max_threads)

    cs = float(base_cell_size)
    base_cells_dims = np.ceil(np.r_[n_rows, n_cols] / cs).astype(int)

    cells_dir = ensure_dir(os.path.join(directories['out'], "cells"))
    cell_jobs = []
    cell_files = []

    blankfield = (None if blankfield_file is None else image_resize(
        file_to_cv2(blankfield_file), pre_resize))

    digits_row, digits_col = map(get_digits, base_cells_dims)

    for b_row in range(base_cells_dims[0]):
        for b_col in range(base_cells_dims[1]):

            base_name = "{:0{w_r}d}_{:0{w_c}d}".format(b_row,
                                                       b_col,
                                                       w_r=digits_row,
                                                       w_c=digits_col)
            temp_dirs = setup_directories(os.path.join(cells_dir, base_name))
            cell_input_files = [
                file_image_preprocess(
                    row_cells[i][j],
                    os.path.join(temp_dirs['in'],
                                 os.path.basename(row_cells[i][j])),
                    pre_resize, blankfield)
                for i in range(base_cell_size *
                               b_row, min(base_cell_size *
                                          (b_row + 1), n_rows))
                for j in range(base_cell_size *
                               b_col, min(base_cell_size *
                                          (b_col + 1), n_cols))
            ]
            out_file = os.path.join(cells_dir,
                                    "{}.{}".format(base_name, img_data_fmt))
            cell_jobs.append(
                pool.apply_async(fiji_grid_stitch,
                                 (temp_dirs, cell_input_files, out_file), {
                                     'pre_copy_files': False,
                                     'keep_uncropped': keep_uncropped
                                 }))
            cell_files.append(out_file)
    pool.close()
    pool.join()

    cell_successes = [job.get() for job in cell_jobs]
    if all(cell_successes):
        debug_log("Assembling of base cells into", cells_dir, "is complete.",
                  "Assembling image...")
        return fiji_grid_stitch(directories,
                                cell_files,
                                out_file_name,
                                keep_uncropped=keep_uncropped)
    else:
        return False
Example #23
def main():

    opt = process_command_line()
    print(opt)

    ensure_dir(opt.work_dir)

    band = opt.crop_size

    basename_ref = os.path.basename(opt.reference)
    img1 = cvt.file_to_cv2(opt.reference)
    img1_bb = get_bounding_box(img1.shape)
    img1_br = img1_bb[1]
    img1_crop_bb = [(img1_br - band) // 2, (img1_br + band) // 2]
    target_size = get_bounding_box_size(img1_bb)

    img1_crop = cvt.color_to_gray(crop_to_bounding_box(img1, img1_crop_bb))
    """
    Using crop chunks centered at each image's own center impedes resolving
    the image-to-image displacement
    """
    #img1_crop = cvt.color_to_gray(crop_center_chunk(img1, band))

    all_bboxes = [
        img1_bb.tolist(),
    ]
    pre_aligned_files = [
        opt.reference,
    ]

    # trainImages
    targets = sorted(set(opt.files) - set([
        opt.reference,
    ]))
    for img_file in targets:

        basename = os.path.basename(img_file)
        img2 = cvt.file_to_cv2(img_file)
        img2_crop = cvt.color_to_gray(crop_to_bounding_box(img2, img1_crop_bb))
        #img2_crop = cvt.color_to_gray(crop_center_chunk(img2, band))

        debug_log("Computing translation of", basename, "relative to",
                  basename_ref)

        peak_loc, peak_val = get_phasecorr_peak(img1_crop, img2_crop, 100)
        debug_log("Translation is", peak_loc, "value:", peak_val)
        warp_matrix = get_translation_matrix(peak_loc)[:2]

        img2_aligned = warp_image(img2, warp_matrix, target_size)
        img2_adj_bb = get_adjusted_bounding_box(img1_bb,
                                                get_bounding_box(img2.shape),
                                                warp_matrix)
        aligned_file = os.path.join(opt.work_dir, "pre-" + basename)
        cvt.cv2_to_file(img2_aligned, aligned_file)
        all_bboxes.append(img2_adj_bb)
        pre_aligned_files.append(aligned_file)

        debug_log("Alignment of", img_file, "done")

    common_box = intersect_bounding_boxes(all_bboxes)

    for fi_aligned in pre_aligned_files:
        debug_log("Cropping", fi_aligned, newline=False)
        aligned = cvt.file_to_cv2(fi_aligned)
        cropped = crop_to_bounding_box(aligned, common_box)

        cf_name = (("reg-" + basename_ref) if fi_aligned == opt.reference else
                   os.path.basename(fi_aligned).replace("pre-", "reg-"))
        cropped_file = os.path.join(opt.work_dir, cf_name)
        success = cvt.cv2_to_file(cropped, cropped_file)

        if success:
            center_crop = crop_center_chunk(cropped, 1024)
            center_crop_name = "crop-" + cf_name.replace(".png", ".jpg")
            center_crop_file = os.path.join(opt.work_dir, center_crop_name)
            cvt.cv2_to_file(center_crop, center_crop_file)

            if not opt.keep_uncropped and fi_aligned != opt.reference:
                os.remove(fi_aligned)

        result = "done" if success else "failed"
        print(result)
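get_phasecorr_peak in the example above is assumed to estimate a pure translation by phase correlation. OpenCV's built-in cv2.phaseCorrelate does the same job and can serve as a point of comparison; the sketch below is illustrative, not the project's helper.

import cv2
import numpy as np

def phasecorr_shift_sketch(crop_ref, crop_tgt):
    # phaseCorrelate expects single-channel float32/float64 images of equal size
    (dx, dy), response = cv2.phaseCorrelate(np.float32(crop_ref),
                                            np.float32(crop_tgt))
    return np.r_[dx, dy], response   # sub-pixel shift and correlation peak value
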
def main():

    opt = process_command_line()
    print(opt)

    ensure_dir(opt.work_dir)

    # Feature detector
    sift = cv2.xfeatures2d.SIFT_create(
        contrastThreshold=opt.contrast_threshold, sigma=opt.sigma)

    initial_w = opt.strip_width
    s_width = initial_w
    max_width = 3700

    # queryImage
    img1_subg = cvt.color_to_gray(cvt.file_to_cv2(opt.reference))
    target_size = img1_subg.shape[::-1]
    img1_bb = get_bounding_box(img1_subg.shape)
    basename_ref = os.path.basename(opt.reference)
    img1_bands = get_image_bands(img1_subg, max_width)
    debug_log("Gather features of reference image", basename_ref, "with w =",
              max_width)
    kp1, des1 = get_features_on_bands(img1_subg, img1_bands, sift)
    ref_package = (kp1, des1)

    # objects cached by strip width, with initial element
    #img1_features = {}
    match_stats = {'fail': {s_width: 0}, 'success': {s_width: 0}}

    all_bboxes = [
        img1_bb.tolist(),
    ]
    pre_aligned_files = [
        opt.reference,
    ]

    # trainImages
    targets = sorted(set(opt.files) - set([
        opt.reference,
    ]))
    for img_file in targets:

        basename = os.path.basename(img_file)
        img2 = cvt.file_to_cv2(img_file)
        img2_subg = cvt.color_to_gray(img2)
        keypoints = []
        descriptors = []

        s_width = initial_w
        s_clearance = 1000

        converged = False
        while not converged:

            debug_log("Gather features of image", basename, "with w =",
                      s_width, "and c =", s_clearance)
            img2_bands = get_image_bands(img2_subg, s_width)
            kp2, des2 = get_features_on_bands(img2_subg, img2_bands, sift)
            keypoints.extend(kp2)
            descriptors.append(des2)
            target_package = (keypoints, np.vstack(descriptors))

            warp_matrix = match_get_transf(ref_package, target_package,
                                           opt.min_matches)
            print("Warp matrix {}->{}:".format(basename_ref, basename))
            print(warp_matrix)
            success = (warp_matrix is not None
                       and is_good_matrix(warp_matrix, 3e-3))
            converged = success

            if not success:
                debug_log("Not good enough matching achieved.",
                          "Increasing bands width")
                iter_step = 500
                s_width = iter_step
                s_clearance += s_width
                continue

            mark_successful_iter(s_width, match_stats)
            img2_aligned = align_image(img2, warp_matrix, target_size)
            img2_adj_bb = get_adjusted_bounding_box(
                img1_bb, get_bounding_box(img2.shape), warp_matrix)
            aligned_file = os.path.join(opt.work_dir, "pre-" + basename)
            cvt.cv2_to_file(img2_aligned, aligned_file)
            all_bboxes.append(img2_adj_bb)
            pre_aligned_files.append(aligned_file)

        result = "done" if success else "failed"
        debug_log("Alignment of", img_file, result)

    common_box = intersect_bounding_boxes(target_size, all_bboxes)

    for fi_aligned in pre_aligned_files:
        debug_log("Cropping", fi_aligned, newline=False)
        aligned = cvt.file_to_cv2(fi_aligned)
        cropped = crop_to_bounding_box(aligned, common_box)

        cf_name = (("reg-" + basename_ref) if fi_aligned == opt.reference else
                   os.path.basename(fi_aligned).replace("pre-", "reg-"))
        cropped_file = os.path.join(opt.work_dir, cf_name)
        success = cvt.cv2_to_file(cropped, cropped_file)

        if success and not opt.keep_uncropped and fi_aligned != opt.reference:
            os.remove(fi_aligned)

        result = "done" if success else "failed"
        print(result)
Example #25
def main():

    opt = process_command_line()
    print(opt)

    ensure_dir(opt.work_dir)

    # Feature detector
    sift_options = dict(sigma=opt.sigma,
                        contrastThreshold=opt.contrast_threshold)
    sift = cv2.xfeatures2d.SIFT_create(**sift_options)

    strip_width = opt.strip_width

    # Reference image
    basename_ref = os.path.basename(opt.reference)
    image_ref = cvt.file_to_cv2(opt.reference)
    imgref_subg = cvt.color_to_gray(image_ref)
    target_size = imgref_subg.shape[::-1]
    imgref_clearance = -1
    kp_ref, des_ref = [], None

    ref_max_clearance = min(target_size) - 2 * strip_width

    # Target images
    targets = sorted(set(opt.files) - set([
        opt.reference,
    ]))
    for img_file in targets:

        basename = os.path.basename(img_file)
        img2 = cvt.file_to_cv2(img_file)
        img2_subg = cvt.color_to_gray(img2)
        keypoints = []
        descriptors = []

        s_clearance = opt.clearance
        tgt_max_clearance = min(img2_subg.shape) - 2 * strip_width

        converged = False
        while not converged:

            if s_clearance > imgref_clearance:
                imgref_clearance = s_clearance
                if ref_max_clearance < imgref_clearance:
                    debug_log("Cannot expand feature extraction for reference",
                              basename_ref, "; using current data")
                else:
                    imgref_bands = get_image_bands(imgref_subg, strip_width,
                                                   imgref_clearance)
                    debug_log("Gather features of reference image",
                              basename_ref, "with w =", strip_width, "and c =",
                              imgref_clearance)
                    kp_, des_ = get_features_on_bands(imgref_subg,
                                                      imgref_bands, sift)
                    kp_ref.extend(kp_)
                    des_ref = (des_ if des_ref is None else np.vstack(
                        [des_ref, des_]))

                ref_package = (kp_ref, des_ref)

            if tgt_max_clearance < s_clearance:
                debug_log("Cannot expand feature extraction for target",
                          basename_ref, "; Aborting")
                success = False
                break

            debug_log("Gather features of image", basename, "with w =",
                      strip_width, "and c =", s_clearance)
            img2_bands = get_image_bands(img2_subg, strip_width, s_clearance)
            kp_tgt, des_tgt = get_features_on_bands(img2_subg, img2_bands,
                                                    sift)
            keypoints.extend(kp_tgt)
            descriptors.append(des_tgt)
            target_package = (keypoints, np.vstack(descriptors))

            src_pts_flann, dst_pts_flann = match_points_flann(
                ref_package, target_package, opt.min_matches)
            similarity = get_transf_similarity(src_pts_flann, dst_pts_flann)
            scale_change_pct = 100 * abs(similarity.scale - 1)
            success = (scale_change_pct < 1)
            converged = success

            debug_log("Similarity transform: scale change pct.:",
                      scale_change_pct, "Trl:", similarity.translation, "Rot:",
                      similarity.rotation)

            if not success:
                debug_log("Not good enough matching achieved.",
                          "Increasing bands width")
                s_clearance += strip_width
                continue

            transform = SimilarityTransform(scale=1,
                                            rotation=similarity.rotation,
                                            translation=similarity.translation)
            warp_matrix = get_transf_homography(src_pts_flann, dst_pts_flann)
            print(
                "Compare Flann matrices:\n  Homography:\n{}\n Similarity:\n{}".
                format(warp_matrix, transform.params))
            """ BFmatcher test
            src_pts_bf, dst_pts_bf = match_points_bf(ref_package, target_package,
                                                     opt.min_matches)
            warp_matrix_bf = get_transf_homography(src_pts_bf, dst_pts_bf)
            similarity = get_transf_similarity(src_pts_bf, dst_pts_bf, min_samples=2)
            transform_bf = SimilarityTransform(scale=1, rotation=similarity.rotation,
                                               translation=similarity.translation)
            print("Compare BF matrices:\n  Homography:\n{}\n Similarity:\n{}".format(warp_matrix_bf, transform_bf.params))
            """

            img2_aligned = align_image(img2, transform.params, target_size)

            aligned_file = os.path.join(opt.work_dir, "reg-" + basename)
            success = cvt.cv2_to_file(img2_aligned, aligned_file)

            if success:
                center_crop = crop_center_chunk(img2_aligned, 1024)
                center_crop_name = "crop-" + basename.replace(".png", ".jpg")
                center_crop_file = os.path.join(opt.work_dir, center_crop_name)
                cvt.cv2_to_file(center_crop, center_crop_file)

                if opt.small_images:
                    small = cvt.image_resize(img2_aligned, 30)
                    small_name = "small-" + basename.replace(".png", ".jpg")
                    small_file = os.path.join(opt.work_dir, small_name)
                    cvt.cv2_to_file(small, small_file)

        result = "done" if success else "failed"
        debug_log("Alignment of", img_file, result)
Example #26
def main():

    opt = process_command_line()
    print(opt)

    ensure_dir(opt.work_dir)

    targets = opt.files
    first_img = opt.files[0]
    basename_ref = os.path.basename(first_img)

    img1 = cvt.file_to_cv2(first_img)
    img1_crop = cvt.color_to_gray(crop_center_chunk(img1, opt.crop_size))
    img1_bb = get_bounding_box(img1.shape[:2])

    all_bboxes = [
        img1_bb,
    ]
    relative_displacements = []

    # Get pairwise relative displacements
    for img_file in targets[1:]:

        basename = os.path.basename(img_file)
        img2 = cvt.file_to_cv2(img_file)
        img2_crop = cvt.color_to_gray(crop_center_chunk(img2, opt.crop_size))

        debug_log("Computing translation of", basename, "relative to",
                  basename_ref)

        peak_loc, peak_val = get_phasecorr_peak(img1_crop, img2_crop, 100)
        debug_log("Translation is", peak_loc, "value:", peak_val)

        relative_displacements.append(peak_loc)
        all_bboxes.append(get_bounding_box(img2.shape[:2]))

        img1, img1_crop, basename_ref = img2, img2_crop, basename

    del img1, img2, img1_crop, img2_crop

    # Determine largest bounding box
    bboxes_area = np.array(
        [get_bounding_box_area(bbox) for bbox in all_bboxes])
    largest_area = np.argmax(bboxes_area)
    largest_bbox = all_bboxes[largest_area]
    target_size = get_bounding_box_size(largest_bbox)
    reference = targets[largest_area]
    basename_ref = os.path.basename(reference)

    print "disps:", relative_displacements
    debug_log("Largest area image is", reference, "({})".format(largest_area))

    # Propagate displacements
    pre_aligned_files = []
    for i, img_file in enumerate(targets):

        # Displacements are applied relative to largest bbox
        if i == largest_area:
            pre_aligned_files.append(reference)
            continue

        basename = os.path.basename(img_file)
        img = cvt.file_to_cv2(img_file)

        # displacement[i] = pos[i+1] - pos[i]
        if largest_area < i:
            disp_chain = range(largest_area, i)
            direction = 1
        else:
            disp_chain = range(i, largest_area)
            direction = -1

        total_displacement = direction * sum(relative_displacements[j]
                                             for j in disp_chain)
        debug_log("Displacement from", reference, "to", img_file, "is",
                  total_displacement)
        print "dir", direction, "; chain", disp_chain
        warp_matrix = get_translation_matrix(total_displacement)[:2]

        img_aligned = align_image(img, warp_matrix, target_size)
        aligned_file = os.path.join(opt.work_dir, "pre-" + basename)
        success = cvt.cv2_to_file(img_aligned, aligned_file)
        if success:
            pre_aligned_files.append(aligned_file)

        result = "done" if success else "failed"
        debug_log("Alignment of", img_file, "into", aligned_file, result)

    common_box = intersect_bounding_boxes(target_size, all_bboxes)

    for fi_aligned in pre_aligned_files:
        debug_log("Cropping", fi_aligned, newline=False)
        aligned = cvt.file_to_cv2(fi_aligned)
        cropped = crop_to_bounding_box(aligned, common_box)

        cf_name = (("reg-" + basename_ref) if fi_aligned == reference else
                   os.path.basename(fi_aligned).replace("pre-", "reg-"))
        cropped_file = os.path.join(opt.work_dir, cf_name)
        success = cvt.cv2_to_file(cropped, cropped_file)

        if success:
            center_crop = crop_center_chunk(cropped, 1024)
            center_crop_name = "crop-" + cf_name.replace(".png", ".jpg")
            center_crop_file = os.path.join(opt.work_dir, center_crop_name)
            cvt.cv2_to_file(center_crop, center_crop_file)

            if not opt.keep_uncropped and fi_aligned != reference:
                os.remove(fi_aligned)

        result = "done" if success else "failed"
        print(result)
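The displacement bookkeeping in the example above (displacement[i] = pos[i+1] - pos[i], summed along the chain between the reference index and each image, with a sign for direction) can be checked in isolation with a small self-contained sketch:

import numpy as np

def total_shift(pairwise, ref_idx, i):
    # pairwise[j] = pos[j+1] - pos[j]; returns pos[i] - pos[ref_idx]
    if ref_idx < i:
        chain, direction = range(ref_idx, i), 1
    else:
        chain, direction = range(i, ref_idx), -1
    return direction * sum((pairwise[j] for j in chain), np.r_[0.0, 0.0])

pairwise = [np.r_[5, 2], np.r_[0, -3], np.r_[2, 2]]   # pos1-pos0, pos2-pos1, pos3-pos2
print(total_shift(pairwise, 1, 3))   # pos3 - pos1
print(total_shift(pairwise, 1, 0))   # pos0 - pos1
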
Example #27
def blur_blankfield(bfield_file, bfield_blurred_file):

    bfield = file_to_cv2(bfield_file)
    cv2_to_file(simple_blur(bfield), bfield_blurred_file, 0.01)
Example #28
def main():

    opt = process_command_line()
    print(opt)

    ensure_dir(opt.work_dir)

    # Feature detector
    sift_options = dict(sigma=opt.sigma,
                        contrastThreshold=opt.contrast_threshold)

    base_csize = opt.crop_size

    # Reference image
    basename_ref = os.path.basename(opt.reference)
    image_ref = cvt.file_to_cv2(opt.reference)
    imgref_subg = cvt.color_to_gray(image_ref)
    target_size = imgref_subg.shape[::-1]
    imgref_csize = -1
    kp_ref, des_ref = [], None

    ref_max_size = min(target_size)

    # Target images
    targets = sorted(set(opt.files) - set([
        opt.reference,
    ]))
    for img_file in targets:

        basename = os.path.basename(img_file)
        img2 = cvt.file_to_cv2(img_file)
        img2_subg = cvt.color_to_gray(img2)
        keypoints = []
        descriptors = []

        csize = base_csize
        tgt_max_size = min(img2_subg.shape)

        converged = False
        while not converged:

            if csize > imgref_csize:
                imgref_csize = csize
                if ref_max_size < imgref_csize:
                    debug_log("Cannot expand feature extraction for reference",
                              basename_ref, "; using current data")
                else:
                    debug_log("Gather features of reference image",
                              basename_ref, "with c =", imgref_csize)
                    imgref_bands = get_center_crop_bounding_box(
                        imgref_subg, imgref_csize)
                    print("Crop box:", imgref_bands)
                    sift = cv2.xfeatures2d.SIFT_create(**sift_options)
                    roi = crop_to_bounding_box(imgref_subg, imgref_bands)
                    #borders = cvt.simple_grayscale_stretch(scharr(roi))
                    kp_ref, des_ref = sift.detectAndCompute(roi, None)

                ref_package = (kp_ref, des_ref)

            if tgt_max_size < csize:
                debug_log("Cannot expand feature extraction for target",
                          basename_ref, "; Aborting")
                success = False
                break

            debug_log("Gather features of image", basename, "with c =", csize)
            sift = cv2.xfeatures2d.SIFT_create(**sift_options)
            roi = crop_to_bounding_box(img2_subg, imgref_bands)
            #borders = cvt.simple_grayscale_stretch(scharr(roi))
            keypoints, descriptors = sift.detectAndCompute(roi, None)
            target_package = (keypoints, descriptors)

            src_pts_flann, dst_pts_flann = match_points_flann(
                ref_package, target_package, opt.min_matches)
            similarity = get_transf_similarity(src_pts_flann, dst_pts_flann)
            scale_change_pct = 100 * abs(similarity.scale - 1)

            debug_log("FLANN Similarity transform: scale change pct.:",
                      scale_change_pct, "Trl:", similarity.translation, "Rot:",
                      similarity.rotation)
            warp_matrix = get_transf_homography(src_pts_flann, dst_pts_flann)
            print(
                "Compare FLANN matrices:\n  Homography:\n{}\n Similarity:\n{}".
                format(warp_matrix, similarity.params))
            """ BFmatcher test
            src_pts_bf, dst_pts_bf = match_points_bf(ref_package, target_package,
                                                     opt.min_matches)
            similarity = get_transf_similarity(src_pts_bf, dst_pts_bf, min_samples=2)
            scale_change_pct = 100*abs(similarity.scale - 1)

            debug_log("BF Similarity transform: scale change pct.:",
                      scale_change_pct, "Trl:", similarity.translation,
                      "Rot:", similarity.rotation)

            transform = SimilarityTransform(scale=1, rotation=similarity.rotation,
                                            translation=similarity.translation)
            warp_matrix_bf = get_transf_homography(src_pts_bf, dst_pts_bf)
            print("Compare BF matrices:\n  Homography:\n{}\n  Similarity:\n{}".format(warp_matrix_bf, transform.params))
            """

            success = (scale_change_pct < 1)
            converged = success

            if not success:
                debug_log("Not good enough matching achieved.",
                          "Increasing bands width")
                csize += base_csize
                continue

            transform = SimilarityTransform(scale=1,
                                            rotation=similarity.rotation,
                                            translation=similarity.translation)

            img2_aligned = align_image(img2, transform.params, target_size)

            aligned_file = os.path.join(opt.work_dir, "reg-" + basename)
            success = cvt.cv2_to_file(img2_aligned, aligned_file)

            if success:
                center_crop = crop_center_chunk(img2_aligned, 1024)
                center_crop_name = "crop-" + basename.replace(".png", ".jpg")
                center_crop_file = os.path.join(opt.work_dir, center_crop_name)
                cvt.cv2_to_file(center_crop, center_crop_file)

                if opt.small_images:
                    small = cvt.image_resize(img2_aligned, 30)
                    small_name = "small-" + basename.replace(".png", ".jpg")
                    small_file = os.path.join(opt.work_dir, small_name)
                    cvt.cv2_to_file(small, small_file)

        result = "done" if success else "failed"
        debug_log("Alignment of", img_file, result)