Example #1
def do_epoch(model, data, vocab):
    before_loss = 0
    all_losses = []
    current_loss = 0
    for epoch in range(1, hparam.steps + 1):
        label_name, label_tensor, line, line_tensor = randomTrainingPair(
            data, vocab)
        output, loss = train(model, label_tensor, line_tensor)
        current_loss += loss
        # Every hparam.every steps: log progress, checkpoint improvements and dump the loss curve
        if epoch % hparam.every == 0:
            ll = itos(line, vocab)
            guess, guess_i = categoryFromOutput(vocab, output)
            correct = '✓' if guess == label_name else '✗ (%s)' % label_name
            logger.info(
                'step:%d %.2f%% loss: %.4f %s / %s %s' %
                (epoch, epoch / hparam.steps * 100, loss, ll, guess, correct))
            # Save a checkpoint whenever the loss improves on the best seen so far
            if loss < before_loss or before_loss == 0:
                ckpt = "model_%s_%.4f.pt" % (epoch, loss)
                utils.save(logger, model.rnn, hparam.save_train, ckpt)
                before_loss = loss

            # Record the average loss over the last reporting window
            all_losses.append(current_loss / hparam.every)
            current_loss = 0
            # Dump the loss curve; the context manager makes sure the file is closed
            with open(os.path.join(hparam.save_train,
                                   '%s_loss.json' % hparam.loss),
                      'w',
                      encoding='utf-8') as f:
                json.dump(all_losses, f, indent=0)
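
The loop above dumps the running average of the loss to a JSON file. A minimal sketch of how that file could be read back and plotted afterwards, assuming matplotlib is available and the same hparam settings are in scope (the helper name is hypothetical):

import json
import os
import matplotlib.pyplot as plt

def plot_losses(hparam):
    # Load the loss curve written by do_epoch above
    path = os.path.join(hparam.save_train, '%s_loss.json' % hparam.loss)
    with open(path, 'r', encoding='utf-8') as f:
        all_losses = json.load(f)
    # One point per reporting window of hparam.every steps
    plt.plot(all_losses)
    plt.xlabel('reporting window (%d steps each)' % hparam.every)
    plt.ylabel('average loss')
    plt.show()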
Example #2
def make_data(vocab, path, flag):
    label_lines = {}
    logger.info("process %s data..." % flag)
    logger.info("load vocab...size: [%s]" % vocab.size)
    for filename in utils.findFiles(os.path.join(path, hparam.files)):
        logger.info("process file: %s" % filename)
        category = filename.split('/')[-1].split('.')[0]
        lines = utils.readLines(filename)
        new_lines = []
        for i in tqdm(range(len(lines))):
            line = lines[i]
            if flag == "train":
                # Skip lines that exceed the maximum length in the training split
                if len(line) < hparam.max_length:
                    chars_id = utils.stoi(line, vocab)
                    new_lines.append(chars_id)
                    label_lines[category] = new_lines
                else:
                    logger.info("skipped line of length: %s" % len(line))
            else:
                chars_id = utils.stoi(line, vocab)
                new_lines.append(chars_id)
                label_lines[category] = new_lines

        logger.info("processed: %s size: [%s]" % (category, len(new_lines)))
    data = utils.data(label_lines)
    utils.save(logger, data, hparam.save_data, "%s.pt" % flag)
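
A minimal sketch of how this preprocessing could be driven for both splits, assuming load_vocab() from the next example and split paths in the configuration (hparam.train_path and hparam.valid_path are hypothetical names):

# Build the id-encoded datasets for training and validation
vocab = load_vocab()
make_data(vocab, hparam.train_path, "train")
make_data(vocab, hparam.valid_path, "valid")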
Example #3
def load_vocab():

    if os.path.exists(hparam.vocab):
        # Reuse a previously built vocabulary
        vocab = torch.load(hparam.vocab)
    else:
        logger.info("make vocab")
        vocab, label = make_vocab()
        if not os.path.exists(hparam.save_data):
            os.mkdir(hparam.save_data)
        # Write the vocabulary to a text file, with the UNK token first (index 0)
        vocab_path = os.path.join(hparam.save_data, "vocab.txt")
        with codecs.open(vocab_path, 'w', encoding="utf-8") as f:
            f.write(utils.UNK + "\n")
            for char in vocab:
                f.write(char + "\n")
        # Reload the character list, build the vocabulary object and cache it
        vocab = utils.readLines(vocab_path)
        logger.info("load vocab...size: [%s]" % len(vocab))
        vocab = utils.vocabulary(label, vocab)
        utils.save(logger, vocab, hparam.save_data, "vocab.pt")

    return vocab
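
A minimal round-trip sketch with the loaded vocabulary, assuming utils.stoi and itos behave as they are used in the examples above (the sample string is arbitrary):

# Encode a line to character ids and decode it back
vocab = load_vocab()
ids = utils.stoi("hello", vocab)   # characters -> indices, unknown chars map to UNK
text = itos(ids, vocab)            # indices -> characters
logger.info("round trip: %s -> %s" % (ids, text))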
Example #4
def deblur_module(pic,
                  filename,
                  dest_path,
                  blur_width,
                  confidence=10,
                  bias=1e-4,
                  step=1e-3,
                  bits=8,
                  iterations=200,
                  sharpness=0,
                  mask=None,
                  display=True,
                  neighbours=8,
                  correlation=False):
    """
    API to call the debluring process

    :param pic: an image memory object, from PIL or tifffile
    :param filename: string, the name of the file to save
    :param dest_path: string, the path where to save the file
    :param blur_width: integer, the diameter of the blur e.g. the size of the PSF
    :param confidence: float, default 1, max 100, set the confidence you have in your sample. For example, on noisy pictures, 
    use 1 to 10. For a clean low-ISO picture, you can go all the way to 100. A low factor will reduce the convergence, a high 
    factor will allow more noise amplification.
    :param bias: float, the blending parameter between sharp and blurred pictures. Ensure    the convergence of the sharp image.
    Usually between 0.0001 and 0.1
    :param step: float, the gradient-descent factor. Normal is 1e-3. Increase it to converge faster, but be careful because
    it could diverge more as well.
    :param bits: integer, default is 8 meaning the input image is encoded with 8 bits/channel. Use 16 if you input 16 bits
    tiff files.
    :param iterations: float, default is 1, meaning that the base number of iterations to perform are the width of the blur.
    While this works in most cases, complicated blurs need extra care. Set it > 1 in conjunction with a smaller step
    when more iterations are needed.
    :param mask: list of 4 integers, the region on which the blur will be estimated to speed-up the process.
    :param neighbours: set the number of pixels used to compute the total variation regularization. 2 is fast but will over-smooth
    and thus reduce the convergence. 4 is good but might blur the edges, 8 is better but slower and might be too permissive in some cases. 
    With 8, you might want to decrease the confidence factor.
    :param sharpness: this applies a final unsharp mask using the input picture as the blurry version and the deconvoluted picture as the sharp one. Put 0
    to disable this feature, and 1 to apply it full throttle.

    :param display: Pop-up a control window at the end of the blur estimation to check the solution before runing it on
    the whole picture
    :return:
    """
    # TODO : refocus http://web.media.mit.edu/~bandy/refocus/PG07refocus.pdf
    # TODO : extract foreground only https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_grabcut/py_grabcut.html#grabcut

    pic = np.ascontiguousarray(pic, dtype=np.float32)

    # Load the FFTW wisdom if the file exists
    if isfile('fftw_wisdom.pickle'):
        with open('fftw_wisdom.pickle', 'rb') as f:
            pyfftw.import_wisdom(pickle.load(f))
            print("FFT profiles loaded")
    else:
        print(
            "No FFT profiles detected. They will be created at the end of the session"
        )

    # Verifications
    if blur_width < 3:
        raise ValueError("The blur width should be at least 3 pixels.")

    if blur_width % 2 == 0:
        raise ValueError("The blur width should be odd. You can use %i." %
                         (blur_width + 1))

    if confidence > 100:
        raise ValueError(
            "The confidence factor is limited to 100. Try increasing the bias instead"
        )

    # Set the bit-depth
    samples = 2**bits - 1

    # Rescale the RGB values between 0 and 1
    pic = pic / samples

    # Make the picture dimensions odd to avoid ringing on the border of even pictures. We just replicate the last row/column
    odd_vert = False
    odd_hor = False

    if pic.shape[0] % 2 == 0:
        pic = pad_image(pic, ((1, 0), (0, 0))).astype(np.float32)
        odd_vert = True
        print("Padded vertically")

    if pic.shape[1] % 2 == 0:
        pic = pad_image(pic, ((0, 0), (1, 0))).astype(np.float32)
        odd_hor = True
        print("Padded horizontally")

    # Construct a blank PSF
    psf = utils.uniform_kernel(blur_width)
    psf = np.dstack((psf, psf, psf))

    # Get the dimensions once for all
    MK = blur_width
    M = pic.shape[0]
    N = pic.shape[1]
    C = pic.shape[2]

    # Rescale the lambda parameter
    confidence = 1000 * confidence

    # Set the error
    epsilon = dc.best_param(pic, confidence, M, N)

    print("\n===== BLIND ESTIMATION OF BLUR =====")

    # Construct the mask for the blur estimation
    if mask:
        # Check the mask size
        if ((mask[1] - mask[0]) % 2 == 0 or (mask[3] - mask[2]) % 2 == 0):
            raise ValueError(
                "The mask dimensions should be odd. You could use at least %i×%i pixels."
                % (blur_width + 2, blur_width + 2))

        u_masked = pic[mask[0]:mask[1], mask[2]:mask[3], ...].copy()
        i_masked = pic[mask[0]:mask[1], mask[2]:mask[3], ...]

    else:
        u_masked = pic.copy()
        i_masked = pic

    # Build the intermediate sizes and factors
    images, kernels, lambdas = build_pyramid(MK, confidence)
    k_prec = MK
    for i, k, l in zip(reversed(images), reversed(kernels), reversed(lambdas)):
        print("======== Pyramid step %1.3f ========" % i)

        # Resize the blurred and deblurred images and the PSF from the previous step
        if i != 1:
            im = ndimage.zoom(i_masked, (i, i, 1)).astype(np.float32)
        else:
            im = i_masked

        psf = ndimage.zoom(psf, (k / k_prec, k / k_prec, 1)).astype(np.float32)
        dc.normalize_kernel(psf, k)

        u_masked = ndimage.zoom(u_masked, (im.shape[0] / u_masked.shape[0],
                                           im.shape[1] / u_masked.shape[1], 1))

        vert_odd = False
        hor_odd = False

        # Pad to ensure oddity
        if im.shape[0] % 2 == 0:
            vert_odd = True
            im = pad_image(im, ((1, 0), (0, 0))).astype(np.float32)
            u_masked = pad_image(u_masked, ((1, 0), (0, 0))).astype(np.float32)
            print("Padded vertically")

        if im.shape[1] % 2 == 0:
            hor_odd = True
            im = pad_image(im, ((0, 0), (1, 0))).astype(np.float32)
            u_masked = pad_image(u_masked, ((0, 0), (1, 0))).astype(np.float32)
            print("Padded horizontally")

        # Pad for FFT
        pad = np.floor(k / 2).astype(int)
        u_masked = pad_image(u_masked, (pad, pad))

        # Make a blind Richardson-Lucy deconvolution on the RGB signal
        dc.richardson_lucy_MM(im,
                              u_masked,
                              psf,
                              epsilon,
                              im.shape[0],
                              im.shape[1],
                              3,
                              k,
                              int(iterations / i),
                              step,
                              l,
                              epsilon,
                              neighbours,
                              blind=True,
                              correlation=correlation)

        # Unpad the FFT margins because this image is resized and reused in the next step
        u_masked = u_masked[pad:-pad, pad:-pad, ...]

        # Unpad oddity for same reasons
        if vert_odd:
            u_masked = u_masked[1:, :, ...]

        if hor_odd:
            u_masked = u_masked[:, 1:, ...]

        k_prec = k

    # Display the control preview
    if display:
        psf_check = (psf - np.amin(psf))
        psf_check = psf_check / np.amax(psf_check)
        plt.imshow(psf_check,
                   interpolation="lanczos",
                   filternorm=1,
                   aspect="equal",
                   vmin=0,
                   vmax=1)
        plt.show()
        plt.imshow(u_masked,
                   interpolation="lanczos",
                   filternorm=1,
                   aspect="equal",
                   vmin=0,
                   vmax=1)
        plt.show()

    print("\n===== REGULAR DECONVOLUTION =====")

    u = pic.copy()

    # Build the intermediate sizes and factors
    k_prec = MK
    for i, k, l in zip(reversed(images), reversed(kernels), reversed(lambdas)):
        print("======== Pyramid step %1.3f ========" % i)

        # Resize the blurred and deblurred images and the PSF from the previous step
        if i != 1:
            im = ndimage.zoom(pic, (i, i, 1)).astype(np.float32)
            psf_loc = ndimage.zoom(psf, (k / k_prec, k / k_prec, 1)).astype(
                np.float32)
            dc.normalize_kernel(psf_loc, k)
        else:
            im = pic
            psf_loc = psf

        u = ndimage.zoom(
            u, (im.shape[0] / u.shape[0], im.shape[1] / u.shape[1], 1))

        vert_odd = False
        hor_odd = False

        # Pad to ensure oddity
        if im.shape[0] % 2 == 0:
            vert_odd = True
            im = pad_image(im, ((1, 0), (0, 0))).astype(np.float32)
            u = pad_image(u, ((1, 0), (0, 0))).astype(np.float32)
            print("Padded vertically")

        if im.shape[1] % 2 == 0:
            hor_odd = True
            im = pad_image(im, ((0, 0), (1, 0))).astype(np.float32)
            u = pad_image(u, ((0, 0), (1, 0))).astype(np.float32)
            print("Padded horizontally")

        # Pad for FFT
        pad = np.floor(k / 2).astype(int)
        u = pad_image(u, (pad, pad))

        # Make a non-blind Richardson-Lucy deconvolution on the RGB signal
        dc.richardson_lucy_MM(im,
                              u,
                              psf_loc,
                              bias,
                              im.shape[0],
                              im.shape[1],
                              3,
                              k,
                              int(iterations / i),
                              step,
                              l,
                              epsilon,
                              neighbours,
                              blind=False)

        # Unpad the FFT margins because this image is resized and reused in the next step
        u = u[pad:-pad, pad:-pad, ...]

        # Unpad oddity for same reasons
        if vert_odd:
            u = u[1:, :, ...]

        if hor_odd:
            u = u[:, 1:, ...]
    """ 
            
    # Pad to ensure oddity
    if pic.shape[0] % 2 == 0:
        hor_odd = True
        im = pad_image(im, ((1, 0), (0, 0))).astype(np.float32)
        u = pad_image(u, ((1, 0), (0, 0))).astype(np.float32)
        print("Padded vertically")

    if pic.shape[1] % 2 == 0:
        vert_odd = True
        im = pad_image(im, ((0, 0), (1, 0))).astype(np.float32)
        u = pad_image(u, ((0, 0), (1, 0))).astype(np.float32)
        print("Padded horizontally")
        
    # Pad for FFT
    pad = np.floor(MK / 2).astype(int)
    u = pad_image(u, (pad, pad))
        
    # Make a non-blind Richardson-Lucy deconvolution on the RGB signal
    dc.richardson_lucy_MM(pic, u, psf, bias, M, N, 3, k, iterations, step, l, epsilon, neighbours, blind=False)

    # Unpad FFT because this image is resized/reused the next step
    u = u[pad:-pad, pad:-pad, ...]

    # Unpad oddity for same reasons
    if vert_odd:
        u = u[1:, :, ...]

    if hor_odd:
        u = u[:, 1:, ...]
    """

    # Apply an unsharp mask to boost the sharpness a bit
    u = (1 + sharpness) * u - sharpness * pic

    # if the picture has been padded to make it odd, unpad it to get the original size
    if odd_hor:
        u = u[:, 1:, ...]
    if odd_vert:
        u = u[1:, :, ...]

    # Clip extreme values
    np.clip(u, 0, 1, out=u)

    # Convert to 16 bits RGB
    u = u * (2**16 - 1)

    # Save the pic
    utils.save(u, filename, dest_path)

    # Save the FFTW wisdom for later use
    with open('fftw_wisdom.pickle', 'wb') as f:
        pickle.dump(pyfftw.export_wisdom(), f)
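
A minimal usage sketch for the function above, assuming the picture is loaded with PIL and converted to a NumPy array (the file name, destination path and mask coordinates are hypothetical):

import numpy as np
from PIL import Image

# Hypothetical call: deblur a JPEG with an estimated 7-pixel-wide PSF,
# estimating the blur on an odd-sized 121x121 region to speed things up.
pic = np.array(Image.open("blurry.jpg"))
deblur_module(pic,
              "blurry-deblurred",
              "/tmp",
              7,
              confidence=20,
              mask=[480, 601, 640, 761],
              display=True)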
def deblur_module(pic,
                  filename,
                  dest_path,
                  blur_width,
                  confidence=10,
                  tolerance=1,
                  quality="normal",
                  bits=8,
                  mask=None,
                  display=True,
                  blur="static",
                  preview=False,
                  p=1,
                  order=2,
                  norm=1,
                  priority=0,
                  mask_size=255,
                  iterations=200,
                  refocus=False):
    """
    API to call the debluring process

    :param pic: an image memory object, from PIL or tifffile
    :param filename: string, the name of the file to save
    :param dest_path: string, the path where to save the file
    :param blur_width: integer, the diameter of the blur e.g. the size of the PSF
    :param confidence: float, default 1, max 100, set the confidence you have in your sample. For example, on noisy pictures,
    use 1 to 10. For a clean low-ISO picture, you can go all the way to 100. A low factor will reduce the convergence, a high
    factor will allow more noise amplification.
    :param tolerance: float, between 0 and 100. The amount of error you can accept in the solution in %.
    :param bits: integer, default is 8 meaning the input image is encoded with 8 bits/channel. Use 16 if you input 16 bits
    tiff files.
    :param mask: list of 2 integers, the center of th region on which the blur will be estimated to speed-up the process.
    :param display: Pop-up a control window at the end of the blur estimation to check the solution before runing it on
    the whole picture
    :param p: float, the power of the Total Variation used to regularize the deblurring. Set > 2 to increase the convergence rate but this might favor the blurry picture as well.
    It will be refined during the process anyway.
    :return:
    """
    # TODO : refocus http://web.media.mit.edu/~bandy/refocus/PG07refocus.pdf
    # TODO : extract foreground only https://docs.opencv.org/3.0-beta/doc/py_tutorials/py_imgproc/py_grabcut/py_grabcut.html#grabcut

    pic = np.ascontiguousarray(pic, dtype=np.float32)

    # Extra pad for safety
    pic = pad_image(pic, (1, 1)).astype(np.float32)

    # Set the bit-depth
    samples = 2**bits - 1

    # Rescale the RGB values between 0 and 1
    pic = pic / samples

    # Undo the gamma corrector
    pic = pic**(1 / 2.2)

    # Map the quality to gradient descent step
    if quality == "normal":
        step = 1e-3
    elif quality == "high":
        step = 5e-4
    elif quality == "veryhigh":
        step = 1e-4
    elif quality == "low":
        step = 5e-3

    # Blur verifications
    if blur_width < 3:
        raise ValueError("The blur width should be at least 3 pixels.")
    elif blur_width % 2 == 0:
        raise ValueError("The blur width should be odd. You can use %i." %
                         (blur_width + 1))

    #TODO : automatically evaluate blur size : https://www.researchgate.net/publication/257069815_Blind_Deconvolution_of_Blurred_Images_with_Fuzzy_Size_Detection_of_Point_Spread_Function

    # Get the dimensions once for all
    MK = blur_width  # PSF size
    M = pic.shape[0]  # Image height
    N = pic.shape[1]  # Image width
    C = 3  # RGB channels

    # Define a minimum mask size for the blind deconvolution
    if mask is None:
        # By default, set the mask in the center of the picture
        mask = [M // 2, N // 2]

    # Create the coordinates of the masking box
    top = mask[0] - mask_size // 2
    bottom = mask[0] + mask_size // 2
    left = mask[1] - mask_size // 2
    right = mask[1] + mask_size // 2

    print("Mask size :", (bottom - top + 1), "×", (right - left + 1))

    if top > 0 and bottom < M and left > 0 and right < N:
        pass
    else:
        raise ValueError(
            "The mask is outside the picture boundaries. Move its center inside or reduce the blur size."
        )

    # Adjust the blur type.
    # For motion blur, we enforce the RGB channels of the PSF to have the same coefficients
    # to help the solver converge.
    if blur == "static":
        correlation = False
    elif blur == "motion":
        correlation = True
    else:
        raise ValueError("Unknown blur type: %s" % blur)

    # Rescale the tolerance
    tolerance /= 100.

    # Make the picture dimensions odd to avoid ringing on the border of even pictures. We just replicate the last row/column
    odd_vert = False
    odd_hor = False

    if pic.shape[0] % 2 == 0:
        pic = pad_image(pic, ((1, 0), (0, 0))).astype(np.float32)
        odd_vert = True
        print("Padded vertically")

    if pic.shape[1] % 2 == 0:
        pic = pad_image(pic, ((0, 0), (1, 0))).astype(np.float32)
        odd_hor = True
        print("Padded horizontally")

    # Construct a uniform PSF : ones everywhere
    psf = utils.uniform_kernel(blur_width)
    psf = np.dstack((psf, psf, psf))

    # Build the pyramid
    images, kernels = build_pyramid(blur_width, confidence)

    # Convergence flag
    # When a pyramid step does not converge within the fixed number of iterations,
    # it is usually because the blur size is ill-chosen, so the flag is raised
    # and the deconvolution is stopped
    convergence_flag = False

    try:

        # Launch the pyramid deconvolution
        for case in ["blind", "non-blind"]:
            print("\n===== %s DECONVOLUTION =====" % case)

            deblured_image = pic.copy()

            # The algorithm is designed to make lambda converge no matter what
            # But a kick on the right path is nice
            lambd = confidence * 1000
            p_temp = p
            norm_temp = norm

            for i, k in zip(reversed(images), reversed(kernels)):
                # Still not sure if we should process the pyramid in a non-blind setup
                if True:  #case == "blind" or i == 1.:
                    print("======== Pyramid step %1.3f ========" % i)

                    # Compute the new sizes of the mask
                    temp_top = int(i * top)
                    temp_bottom = int(i * bottom)
                    temp_left = int(i * left)
                    temp_right = int(i * right)

                    # Make sure the mask dimensions will be odd and square
                    if int(temp_bottom - temp_top) % 2 == 0:
                        if int(temp_bottom - temp_top) < int(temp_right -
                                                             temp_left):
                            temp_bottom += 1
                        elif int(temp_bottom - temp_top) > int(temp_right -
                                                               temp_left):
                            temp_top += 1
                        else:
                            temp_top -= 1

                    if int(temp_right - temp_left) % 2 == 0:
                        if int(temp_bottom - temp_top) < int(temp_right -
                                                             temp_left):
                            temp_left += 1
                        else:
                            temp_right += 1

                    # Compute the new size of the picture
                    temp_width = int(np.floor(i * N))
                    temp_height = int(np.floor(i * M))

                    # Ensure oddity on the picture
                    if temp_width % 2 == 0:
                        temp_width += 1
                    if temp_height % 2 == 0:
                        temp_height += 1

                    shape = (temp_height, temp_width, 3)

                    # Resize the blurred and deblurred images and the PSF from the previous step
                    temp_blurry_image = resize(pic,
                                               shape,
                                               order=3,
                                               mode="edge",
                                               preserve_range=True).astype(
                                                   np.float32)
                    deblured_image = resize(deblured_image,
                                            shape,
                                            order=3,
                                            mode="edge",
                                            preserve_range=True).astype(
                                                np.float32)

                    if case == "blind":
                        psf_copy = resize(psf, (k, k, 3),
                                          order=3,
                                          mode="edge",
                                          preserve_range=True).astype(
                                              np.float32)
                        dc.normalize_kernel(psf_copy, k)
                    else:
                        psf_copy = psf.copy()
                        k = kernels[0]

                    # Extra safety padding - Remember the gradient is not evaluated on borders
                    temp_blurry_image = pad_image(temp_blurry_image,
                                                  (1, 1)).astype(np.float32)
                    deblured_image = pad_image(deblured_image,
                                               (1, 1)).astype(np.float32)

                    # Pad for FFT
                    pad = int(np.floor(k / 2))

                    # Debug
                    print("Image size", temp_blurry_image.shape)
                    print("u size", deblured_image.shape)
                    print("Mask size", (temp_bottom - temp_top),
                          (temp_right - temp_left))
                    print("PSF size", psf_copy.shape)

                    # Disallow tolerance at the lower pyramid scales:
                    # noise created while downscaled turns into smudges once upscaled
                    if i == 1.:
                        tolerance_temp = tolerance
                    else:
                        tolerance_temp = 0

                    # Make a blind Richardson-Lucy deconvolution on the RGB signal
                    if case == "blind":
                        deblured_image[temp_top - 1:temp_bottom + 1,
                                       temp_left - 1:temp_right + 1,
                                       ...] = dc.richardson_lucy_MM(
                                           temp_blurry_image[temp_top -
                                                             1:temp_bottom + 1,
                                                             temp_left -
                                                             1:temp_right + 1,
                                                             ...],
                                           deblured_image[temp_top - pad -
                                                          1:temp_bottom + pad +
                                                          1, temp_left - pad -
                                                          1:temp_right + pad +
                                                          1, ...],
                                           psf_copy,
                                           pad + 1,
                                           temp_bottom - temp_top - pad - 1,
                                           pad + 1,
                                           temp_bottom - temp_top - pad - 1,
                                           0,
                                           temp_bottom - temp_top + 2,
                                           temp_right - temp_left + 2,
                                           3,
                                           k,
                                           iterations,
                                           step,
                                           lambd,
                                           blind=True,
                                           p=p_temp,
                                           correlation=correlation,
                                           order=order,
                                           norm=2,
                                           priority=0,
                                           refocus=refocus)
                        # Update the PSF
                        psf = psf_copy.copy()

                    elif case != "blind" and preview:
                        deblured_image[temp_top - 1:temp_bottom + 1,
                                       temp_left - 1:temp_right + 1,
                                       ...] = dc.richardson_lucy_MM(
                                           temp_blurry_image[temp_top -
                                                             1:temp_bottom + 1,
                                                             temp_left -
                                                             1:temp_right + 1,
                                                             ...],
                                           deblured_image[temp_top - pad -
                                                          1:temp_bottom + pad +
                                                          1, temp_left - pad -
                                                          1:temp_right + pad +
                                                          1, ...],
                                           psf_copy,
                                           pad + 1,
                                           temp_bottom - temp_top - pad - 1,
                                           pad + 1,
                                           temp_bottom - temp_top - pad - 1,
                                           tolerance_temp,
                                           temp_bottom - temp_top + 2,
                                           temp_right - temp_left + 2,
                                           3,
                                           k,
                                           iterations,
                                           step,
                                           lambd,
                                           blind=False,
                                           p=p_temp,
                                           order=order,
                                           norm=2,
                                           priority=priority,
                                           refocus=refocus)
                    else:
                        # Pad for FFT
                        deblured_image = pad_image(
                            deblured_image, (pad, pad)).astype(np.float32)
                        deblured_image[pad:-pad, pad:-pad,
                                       ...] = dc.richardson_lucy_MM(
                                           temp_blurry_image,
                                           deblured_image,
                                           psf_copy,
                                           pad + 1,
                                           temp_bottom - temp_top - pad - 1,
                                           pad + 1,
                                           temp_bottom - temp_top - pad - 1,
                                           tolerance_temp,
                                           temp_height + 2,
                                           temp_width + 2,
                                           3,
                                           k,
                                           iterations,
                                           step,
                                           lambd,
                                           blind=False,
                                           p=p_temp,
                                           order=order,
                                           norm=2,
                                           priority=priority,
                                           refocus=refocus)

                        # Unpad the FFT margins because this image is resized and reused in the next step
                        deblured_image = deblured_image[pad:-pad, pad:-pad,
                                                        ...]

                    if convergence_flag:
                        raise RuntimeError(
                            "The optimization didn't converge. It usually means your blur size is ill-chosen."
                        )

                    # Remove the extra safety padding
                    temp_blurry_image = temp_blurry_image[1:-1, 1:-1, ...]
                    deblured_image = deblured_image[1:-1, 1:-1, ...]

                    # Update the norm
                    norm_temp /= 2

                    k_prec = k

            # Display the control preview
            if display and case == "blind":
                psf_check = (psf - np.amin(psf)) / (np.amax(psf) -
                                                    np.amin(psf))
                plt.imshow(psf_check,
                           interpolation="lanczos",
                           filternorm=1,
                           aspect="equal",
                           vmin=0,
                           vmax=1)
                plt.show()
                plt.imshow((deblured_image[top:bottom, left:right, ...] *
                            255).astype(np.uint8),
                           interpolation="lanczos",
                           filternorm=1,
                           aspect="equal",
                           vmin=0,
                           vmax=255)
                plt.show()

    except KeyboardInterrupt:
        # Nasty trick to be able to hard-shutdown the iterations and still get the output
        # Don't try this at home
        # Seriously, don't.
        pass

    # Clip extreme values
    np.clip(deblured_image, 0., 1., out=deblured_image)

    # Redo the gamma corrector
    deblured_image = deblured_image**(2.2)

    # Convert to 16 bits RGB
    deblured_image = deblured_image * (2**16 - 1)

    # Save the pic
    if preview:
        filename = filename + "-preview"
        deblured_image = deblured_image[top:bottom, left:right, ...]
    else:
        # if the picture has been padded to make it odd, unpad it to get the original size
        if odd_hor:
            deblured_image = deblured_image[:, 1:, ...]
        if odd_vert:
            deblured_image = deblured_image[1:, :, ...]

        # Remove the extra pad
        deblured_image = deblured_image[1:-1, 1:-1, ...]

    utils.save(deblured_image, filename, dest_path)
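
A minimal usage sketch for this second variant, assuming the image comes in as a NumPy array via tifffile (the file name, destination path and mask centre are hypothetical):

import tifffile

# Hypothetical call: preview the deblurring of a 16-bit TIFF on a 255x255 patch
# centred on pixel (800, 1200) before committing to the full picture.
pic = tifffile.imread("blurry.tif")
deblur_module(pic,
              "blurry-deblurred",
              "/tmp",
              9,
              confidence=20,
              quality="high",
              bits=16,
              mask=[800, 1200],
              preview=True)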