Example #1
import dippykit as dip

def load_image():
    '''Load the airplane and Brussels images, convert to float, and scale to [0, 255]'''
    # Image paths
    img1_path = "/home/harshbhate/Pictures/airplane_downsample_gray_square.jpg"
    img2_path = "/home/harshbhate/Pictures/brussels_downsample_gray_square.jpg"
    # Read images
    IMG1 = dip.image_io.im_read(img1_path)
    IMG2 = dip.image_io.im_read(img2_path)
    # Convert to float in [0, 1]
    IMG1 = dip.im_to_float(IMG1)
    IMG2 = dip.im_to_float(IMG2)
    # Scale to [0, 255]
    IMG1 *= 255
    IMG2 *= 255
    return [IMG1, IMG2]
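A minimal usage sketch for the loader above, assuming dippykit's plotting helpers are available and the hard-coded paths exist on disk:

# Sketch: display both loaded images side by side (paths are hard-coded above).
img1, img2 = load_image()
dip.figure()
dip.subplot(1, 2, 1)
dip.imshow(img1, 'gray')
dip.title('Airplane')
dip.subplot(1, 2, 2)
dip.imshow(img2, 'gray')
dip.title('Brussels')
dip.show()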
def sharpen(im, kernel):
    '''Sharpen a uint8 image by adding its rescaled Laplacian back to it'''
    if im.dtype != np.uint8:
        print('Fix data type to uint8')
        return None

    # Convert to float and scale to [0, 255]
    im = dip.im_to_float(im)
    im *= 255

    # Laplacian of the image (full convolution output)
    im_lap = dip.convolve2d(im, kernel)
    # Shift up to remove negative values
    im_lap = im_lap - np.min(im_lap)
    # Rescale to the [0, 255] range
    im_lap = 255 * (im_lap / np.max(im_lap))

    # Crop the convolution output back to the original size
    im_lap = dip.resample(im_lap, np.array([[1, 0], [0, 1]]), crop=True,
                          crop_size=(im.shape[0], im.shape[1]))

    # Add the Laplacian back in, normalize, and convert to uint8
    im_n = im + im_lap
    im_n = im_n - np.min(im_n)
    im_n = 255 * im_n / np.max(im_n)
    im_n = im_n.astype(np.uint8)
    im_lap = im_lap.astype(np.uint8)

    return im_n, im_lap
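A hedged usage sketch for sharpen(); the 4-neighbor Laplacian kernel and the input path are assumptions, since the original caller is not shown:

# Sketch: sharpen a uint8 image with a standard 4-neighbor Laplacian kernel.
# The kernel and the path below are assumptions, not from the original code.
laplacian = np.array([[ 0, -1,  0],
                      [-1,  4, -1],
                      [ 0, -1,  0]])
im = dip.im_read('/home/harshbhate/Pictures/cameraman.tif')  # im_read yields uint8 here
sharpened, lap = sharpen(im, laplacian)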
def fast_multiplicative_restore(self,
                                degraded_image,
                                h_param=35,
                                search_window_size=51):
    '''Denoise a float image with OpenCV's non-local means at a fixed h'''
    int_image = dip.float_to_im(degraded_image)
    return dip.im_to_float(
        cv2.fastNlMeansDenoising(int_image,
                                 h=h_param,
                                 searchWindowSize=search_window_size))

def param_search_multiplicative_restore(self, degraded_image,
                                        original_image):
    '''Search h in [1, 24] for the denoising strength that maximizes PSNR'''
    int_image = dip.float_to_im(degraded_image)
    psnr_max = None
    best_denoise = None
    for i in range(1, 25):
        cur_denoised = dip.im_to_float(
            cv2.fastNlMeansDenoising(int_image, h=i, searchWindowSize=31))
        cur_psnr = dip.PSNR(original_image, cur_denoised)
        if psnr_max is None or cur_psnr > psnr_max:
            best_denoise = cur_denoised
            psnr_max = cur_psnr
    return best_denoise
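Both methods are thin wrappers around OpenCV's non-local means denoiser; a standalone sketch of the underlying call, with an assumed input file:

import cv2
import dippykit as dip

# Sketch: denoise a uint8 image directly with OpenCV's non-local means.
# 'noisy.png' is a placeholder path, not from the original code.
noisy = dip.im_read('noisy.png')
denoised = cv2.fastNlMeansDenoising(noisy, h=35, searchWindowSize=51)
print(dip.PSNR(noisy, denoised))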
Example #5
def basic_image_ip(im_path, args, convert_to_float=True, normalize=True):
    '''Read an image, optionally convert it to float and normalize to [0, 255]'''
    if os.path.exists(im_path):
        X = dip.image_io.im_read(im_path)
    else:
        print(bcolors.FAIL + "File Path not found, aborting!" + bcolors.ENDC)
        sys.exit()
    if convert_to_float:
        X = dip.im_to_float(X)
        if args.verbose:
            print(bcolors.OKGREEN + "Converted Image to Float" + bcolors.ENDC)
        if normalize:
            if args.verbose:
                print("Normalizing")
            X *= 255
    return X
def load_image(args, convert_to_float=True, normalize=True):
    '''Loading the image and converting to float'''
    if args.verbose:
        msg1 = bcolors.OKBLUE + "Loading the image, converting to float and normalizing" + bcolors.ENDC
        msg2 = bcolors.OKBLUE + "Loading the image, converting to float" + bcolors.ENDC
        msg3 = bcolors.OKBLUE + "Loading the image" + bcolors.ENDC
        if convert_to_float and normalize:
            print(msg1)
        elif convert_to_float and not normalize:
            print(msg2)
        else:
            print(msg3)
    if os.path.exists(args.path):
        X = dip.image_io.im_read(args.path)
    else:
        print(bcolors.FAIL + "File Path not found, aborting!" + bcolors.ENDC)
        sys.exit()
    if convert_to_float:
        X = dip.im_to_float(X)
        if normalize:
            X *= 255
    return X
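Both loaders expect an argparse-style namespace with path and verbose fields; a minimal sketch of wiring one up (the field names come from the code above, the default path is a placeholder):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--path', default='input.png')  # placeholder path
parser.add_argument('--verbose', action='store_true')
args = parser.parse_args()
X = load_image(args)  # exits if args.path does not exist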
Example #7
def open_image(file):
    '''Read an image and collapse RGB to grayscale with ITU-R BT.601 luma weights'''
    three_layer_image = dip.im_to_float(dip.im_read(file))
    return np.dot(three_layer_image[..., :3], [0.2989, 0.5870, 0.1140])
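The dot product collapses the first three channels to luma using the ITU-R BT.601 weights; a quick sanity check that the weights sum to (almost) one, so intensities stay in range:

import numpy as np

weights = np.array([0.2989, 0.5870, 0.1140])
print(weights.sum())  # 0.9999, so a white RGB pixel maps to full gray intensity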
Example #8
"""
Problem Number: 2
"""

import dippykit as dip
import numpy as np

picture_link = "/home/harshbhate/Pictures/cameraman.tif"
save_link_1 = "/home/harshbhate/Pictures/cameraman_add.tif"
save_link_2 = "/home/harshbhate/Pictures/cameraman_square.tif"
save_link_3 = "/home/harshbhate/Pictures/cameraman_fourier.tif"

#(c) Reading an image
X = dip.im_read(picture_link)

#(d) Converting the image to normalized floating point space
X = dip.im_to_float(X)
X *= 255

#(e) Adding Constant to Image
Y = X + 75

#(f) Renormalize the image and convert to integer
Y = dip.float_to_im(Y/255)

#(g) Writing an image to disk
dip.im_write(Y, save_link_1)

#(h) Square intensity and write image to disk
Z = X**2
Z = dip.float_to_im(Z/255)
dip.im_write(Z, save_link_2)
def run_optical_flow(filepath_ind: int, OF_alg: int, param: int=100,
                     display: bool=True):
    '''Estimate optical flow between two frames and compare it with ground truth'''
    frame_1 = dip.im_read(filepaths[filepath_ind] + 'frame1.png')[:, :, :3]
    frame_2 = dip.im_read(filepaths[filepath_ind] + 'frame2.png')[:, :, :3]
    residual = np.abs(frame_1.astype(float) - frame_2.astype(float)) \
            .astype(np.uint8)
    frame_1_gray = dip.rgb2gray(frame_1)
    frame_2_gray = dip.rgb2gray(frame_2)
    PSNR_val = dip.PSNR(frame_1_gray, frame_2_gray)
    if display:
        # Plot the initial images
        dip.figure()
        dip.subplot(1, 3, 1)
        dip.imshow(frame_1)
        dip.title('Frame 1', fontsize='x-small')
        dip.subplot(1, 3, 2)
        dip.imshow(frame_2)
        dip.title('Frame 2', fontsize='x-small')
        dip.subplot(1, 3, 3)
        dip.imshow(residual)
        dip.title('Residual - PSNR: {:.2f} dB'.format(PSNR_val), fontsize='x-small')

    # Convert to grayscale for analysis
    frame_1 = dip.im_to_float(frame_1_gray)
    frame_2 = dip.im_to_float(frame_2_gray)
    start_time = default_timer()

    # ============================ EDIT THIS PART =============================
    # Forward-difference derivative masks (spatial x, spatial y, temporal t)
    mask_x = np.array([[-1, 1], [-1, 1]])
    mask_y = np.array([[-1, -1], [1, 1]])
    mask_t_2 = np.array([[-1, -1], [-1, -1]])
    mask_t_1 = np.array([[1, 1], [1, 1]])
    dIx = dip.convolve2d(frame_1, mask_x, mode='same', like_matlab=True)
    dIy = dip.convolve2d(frame_1, mask_y, mode='same', like_matlab=True)
    dIt = dip.convolve2d(frame_1, mask_t_1, mode='same', like_matlab=True) \
        + dip.convolve2d(frame_2, mask_t_2, mode='same', like_matlab=True)

    # ==========!!!!! DO NOT EDIT ANYTHING BELOW THIS !!!!!====================

    # Instantiate blank u and v matrices
    u = np.zeros_like(frame_1)
    v = np.zeros_like(frame_1)

    if 0 == OF_alg:
        print('The optical flow is estimated using Horn-Schunck...')
        u, v = horn_schuck(u, v, dIx, dIy, dIt, param)
    elif 1 == OF_alg:
        print('The optical flow is estimated using Lucas-Kanade...')
        u, v = lucas_kanade(u, v, dIx, dIy, dIt, param)
    else:
        raise ValueError('OF_alg must be either 0 or 1')

    end_time = default_timer()

    # Determine run time
    duration = end_time - start_time
    clock = [int(duration // 60), int(duration % 60)]
    print('Flow estimation time was {} minutes and {} seconds'
            .format(*clock))

    # Downsample for better visuals
    stride = 10
    m, n = frame_1.shape
    x, y = np.meshgrid(range(n), range(m))
    x = x.astype('float64')
    y = y.astype('float64')

    # Downsampled u and v
    u_ds = u[::stride, ::stride]
    v_ds = v[::stride, ::stride]

    # Coords for downsampled u and v
    x_ds = x[::stride, ::stride]
    y_ds = y[::stride, ::stride]

    # Estimated flow
    estimated_flow = np.stack((u, v), axis=2)

    # Read file for ground truth flow
    ground_truth_flow = read_flow_file(filepaths[filepath_ind] + 'flow1_2.flo')
    u_gt_orig = ground_truth_flow[:, :, 0]
    v_gt_orig = ground_truth_flow[:, :, 1]
    u_gt = np.where(np.isnan(u_gt_orig), 0, u_gt_orig)
    v_gt = np.where(np.isnan(v_gt_orig), 0, v_gt_orig)


    # Downsampled u_gt and v_gt
    u_gt_ds = u_gt[::stride, ::stride]
    v_gt_ds = v_gt[::stride, ::stride]
    if display:
        # Plot the optical flow field
        dip.figure()
        dip.subplot(2, 2, 1)
        dip.imshow(frame_2, 'gray')
        dip.quiver(x_ds, y_ds, u_ds, v_ds, color='r')
        dip.title('Estimated', fontsize='x-small')
        dip.subplot(2, 2, 2)
        dip.imshow(frame_2, 'gray')
        dip.quiver(x_ds, y_ds, u_gt_ds, v_gt_ds, color='r')
        dip.title('Ground truth', fontsize='x-small')
        # Draw colored velocity flow maps
        dip.subplot(2, 2, 3)
        dip.imshow(flow_to_color(estimated_flow))
        dip.title('Estimated', fontsize='x-small')
        dip.subplot(2, 2, 4)
        dip.imshow(flow_to_color(ground_truth_flow))
        dip.title('Ground truth', fontsize='x-small')

    # Normalization for metric computations
    normalize = lambda im: (im - np.min(im)) / (np.max(im) - np.min(im))
    un = normalize(u)
    un_gt = normalize(u_gt)
    un_gt[np.isnan(u_gt_orig)] = 1
    vn = normalize(v)
    vn_gt = normalize(v_gt)
    vn_gt[np.isnan(v_gt_orig)] = 1

    # Error calculations and displays
    EPE = ((un - un_gt) ** 2 + (vn - vn_gt) ** 2) ** 0.5
    AE = np.arccos(((un * un_gt) + (vn * vn_gt) + 1) /
                   (((un + vn + 1) * (un_gt + vn_gt + 1)) ** 0.5))
    EPE_nan_ratio = np.sum(np.isnan(EPE)) / EPE.size
    AE_nan_ratio = np.sum(np.isnan(AE)) / AE.size
    EPE_inf_ratio = np.sum(np.isinf(EPE)) / EPE.size
    AE_inf_ratio = np.sum(np.isinf(AE)) / AE.size
    print('Error nan ratio: EPE={:.2f}, AE={:.2f}'
            .format(EPE_nan_ratio, AE_nan_ratio))
    print('Error inf ratio: EPE={:.2f}, AE={:.2f}'
            .format(EPE_inf_ratio, AE_inf_ratio))
    EPE_avg = np.mean(EPE[~np.isnan(EPE)])
    AE_avg = np.mean(AE[~np.isnan(AE)])
    print('EPE={:.2f}, AE={:.2f}'.format(EPE_avg, AE_avg))

    if display:
        dip.show()

    return clock, EPE_avg, AE_avg
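A hedged driver for the function above; the module-level filepaths list and the helpers horn_schuck, lucas_kanade, read_flow_file, and flow_to_color are defined elsewhere in the original file:

# Sketch: run Horn-Schunck (OF_alg=0) on the first sequence, 100 iterations.
clock, epe_avg, ae_avg = run_optical_flow(filepath_ind=0, OF_alg=0,
                                          param=100, display=False)
print('EPE={:.3f}, AE={:.3f}'.format(epe_avg, ae_avg))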
Example #10
import dippykit as dip
import numpy as np


# Part (a)
I1 = dip.im_read('/home/harshbhate/Pictures/lena.png')  # Specify your image here
I1 = dip.im_to_float(I1) #Converting image to float
I1 *= 255 #Normalizing
# Part (b)
# Take the Fourier transform of the image
H1 = dip.fft2(I1)  # Fourier transform of I1
H1 = dip.fftshift(H1)  # center the DC component
H1 = np.log(np.abs(H1))  # log-magnitude spectrum for display
print("Shape of I1: " + str(I1.shape))
print("Shape of H1: " + str(H1.shape))
# Part (c)
# Downsample the image by 2 in both directions (and take its Fourier transform)
x_scaling = 2
y_scaling = 2
sampling_matrix = np.array([[x_scaling, 0],[0, y_scaling]])
I2 = dip.sampling.resample(I1, sampling_matrix)  # Downsampled I1
H2 = dip.fft2(I2)  # Fourier transform of I2
# Part (d)
# Pad the downsampled image's spectrum (H2) with zeros and then take its
# inverse Fourier transform. The spectrum is fftshifted first so the zeros
# land at the high frequencies, then shifted back before inversion.
H3 = np.pad(dip.fftshift(H2), (128, 128), 'constant', constant_values=(0, 0))  # Zero-padded H2
I3 = np.abs(dip.ifft2(np.fft.ifftshift(H3)))  # Interpolated image
I3 = I3 / np.amax(I3) * 255  # Normalizing to [0, 255]
#Converting everything back to int and normalizing
I1 = dip.float_to_im(I1/255)
I2 = dip.float_to_im(I2/255)
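The snippet ends before the interpolated image is saved or shown; a plausible continuation under the same conventions (the layout is an assumption):

# Hypothetical continuation: convert the interpolated image back to uint8
# and display all three images side by side.
I3_int = dip.float_to_im(I3 / 255)
dip.figure()
dip.subplot(1, 3, 1)
dip.imshow(I1, 'gray')
dip.title('Original')
dip.subplot(1, 3, 2)
dip.imshow(I2, 'gray')
dip.title('Downsampled')
dip.subplot(1, 3, 3)
dip.imshow(I3_int, 'gray')
dip.title('Interpolated')
dip.show()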
Example #11
def read_image(img_path):
    '''Function to read image, convert to float and normalize'''
    X = dip.image_io.im_read(img_path)
    X = dip.im_to_float(X)
    X *= 255
    return X
Example #12
def open_image_file_as_matrix(self, image_path):
    '''Read an image file, convert it to float, and collapse it to grayscale'''
    im = dip.im_read(image_path)
    float_im = dip.im_to_float(im)
    gray_im = self.rgb_to_gray(float_im)
    return gray_im
Example #13
import dippykit as dip
import numpy as np
import matplotlib.pyplot as plt
import os

path = "/Users/chuchu/Dropbox/gt_exp/ddb1_fundusimages/"  # Change the path to where you store the images
modes = [2, 3, 4]
for mode in modes:
    out_dir = 'upsample_img/' + str(mode)
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

for mode in modes:
    for filename in os.listdir(path):
        file_path = path + filename
        f = dip.im_read(file_path)  # read image
        f = dip.im_to_float(f)
        M = np.array([[1/mode, 0],
                      [0, 1/mode]])
        # Resample each channel separately; entries of 1/mode upsample by mode
        f_up_0 = dip.sampling.resample(f[:, :, 0], M, interp='bilinear')
        f_up_1 = dip.sampling.resample(f[:, :, 1], M, interp='bilinear')
        f_up_2 = dip.sampling.resample(f[:, :, 2], M, interp='bilinear')
        h, w = f_up_0.shape
        f_up = np.zeros((h, w, 3))
        f_up[:, :, 0] = f_up_0
        f_up[:, :, 1] = f_up_1
        f_up[:, :, 2] = f_up_2
        upsample_img = ('upsample_img/' + str(mode) + '/' + filename[:-4]
                        + '_up' + str(mode) + '.jpg')
        save_im = dip.float_to_im(f_up)
        dip.im_write(save_im, upsample_img)
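A minimal single-image version of the loop above (the filename is a placeholder), showing that a sampling matrix with entries 1/mode upsamples by mode in dippykit's resample:

# Sketch: upsample one RGB image by 2x using the same per-channel resampling.
f = dip.im_to_float(dip.im_read('fundus.jpg'))  # placeholder filename
M = np.array([[0.5, 0.0],
              [0.0, 0.5]])  # entries of 1/2 give 2x upsampling per axis
channels = [dip.sampling.resample(f[:, :, c], M, interp='bilinear')
            for c in range(3)]
f_up = np.stack(channels, axis=2)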
Example #14
import dippykit as dip
from math import exp
import numpy as np

# Proof of concept low pass filter
dim_filter = 800

h = np.zeros((dim_filter, dim_filter))
for u in range(dim_filter):
    for v in range(dim_filter):
        h[u, v] = exp(-(u + v) / (dim_filter * 0.3))

# Loading image
im = dip.im_read('images/UW_400.png')
im = dip.im_to_float(im)

if im.ndim > 2:
    im = np.mean(im, axis=2)  # collapse RGB to grayscale

F = dip.fft2(im)
print(h * F)  # element-wise frequency-domain product (shapes must match)

#Plot results
#Original spectra
dip.figure(1)

dip.subplot(2, 2, 1)
dip.imshow(im, 'gray')
dip.title('Original Image')
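The snippet stops after plotting the original image; a plausible completion that applies the filter and returns to the spatial domain, assuming the image is 800x800 so the shapes match:

# Hypothetical continuation: frequency-domain filtering and inverse transform.
G = h * F
im_filtered = np.real(dip.ifft2(G))
dip.subplot(2, 2, 2)
dip.imshow(im_filtered, 'gray')
dip.title('Low-pass Filtered Image')
dip.show()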
Example #15
## Define Down Sampling Matrix
D = 8
Down_Sampled_Matrix = np.array([[D, 0], [0, D]])

# Generate Training Set
Training_images = np.zeros([100, 64, 64])
Training_labels = np.zeros([100, 1])

if mode == 0:

    for i in range(50):

        path1 = "Tumor_Images/" + str(i + 1) + ".png"
        path2 = "Non_Tumor_Images/" + str(i + 1) + ".png"
        Training_images[i, :, :] = dip.resample(
            dip.im_to_float(dip.im_read(path1)), Down_Sampled_Matrix)
        Training_images[i + 50, :, :] = dip.resample(
            dip.im_to_float(dip.im_read(path2)), Down_Sampled_Matrix)
        Training_labels[i] = 1
        Training_labels[i + 50] = 0

elif mode == 1:

    for i in range(50):

        path1 = "Tumor_Images/" + str(i + 1) + ".png"
        path2 = "Non_Tumor_Images/" + str(i + 1) + ".png"

        X1 = SkullAndShape(cv2.imread(path1))
        X2 = SkullAndShape(cv2.imread(path2))
    def multiplicative_clustering_restore(self, degraded_image):
        block_size = 16

        max_h_value = 30

        blocks = self.break_image_into_blocks(degraded_image, block_size)
        variances = self.get_variances_of_blocks(blocks)
        medians = self.get_statistic_of_blocks(blocks, np.median)
        means = self.get_means_of_blocks(blocks)
        maxs = self.get_statistic_of_blocks(blocks,
                                            lambda b: np.percentile(b, 55))
        mins = self.get_statistic_of_blocks(blocks,
                                            lambda b: np.percentile(b, 45))

        scaler = preprocessing.StandardScaler()
        data = []
        for i in range(len(blocks)):
            data.append([
                variances[i],
                np.abs(medians[i] - means[i]), maxs[i] * 1.3, mins[i] * 1.3
            ])

        scaler.fit(data)

        output_image = np.zeros(
            [degraded_image.shape[0], degraded_image.shape[1]])
        cluster_image = np.zeros(
            [degraded_image.shape[0], degraded_image.shape[1]])
        h_param_image = np.zeros(degraded_image.shape)
        ch = ClusteringHandler(data)
        ch.cluster_data()
        clustered_labels = ch.labels
        cluster_centers = np.asarray(ch.cluster_centers)

        diff_c = cluster_centers[:, 2] - cluster_centers[:, 3]
        mean_c = cluster_centers[:, 1]
        median_c = cluster_centers[:, 1]
        var_centers = cluster_centers[:, 0]
        average_var = np.mean(var_centers)

        # Start from constant h values; these are overwritten by the
        # empirically tuned, variance-based formula below.
        h_params = np.linspace(max_h_value, max_h_value, ch.num_clusters)
        window_sizes = [21 for i in range(ch.num_clusters)]
        count = 0

        h_params = [
            a for _, a in sorted(zip(var_centers, h_params), reverse=True)
        ]
        # Empirically tuned mapping from cluster statistics to h values
        h_params = var_centers**.5 * diff_c**.55 * 20 * 25 * 23 * 30 * 15 / 30 / 300

        for m in range(0, degraded_image.shape[0], block_size):
            for n in range(0, degraded_image.shape[1], block_size):
                cur_percentile = clustered_labels[
                    m // block_size * (degraded_image.shape[0] // block_size) +
                    (n // block_size)]
                cluster_image[m:m + block_size, n:n +
                              block_size] = cur_percentile / ch.num_clusters
                h_param_image[m:m + block_size,
                              n:n + block_size] = h_params[cur_percentile]

        # dip.im_write(dip.float_to_im(h_param_image / np.max(h_param_image)), "./unsmoothed_hparams.jpg")
        h_param_image = self.blur_borders(h_param_image, cluster_image)
        # dip.im_write(dip.float_to_im(h_param_image / np.max(h_param_image)), "./smoothed.jpg")

        h_param_count = 0
        all_temp_outputs = []
        h_param_list = np.unique(h_param_image.flatten())

        manager = Manager()
        return_dict = manager.dict()  # collects the per-h denoised candidates
        blocked_pad_size = 32
        blocked_image = self.create_surrounding_block_fast(
            degraded_image, pad_width=blocked_pad_size)

        process_count = 0
        for c in h_param_list:
            temp_output_image = self.fast_multiplicative_restore(
                blocked_image, h_param=c, search_window_size=21
            )[blocked_pad_size:degraded_image.shape[0] + blocked_pad_size,
              blocked_pad_size:degraded_image.shape[1] + blocked_pad_size]
            return_dict[c] = temp_output_image
            h_param_count += 1

        output_image = output_image.flatten()
        h_param_image = h_param_image.flatten()
        for cur_param in return_dict.keys():
            idx = h_param_image == cur_param
            return_dict[cur_param] = return_dict[cur_param].flatten()
            output_image[idx] = return_dict[cur_param][idx]
        return dip.im_to_float(
            dip.float_to_im(output_image.reshape(
                degraded_image.shape))), cluster_image, h_params
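The final loop selects, per pixel, the denoised candidate produced with that pixel's h value. A standalone toy illustration of the same flatten-and-mask merge pattern (the arrays are made up for the example):

import numpy as np

# Toy merge: each pixel takes its value from the candidate matching its h.
h_map = np.array([[10.0, 10.0], [20.0, 20.0]])
candidates = {10.0: np.full((2, 2), 1.0), 20.0: np.full((2, 2), 2.0)}
out = np.zeros(4)
h_flat = h_map.flatten()
for h, cand in candidates.items():
    idx = h_flat == h
    out[idx] = cand.flatten()[idx]
print(out.reshape(2, 2))  # [[1. 1.], [2. 2.]]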