Example #1
def OpenAndPreProcessImage(path,
                           norm=True,
                           blur=None,
                           copyOrig=False,
                           quantize=None):
    im = Image.open(path).convert('L')
    im = np.asarray(im)  #[125:375,125:375] #Take a smaller region for speed

    # Also return an unprocessed copy of original image, if required
    im_orig = None
    if copyOrig:
        im_orig = im.copy()

    if blur and blur > 0:
        im = filters.gaussian_filter(im, blur)
    # Normalize
    if norm:
        im = filters.normalize(im, 0.0, None)
    else:
        im = im / 255.  #convert to floats without normalizing

    #Quantize into a number of intensity bins
    if quantize:
        im = np.rint(im * (quantize - 1)) / (quantize - 1)

    return im, im_orig
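This example assumes PIL's Image, NumPy as np, and a project-local filters module providing gaussian_filter and normalize. A hypothetical call (the path and parameter values below are placeholders, not taken from the source) might look like:

im, im_orig = OpenAndPreProcessImage('slice.png',   # placeholder path
                                     norm=True,
                                     blur=2.0,       # blur strength passed to filters.gaussian_filter
                                     copyOrig=True,
                                     quantize=16)    # 16 intensity bins
print(im.dtype, im.shape)  # float image; the unprocessed copy is in im_orig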
def own_canny(image, sigma=0):
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # compute the median of the single channel pixel intensities
    # v = np.median(image)

    # apply automatic Canny edge detection using the computed median
    # lower = int(max(0, (1.0 - sigma) * v))
    # upper = int(min(255, (1.0 + sigma) * v))

    #create gaussian filter
    sigma = np.array([[1, 0], [0, 1]])
    gaussian = gaussian_filter(1, sigma)

    # calculate the Gaussian derivative for faster computation
    Sx, Sy = sobel_filters()
    d_x = cross_correlation(gaussian, Sx)
    d_y = cross_correlation(gaussian, Sy)
    abs_d_x = cv2.convertScaleAbs(d_x)
    abs_d_y = cv2.convertScaleAbs(d_y)
    grad = cv2.addWeighted(abs_d_x, 0.5, abs_d_y, 0.5, 0)

    # filter the image with the combined gradient kernel to highlight edges
    edged = cross_correlation(image, grad)

    # return the edged image
    return edged
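own_canny depends on project-local helpers (sobel_filters, cross_correlation, and a custom gaussian_filter) that are not shown. As a rough sketch of what such a cross_correlation helper might look like (the (image, kernel) signature and the valid-mode output size are assumptions):

import numpy as np

def cross_correlation(image, kernel):
    # Slide the kernel over the image and sum the elementwise products
    # (valid mode: no padding, so the output is smaller than the input).
    ih, iw = image.shape
    kh, kw = kernel.shape
    out = np.zeros((ih - kh + 1, iw - kw + 1))
    for y in range(out.shape[0]):
        for x in range(out.shape[1]):
            out[y, x] = np.sum(image[y:y + kh, x:x + kw] * kernel)
    return out

A same-size result, as the final edged image seems to require, would additionally need the input zero-padded by the kernel radius.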
Example #3
def harris_method(image,
                  image_height,
                  image_width,
                  threshold,
                  is_colored=False):
    sigma = 2
    pixels = np.array(image)
    new_image = np.zeros((image_height, image_width, 3), dtype=np.uint8)
    images = sobel_detection(image, image_height, image_width, False,
                             is_colored)
    horizontal_image = images[0]
    vertical_image = images[1]
    ix_squared = horizontal_image * horizontal_image
    ix_squared = gaussian_filter(ix_squared, image_height, image_width, sigma,
                                 False, False, 7)
    iy_squared = vertical_image * vertical_image
    iy_squared = gaussian_filter(iy_squared, image_height, image_width, sigma,
                                 False, False, 7)
    cross_product = horizontal_image * vertical_image
    cross_product = gaussian_filter(cross_product, image_height, image_width,
                                    sigma, False, False, 7)
    cross_product_squared = cross_product * cross_product
    trace = ix_squared + iy_squared
    k = 0.04
    r = ix_squared * iy_squared - cross_product_squared - k * (trace * trace)
    # min_value = int(np.max(r) * threshold)
    min_value = threshold
    for y in range(0, image_height):
        for x in range(0, image_width):
            if r[y, x] >= min_value:
                new_image[y, x, 2] = constants.MAX_COLOR_VALUE
            else:
                if is_colored:
                    new_image[y, x, 0] = pixels[y, x, 0]
                    new_image[y, x, 1] = pixels[y, x, 1]
                    new_image[y, x, 2] = pixels[y, x, 2]
                else:
                    new_image[y, x, 0] = pixels[y, x]
                    new_image[y, x, 1] = pixels[y, x]
                    new_image[y, x, 2] = pixels[y, x]
    save_colored_image(new_image, save_path + "harris.ppm")
    img = Image.fromarray(new_image, 'RGB')
    img.show()
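The response computed above is the standard Harris measure R = det(M) - k * trace(M)^2, where det(M) = Ix^2 * Iy^2 - (Ix*Iy)^2 and the gradient products are smoothed with a Gaussian window. For reference only, a self-contained sketch of the same response using scipy.ndimage (SciPy calls substitute here for the example's project-local sobel_detection and gaussian_filter):

import numpy as np
from scipy import ndimage

def harris_response(gray, sigma=2.0, k=0.04):
    gray = gray.astype(float)
    ix = ndimage.sobel(gray, axis=1)           # horizontal gradient
    iy = ndimage.sobel(gray, axis=0)           # vertical gradient
    ixx = ndimage.gaussian_filter(ix * ix, sigma)
    iyy = ndimage.gaussian_filter(iy * iy, sigma)
    ixy = ndimage.gaussian_filter(ix * iy, sigma)
    det = ixx * iyy - ixy * ixy
    trace = ixx + iyy
    return det - k * trace * trace             # threshold this map to mark corners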
def generate_gaussian_filter_input():
    interface = InterfaceInfo.get_instance()
    if interface.current_image is not None:
        interface.delete_widgets(interface.buttons_frame)
        ttk.Label(interface.buttons_frame, text="Sigma", background=constants.TOP_COLOR).grid(row=0, column=0)
        sigma = Entry(interface.buttons_frame)
        sigma.grid(row=0, column=1)
        generate_noise = ttk.Button(interface.buttons_frame, text="Apply",
                                    command=lambda: gaussian_filter(interface.current_image, constants.WIDTH,
                                                                    constants.HEIGHT, int(sigma.get())))
        generate_noise.grid(row=1, column=0)
    else:
        interface.reset_parameters()
        messagebox.showerror(title="Error", message="You must upload an image to apply the Gaussian filter")
Example #5
def preProcessImage(im, norm=True, blur=None, equalize=False, quantize=None):
    """Take a uint8 ndarray representing an image and preprocess it according to arguments.
    Note: returns a floating point (0-1) image.
    """

    #Convert to float to avoid any overflow or rounding issues
    im = np.array(im, dtype='float64')
    if blur and blur > 0:
        im = filters.gaussian_filter(im, blur)

    if norm:
        im = filters.normalize(im, 0.0, None)
    else:
        im = im/255. #convert to floats between 0 and 1 without normalizing

    if equalize:        
        im = filters.image_histogram_equalization(im)

    if quantize:
        im = np.rint(im * (quantize-1))/(quantize-1)
    
    return im
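The quantize step maps a [0, 1] image onto quantize evenly spaced intensity levels. A tiny worked example of that one line, assuming NumPy:

import numpy as np

im = np.array([0.0, 0.1, 0.49, 0.51, 0.9, 1.0])
quantize = 4
print(np.rint(im * (quantize - 1)) / (quantize - 1))
# values snap to the four levels 0, 1/3, 2/3 and 1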
Example #6
def preProcessImageAndCopy(path, norm=True, blur=None):
    im = Image.open(path).convert('L')
    im = np.asarray(im)  #[125:375,125:375] #Take a smaller region for speed

    # Keep unmodified copies of original images
    im_orig = im.copy()

    if blur and blur > 0:
        im = filters.gaussian_filter(im, blur)
    # Normalize
    if norm:
        im = filters.normalize(im, 0.0, None)
    else:
        im = im / 255.  #convert to floats without normalizing

    # Normalizing gives us values between 0 and 1. Now quantize into 4 bits
    # to reduce the amount of noise in the mutual information calculation and
    # reduce the time taken to calculate it, too.
    # Instead, do this later on in the distance measure.

    # im = np.rint(im*63).astype('uint8')

    return im, im_orig
Example #7
    def initialize(self, pyramid_images_output_path=None):
        if len(self.pyramid_factors) == 0:
            self.add_pyramid_level(1, 0.0)
        if len(self.initial_transforms) == 0:
            self.add_initial_transform(AffineTransform(self.dim))

        ### Preprocessing

        pyramid_levels = len(self.pyramid_factors)

        for i in range(pyramid_levels):
            factor = self.pyramid_factors[i]

            ref_resampled = filters.downsample(
                filters.gaussian_filter(self.ref_im, self.pyramid_sigmas[i]),
                factor)
            flo_resampled = filters.downsample(
                filters.gaussian_filter(self.flo_im, self.pyramid_sigmas[i]),
                factor)

            ref_mask_resampled = filters.downsample(self.ref_mask, factor)
            flo_mask_resampled = filters.downsample(self.flo_mask, factor)

            ref_resampled = filters.normalize(ref_resampled, 0.0,
                                              ref_mask_resampled)
            flo_resampled = filters.normalize(flo_resampled, 0.0,
                                              flo_mask_resampled)

            if pyramid_images_output_path is not None and ref_resampled.ndim == 2:
                scipy.misc.imsave(
                    '%sref_resampled_%d.png' %
                    (pyramid_images_output_path, i + 1), ref_resampled)
                scipy.misc.imsave(
                    '%sflo_resampled_%d.png' %
                    (pyramid_images_output_path, i + 1), flo_resampled)

            if self.ref_weights is None:
                ref_weights = np.zeros(ref_resampled.shape)
                ref_weights[ref_mask_resampled] = 1.0
            else:
                ref_weights = filters.downsample(self.ref_weights, factor)
            if self.flo_weights is None:
                flo_weights = np.zeros(flo_resampled.shape)
                flo_weights[flo_mask_resampled] = 1.0
            else:
                flo_weights = filters.downsample(self.flo_weights, factor)

            ref_diag = np.sqrt(
                np.square(np.array(ref_resampled.shape) *
                          self.ref_spacing).sum())
            flo_diag = np.sqrt(
                np.square(np.array(flo_resampled.shape) *
                          self.flo_spacing).sum())

            q_ref = QuantizedImage(ref_resampled,
                                   self.alpha_levels,
                                   ref_weights,
                                   self.ref_spacing * factor,
                                   remove_zero_weight_pnts=True)
            q_flo = QuantizedImage(flo_resampled,
                                   self.alpha_levels,
                                   flo_weights,
                                   self.flo_spacing * factor,
                                   remove_zero_weight_pnts=True)

            tf_ref = alpha_amd.AlphaAMD(q_ref,
                                        self.alpha_levels,
                                        ref_diag,
                                        self.ref_spacing * factor,
                                        ref_mask_resampled,
                                        ref_mask_resampled,
                                        interpolator_mode='linear',
                                        dt_fun=None,
                                        mask_out_edges=True)
            tf_flo = alpha_amd.AlphaAMD(q_flo,
                                        self.alpha_levels,
                                        flo_diag,
                                        self.flo_spacing * factor,
                                        flo_mask_resampled,
                                        flo_mask_resampled,
                                        interpolator_mode='linear',
                                        dt_fun=None,
                                        mask_out_edges=True)

            symmetric_measure = True
            squared_measure = False

            sym_dist = symmetric_amd_distance.SymmetricAMDDistance(
                symmetric_measure=symmetric_measure,
                squared_measure=squared_measure)

            sym_dist.set_ref_image_source(q_ref)
            sym_dist.set_ref_image_target(tf_ref)

            sym_dist.set_flo_image_source(q_flo)
            sym_dist.set_flo_image_target(tf_flo)

            sym_dist.set_sampling_fraction(self.sampling_fraction)

            sym_dist.initialize()

            self.distances.append(sym_dist)
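Each pyramid level above is built by Gaussian-smoothing the image, downsampling it by the pyramid factor, and normalizing it over the resampled mask, all through the project-local filters module. A rough standalone sketch of that blur / decimate / rescale idea with scipy.ndimage (integer factors assumed, and the min-max rescaling is only a stand-in for filters.normalize, whose exact behaviour is not shown here):

import numpy as np
from scipy import ndimage

def pyramid_level(im, sigma, factor):
    # Smooth before decimating to limit aliasing, then keep every factor-th sample.
    smoothed = ndimage.gaussian_filter(im.astype(float), sigma)
    down = smoothed[::factor, ::factor]
    lo, hi = down.min(), down.max()
    # Rescale to [0, 1]; a constant image maps to zeros.
    return (down - lo) / (hi - lo) if hi > lo else np.zeros_like(down)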
    def initialize(self, pyramid_images_output_path=None):
        if len(self.pyramid_factors) == 0:
            self.add_pyramid_level(1, 0.0)
        if len(self.initial_transforms) == 0:
            self.add_initial_transform(AffineTransform(self.dim))

        ch = len(self.ref_im)
        #        print(ch)
        # require same number of channels
        assert (ch == len(self.flo_im))

        ref_input = self.ref_im
        flo_input = self.flo_im
        if self.channel_mode == 'decompose_pre':
            lev = None
            #lev = self.alpha_levels
            ref_input = filters.fidt(ref_input, lev)  #self.alpha_levels)
            flo_input = filters.fidt(flo_input, lev)  #self.alpha_levels)
            ch = len(ref_input)
        ### Preprocessing

        pyramid_levels = len(self.pyramid_factors)
        percentile = 0.01

        for i in range(pyramid_levels):
            factor = self.pyramid_factors[i]

            ref_mask_resampled = filters.downsample(self.ref_mask, factor)
            flo_mask_resampled = filters.downsample(self.flo_mask, factor)

            ref_resampled = []
            flo_resampled = []

            for k in range(ch):
                ref_k = filters.downsample(
                    filters.gaussian_filter(ref_input[k],
                                            self.pyramid_sigmas[i]), factor)
                flo_k = filters.downsample(
                    filters.gaussian_filter(flo_input[k],
                                            self.pyramid_sigmas[i]), factor)
                #if self.channel_mode == 'sum':
                #ref_k = filters.normalize(ref_k, percentile, ref_mask_resampled)
                #flo_k = filters.normalize(flo_k, percentile, flo_mask_resampled)
                ref_resampled.append(ref_k)
                flo_resampled.append(flo_k)

            if self.channel_mode == 'sum' or self.channel_mode == 'decompose_pre':
                pass
            elif self.channel_mode == 'decompose':
                ref_resampled = filters.fidt(ref_resampled, self.alpha_levels)
                flo_resampled = filters.fidt(flo_resampled, self.alpha_levels)
                for k in range(len(ref_resampled)):
                    ref_resampled[k] = filters.normalize(
                        ref_resampled[k], percentile, ref_mask_resampled)
                    flo_resampled[k] = filters.normalize(
                        flo_resampled[k], percentile, flo_mask_resampled)

            #if pyramid_images_output_path is not None and ref_resampled[0].ndim == 2:
            #    scipy.misc.imsave('%sref_resampled_%d.png' % (pyramid_images_output_path, i+1), ref_resampled)
            #    scipy.misc.imsave('%sflo_resampled_%d.png' % (pyramid_images_output_path, i+1), flo_resampled)

            if self.ref_weights is None:
                ref_weights = np.zeros(ref_resampled[0].shape)
                ref_weights[ref_mask_resampled] = 1.0
            else:
                ref_weights = filters.downsample(self.ref_weights, factor)
            if self.flo_weights is None:
                flo_weights = np.zeros(flo_resampled[0].shape)
                flo_weights[flo_mask_resampled] = 1.0
            else:
                flo_weights = filters.downsample(self.flo_weights, factor)

            ref_diag = np.sqrt(
                np.square(np.array(ref_resampled[0].shape) *
                          self.ref_spacing).sum())
            flo_diag = np.sqrt(
                np.square(np.array(flo_resampled[0].shape) *
                          self.flo_spacing).sum())

            dists = []

            for k in range(len(ref_resampled)):
                q_ref = QuantizedImage(ref_resampled[k],
                                       self.alpha_levels,
                                       ref_weights,
                                       self.ref_spacing * factor,
                                       remove_zero_weight_pnts=True)
                q_flo = QuantizedImage(flo_resampled[k],
                                       self.alpha_levels,
                                       flo_weights,
                                       self.flo_spacing * factor,
                                       remove_zero_weight_pnts=True)

                if self.squared_measure:
                    dt_fun = alpha_amd.edt_sq
                else:
                    dt_fun = None

                tf_ref = alpha_amd.AlphaAMD(q_ref,
                                            self.alpha_levels,
                                            ref_diag,
                                            self.ref_spacing * factor,
                                            ref_mask_resampled,
                                            ref_mask_resampled,
                                            interpolator_mode='linear',
                                            dt_fun=dt_fun,
                                            mask_out_edges=True)
                tf_flo = alpha_amd.AlphaAMD(q_flo,
                                            self.alpha_levels,
                                            flo_diag,
                                            self.flo_spacing * factor,
                                            flo_mask_resampled,
                                            flo_mask_resampled,
                                            interpolator_mode='linear',
                                            dt_fun=dt_fun,
                                            mask_out_edges=True)

                symmetric_measure = True
                squared_measure = False  #self.squared_measure

                sym_dist = symmetric_amd_distance.SymmetricAMDDistance(
                    symmetric_measure=symmetric_measure,
                    squared_measure=squared_measure)

                sym_dist.set_ref_image_source(q_ref)
                sym_dist.set_ref_image_target(tf_ref)

                sym_dist.set_flo_image_source(q_flo)
                sym_dist.set_flo_image_target(tf_flo)

                sym_dist.set_sampling_fraction(self.sampling_fraction)

                sym_dist.initialize()

                dists.append(sym_dist)

            self.distances.append(dists)
Example #9
    def initialize(self, pyramid_images_output_path=None, norm=True):
        """Initialize the registration framework: must be called before run().
        
        Prepare pyramid scheme by creating and saving downsampled versions of the images 
        for each pyramid level. Set up a distance measure (separate instance for each pyramid
        level, with the corresponding version of the images).
        
        Args:
            pyramid_images_output_path: slash-terminated string specifying the folder in which to
            save the downsampled images. Default None. If None, images are not saved. Only
            applicable for 2D images.
            norm: if True (default), normalize each resampled image over its mask before the
            distance measure is constructed.

        Other running parameters are set in __init__()
        """
        if len(self.pyramid_factors) == 0:
            self.add_pyramid_level(1, 0.0)
        if len(self.initial_transforms) == 0:
            self.add_initial_transform(transforms.AffineTransform(self.dim))
        #        while len(self.opt_opts['step_length']) < len(self.pyramid_factors):
        #            self.opt_opts['step_length'] = np.concatenate((self.opt_opts['step_length'], np.array([[0,1.0]])))

        ### Preprocessing

        pyramid_levels = len(self.pyramid_factors)

        for i in range(pyramid_levels):
            factor = self.pyramid_factors[i]

            ref_resampled = filters.downsample(
                filters.gaussian_filter(self.ref_im, self.pyramid_sigmas[i]),
                factor)
            flo_resampled = filters.downsample(
                filters.gaussian_filter(self.flo_im, self.pyramid_sigmas[i]),
                factor)

            ref_mask_resampled = filters.downsample(self.ref_mask, factor)
            flo_mask_resampled = filters.downsample(self.flo_mask, factor)

            if norm:
                ref_resampled = filters.normalize(ref_resampled, 0.0,
                                                  ref_mask_resampled)
                flo_resampled = filters.normalize(flo_resampled, 0.0,
                                                  flo_mask_resampled)

            if pyramid_images_output_path is not None and ref_resampled.ndim == 2:
                Image.fromarray(ref_resampled).convert(
                    'RGB').save(pyramid_images_output_path +
                                'ref_resampled_%d.png' % (i + 1))
                Image.fromarray(flo_resampled).convert(
                    'RGB').save(pyramid_images_output_path +
                                'flo_resampled_%d.png' % (i + 1))

            if self.ref_weights is None:
                ref_weights = np.zeros(ref_resampled.shape)
                ref_weights[ref_mask_resampled] = 1.0
            else:
                ref_weights = filters.downsample(self.ref_weights, factor)
            if self.flo_weights is None:
                flo_weights = np.zeros(flo_resampled.shape)
                flo_weights[flo_mask_resampled] = 1.0
            else:
                flo_weights = filters.downsample(self.flo_weights, factor)

#            #add default step length in case it wasn't specified,
#            #TODO can't the optimizer do this?
#            self.optimizer_opts['step_length'] = self.optimizer_opts.get('step_length',np.array([[0.1]*pyramid_levels]))

#            #DEBUG
#            if(False):
#                #Display each image with its mask to check all is ok
#                plt.subplot(121)
#                plt.imshow(np.hstack((ref_resampled, ref_mask_resampled)), cmap='gray')
#                plt.subplot(122)
#                plt.imshow(np.hstack((flo_resampled, flo_mask_resampled)), cmap='gray')
#                plt.show()
#            #END DEBUG


            dist_measure = self._make_dist_measure(ref_resampled, ref_mask_resampled, ref_weights, \
                                                  flo_resampled, flo_mask_resampled, flo_weights, factor)

            self.distances.append(dist_measure)
Example #10
def laplacian_processing(gray_img, tam_kernel=3):
    lapl = flt.laplacian_filter(tam_kernel) * np.transpose(flt.gaussian_filter(tam_kernel))
    return ndimage.convolve(gray_img, lapl)
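Example #10 builds a Laplacian-of-Gaussian style kernel from the project-local flt helpers and convolves it with the image. If only the filtered output is needed, scipy.ndimage also exposes a combined operator; a minimal alternative sketch (assuming a float grayscale array, and not the example's own kernel construction):

import numpy as np
from scipy import ndimage

def laplacian_of_gaussian(gray_img, sigma=1.0):
    # Gaussian smoothing and Laplacian applied in a single call.
    return ndimage.gaussian_laplace(np.asarray(gray_img, dtype=float), sigma=sigma)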