Example No. 1
def segmentation(input_met, markers, compactness):
    '''
    :param input_met: Single-band raster on which to run the segmentation (.tif)
    :param markers: Number of polygons desired in the segmentation (int)
    :param compactness: Influences the shape of the polygons, e.g. 0.02 (float)
    :return: Numpy array of the segmentation

    *** EDIT: Only watershed segmentation is implemented ***
    '''

    # Stack the band three times to simulate an RGB image
    band = img_as_float64(io.imread(input_met))
    img = np.dstack([band, band, band])

    # Segmentation
    gradient = sobel(rgb2gray(img))
    segments = watershed(
        gradient,
        markers=markers,
        connectivity=1,
        mask=None,
        compactness=compactness,
        watershed_line=False
    )  # depending on the sheet's area, "markers" can range from 7000 to 8000
    return segments
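A minimal usage sketch for the function above; the raster path and parameter values are hypothetical, and it assumes numpy plus the relevant skimage modules (io, util, color, filters, segmentation) are imported.

segments = segmentation('sheet_band1.tif', markers=7500, compactness=0.02)
print(segments.shape, segments.max())  # one integer label per polygon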
Example No. 2
def masking(fore: Image, back: Image, mask: Image) -> Image:
    if fore.size != back.size or back.size != mask.size:
        raise ValueError('Images must be of same dimensions')

    f = img_as_float64(fore.as_scikit())
    b = img_as_float64(back.as_scikit())
    m = img_as_float64(mask.as_scikit())

    return Image.from_scikit(m * f + (1 - m) * b)
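The return expression is standard alpha compositing. A self-contained numpy sketch of the same formula, independent of the Image wrapper class used above:

import numpy as np
from skimage.util import img_as_float64

fore = img_as_float64(np.full((2, 2), 200, dtype=np.uint8))  # bright foreground
back = img_as_float64(np.zeros((2, 2), dtype=np.uint8))      # black background
mask = np.array([[0.0, 1.0],
                 [0.5, 0.25]])
# Where the mask is 1 the foreground shows through; where it is 0 the background does.
print(mask * fore + (1 - mask) * back)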
Example No. 3
    def __call__(self, sample: dict) -> dict:
        image, label = sample['image'], sample['label']

        if len(image.shape) == 2:
            image = np.dstack([image] * 3)

        # swap color axis because
        # numpy image: H x W x C
        # torch image: C X H X W
        image = image.transpose((2, 0, 1))
        label = label.transpose((2, 0, 1))
        return {
            'image': torch.from_numpy(img_as_float64(image)),
            'label': torch.from_numpy(img_as_float64(label))
        }
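A standalone sketch of the same HWC-to-CHW conversion outside the transform class; the array shapes below are arbitrary:

import numpy as np
import torch
from skimage.util import img_as_float64

image = np.random.rand(4, 5)        # grayscale H x W
label = np.random.rand(4, 5, 1)     # H x W x C
if image.ndim == 2:
    image = np.dstack([image] * 3)  # replicate the single channel to three
image_t = torch.from_numpy(img_as_float64(image.transpose((2, 0, 1))))
label_t = torch.from_numpy(img_as_float64(label.transpose((2, 0, 1))))
print(image_t.shape, label_t.shape)  # torch.Size([3, 4, 5]) torch.Size([1, 4, 5])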
Example No. 4
def multi_band_blending(img1,
                        img2,
                        mask,
                        iterations=5,
                        m_ratio=0.8,
                        bandwidth_low=10,
                        bandwidth_high=20,
                        m_cutoff=12):
    def pyr_up(src, cutoff, ratio):
        lowpassed = cv.GaussianBlur(src, (2 * cutoff + 1, 2 * cutoff + 1),
                                    0,
                                    borderType=cv.BORDER_REFLECT101)
        src -= lowpassed
        return cv.resize(lowpassed, (0, 0), None, ratio, ratio, cv.INTER_AREA)

    def blend(src, tar, mask, bandwidth):
        mask = cv.GaussianBlur(mask, (2 * bandwidth + 1, 2 * bandwidth + 1),
                               0,
                               borderType=cv.BORDER_REFLECT101)[:, :, None]
        return src * mask + tar * (1 - mask)

    if len(mask.shape) == 3:
        mask = color.rgb2gray(mask)

    pyr_lap_1 = [util.img_as_float64(img1, force_copy=True)]
    pyr_lap_2 = [util.img_as_float64(img2, force_copy=True)]
    pyr_mask = [util.img_as_float64(mask, force_copy=True)]

    for i in range(iterations):
        pyr_lap_1.append(pyr_up(pyr_lap_1[i], m_cutoff, m_ratio))
        pyr_lap_2.append(pyr_up(pyr_lap_2[i], m_cutoff, m_ratio))
        pyr_mask.append(
            cv.resize(pyr_mask[i], (0, 0), None, m_ratio, m_ratio,
                      cv.INTER_NEAREST))

    pyr_lap_1[iterations] = blend(pyr_lap_1[iterations], pyr_lap_2[iterations],
                                  pyr_mask[iterations], bandwidth_low)

    for i in range(iterations - 1, -1, -1):
        pyr_lap_1[i] = blend(pyr_lap_1[i], pyr_lap_2[i], pyr_mask[i],
                             bandwidth_high)
        tmp = cv.resize(pyr_lap_1[i + 1],
                        pyr_lap_1[i].shape[:2][::-1],
                        interpolation=cv.INTER_AREA)
        pyr_lap_1[i] += tmp

    res = np.clip(pyr_lap_1[0], 0, 1)
    return res
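A minimal usage sketch with synthetic RGB inputs and a half-and-half mask (sizes and values are arbitrary); it assumes cv2 as cv, numpy as np, and skimage's util and color modules are imported as in the snippet:

import numpy as np

img_a = np.random.rand(256, 256, 3)
img_b = np.random.rand(256, 256, 3)
mask = np.zeros((256, 256))
mask[:, :128] = 1.0                 # left half from img_a, right half from img_b
blended = multi_band_blending(img_a, img_b, mask)
print(blended.shape, blended.min(), blended.max())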
Example No. 5
    def image_pyramid(self, image=None, pyramid_type='gaussian', levels=1):
        '''Function to generate the Gaussian/Laplacian pyramid of an image'''
        # Validate parameters
        if image is None:
            return None

        image = img_as_float64(image)

        # Generate Gaussian Pyramid
        current_layer = image
        gaussian = [current_layer]
        for i in range(levels):
            current_layer = cv.pyrDown(current_layer)
            gaussian.append(current_layer)

        if pyramid_type == 'gaussian':
            return gaussian
        # Generate Laplacian Pyramid
        elif pyramid_type == 'laplacian':
            current_layer = gaussian[levels-1]
            laplacian = [current_layer]
            for i in range(levels - 1, 0, -1):
                shape = (gaussian[i-1].shape[1], gaussian[i-1].shape[0])
                expand_gaussian = cv.pyrUp(gaussian[i], dstsize=shape)
                current_layer = cv.subtract(gaussian[i-1], expand_gaussian)
                laplacian.append(current_layer)
            laplacian.reverse()
            return laplacian
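A standalone sketch of the Gaussian branch of the same pyramid construction, using a synthetic input:

import cv2 as cv
import numpy as np
from skimage.util import img_as_float64

img = img_as_float64(np.random.rand(128, 128))
levels = 4
gaussian = [img]
for _ in range(levels):
    gaussian.append(cv.pyrDown(gaussian[-1]))  # each level halves the resolution
print([layer.shape for layer in gaussian])     # [(128, 128), (64, 64), (32, 32), (16, 16), (8, 8)]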
Example No. 6
def fixedThreshold(img, params):
    if img.ndim > 2:
        raise ValueError("Threshold only applicable to grayscale images!")

    thresh = params['thresh']
    maxval = 1
    thresh_type = params['thresh_type']
    thresh_algorithm = params['thresh_algorithm']

    # Look up the OpenCV flag by name (e.g. 'THRESH_BINARY')
    base_type = getattr(cv2, thresh_type)
    thresh_type = base_type

    if thresh_algorithm == 'none':
        pass
    elif thresh_algorithm == 'otsu':
        thresh_type += cv2.THRESH_OTSU
    elif thresh_algorithm == 'triangle':
        thresh_type += cv2.THRESH_TRIANGLE

    _, result = cv2.threshold(img, thresh, maxval, thresh_type)
    # Compare against the base flag so the dtype conversion still applies when
    # THRESH_OTSU or THRESH_TRIANGLE has been OR-ed into thresh_type
    if base_type in [cv2.THRESH_BINARY, cv2.THRESH_BINARY_INV]:
        return img_as_bool(result)
    elif base_type in [cv2.THRESH_TRUNC, cv2.THRESH_TOZERO, cv2.THRESH_TOZERO_INV]:
        return img_as_float64(result)
    else:
        return result
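A usage sketch with a hypothetical parameter dict; it assumes cv2 and skimage's img_as_bool/img_as_float64 are imported as in the snippet. OpenCV's threshold expects 8-bit or 32-bit float input, hence the float32 cast.

import numpy as np

params = {'thresh': 0.5, 'thresh_type': 'THRESH_BINARY', 'thresh_algorithm': 'none'}
img = np.random.rand(64, 64).astype(np.float32)
binary = fixedThreshold(img, params)
print(binary.dtype, binary.mean())  # boolean mask, roughly half True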
Example No. 7
def rescale(img, params):
    scale_x = params['scale_x']
    scale_y = params['scale_y']
    order = params['order']
    mode = params['mode']
    cval = params['cval']
    clip = params['clip']
    anti_aliasing = params['anti_aliasing']

    scale = (scale_x, scale_y)
    preserve_range = True
    if img.ndim == 2:
        multichannel = False
    else:
        multichannel = True

    retval = skimage.transform.rescale(img,
                                       scale,
                                       order=order,
                                       mode=mode,
                                       cval=cval,
                                       clip=clip,
                                       preserve_range=preserve_range,
                                       multichannel=multichannel,
                                       anti_aliasing=anti_aliasing)

    if img.dtype == np.uint8:
        return img_as_ubyte(retval)
    elif img.dtype == np.float64:
        return img_as_float64(retval)
    elif img.dtype == np.bool_:
        return img_as_bool(retval)
    else:
        return retval
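A usage sketch with a hypothetical parameter dict and a float input; it assumes a skimage version that still accepts the multichannel keyword (newer releases replaced it with channel_axis).

import numpy as np

params = {'scale_x': 0.5, 'scale_y': 0.5, 'order': 1, 'mode': 'reflect',
          'cval': 0, 'clip': True, 'anti_aliasing': True}
img = np.random.rand(100, 100)   # float64 input keeps the float branch of the dtype check
small = rescale(img, params)
print(small.shape, small.dtype)  # (50, 50) float64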
Example No. 8
    def saliency_map(self, image=None):
        '''Function to generate the Saliency Weight Map of an image'''
        # Validate parameters
        if image is None:
            return None
        
        image = img_as_float64(image)
        
        # Convert image to grayscale
        if len(image.shape) > 2:
            image = rgb2gray(image)

        # Apply Gaussian smoothing
        gaussian = cv.GaussianBlur(image, (5, 5), 0)

        # Mean intensity of the image
        image_mean = np.mean(image)

        # Generate Saliency Map: distance of the smoothed image from the mean intensity
        saliencymap = np.absolute(gaussian - image_mean)

        # Display result (if verbose)           
        if self.verbose is True:
            self.__show(
                images=[self.image, saliencymap],
                titles=['Original Image', 'Saliency Weight Map'],
                size=(15, 15),
                gray=True
            )
        
        return saliencymap
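A standalone sketch of the same saliency weight, |GaussianBlur(I) - mean(I)|, on a synthetic image:

import cv2 as cv
import numpy as np
from skimage.color import rgb2gray
from skimage.util import img_as_float64

rgb = np.random.rand(64, 64, 3)
gray = rgb2gray(img_as_float64(rgb))
saliency = np.absolute(cv.GaussianBlur(gray, (5, 5), 0) - gray.mean())
print(saliency.shape, saliency.max())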
Example No. 9
    def chromatic_map(self, image=None):
        '''Function to generate the Chromatic Weight Map of an image'''
        # Validate parameters
        if image is None:
            return None
        
        image = img_as_float64(image)
        
        # Convert to HSV colour space
        hsv = rgb2hsv(image)

        # Extract Saturation
        saturation = hsv[:, :, 1]
        max_saturation = 1.0
        sigma = 0.3
        
        # Generate Chromatic Map
        chromaticmap = np.exp(-1 * (((saturation - max_saturation) ** 2) / (2 * (sigma ** 2))))

        # Display result (if verbose)
        if self.verbose is True:
            self.__show(
                images=[self.image, chromaticmap],
                titles=['Original Image', 'Chromatic Weight Map'],
                size=(15, 15),
                gray=True
            )

        return chromaticmap
Example No. 10
def find_optimal_mask(img1, img2):
    img1 = util.img_as_float64(img1, force_copy=True)
    img2 = util.img_as_float64(img2, force_copy=True)

    overlap, img1_mask, img2_mask = get_overlap_mask(img1, img2)

    ag = np.argwhere(overlap)
    bound_min = np.min(ag, axis=0)
    bound_max = np.max(ag, axis=0)
    conf_dist = (bound_max[1] - bound_min[1]) // 20

    diff_mag = get_mag(img1 - img2)
    diff_mag[:, bound_min[1]:bound_min[1] + conf_dist] = 1
    diff_mag[:, bound_max[1] - conf_dist:bound_max[1]] = 1

    msk_ovr = 1 - 1.0 * get_shortest_path_mask(
        diff_mag[:, bound_min[1]:bound_max[1]])
    msk = 1.0 * img2_mask
    msk[:, bound_min[1]:bound_max[1]] *= msk_ovr[..., None]

    return msk
Example No. 11
def input_image():

    np_image = io.imread('assets/image_to_predict.jpeg', as_gray=True)
    np_image = util.img_as_float64(np_image)
    np_image = util.invert(np_image)
    np_image = transform.resize(np_image, (28, 28))
    np_image = (np.expand_dims(np_image, 0))

    prediction_single = model.predict(np_image)
    plot_value_array(0, prediction_single, test_labels)
    plot_image(0, prediction_single, test_labels, np_image)
    return f"saved: {np_image}"
Example No. 12
def warp_perspective(src, mat, sz, gpu=False, flt=True):
    res = None
    if gpu:
        src = util.img_as_ubyte(src)
        gpu_imgt = cv.cuda_GpuMat(src)
        tmp = cv.cuda.warpPerspective(gpu_imgt, mat, sz)
        res = tmp.download()
    else:
        res = cv.warpPerspective(src, mat, sz)
    if flt:
        return util.img_as_float64(res)
    else:
        return res
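A minimal CPU-path usage sketch; the translation homography is arbitrary, and cv2 as cv plus skimage.util are assumed to be imported as in the snippet. Note that OpenCV's dsize is (width, height).

import numpy as np

src = np.random.rand(100, 120, 3)
M = np.array([[1.0, 0.0, 10.0],   # shift 10 px right
              [0.0, 1.0, 5.0],    # shift 5 px down
              [0.0, 0.0, 1.0]])
warped = warp_perspective(src, M, (120, 100))
print(warped.shape, warped.dtype)  # (100, 120, 3) float64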
Example No. 13
def edge_entropy(img, bal=0.1):
    """
    Weights pixels based on a weighted edge-detection and entropy balance.

    Parameters
    ----------
    img : ndarray
        Image to weight.
    bal : float (optional)
        How much to value entropy (bal) versus edge-detection (1 - bal)

    Returns
    -------
    ndarray :
        Normalized weight matrix for pixel sampling.
    """
    dn_img = skimage.restoration.denoise_tv_bregman(img, 0.1)
    img_gray = rgb2gray(dn_img)
    img_lab = rgb2lab(dn_img)

    entropy_img = gaussian(
        img_as_float64(
            dilation(entropy(img_as_ubyte(img_gray), disk(5)), disk(5))))
    edges_img = dilation(
        np.mean(np.array(
            [scharr(img_lab[:, :, channel]) for channel in range(3)]),
                axis=0), disk(3))

    weight = (bal * entropy_img) + ((1 - bal) * edges_img)
    weight /= np.mean(weight)
    weight /= np.amax(weight)

    if args.debug:
        fig, (ax1, ax2, ax3) = plt.subplots(nrows=1,
                                            ncols=3,
                                            figsize=(8, 3),
                                            sharex=True,
                                            sharey=True)
        ax1.imshow(entropy_img)
        ax1.axis('off')

        ax2.imshow(edges_img)
        ax2.axis('off')

        ax3.imshow(weight)
        ax3.axis('off')

        fig.tight_layout()
        plt.show()

    return weight
Example No. 14
    def white_balance(self, image=None):
        '''Function to perform white balancing operation on an image'''
        # Validate parameters
        if image is None:
            return None
        
        image = img_as_float64(image)

        # Extract colour channels
        R = image[:, :, 2]
        G = image[:, :, 1]
        B = image[:, :, 0]

        # Obtain average intensity for each colour channel
        mean_R = np.mean(R)
        mean_G = np.mean(G)
        mean_B = np.mean(B)

        mean_RGB = np.array([mean_R, mean_G, mean_B])

        # Obtain scaling factor
        grayscale = np.mean(mean_RGB)
        scale = grayscale / mean_RGB

        white_balanced = np.zeros(image.shape)

        # Rescale original intensities
        white_balanced[:, :, 2] = scale[0] * R
        white_balanced[:, :, 1] = scale[1] * G
        white_balanced[:, :, 0] = scale[2] * B

        # Clip to [0.0, 1.0]
        white_balanced = self.__clip(white_balanced)

        # Display result (if verbose)
        if self.verbose is True:
            self.__show(
                images=[self.image, white_balanced],
                titles=['Original Image', 'White Balanced Image'],
                size=(15, 15)
            )
        return white_balanced
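A toy numeric sketch of the gray-world scaling applied above: each channel mean is rescaled to the overall gray mean (the per-channel means below are made up).

import numpy as np

mean_rgb = np.array([0.6, 0.4, 0.2])   # hypothetical per-channel means
scale = mean_rgb.mean() / mean_rgb     # [0.667, 1.0, 2.0]
print(scale, mean_rgb * scale)         # all scaled means equal 0.4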
Example No. 15
def visualize_sample(img, weights, sample_points):
    fig, (ax1, ax2, ax3) = plt.subplots(nrows=1,
                                        ncols=3,
                                        figsize=(8, 3),
                                        sharex=True,
                                        sharey=True)
    ax1.imshow(img, cmap='gray')
    ax1.axis('off')

    ax2.imshow(weights, cmap='gray')
    ax2.axis('off')

    heatmap = gray2rgb(img_as_float64(weights))
    for point in sample_points:
        rr, cc = circle(point[0], point[1], 2, shape=weights.shape)
        heatmap[rr, cc, 0] = 1
    ax3.imshow(heatmap)
    ax3.axis('off')

    fig.tight_layout()
    plt.show()
Example No. 16
    def luminance_map(self, image=None):
        '''Function to generate the Luminance Weight Map of an image'''
        # Validate parameters
        if image is None:
            return None
        
        image = img_as_float64(image)

        # Generate Luminance Map
        luminance = np.mean(image, axis=2)
        luminancemap = np.sqrt((1 / 3) * (np.square(image[:, :, 0] - luminance) +
                                          np.square(image[:, :, 1] - luminance) +
                                          np.square(image[:, :, 2] - luminance)))

        # Display result (if verbose)
        if self.verbose is True:
            self.__show(
                images=[self.image, luminancemap],
                titles=['Original Image', 'Luminance Weight Map'],
                size=(15, 15),
                gray=True
            )
        return luminancemap
Example No. 17
    def enhance_contrast(self, image=None):
        '''Function to enhance contrast in an image'''
        # Validate parameters
        if image is None:
            return None

        image = img_as_float64(image)

        # Extract colour channels
        R = image[:, :, 2]
        G = image[:, :, 1]
        B = image[:, :, 0]

        # Obtain luminance using predefined scale factors
        luminance = 0.299 * R + 0.587 * G + 0.114 * B
        mean_luminance = np.mean(luminance)

        # Compute scale factor
        gamma = 2 * (0.5 + mean_luminance)

        # Scale the mean-luminance-subtracted colour channels
        enhanced = np.zeros(image.shape)
        enhanced[:, :, 2] = gamma * (R - mean_luminance)
        enhanced[:, :, 1] = gamma * (G - mean_luminance)
        enhanced[:, :, 0] = gamma * (B - mean_luminance)

        # Clip to [0.0, 1.0]
        enhanced = self.__clip(enhanced)

        # Display result (if verbose)
        if self.verbose is True:
            self.__show(
                images=[self.image, enhanced],
                titles=['Original Image', 'Contrast Enhanced Image'],
                size=(15, 15)
            )

        return enhanced
Example No. 18
#print("metadata", metadata)

# create some variables with those info
nb_frames = int(metadata['duration']*metadata['fps'])
nb_rows = metadata['size'][1]
nb_cols = metadata['size'][0]
depth = 3

# Create a numpy ndarray to hold each frame. The array has shape
# (number of frames, size in y, size in x, number of channels), i.e. (42, 480, 720, 3) here.
data_train = np.empty((nb_frames, nb_rows, nb_cols, depth), dtype='float64')
print("Shape of the image array", data_train.shape)

# This loop puts the images in the array
for num, image in enumerate(vid.iter_data()):
    data_train[num] = util.img_as_float64(image)

#Apply median filter to each frame
median = np.empty((nb_frames, data_train.shape[1], data_train.shape[2]))
for num, image in enumerate(data_train):
    median[num] = filters.median(image[:,:,0], behavior='ndimage')

#####CREATE THE MASKS FOR THE ROBOT
robot_masks = []
for i, im in enumerate(data_train):
    image_rev = data_train[i][:,:,2]
    image_rev = filters.median(image_rev[:,:], behavior='ndimage')
    image = util.invert(image_rev)

    # apply threshold
    thresh = threshold_otsu(image)
Example No. 19
                else:
                    img = process_raw(src)

                img = negate(img)
                img = clahe(img)
                img = rescale(img)
                img = gamma_image(img)
                result = img

                os.makedirs(str(pathlib.Path(args.outdir, outdir)),
                            exist_ok=True)
                tifffile = pathlib.Path(
                    tmpdirname,
                    pathlib.Path(src).with_suffix(".tif").name)
                io.imsave(str(tifffile),
                          util.img_as_float64(result),
                          check_contrast=False,
                          plugin='tifffile')

                command = imagemagick_convert_command(tifffile,
                                                      pathlib.Path(tmpdirname))
                print(command)
                subprocess.run(command, stderr=subprocess.STDOUT)
                jpg = pathlib.Path(
                    tmpdirname,
                    pathlib.Path(tifffile).with_suffix(args.format).name)
                command = exiftool_command(jpg, src)
                print(command)
                subprocess.run(command, stderr=subprocess.STDOUT)
                shutil.move(str(jpg), str(pathlib.Path(args.outdir, outdir)))
        except KeyboardInterrupt:
Example No. 20
# %%
import cv2 as cv
import numpy as np
from matplotlib import pyplot as plt
from skimage import util

img1 = util.img_as_float64(plt.imread('./data/hw1/im01.jpg'))
img2 = util.img_as_float64(plt.imread('./data/hw1/im02.jpg'))


def normalized(src):
    return (src - src.min()) / (src.max() - src.min())


# %% grad


def get_derivatives(src, k_size=3, mag=False):
    d_x = cv.Sobel(src,
                   cv.CV_64F,
                   1,
                   0,
                   ksize=k_size,
                   borderType=cv.BORDER_REFLECT101)
    d_y = cv.Sobel(src,
                   cv.CV_64F,
                   0,
                   1,
                   ksize=k_size,
                   borderType=cv.BORDER_REFLECT101)
    if mag:
        # Return the gradient magnitude instead of the raw derivatives
        return np.hypot(d_x, d_y)
    return d_x, d_y
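A quick check of the helper above, sketched in the file's cell style; it reuses img1 and normalized from earlier in the script, and the visualization choice is illustrative rather than from the original.

# %% quick check (illustrative)
grad_mag = get_derivatives(img1, mag=True)            # per-channel gradient magnitude
plt.imshow(normalized(grad_mag.mean(axis=2)), cmap='gray')
plt.show()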
Example No. 21
    def dehaze(self, image=None, verbose=None, pyramid_height=12):
        '''Driver function to dehaze the image'''
        # Validate parameters
        if image is None:
            return None

        self.image = image
        
        if len(image.shape) > 2 and image.shape[2] == 4:
            self.image = image[:, :, :3]

        # Set verbose flag (to decide whether each step is displayed)
        if verbose is None:
            pass
        elif verbose is True:
            self.verbose = True
        else:
            self.verbose = False

        # Generating Input Images 
        white_balanced = self.white_balance(image=img_as_float64(self.image))       # First Input Image
        contrast_enhanced = self.enhance_contrast(image=img_as_float64(self.image)) # Second Input Image
        
        input_images = [
            img_as_float64(white_balanced),
            img_as_float64(contrast_enhanced)
        ]
        
        # Generating Weight Maps
        weight_maps = [
            # Weight maps for first image
            {
                'luminance': self.luminance_map(image=input_images[0]),
                'chromatic': self.chromatic_map(image=input_images[0]),
                'saliency': self.saliency_map(image=input_images[0])
            },
            
            # Weight maps for second image
            {
                'luminance': self.luminance_map(image=input_images[1]),
                'chromatic': self.chromatic_map(image=input_images[1]),
                'saliency': self.saliency_map(image=input_images[1])
            }
        ]
        
        # Weight map normalization
        # Combined weight maps
        weight_maps[0]['combined'] = (weight_maps[0]['luminance'] *
                                      weight_maps[0]['chromatic'] *
                                      weight_maps[0]['saliency'])
        weight_maps[1]['combined'] = (weight_maps[1]['luminance'] *
                                      weight_maps[1]['chromatic'] *
                                      weight_maps[1]['saliency'])

        # Normalized weight maps
        combined_sum = weight_maps[0]['combined'] + weight_maps[1]['combined']
        weight_maps[0]['normalized'] = weight_maps[0]['combined'] / combined_sum
        weight_maps[1]['normalized'] = weight_maps[1]['combined'] / combined_sum
        
        # Generating Gaussian Image Pyramids
        gaussians = [
            self.image_pyramid(image=weight_maps[0]['normalized'], pyramid_type='gaussian', levels=pyramid_height),
            self.image_pyramid(image=weight_maps[1]['normalized'], pyramid_type='gaussian', levels=pyramid_height)
        ]

        # Fusion Step
        fused = self.fusion(input_images, weight_maps, gaussians)
 
        # Dehazing data
        dehazing = {
            'hazed': self.image,
            'inputs': input_images,
            'maps': weight_maps,
            'dehazed': fused
        }
        
        self.image = None   # Reset image

        return dehazing
Example No. 22
# imports needed for skimage and pyplot
import skimage
import numpy
import sys
import scipy
from skimage import io, util, color
from scipy import ndimage

# Read image and convert it to grayscale and float
image = util.img_as_float64(color.rgb2gray(io.imread(sys.argv[1])))

# Read in the size of the filter and validate the value to be odd
size = int(sys.argv[2])
if size % 2 == 0:
    print("Filter size must be odd")
    sys.exit()

# Create the filter with the specified size
filt = numpy.ones((size, size))
# Normalize the values in the filter
#filt = filt / numpy.sum(filt)
filt = filt / size**2

# Show filter
print(filt)

# Perform the smoothing
out = ndimage.convolve(image, filt, mode="constant", cval=0)

# Save the result
io.imsave(sys.argv[3], numpy.clip(out, 0, 1))
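A hypothetical invocation of the script above (the script and file names are placeholders): the first argument is the input image, the second the odd filter size, the third the output path.

python mean_filter.py input.png 5 smoothed.png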
Example No. 23
def active_contours(input_image,
                    initial_snake,
                    output_image,
                    alpha,
                    beta,
                    tau,
                    w_line,
                    w_edge,
                    kappa_1,
                    kappa_2,
                    sigma=2,
                    re_param=100,
                    iter_param=10000):
    """Active contour model

    Parameters
    ----------
    input_image: string
        Path of the input image
    initial_snake: 2-D sequence of floats
        Path of the initial snake txt file
    output_image: string, optional
        path of the output image
    alpha: float
        Contour elasticity parameter
    beta: float
        Contour stiffness parameter
    tau: float
        Time step (Snake speed) parameter
    w_line: float
        Line potential weight
    w_edge: float
        Edge potential weight
    kappa_1: float
        Balloon force parameter, where kappa_1 sign controls inflate or deflate
        note: |kappa_1| < |kappa_2| <1
    kappa_2: float
        External force parameter
        note: |kappa_1| < |kappa_2| <1
    sigma: float
        Gaussian filter parameter
    re_param: int
        Curve reparametrization frequency
    iter_param: int
        Number of iterations

    Returns
    -------
    snake: 2-D sequence of floats
        Contour
    """
    alpha, beta, tau = float(alpha), float(beta), float(tau)
    w_line, w_edge = float(w_line), float(w_edge)
    kappa_1, kappa_2, sigma = float(kappa_1), float(kappa_2), float(sigma)
    re_param, iter_param = int(re_param), int(iter_param)
    image = img_as_float64(imread(input_image, as_gray=True))
    snake = np.loadtxt(initial_snake)

    # Contour points
    x, y = snake[:, 0], snake[:, 1]
    # Number of contour points
    n = snake.shape[0]

    # A - Euler equation matrix, where c_n is the n-th order contour derivative
    c_2 = np.roll(np.eye(n), -1, axis=1) - 2 * np.eye(n) + np.roll(
        np.eye(n), -1, axis=0)
    c_4 = np.roll(np.eye(n), -2, axis=1) - 4 * np.roll(
        np.eye(n), -1, axis=1) + 6 * np.eye(n) - 4 * np.roll(
            np.eye(n), -1, axis=0) + np.roll(np.eye(n), -2, axis=0)
    A = -alpha * c_2 + beta * c_4

    # (I-tau*A) inverse matrix
    A_inv = np.linalg.inv(np.eye(n) + tau * A)

    # Gaussian derivative filter
    # With respect to X
    dx = gaussian_filter(image,
                         sigma=sigma,
                         order=[1, 0],
                         output=np.float64,
                         mode='nearest')
    # With respect to Y
    dy = gaussian_filter(image,
                         sigma=sigma,
                         order=[0, 1],
                         output=np.float64,
                         mode='nearest')

    # Potential (for External energy)
    Potential_edge = -(dx**2 + dy**2)
    Potential_line = -gaussian_filter(
        image, sigma=sigma, order=[0, 0], output=np.float64, mode='nearest')
    Potential = -w_line * Potential_line - w_edge * Potential_edge
    # Potential interpolation
    Potential_interp = RectBivariateSpline(np.arange(Potential.shape[1]),
                                           np.arange(Potential.shape[0]),
                                           Potential.T)

    # Main Loop
    j = 1
    while j < iter_param:

        # Potential gradient
        P_x = Potential_interp(x, y, dx=1, grid=False)
        P_y = Potential_interp(x, y, dy=1, grid=False)
        n_x, n_y = balloon_force(x, y)
        F_ext_x = kappa_1 * n_x - kappa_2 * P_x / np.hypot(P_x, P_y)
        F_ext_y = kappa_1 * n_y - kappa_2 * P_y / np.hypot(P_x, P_y)

        x = A_inv.dot(x + tau * F_ext_x)
        y = A_inv.dot(y + tau * F_ext_y)

        # Reparametrization
        if j % re_param == 0:
            x, y = reparametrization(x, y)

        j += 1

    new_snake = np.hstack((x[:, np.newaxis], y[:, np.newaxis]))
    utils.save_mask(output_image, new_snake, image)

    return new_snake
Example No. 24
	solidity = []
	centroid = []
	for region in measure.regionprops(label_image):
		area.append(region.area)
		length.append(region.major_axis_length)
		width.append(region.minor_axis_length)
		solidity.append(region.solidity)
		centroid.append(region.centroid)

	# plot_outlines(input_name = mask_name,
	# 			contours = measure.find_contours(cleared, 0.8),
	# 			file_path = path
	# )


	cell_edges = mark_boundaries(mask_name,img_as_float64(cleared))

	plt.imshow(cell_edges)
	plt.axis('off')

	plt.savefig(path + '/outlines/' + input_no_ext + '.jpg')
	plt.close()

	print('Print Outlines')
	image_df = pd.DataFrame(
		{'Image_name':image_name,
		'Area':area,
		'Major_axis_length':length,
		'Minor_axis_length':width,
		'Solidity':solidity
		})