Example #1
def mid_axis(img):
    dis = ndimg.distance_transform_edt(img)
    dis[[0, -1], :] = 0
    dis[:, [0, -1]] = 0
    idx = np.argsort(dis.flat).astype(np.int32)
    medial_axis(dis, idx, lut)
    return dis
Example #2
    def convert_binary_image_to_sdf(self, binary_img, vis = False):
        binary_data = np.array(binary_img)
        skel, sdf_in = morph.medial_axis(binary_data, return_distance = True)
        useless_skel, sdf_out = morph.medial_axis(self.upper_bound_ - binary_data, return_distance = True)
        
        sdf = sdf_out - sdf_in

        # display the sdf and skeleton
        if vis:
            dist_on_skel = sdf * skel
            
            fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
            ax1.imshow(binary_data, cmap=plt.cm.gray, interpolation='nearest')
            ax1.axis('off')
            ax2.imshow(dist_on_skel, cmap=plt.cm.spectral, interpolation='nearest')
            ax2.contour(binary_data, [0.5], colors='w')
            ax2.axis('off')

            fig.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0, right=1)
            plt.show()

            plt.imshow(sdf)
            plt.show()

            plt.imshow(skel)
            plt.show()

        return sdf, skel
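
A minimal standalone sketch of the same signed-distance idea, for readers without the surrounding class: two medial_axis distance transforms (one on the mask, one on its complement) are subtracted. The toy mask and variable names are illustrative only.

import numpy as np
from skimage.morphology import medial_axis

mask = np.zeros((64, 64), dtype=np.uint8)
mask[16:48, 16:48] = 1  # toy rectangular object

_, dist_in = medial_axis(mask, return_distance=True)       # distance to background inside the object
_, dist_out = medial_axis(1 - mask, return_distance=True)  # distance to object outside it
sdf = dist_out - dist_in  # negative inside, positive outside, ~0 on the boundary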
Example #3
    def medskel(self, return_distance=True, verbose=False):
        '''

        This function performs the medial axis transform (skeletonization) on the mask.
        This is essentially a wrapper function of skimage.morphology.medial_axis. The
        returned skeletons are the objects used for the bulk of the analysis.

        If the distance transform is returned from the transform, it is used as a pruning
        step. Regions whose width is far too small (set to >0.01 pc) are
        deleted. This ensures there are no unnecessary connections between filaments.

        Parameters
        ----------
        return_distance : bool, optional
                          This sets whether the distance transform is returned from
                          skimage.morphology.medial_axis.

        verbose : bool, optional
                  If True, the image is overplotted with the skeletons for inspection.

        Returns
        -------

        self.skeleton : numpy.ndarray
                        The array containing all of the skeletons.

        self.medial_axis_distance : numpy.ndarray
                                    The distance transform used to create the skeletons.
                                    Only defined if return_distance=True.

        '''

        if return_distance:
            self.skeleton,self.medial_axis_distance = medial_axis(self.mask, return_distance=return_distance)
            self.medial_axis_distance  = self.medial_axis_distance * self.skeleton
            if self.pixel_unit_flag:
                print "Setting arbitrary width threshold to 2 pixels"
                width_threshold = raw_input("Enter threshold change or pass: "******"":
                    width_threshold = 2
                width_threshold = float(width_threshold)
            else:
                width_threshold = round((0.1/10.)/self.imgscale) # (in pc) Set to be a tenth of expected filament width
            narrow_pts = np.where(self.medial_axis_distance<width_threshold)
            self.skeleton[narrow_pts] = 0 ## Eliminate narrow connections
        else:
            self.skeleton = medial_axis(self.mask)



        if verbose: # For examining results of skeleton
            masked_image = self.image * self.mask
            skel_points = np.where(self.skeleton==1)
            for i in range(len(skel_points[0])):
                masked_image[skel_points[0][i],skel_points[1][i]] = np.NaN
            p.imshow(masked_image,interpolation=None,origin="lower")
            p.show()

        return self
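
A minimal sketch of the pruning step used above, outside the class: skeleton pixels whose medial-axis distance falls below a width threshold are removed. The toy mask and the threshold value are placeholders.

import numpy as np
from skimage.morphology import medial_axis

mask = np.zeros((40, 80), dtype=bool)
mask[18:22, 5:75] = True    # a thin bar ...
mask[5:35, 30:50] = True    # ... crossing a wide blob

skeleton, dist = medial_axis(mask, return_distance=True)
skel_dist = dist * skeleton                 # distance transform sampled on the skeleton
width_threshold = 3                         # placeholder threshold, in pixels
skeleton[skel_dist < width_threshold] = 0   # drop skeleton pixels in narrow regions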
Example #4
 def medial_axis_Astar(self, data, global_goal, TARGET_ALTITUDE,
                       SAFETY_DISTANCE):
     grid, north_offset, east_offset = create_grid_bfs(
         data, TARGET_ALTITUDE, SAFETY_DISTANCE)
     skeleton = medial_axis(invert(grid))
     print("North offset = {0}, east offset = {1}".format(
         north_offset, east_offset))
     grid_start = (int(self.local_position[0]) - north_offset,
                   int(self.local_position[1]) - east_offset)
     local_goal = global_to_local(global_goal, self.global_home)
     grid_goal = (int(local_goal[0]) - north_offset,
                  int(local_goal[1]) - east_offset)
     skel_start, skel_goal = find_start_goal(skeleton, grid_start,
                                             grid_goal)
     mapa = breadth_first(
         invert(skeleton).astype(np.int), tuple(skel_goal),
         tuple(skel_start))
     print('Local Start and Goal: ', grid_start, grid_goal)
     path, _ = a_star_bfs(
         invert(skeleton).astype(np.int), mapa, tuple(skel_start),
         tuple(skel_goal))
     path.append(grid_goal)
     pruned_path = prune_path_bres(path, grid)
     waypoints = [[
         int(p[0] + north_offset),
         int(p[1] + east_offset), TARGET_ALTITUDE, 0
     ] for p in pruned_path]
     for i in range(1, len(waypoints)):
         heading = np.arctan2(
             waypoints[i][0] - waypoints[i - 1][0],
             waypoints[i][1] - waypoints[i - 1][1]) - pi / 2
         waypoints[i][3] = -heading
     return waypoints
Example #5
    def _coarse(self):

        sigma = self.sigma
        method_center = self.method_center
        Xcoarse = spim.gaussian_filter(self.X, sigma=sigma)
        xmean = np.mean(Xcoarse)

        Xcontour = np.zeros(np.shape(Xcoarse))
        for i in range(np.shape(Xcoarse)[0]):
            for j in range(np.shape(Xcoarse)[1]):

                if Xcoarse[i, j] > xmean:
                    Xcontour[i, j] = 1
                else:
                    Xcontour[i, j] = 0

        self.Xcontour = Xcontour  # Attribute

        # Compute the centerline/skeleton
        if method_center == 'medial_axis':
            skeleton_coarse = medial_axis(Xcontour) * 1.0
        elif method_center == 'skeletonize':
            skeleton_coarse = skeletonize(Xcontour) * 1.0
        else:
            raise ValueError(
                'PyCrack: no method to compute the skeleton/centerline is selected.'
            )

        if self.verbose: print('PyCrack: get nodes and coordinates.')

        self.skeleton_coarse = skeleton_coarse  # Attribute
        nodes_coarse = self._getnodes(skeleton_coarse)
        coords_crack = self._getcoords(self.X)
        coords_skeleton = self._getcoords(skeleton_coarse)

        # Attributes
        self.nodes_coarse = nodes_coarse
        self.coords_crack = coords_crack
        self.coords_skeleton = coords_skeleton

        if self.showfig:

            if self.verbose: print('PyCrack: plot graphic.')

            fig = plt.figure(figsize=(10, 10))
            ax = fig.add_subplot(111)
            ax.imshow(Xcontour, cmap=plt.cm.gray)
            ax.scatter(coords_crack[:, 0], coords_crack[:, 1], s=0.05, c='y')
            ax.scatter(coords_skeleton[:, 0],
                       coords_skeleton[:, 1],
                       c='b',
                       marker='o',
                       s=(72. / fig.dpi / 8)**2)
            ax.scatter(nodes_coarse[:, 0], nodes_coarse[:, 1], s=20, c='red')
            ax.axis('off')
            for i in range(len(nodes_coarse)):
                ax.text(nodes_coarse[i, 0], nodes_coarse[i, 1], s=str(i))

            fig.tight_layout()
            plt.show()
Example #6
def skeleton_features(mask):
    """calculates the set of cell-skeleton based features 
    
    Calculates medial axis of the segmented cell and calculates the length,
    maximum and minimum thickness of the skeleton

    Parameters
    ----------
    mask : 3D array, shape (M, N, C)
        The input mask with multiple channels.

    Returns
    -------
    features : dict
        dictionary with the skeleton length and the maximum and minimum
        skeleton thickness per channel

    """
    # storing the feature values
    features = dict()
    for ch in range(mask.shape[2]):
        # calculating the medial axis and distance on the skeleton
        skel, distance = medial_axis(mask[:, :, ch], return_distance=True)
        dist_on_skel = distance * skel

        # storing the features
        features["skeleton_length_Ch" + str(ch + 1)] = skel.sum()
        features["skeleton_thickness_max_Ch" +
                 str(ch + 1)] = dist_on_skel.max()
        if dist_on_skel.max() > 0.:
            features["skeleton_thickness_min_Ch" +
                     str(ch + 1)] = dist_on_skel[dist_on_skel > 0.].min()
        else:
            features["skeleton_thickness_min_Ch" + str(ch + 1)] = 0.

    return features
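
A hedged usage sketch for skeleton_features, assuming the function above (and its medial_axis import) is in scope; the one-channel toy mask is illustrative.

import numpy as np

# one-channel (M, N, C) mask with a filled square in channel 0
toy_mask = np.zeros((32, 32, 1), dtype=bool)
toy_mask[8:24, 8:24, 0] = True

feats = skeleton_features(toy_mask)
print(feats["skeleton_length_Ch1"], feats["skeleton_thickness_max_Ch1"])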
Example #7
def get_skeleton() -> Tuple[np.ndarray, np.ndarray]:
    """
        It skeletonizes an image of the hairpin
    """
    # load arena image & threshold
    img = Image.open(Hairpin.image_local_path())
    new_width = 40
    new_height = 60
    img = img.resize((new_width, new_height))
    img = np.array(img)[:, :, 0]
    img[img > 0] = 1
    arena = invert(img)
    arena[arena == 254] = 0
    arena[arena == 255] = 1

    # perform skeletonization
    skeleton = skeletonize(arena)
    _, distance = medial_axis(arena, return_distance=True)

    # convolve distance map with gaussian kernel
    kernel = kernel_gaussian_2D()
    distance = signal.convolve2d(distance,
                                 kernel,
                                 boundary="symm",
                                 mode="same")

    return skeleton, distance
Example #8
def label_watershed(obj: np.array, maxima_threshold):
    """
    Separate touching objects based on distance map (similar to imageJ watershed)

    :param obj: np.array, 0-and-1
    :param maxima_threshold: threshold for identify maxima
    :return: seg: np.array, grey scale with different objects labeled with different numbers
    """
    _, dis = medial_axis(obj, return_distance=True)
    maxima = extrema.h_maxima(dis, maxima_threshold)
    # maxima_threshold for Jess data = 1
    # maxima_threshold for Jose 60x data = 10
    # maxima_threshold for Jose 40x data = 20
    maxima_mask = binary_dilation(maxima)
    for i in range(6):
        maxima_mask = binary_dilation(maxima_mask)

    label_maxima = label(maxima_mask, connectivity=2)
    markers = label_maxima.copy()
    markers[obj == 0] = np.amax(label_maxima) + 1
    elevation_map = sobel(obj)
    label_obj = segmentation.watershed(elevation_map, markers)
    label_obj[label_obj == np.amax(label_maxima) + 1] = 0

    return label_obj
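
A hedged usage sketch for label_watershed, assuming the function above and its skimage imports are in scope; the two overlapping discs and maxima_threshold=1 (per the comment above) are illustrative.

import numpy as np
from skimage.draw import disk

# two overlapping discs that form a single touching blob
obj = np.zeros((80, 120), dtype=np.uint8)
rr, cc = disk((40, 40), 25, shape=obj.shape)
obj[rr, cc] = 1
rr, cc = disk((40, 75), 25, shape=obj.shape)
obj[rr, cc] = 1

labels = label_watershed(obj, maxima_threshold=1)
print(np.unique(labels))  # background plus (ideally) two separated objects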
Example #9
def create_skeletons(images):
    try:
        # Create target directories
        os.mkdir(dataset_path)
        os.mkdir(train_path)
        os.mkdir(test_skeletonize_path)
        os.mkdir(test_medial_axis_path)
    except FileExistsError:
        print("Directory already exists")

    for i in range(len(images)):
        res = cv2.resize(images[i],
                         dsize=(256, 256),
                         interpolation=cv2.INTER_CUBIC)
        plt.imsave(fname=train_path + "/train_" + str(i) + ".png",
                   arr=res,
                   cmap="gray")
        res_medial = medial_axis(res[:, :, 0])
        plt.imsave(fname=test_medial_axis_path + "/medial_" + str(i) + ".png",
                   arr=res_medial,
                   cmap="gray")

        res_skeleton = skeletonize(res[:, :, 0])
        plt.imsave(fname=test_skeletonize_path + "/skeletonize_" + str(i) +
                   ".png",
                   arr=res_skeleton,
                   cmap="gray")
Example #10
def segment_cells(frame, mask=None):
    """
    Compute the initial segmentation based on ridge detection + watershed.
    This works reasonably well, but is not robust enough to use by itself.
    """
    
    blurred = filters.gaussian(frame, 2)
    ridges = enhance_ridges(frame)
    
    # threshold ridge image
    thresh = filters.threshold_otsu(ridges)
    thresh_factor = 0.5
    prominent_ridges = ridges > thresh_factor*thresh
    prominent_ridges = morphology.remove_small_objects(prominent_ridges, min_size=256)
    prominent_ridges = morphology.binary_closing(prominent_ridges)
    prominent_ridges = morphology.binary_dilation(prominent_ridges)
    
    # skeletonize
    ridge_skeleton = morphology.medial_axis(prominent_ridges)
    ridge_skeleton = morphology.binary_dilation(ridge_skeleton)
    ridge_skeleton *= mask
    ridge_skeleton = np.bitwise_xor(ridge_skeleton, mask)
    
    # label
    cell_label_im = measure.label(ridge_skeleton)
    
    # morphological closing to fill in the cracks
    for cell_num in range(1, cell_label_im.max()+1):
        cell_mask = cell_label_im==cell_num
        cell_mask = morphology.binary_closing(cell_mask, disk(3))
        cell_label_im[cell_mask] = cell_num
    
    return cell_label_im 
Example #11
    def run(self):

        print 'Generating features....'
        images = self.__image_list(image_path('circinatum'), image_path('kelloggii'), image_path('negundo'))

        arquivo = open(data_path(),'w')
        count = 0
        for i in images:
            count+=1
            img, vetor, arclen, classe = self.__features(i)

            # Mean curvature
            arquivo.write(str(np.mean(np.abs(vetor))) + ',')
            # Arc length
            arquivo.write(str(arclen) + ',')
            # Area
            arquivo.write(str(np.sum(img)) + ',')
            # Number of skeleton pixels
            arquivo.write(str(np.sum(morphology.medial_axis(img))) + ',')
            # Leaf class
            arquivo.write(classe)
            arquivo.write("\n")

        arquivo.close()

        print '100%'
        print 'Total images: ' + str(count)
Example #12
def imageThinning(datasetPath, subFolder, file):
    """Thins and saves binary image

    Args:

        datasetPath (string): Relative or Absolute path of the dataset.

        subFolder (string): Folder name of specific character

        file (string): Name of image

    Purpose:

        Saves the thinned image with tag "-thinned" to the actual file name in the actual images' location
    """

    path = os.path.join(datasetPath, subFolder, file)

    original = invert(cv2.imread(path, cv2.IMREAD_GRAYSCALE))
    binary = original > filters.threshold_otsu(original)
    skeleton, distance = medial_axis(binary, return_distance=True)
    distanceOnSkeleton = distance * skeleton
    invertedImage = invert(distanceOnSkeleton)

    index = file.find('.png')
    newFile = file[:index] + "-thinned" + file[index:]
    newPath = os.path.join(datasetPath, subFolder, newFile)
    io.imsave(newPath, invertedImage)
Example #13
def get_crack_width(input_file, pixel_mm=0.31, draw=False):

    img = cv2.imread(input_file, 0)
    img_bool = img > 0
    # Compute the medial axis (skeleton) and the distance transform
    skel, distance = medial_axis(img_bool, return_distance=True)
    # Distance to the background for pixels of the skeleton
    dist_on_skel = distance * skel
    max_width = np.max(dist_on_skel) * pixel_mm * 2
    #determine if the defect is critical
    critical = False
    if max_width >= 0.3:
        critical = True

    if draw:
        i, j = np.unravel_index(dist_on_skel.argmax(), dist_on_skel.shape)
        fig = plt.figure(figsize=(10, 10))
        ax = fig.add_subplot(111)
        ax.imshow(dist_on_skel, cmap='gray')
        ax.scatter(j, i, color='red')
        ax.contour(img, [0.5], colors='w')
        img_name = os.path.split(input_file)[-1].split('.')[0] + '_draw.png'

        plt.savefig(img_name, bbox_inches='tight')
        plt.close(fig)

    return max_width, critical
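
A minimal sketch of the same width measurement without file I/O: the maximum medial-axis distance is doubled and scaled by the pixel size. The synthetic crack and the pixel_mm value are illustrative.

import numpy as np
from skimage.morphology import medial_axis

# synthetic 4-pixel-wide "crack" on a dark background
crack = np.zeros((50, 200), dtype=bool)
crack[23:27, 10:190] = True

skel, distance = medial_axis(crack, return_distance=True)
dist_on_skel = distance * skel
pixel_mm = 0.31                                  # assumed pixel size, as above
max_width = np.max(dist_on_skel) * pixel_mm * 2  # crack width estimate in mm
critical = max_width >= 0.3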
Example #14
    def plan_medial_axis(self, data,local_start, local_goal, TARGET_ALTITUDE, SAFETY_DISTANCE):
        print('Medial-Axis planning')

        # create grid and skeleton
        grid, north_offset, east_offset = create_grid(data, TARGET_ALTITUDE, SAFETY_DISTANCE)
        print("North offset = {0}, east offset = {1}".format(north_offset, east_offset))
        skeleton = medial_axis(invert(grid))
        print('Skeleton generated') 

        # calculate the closest start/goal points on the "graph"
        start_ne = ( local_start[0] - north_offset, local_start[1] - east_offset)
        goal_ne = ( local_goal[0] - north_offset, local_goal[1] - east_offset)
        skel_start, skel_goal = find_start_goal(skeleton, start_ne, goal_ne)
        
        # run A* to search for the goal along the road network
        path, cost = a_star(invert(skeleton).astype(np.int), heuristic, tuple(skel_start), tuple(skel_goal))

        # Prune path to minimize number of waypoints
        if (args.prune == 'collinearity'):
            print('Pruning path with collinearity check')
            path = collinearity_prune(path)
        elif (args.prune == 'bresenham'):
            print('Pruning path with bresenham')
            path = bresenham_prune(path,grid)
        else:
            print('Unrecognized prune option returning full path')

        # Convert path to waypoints
        waypoints = [[int(p[0]) + north_offset,int(p[1]) + east_offset, TARGET_ALTITUDE, 0] for p in path]

        return waypoints
Example #15
def place_routers_on_skeleton_iterative(d, cmethod):
    budget = d['budget']
    R = d['radius']
    max_num_routers = int(d['budget'] / d['price_router'])
    coverage = np.where(d["graph"] == Cell.Wireless, 1, 0).astype(np.bool)

    pbar = tqdm(range(max_num_routers), desc="Placing Routers")
    while budget > 0:
        # perform skeletonization
        # skeleton = skeletonize(coverage)
        skeleton = medial_axis(coverage)
        # get all skeleton positions
        pos = np.argwhere(skeleton > 0).tolist()
        # escape if no positions left
        if not len(pos):
            break
        # get a random position
        shuffle(pos)
        a, b = pos[0]
        # place router
        d["graph"][a][b] = Cell.Router
        d, ret, cost = _add_cabel(d, (a, b), budget)
        if not ret:
            print("No budget available!")
            break
        budget -= cost
        # refresh wireless map by removing new coverage
        m = wireless_access(a, b, R, d['graph']).astype(np.bool)
        coverage[(a - R):(a + R + 1), (b - R):(b + R + 1)] &= ~m
        pbar.update()
    pbar.close()

    return d
Example #16
def agreeing_skeletons(float_surface, mito_labels):
    topological_skeleton = skeletonize(mito_labels)

    medial_skeleton, distance = medial_axis(mito_labels, return_distance=True)

    # TODO: test without the active threshold surface
    active_threshold = np.mean(float_surface[mito_labels])
    transform_filter = np.zeros(mito_labels.shape, dtype=np.uint8)
    transform_filter[np.logical_and(medial_skeleton > 0,
                                    float_surface > active_threshold)] = 1
    # transform filter is basically medial_skeleton on a field above threshold (mean*5 - wow, that's a lot)
    medial_skeleton = transform_filter * distance

    median_skeleton_masked = np.ma.masked_array(medial_skeleton,
                                                medial_skeleton > 0)
    skeleton_convolve = ndi.convolve(median_skeleton_masked,
                                     np.ones((3, 3)),
                                     mode='constant',
                                     cval=0.0)
    divider_convolve = ndi.convolve(transform_filter,
                                    np.ones((3, 3)),
                                    mode='constant',
                                    cval=0.0)
    skeleton_convolve[divider_convolve > 0] = skeleton_convolve[divider_convolve > 0] / \
                                              divider_convolve[divider_convolve > 0]

    skeletons = np.zeros_like(medial_skeleton)
    skeletons[topological_skeleton] = skeleton_convolve[topological_skeleton]
    # dbg.skeleton_debug(float_surface, mito_labels, skeletons)
    return skeletons
Example #17
def segment_cells(frame, mask=None):
    """
    Compute the initial segmentation based on ridge detection + watershed.
    This works reasonably well, but is not robust enough to use by itself.
    """
    
    blurred = filters.gaussian_filter(frame, 2)
    ridges = enhance_ridges(frame)
    
    # threshold ridge image
    thresh = filters.threshold_otsu(ridges)
    thresh_factor = 0.6
    prominent_ridges = ridges > thresh_factor*thresh
    prominent_ridges = morphology.remove_small_objects(prominent_ridges, min_size=256)
    prominent_ridges = morphology.binary_closing(prominent_ridges)
    prominent_ridges = morphology.binary_dilation(prominent_ridges)
    
    # skeletonize
    ridge_skeleton = morphology.medial_axis(prominent_ridges)
    ridge_skeleton = morphology.binary_dilation(ridge_skeleton)
    ridge_skeleton *= mask
    ridge_skeleton -= mask
    
    # label
    cell_label_im = measure.label(ridge_skeleton)
    
    # morphological closing to fill in the cracks
    for cell_num in range(1, cell_label_im.max()+1):
        cell_mask = cell_label_im==cell_num
        cell_mask = morphology.binary_closing(cell_mask, disk(3))
        cell_label_im[cell_mask] = cell_num
    
    return cell_label_im 
Example #18
def skeletonize_mitochondria(mch_channel):
    mch_collector = np.max(mch_channel, axis=0)  # TODO: check max projection v.s. sum
    skeleton_labels = np.zeros(mch_collector.shape, dtype=np.uint8)

    # thresh = np.max(mch_collector)/2.
    thresh = threshold_otsu(mch_collector)
    # use adaptative threshold? => otsu seems to be sufficient in this case

    skeleton_labels[mch_collector > thresh] = 1
    skeleton2 = skeletonize(skeleton_labels)
    skeleton, distance = medial_axis(skeleton_labels, return_distance=True)
    active_threshold = np.mean(mch_collector[skeleton_labels]) * 5

    # print active_threshold
    transform_filter = np.zeros(mch_collector.shape, dtype=np.uint8)
    transform_filter[np.logical_and(skeleton > 0, mch_collector > active_threshold)] = 1
    skeleton = transform_filter * distance

    skeleton_ma = np.ma.masked_array(skeleton, skeleton > 0)
    skeleton_convolve = ndi.convolve(skeleton_ma, np.ones((3, 3)), mode='constant', cval=0.0)
    divider_convolve = ndi.convolve(transform_filter, np.ones((3, 3)), mode='constant', cval=0.0)
    skeleton_convolve[divider_convolve > 0] = skeleton_convolve[divider_convolve > 0] / \
                                              divider_convolve[divider_convolve > 0]
    new_skeleton = np.zeros_like(skeleton)
    new_skeleton[skeleton2] = skeleton_convolve[skeleton2]
    skeleton = new_skeleton

    return skeleton_labels, mch_collector, skeleton, transform_filter
Example #19
def remove_narrow_part(thresholded_image, narrow_constant=2):
    skel, distance = medial_axis(thresholded_image, return_distance=True)
    dist_on_skel = distance * skel

    contour_image = get_contour_image(thresholded_image)
    distance_no_border = get_complement_image(skel, contour_image)
    distance_no_border[np.where(distance_no_border < 0)] = 0
    distance_no_border[np.where(distance > narrow_constant)] = 0

    distance_labeled_image = measure.label(distance_no_border, connectivity=2)
    if len(np.unique(distance_labeled_image)) > 1:
        distance_region_props = measure.regionprops_table(
            distance_labeled_image, properties=('label', 'area', 'image'))
        result_image = np.zeros_like(distance_labeled_image)
        for i in range(len(distance_region_props['label'])):
            if distance_region_props['area'][i] < 2:
                distance_labeled_image[np.where(
                    distance_labeled_image == distance_region_props['label']
                    [i])] = 0
        result_image[np.where(distance_labeled_image > 0)] = 1
        dilated_image = binary_dilation(result_image,
                                        square(narrow_constant + 1))

        removed_narrow_part_image = get_complement_image(
            thresholded_image, dilated_image)
        removed_narrow_part_image[np.where(removed_narrow_part_image < 0)] = 0
        return removed_narrow_part_image
    else:
        return thresholded_image
Example #20
def vectorize_lines(im: np.ndarray,
                    threshold: float = 0.2,
                    min_sp_dist: int = 10):
    """
    Vectorizes lines from a binarized array.

    Args:
        im (np.ndarray): Array of shape (3, H, W) with the first dimension
                         being probabilities for (start_separators,
                         end_separators, baseline).

    Returns:
        [[x0, y0, ... xn, yn], [xm, ym, ..., xk, yk], ... ]
        A list of lists containing the points of all baseline polylines.
    """
    # split into baseline and separator map
    st_map = im[0]
    end_map = im[1]
    sep_map = st_map + end_map
    bl_map = im[2]
    # binarize
    bin = im > threshold
    skel, skel_dist_map = medial_axis(bin[2], return_distance=True)
    elongation_offset = np.max(skel_dist_map)
    sp_can = _find_superpixels(skel, heatmap=bl_map, min_sp_dist=min_sp_dist)
    if not sp_can.size:
        logger.warning(
            'No superpixel candidates found in network output. Likely empty page.'
        )
        return []
    intensities = _compute_sp_states(sp_can, bl_map, st_map, end_map)
    clusters = _cluster_lines(intensities)
    lines = _interpolate_lines(clusters, elongation_offset, bl_map.shape,
                               st_map, end_map)
    return lines
Example #21
def topology_preserving_thinning(bw: np.ndarray,
                                 min_thickness: int = 1,
                                 thin: int = 1) -> np.ndarray:
    """perform thinning on segmentation without breaking topology

    Parameters:
    --------------
    bw: np.ndarray
        the 3D binary image to be thinned
    min_thickness: int
        Half of the minimum width you want to keep from being thinned.
        For example, when the object width is smaller than 4, you don't
        want to make this part even thinner (may break the thin object
        and alter the topology), you can set this value as 2.
    thin: int
        the amount to thin (has to be a positive integer): the number of
        pixels to be removed from the outer boundary towards the center.

    Return:
    -------------
        A binary image after thinning
    """
    bw = bw > 0
    safe_zone = np.zeros_like(bw)
    for zz in range(bw.shape[0]):
        if np.any(bw[zz, :, :]):
            ctl = medial_axis(bw[zz, :, :] > 0)
            dist = distance_transform_edt(ctl == 0)
            safe_zone[zz, :, :] = dist > min_thickness + 1e-5

    rm_candidate = np.logical_xor(bw > 0, erosion(bw > 0, ball(thin)))

    bw[np.logical_and(safe_zone, rm_candidate)] = 0

    return bw
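
A hedged usage sketch for topology_preserving_thinning, assuming the function above and its imports are in scope; the dumbbell-shaped toy volume and the parameter values are illustrative.

import numpy as np

# two thick blobs joined by a thin bridge, stacked into a small 3D volume
slice2d = np.zeros((40, 80), dtype=bool)
slice2d[10:30, 5:30] = True
slice2d[10:30, 50:75] = True
slice2d[19:21, 30:50] = True   # 2-pixel bridge that should be protected
bw = np.stack([slice2d] * 3, axis=0)

thinned = topology_preserving_thinning(bw, min_thickness=1, thin=1)
print(int(bw.sum()), int(thinned.sum()))  # thinning removes some outer voxels, keeps the bridge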
Example #22
    def skeleton_extraction_medial_axis(self, im):
        '''
            medial_axis uses the medial axis transform to compute the width
            of foreground (value-1) objects.
            Function signature:
            skimage.morphology.medial_axis(image, mask=None, return_distance=False)
                mask: optional mask, default None. If a mask is given, the
                        skeleton algorithm is only run on pixels inside it.
                return_distance: bool, default False.
                        If True, the distance transform is returned along with
                        the skeleton. The distance is measured from every point
                        on the medial axis to the background.
        '''

        # compute the medial axis and the distance transform
        skel, distance = morphology.medial_axis(im, return_distance=True)

        # distance from points on the medial axis to the background pixels
        dist_on_skel = distance * skel

        fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
        ax1.imshow(im, cmap=plt.cm.gray, interpolation='nearest')

        # display the medial axis with a spectral colormap
        ax2.imshow(dist_on_skel, cmap=plt.cm.spectral, interpolation='nearest')
        ax2.contour(im, [0.5], colors='w')  # draw the contour line

        fig.tight_layout()
        plt.show()

        return skel, distance, dist_on_skel
Example #23
def execute_Skeleton(proxy,obj):

	from skimage.morphology import medial_axis

	threshold=0.1*obj.threshold

	try: 
		img2=obj.sourceObject.Proxy.img
		img=img2.copy()
	except: 
		sayexc()
		img=cv2.imread(__dir__+'/icons/freek.png')

	data = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)

	# Compute the medial axis (skeleton) and the distance transform
	skel, distance = medial_axis(data, return_distance=True)

	# Distance to the background for pixels of the skeleton
	dist_on_skel = distance * skel

	# entferne ganz duenne linien
	dist_on_skelw =(dist_on_skel >= threshold)* distance

	say("size of the image ...")
	say(dist_on_skelw.shape)
#	skel = np.array(dist_on_skelw,np.uint8) 
	skel = np.array(dist_on_skelw *255/np.max(dist_on_skelw),np.uint8) 
	obj.Proxy.img=cv2.cvtColor(skel*100, cv2.COLOR_GRAY2BGR)
	obj.Proxy.dist_on_skel=dist_on_skelw
Example #24
def run(img):
    if len(img.shape) > 2 and img.shape[2] == 4:
        img = color.rgba2rgb(img)
    if len(img.shape) == 2:
        img = color.gray2rgb(img)
    skel = medial_axis(color.rgb2gray(img), return_distance=False)
    return to_base64(skel)
Example #25
def skeletonize_mitochondria(mCh_channel):

    mch_collector = np.max(mCh_channel, axis=0)  # TODO: check how max affects v.s. sum
    labels = np.zeros(mch_collector.shape, dtype=np.uint8)

    # thresh = np.max(mch_collector)/2.
    thresh = threshold_otsu(mch_collector)
    # TODO: use adaptative threshold? => otsu seems to be sufficient in this case
    # http://scikit-image.org/docs/dev/auto_examples/xx_applications/plot_thresholding.html#sphx
    # -glr-auto-examples-xx-applications-plot-thresholding-py
    #  log-transform? => Nope, does not work
    # TODO: hessian/laplacian of gaussian blob detection?

    labels[mch_collector > thresh] = 1
    skeleton2 = skeletonize(labels)
    skeleton, distance = medial_axis(labels, return_distance=True)
    active_threshold = np.mean(mch_collector[labels]) * 5

    # print active_threshold
    transform_filter = np.zeros(mch_collector.shape, dtype=np.uint8)
    transform_filter[np.logical_and(skeleton > 0, mch_collector > active_threshold)] = 1
    skeleton = transform_filter * distance

    skeleton_ma = np.ma.masked_array(skeleton, skeleton > 0)
    skeleton_convolve = ndi.convolve(skeleton_ma, np.ones((3, 3)), mode='constant', cval=0.0)
    divider_convolve = ndi.convolve(transform_filter, np.ones((3, 3)), mode='constant', cval=0.0)
    skeleton_convolve[divider_convolve > 0] = skeleton_convolve[divider_convolve > 0] \
                                              / divider_convolve[divider_convolve > 0]
    new_skeleton = np.zeros_like(skeleton)
    new_skeleton[skeleton2] = skeleton_convolve[skeleton2]
    skeleton = new_skeleton

    return labels, mch_collector, skeleton, transform_filter
Example #26
def label_nuclei(binary, min_size):
    '''Label, watershed and remove small objects'''

    distance = medial_axis(binary, return_distance=True)[1]

    distance_blured = gaussian_filter(distance, 5)

    local_maxi = peak_local_max(distance_blured, indices=False, labels=binary, min_distance = 30)

    markers = measure_label(local_maxi)

#    markers[~binary] = -1

#    labels_rw = segmentation.random_walker(binary, markers)

#    labels_rw[labels_rw == -1] = 0

#    labels_rw = segmentation.relabel_sequential(labels_rw)

    labels_ws = watershed(-distance, markers, mask=binary)

    labels_large = remove_small_objects(labels_ws,min_size)

    labels_clean_border = clear_border(labels_large)

    labels_from_one = relabel_sequential(labels_clean_border)

#    plt.imshow(ndimage.morphology.binary_dilation(markers))
#    plt.show()

    return labels_from_one[0]
Example #27
def skeletonization():
    from skimage import img_as_bool, io, color, morphology
    
    image = img_as_bool(color.rgb2gray(io.imread('planC2V2black.tiff')))
    out = morphology.medial_axis(image)
    
    f, (ax0, ax1) = plt.subplots(1, 2)
    ax0.imshow(image, cmap='gray', interpolation='nearest')
    ax1.imshow(out, cmap='gray', interpolation='nearest')
    
    DefaultSize = f.get_size_inches()
    f.set_size_inches(DefaultSize[0]*2, DefaultSize[1]*2)
    plt.savefig("test.png", dpi = (1000))
    plt.show()
    
    res = []
    
    for i in range(len(out)):
        res.append([])
        for j in range(len(out[0])):
            if out[i][j]:
                res[i].append('1')
                
            else:
                if imarray[i][j][0] == 255: ## this part was modified to keep the walls in memory
                    res[i].append('2')
                else:
                    res[i].append('0')
        
    #print(res)
    #affCouleurs(suppParasites(res))
    return res
Example #28
def get_length(neurite_mask, show=True):
    """
    Arguments:
    ----------
        neurite_mask: string or numpy.bool array
            path to binary image indicating the presence of neurites, OR
            corresponding boolean numpy.ndarray

        show: bool (default True)
            if True, plots intermediate steps of image analysis

    Returns:
    --------
        neurite_length: int
            total neurite length in pixels
    """

    neurite_mask = utils.handle_binary_image_input(neurite_mask)
    neurite_skeleton = medial_axis(neurite_mask)
    neurite_length = neurite_skeleton.sum()

    if show:
        images = [neurite_mask, neurite_skeleton]
        titles = ['Neurite mask', 'Medial axis']
        fig = utils.plot_image_collection(images, titles, nrows=1, ncols=2)
        fig.suptitle('Neurite length', fontsize='large')

    return neurite_length
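
A hedged usage sketch for get_length with plotting disabled, assuming the function above is in scope and that utils.handle_binary_image_input accepts a boolean array as documented; the synthetic mask is illustrative.

import numpy as np

# synthetic mask with two crossing, 1-pixel-wide "neurites"
neurites = np.zeros((64, 64), dtype=bool)
neurites[32, 4:60] = True
neurites[4:60, 32] = True

total_length = get_length(neurites, show=False)
print(total_length)  # total number of skeleton pixels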
Example #29
def label_nuclei(binary, min_size):
    '''Label, watershed and remove small objects'''

    distance = medial_axis(binary, return_distance=True)[1]

    distance_blured = gaussian_filter(distance, 5)

    local_maxi = peak_local_max(distance_blured,
                                indices=False,
                                labels=binary,
                                min_distance=30)

    markers = measure_label(local_maxi)

    #    markers[~binary] = -1

    #    labels_rw = segmentation.random_walker(binary, markers)

    #    labels_rw[labels_rw == -1] = 0

    #    labels_rw = segmentation.relabel_sequential(labels_rw)

    labels_ws = watershed(-distance, markers, mask=binary)

    labels_large = remove_small_objects(labels_ws, min_size)

    labels_clean_border = clear_border(labels_large)

    labels_from_one = relabel_sequential(labels_clean_border)

    #    plt.imshow(ndimage.morphology.binary_dilation(markers))
    #    plt.show()

    return labels_from_one[0]
Example #30
def skeletonize_mitochondria(mch_channel):
    mch_collector = np.max(mch_channel, axis=0)  # TODO: check max projection v.s. sum
    skeleton_labels = np.zeros(mch_collector.shape, dtype=np.uint8)

    # thresh = np.max(mch_collector)/2.
    thresh = threshold_otsu(mch_collector)
    # use adaptative threshold? => otsu seems to be sufficient in this case

    skeleton_labels[mch_collector > thresh] = 1
    skeleton2 = skeletonize(skeleton_labels)
    skeleton, distance = medial_axis(skeleton_labels, return_distance=True)
    active_threshold = np.mean(mch_collector[skeleton_labels]) * 5

    # print active_threshold
    transform_filter = np.zeros(mch_collector.shape, dtype=np.uint8)
    transform_filter[np.logical_and(skeleton > 0, mch_collector > active_threshold)] = 1
    skeleton = transform_filter * distance

    skeleton_ma = np.ma.masked_array(skeleton, skeleton > 0)
    skeleton_convolve = ndi.convolve(skeleton_ma, np.ones((3, 3)), mode='constant', cval=0.0)
    divider_convolve = ndi.convolve(transform_filter, np.ones((3, 3)), mode='constant', cval=0.0)
    skeleton_convolve[divider_convolve > 0] = skeleton_convolve[divider_convolve > 0] / \
                                              divider_convolve[divider_convolve > 0]
    new_skeleton = np.zeros_like(skeleton)
    new_skeleton[skeleton2] = skeleton_convolve[skeleton2]
    skeleton = new_skeleton

    return skeleton_labels, mch_collector, skeleton, transform_filter
Example #31
def medial_scan_hand(im=None, skeletonize=False, save_path=None):
    """
    Read in the hand image and run MAT on it then display next to original.
    Code via skimage plot_medial_transform example.

    If save_path is given, will write the result to a file.
    """
    if im is None:
        im = auto_hand_img()
    # Compute the medial axis (skeleton) and the distance transform
    skel, distance = medial_axis(im, return_distance=True)

    # Distance to the background for pixels of the skeleton
    dist_on_skel = distance * skel

    fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
    ax1.imshow(im, cmap=plt.get_cmap('gray'), interpolation='nearest')
    ax1.axis('off')
    ax2.imshow(dist_on_skel, cmap=plt.get_cmap('Spectral'), interpolation='nearest')
    ax2.contour(im, [0.5], colors='w')
    ax2.axis('off')

    fig.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0, left=0, right=1)
    if save_path is None:
        plt.show()
    else:
        fig.savefig(save_path)
Example #32
def generate_center_med_axis_from_points(points, mask):
    '''Generate medial axis transform for a centerline
        from a list of points that the centerline will go through.
        This is used to generate all the widths along a particular centerline
    
    Parameters:
    ------------
    points: list of 2-d tuples
        List of indices that the centerline will go through. 
        Indices must be given in the order that the centerline should 
        encounter each point
    mask: array_like (cast to booleans) shape (n,m) 
        Binary image of the worm mask

    Returns:
    -----------
    traceback: list of 2-d tuples
        List of indices associated with the centerline, starting with
        one of the endpoints of the centerline and ending with the ending
        index of the centerline. 
    distances: ndarray shape(n,m)
        Distance transform from the medial axis transform of the worm mask
    '''
    skeleton, med_axis = medial_axis(mask, return_distance=True)
    traceback = generate_centerline_from_points(points, skeleton)
    centerline = generate_centerline(traceback, mask.shape)
    distances = centerline * med_axis

    return traceback, distances
Example #33
def update_splines(traceback, mask):
    '''Generate center_tck and width_tck from a new traceback

    Parameters:
    ------------
    traceback: list of 2-d tuples
        List of indices associated with the centerline, starting with
        one of the endpoints of the centerline and ending with the ending
        index of the centerline. 
    mask: array_like (cast to booleans) shape (n,m) 
        Binary image of the worm mask

    Returns:
    -----------
    center_tck: parametric spline tuple
        spline tuple for spline corresponding to the centerline 
    width_tck: nonparametric spline tuple
        spline tuple corresponding to the widths along the
        centerline of the worm
    '''
    skeleton, med_axis = medial_axis(mask, return_distance=True)
    centerline = generate_centerline(traceback, mask.shape)
    distances = centerline * med_axis
    center_tck = center_spline(traceback, distances)
    width_tck = width_spline(traceback, distances)

    return center_tck, width_tck
Example #34
 def __init__(self, glyph, scale_factor):
   self.glyph = glyph
   self.glyph_img = rasterize(glyph, scale=scale_factor) > 128
   h, w = self.glyph_img.shape
   self.scale_factor = scale_factor
   # Labled by connectivity
   self.labeled, n_labels = label(self.glyph_img, return_num=True)
   axis, dist = medial_axis(self.glyph_img, return_distance=True)
   # Estimated stroke width
   self.stroke_width = np.average(dist[axis]) * 2
   # Smoothed image
   smooth = gaussian(self.glyph_img, 8*scale_factor) > 0.6
   # Skeleton image
   skel = np.logical_and(
     skeletonize(smooth), dist >= self.stroke_width * 0.3)
   # Skeleton segments
   skel_segments = get_skeleton_segments(skel, 
     prune_length=self.stroke_width * 0.25)
   # Reference skeleton points
   self.skel_pts = (np.array(
     [ pt[::-1] for segment in skel_segments for pt in segment ])
     /scale_factor).astype(int)
   # Skeleton points by their belonging region labels
   self.skel_pts_by_label = [ [] for _ in range(n_labels) ]
   for pt in self.skel_pts:
     ptx = int(pt[0] * scale_factor + 0.5)
     pty = int(pt[1] * scale_factor + 0.5)
     ptx = w-1 if ptx >= w else 0 if ptx < 0 else ptx
     pty = h-1 if pty >= h else 0 if pty < 0 else pty
     pt_label = self.labeled[pty, ptx]
     self.skel_pts_by_label[pt_label-1].append(pt)
Example #35
def computeSkeleton(data):
    skel, dist = medial_axis(data, return_distance=True)

    dist_on_skel = dist * skel

    print("Medial axis computed...")

    return (skel, dist, dist_on_skel)
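
A hedged usage sketch for computeSkeleton, assuming medial_axis is imported in the surrounding module; the toy blob is illustrative.

import numpy as np

blob = np.zeros((30, 30), dtype=bool)
blob[5:25, 5:25] = True  # toy square object

skel, dist, dist_on_skel = computeSkeleton(blob)
print(skel.sum(), dist_on_skel.max())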
Example #36
def skeleton_image(img_array, median=False, dim=28):
    """
    Thins the foreground of an image down to one pixel
    """
    img = resquare(binarize(img_array)//255, dim)
    if median:
        return (255*medial_axis(img)).flatten()
    return (255*skeletonize(img)).flatten()
Example #37
def _process_img_morph(img, threshold=.5, scale=1):
    if scale > 1:
        up_img = transform.pyramid_expand(img, upscale=scale, order=3)  # type: np.ndarray
        img = (255. * up_img).astype(img.dtype)
    img_min, img_max = img.min(), img.max()
    bin_img = (img >= img_min + (img_max - img_min) * threshold)
    skel, dist_map = morphology.medial_axis(bin_img, return_distance=True)
    return img, bin_img, skel, dist_map
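
A hedged usage sketch for _process_img_morph with the default threshold and no upscaling, assuming the morphology import shown above is available; the toy grayscale array is illustrative.

import numpy as np

img = np.zeros((32, 32), dtype=np.uint8)
img[8:24, 8:24] = 200  # bright square on a dark background

out_img, bin_img, skel, dist_map = _process_img_morph(img, threshold=.5, scale=1)
print(bin_img.sum(), skel.sum(), float(dist_map.max()))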
Example #38
def skeleton(seg):
    skel, dist = skmorph.medial_axis(seg, return_distance=True)
    node, edge, leaf = (spim.label(g, np.ones((3, 3), bool))[0] for g in skel2graph(skel))

    trim_edge = (edge != 0) & ~(skmorph.binary_dilation(node != 0, np.ones((3, 3), bool)) != 0)
    trim_edge = spim.label(trim_edge, np.ones((3, 3), bool))[0]

    leaf_edge_vals = skmorph.binary_dilation(leaf != 0, np.ones((3, 3), bool)) != 0
    leaf_edge_vals = np.unique(trim_edge[leaf_edge_vals])
    leaf_edge_vals = leaf_edge_vals[leaf_edge_vals > 0]
    leaf_edge = leaf != 0

    trim_edge = ndshm.fromndarray(trim_edge)
    leaf_edge = ndshm.fromndarray(leaf_edge)
    Parallel()(delayed(set_msk)(leaf_edge, trim_edge, l) for l in leaf_edge_vals)
    trim_edge = np.copy(trim_edge)
    leaf_edge = np.copy(leaf_edge)

    leaf_edge[(skmorph.binary_dilation(leaf_edge, np.ones((3, 3), bool)) != 0) & (edge != 0)] = True
    leaf_edge = spim.label(leaf_edge, np.ones((3, 3), bool))[0]

    leaf_edge_node = skmorph.binary_dilation(leaf_edge != 0, np.ones((3, 3), bool)) != 0
    leaf_edge_node = ((node != 0) & leaf_edge_node) | leaf_edge
    leaf_edge_node = spim.label(leaf_edge_node, np.ones((3, 3), bool))[0]

    cand_node = leaf_edge_node * (node != 0)
    cand_node = cand_node.nonzero()
    cand_node = np.transpose((leaf_edge_node[cand_node],) + cand_node + (2 * dist[cand_node],))

    cand_leaf = leaf_edge_node * (leaf != 0)
    cand_leaf = cand_leaf.nonzero()
    cand_leaf = np.transpose((leaf_edge_node[cand_leaf],) + cand_leaf)

    if len(cand_node) > 0 and len(cand_leaf) > 0:
        cand_leaf = ndshm.fromndarray(cand_leaf)
        cand_node = ndshm.fromndarray(cand_node)
        pruned = Parallel()(delayed(prune_leaves)(cand_leaf, cand_node, j) for j in np.unique(cand_node[:, 0]))
        cand_leaf = np.copy(cand_leaf)
        cand_node = np.copy(cand_node)

        pruned_ind = []
        for p in pruned:
            pruned_ind.extend(p)
        pruned_ind = tuple(np.transpose(pruned_ind))

        pruned = ~skel

        pruned = ndshm.fromndarray(pruned)
        leaf_edge = ndshm.fromndarray(leaf_edge)
        Parallel()(delayed(set_msk)(pruned, leaf_edge, l) for l in np.unique(leaf_edge[pruned_ind]))
        pruned = np.copy(pruned)
        leaf_edge = np.copy(leaf_edge)

        pruned = ~pruned
    else:
        pruned = skel

    return pruned
Example #39
def get_text_image(text, font, point_size):
    text = get_text(text, font, point_size)
    io.imsave(IMAGE_FILENAME, text)
    thresh = skfilter.threshold_otsu(text)
    binary = text > thresh
    skel, distance = morphology.medial_axis(binary, return_distance=True)
    distance = distance.astype(np.uint16)
    skel = skel.astype(np.uint16)
    return skel*distance
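
A minimal standalone sketch of the core step above (Otsu threshold, medial axis, distance-weighted skeleton), skipping the text rendering and file saving; the synthetic stroke is a stand-in for rendered text.

import numpy as np
from skimage import filters, morphology

gray = np.zeros((40, 120), dtype=np.uint8)
gray[15:25, 10:110] = 180  # stand-in for a rendered text stroke

thresh = filters.threshold_otsu(gray)
binary = gray > thresh
skel, distance = morphology.medial_axis(binary, return_distance=True)
weighted = skel.astype(np.uint16) * distance.astype(np.uint16)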
Example #40
def make_skeleton(image, mindist):
    """Return a skeletonization of the image, filtered and normalized."""
    skel, distance = morphology.medial_axis(image, return_distance=True)

    dist_on_skel = distance * skel

    dist_on_skel = filter_skeleton_distances(dist_on_skel)
    normalized = normalize_skeleton(dist_on_skel)

    return normalized
Example #41
 def test_01_01_rectangle(self):
     '''Test skeletonize on a rectangle'''
     image = np.zeros((9, 15), bool)
     image[1:-1, 1:-1] = True
     #
     # The result should be four diagonals from the
     # corners, meeting in a horizontal line
     #
     expected = np.array([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
                          [0,1,0,0,0,0,0,0,0,0,0,0,0,1,0],
                          [0,0,1,0,0,0,0,0,0,0,0,0,1,0,0],
                          [0,0,0,1,0,0,0,0,0,0,0,1,0,0,0],
                          [0,0,0,0,1,1,1,1,1,1,1,0,0,0,0],
                          [0,0,0,1,0,0,0,0,0,0,0,1,0,0,0],
                          [0,0,1,0,0,0,0,0,0,0,0,0,1,0,0],
                          [0,1,0,0,0,0,0,0,0,0,0,0,0,1,0],
                          [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]], bool)
     result = medial_axis(image)
     assert np.all(result == expected)
     result, distance = medial_axis(image, return_distance=True)
     assert distance.max() == 4
Example #42
def loadImage(filename):
    global im
    global distanceImage
    global lapImage
    global skeleton
    global resultImage
    global im1
    global skimage

    im = np.asarray(Image.open(filename))
    distanceImage, lapImage, resultImage = medial(im)
    skeleton = lapImage < 0
    im1 = ax.imshow(im, cmap = "gray", interpolation="nearest")
    skimage = medial_axis(im, return_distance=False)
    lapImage = lapImage - np.max(lapImage)
Example #43
 def test_01_02_hole(self):
     '''Test skeletonize on a rectangle with a hole in the middle'''
     image = np.zeros((9, 15), bool)
     image[1:-1, 1:-1] = True
     image[4, 4:-4] = False
     expected = np.array([[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0],
                          [0,1,0,0,0,0,0,0,0,0,0,0,0,1,0],
                          [0,0,1,1,1,1,1,1,1,1,1,1,1,0,0],
                          [0,0,1,0,0,0,0,0,0,0,0,0,1,0,0],
                          [0,0,1,0,0,0,0,0,0,0,0,0,1,0,0],
                          [0,0,1,0,0,0,0,0,0,0,0,0,1,0,0],
                          [0,0,1,1,1,1,1,1,1,1,1,1,1,0,0],
                          [0,1,0,0,0,0,0,0,0,0,0,0,0,1,0],
                          [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]],bool)
     result = medial_axis(image)
     assert np.all(result == expected)
Example #44
    def __transform(self):
        img_gray = io.imread(self.image_path, True)
        # apply Otsu thresholding to binarize the image
        img_otsu = filter.threshold_otsu(img_gray) 
        self.__img = img_gray < img_otsu

        # find the contours of the binarized image
        self.__contours = measure.find_contours(self.__img, 0.5)
        
        arclen=0.0
        for n, contour in enumerate(self.__contours):
            arclenTemp=0.0
            for indice, valor in enumerate(contour):
                if indice > 0:
                    d1 = math.fabs(round(valor[0]) - round(contour[indice-1,0]))
                    d2 = math.fabs(round(valor[1]) - round(contour[indice-1,1]))
                    if d1+d2>1.0:
                        arclenTemp+=math.sqrt(2)
                    elif d1+d2 == 1:
                        arclenTemp+=1
            if arclenTemp > arclen:
                arclen = arclenTemp
                bestn = n
        
        # turn the contours[bestn] list into an [n,2] matrix
        aux = np.asarray(self.__contours[bestn])

        #---------------------------  Curvature --------------
        vetor = [] # vector that will hold the curvatures k(t)

        for i in range(len(aux)-2):    # iterate up to -2 so as not to index nonexistent elements
            #---------------------------  Curvature --------------
            # swap the positions relative to the formula, since x takes the place of y
            b1 =  ( (aux[i-2,1]+aux[i+2,1]) + (2*(aux[i-1,1] + aux[i+1,1])) - (6*aux[i,1]) ) / 12
            b2 = ( (aux[i-2,0]+aux[i+2,0]) + (2*(aux[i-1,0] + aux[i+1,0])) - (6*aux[i,0]) ) / 12
            c1 =  ( (aux[i+2,1]-aux[i-2,1]) + (4*(aux[i+1,1] - aux[i-1,1])) ) / 12
            c2 = ( (aux[i+2,0]-aux[i-2,0]) + (4*(aux[i+1,0] - aux[i-1,0])) ) / 12

            k =  (2*(c1*b1 - c2*b2)) / ((c1**2 + c2**2)**(3/2))

            vetor.append(k) # append: inserts the object at the end of the list

        self.__media_curvatura = np.mean(np.abs(vetor))
        self.__comprimento_arco = arclen
        self.__area = np.sum(self.__img)
        self.__esqueleto_pixel = np.sum(morphology.medial_axis(self.__img))
        self.__bestn = bestn
Example #45
def image_features_morphology(img, maxPixel, num_features,imageSize):
     # X is the feature vector with one row of features per image
     #  consisting of the pixel values a, num_featuresnd our metric

     X=np.zeros(num_features, dtype=float)
     image = resize(img, (maxPixel, maxPixel))

     # Compute the medial axis (skeleton) and the distance transform
     skel, distance = medial_axis(image, return_distance=True)

     # Distance to the background for pixels of the skeleton
     dist_on_skel = distance * skel

    # Store the rescaled image pixels
     X[0:imageSize] = np.reshape(dist_on_skel,(1, imageSize))

     return X
Example #46
def get_skeleton_of_maze(image_to_analyze, image_to_draw_on=None,
                         use_medial_axis=True, invert_threshold=False,
                         locate_critical_points=True, resize_ratio=1):
    """
    Computes, returns, and potentially displays the morphological skeleton of the given binary image.
    :param image_to_analyze: Image to manipulate in search of results.
    :param image_to_draw_on: Image passed in that is to be used for drawing the results of analysis.
    :param use_medial_axis: Whether to use an alternative method for finding the skeleton that
                              allows the computation of the critical points in the image.
    :param invert_threshold: Whether the threshold should be inverted before the skeleton is located
    :param locate_critical_points: Whether to find and draw critical points on the skeleton.
    :param resize_ratio: What ratio to rescale outgoing results to before they're displayed.
    :return: The skeleton of the image, and the critical points (If you chose to try to find them)
    """
    result = []
    # Invert our thresholded image since this will skeletonize white areas
    if invert_threshold:
        image_to_analyze = cv2.bitwise_not(image_to_analyze)
    image_to_analyze = cv2.bitwise_not(image_to_analyze)

    if use_medial_axis or locate_critical_points:
        # http://scikit-image.org/docs/dev/auto_examples/plot_medial_transform.html
        # In short, allows us to find the distance to areas on the skeleton.
        # This information can be used to find critical points in the skeleton, theoretically.
        path_skeleton, distance = medial_axis(image_to_analyze, return_distance=True)
        distance_on_skeleton = path_skeleton * distance
        path_skeleton = img_as_ubyte(path_skeleton)
        result.append(path_skeleton)

        if locate_critical_points:
            critical_points = find_critical_points(distance_on_skeleton, number_of_points=10,
                                                   minimum_thickness=50, edge_width=20,
                                                   image_to_draw_on=image_to_draw_on)
            result.append(critical_points)
    else:
        skeleton = skeletonize(image_to_analyze/255)
        path_skeleton = np.array(skeleton*255, np.uint8)
        result.append(path_skeleton)

    if image_to_draw_on is not None:
        path_skeleton_temp = cv2.cvtColor(path_skeleton, cv2.COLOR_GRAY2BGR)
        superimposed_skeleton = cv2.add(image_to_draw_on, path_skeleton_temp)
        display("Skeleton", superimposed_skeleton, resize_ratio)
    return result
Example #47
def get_skeleton(img):
    
    rows, cols = img.shape
    
    for r in xrange(rows):
        img[r][0] = 0
        img[r][cols - 1] = 0
    for c in xrange(cols):
        img[0][c] = 0
        img[rows - 1][c] = 0
    
    dst = np.asarray(map(lambda row:
                         np.asarray(map(lambda x: 1 if x == 255 else 0,row)),
                     img))
    dst, _ = morphology.medial_axis(dst, return_distance=True)
    dst = np.asarray(map(lambda row:
                         np.asarray(map(lambda x: 255 if x else 0,row)),
                     dst))
    return dst
Example #48
    def __transform(self):
        img_gray = io.imread(self.__img_path, True)
        
        thresh = filter.threshold_otsu(img_gray)
        data = img_gray < thresh
        s1, self.__distancia = morphology.medial_axis(data, return_distance=True)
        self.__s2 = s1

        for i in range(len(s1)):
            for j in range(len(s1)):
                if (s1[i,j] != False): # walk along the image skeleton
                    x, y,val = circle_perimeter_aa(i, j, int(self.__distancia[i,j]))

                    # draw a circle around the skeleton pixel
                    # i,j = center coordinates  -- int(self.__distancia[i,j]) = circle radius
                    # x,y = pixel indices  ---- val = intensity

                    # decide which circles to keep based on their radius
                    if (int(self.__distancia[i,j]) > 0):
                        self.__s2[x, y] = True
                    else:
                        self.__s2[x, y] = False
                else:
                    self.__s2[i, j] = False        
Example #49
    bub = BubbleFinder2D(mom0, sigma=80. * beam.jtok(hi_freq) / 1000.)

    # fils = fil_finder_2D(mom0.value, mom0.header, 10, distance=0.84e6)
    # fils.mask = ~(bub.mask.copy())
    # fils.medskel()
    # fils.analyze_skeletons()
    # # So at least on of the radial profiles fails. BUT the second fit is to a
    # # skeleton that is essentially the entire disk, so plot without interactivity
    # # and save the plot and the parameters shown in verbose mode.
    # p.ioff()
    # fils.find_widths(verbose=True, max_distance=500, auto_cut=False, try_nonparam=False)

    # Fit Parameters: [ 541.31726502  129.85351117  180.0710914   304.01262168
    # Fit Errors: [ 0.89151974  0.48394493  0.27313627  1.1462345 ]

    skeleton = medial_axis(~bub.mask)

    # Overplot the skeleton on the moment0
    ax = p.subplot(111, projection=mom0.wcs)
    ax.imshow(mom0.value, origin='lower')
    ax.contour(skeleton, colors='r')
    p.draw()

    p.savefig(paper1_figures_path("moment0_w_skeletons.pdf"), rasterize=True)
    p.savefig(paper1_figures_path("moment0_w_skeletons.png"))

    # raw_input("Next plot?")
    p.clf()

    pixscale = \
        mom0.header['CDELT2'] * (np.pi / 180.) * gal.distance.to(u.pc).value
Example #50
 def test_narrow_image(self):
     """Test skeletonize on a 1-pixel thin strip"""
     image = np.zeros((1, 5), bool)
     image[:, 1:-1] = True
     result = medial_axis(image)
     assert np.all(result == image)
Example #51
images = [f for f in listdir(figureDir) if isfile(join(figureDir, f))]
# http://stackoverflow.com/questions/7304117/split-filenames-with-python
imageNames = [fname.rsplit('.', 1)[0].capitalize() for fname in os.listdir(figureDir)]
filenames = dict(zip(imageNames[0::1], images[0::1]))

fig = plt.figure()
ax = fig.add_subplot(111)
fig.subplots_adjust(left=0.25, bottom=0.25)
min0 = 0
max0 = 25000

im = np.asarray(Image.open(os.path.join(figureDir,filenames[imageNames[0]])))
im1 = ax.imshow(im, cmap = "gray")
distanceImage, lapImage, resultImage = medial(im)
skeleton = lapImage < 0
skimage = medial_axis(im, return_distance=False)
lapImage = lapImage - np.max(lapImage)
# distanceImage = distance_transform_edt(im)
# lapImage = laplace(distanceImage, mode="constant")
# resultImage = np.logical_and(np.logical_not(skeleton), im )
def loadImage(filename):
    global im
    global distanceImage
    global lapImage
    global skeleton
    global resultImage
    global im1
    global skimage

    im = np.asarray(Image.open(filename))
    distanceImage, lapImage, resultImage = medial(im)
Example #52
# objects. As the function ``medial_axis`` returns the distance transform in
# addition to the medial axis (with the keyword argument ``return_distance=True``),
# it is possible to compute the distance to the background for all points of
# the medial axis with this function. This gives an estimate of the local width
# of the objects.
#
# For a skeleton with fewer branches, ``skeletonize`` or ``skeletonize_3d``
# should be preferred.

import matplotlib.pyplot as plt

from skimage.data import binary_blobs
from skimage.morphology import medial_axis, skeletonize, skeletonize_3d

# Generate the data
data = binary_blobs(200, blob_size_fraction=.2, volume_fraction=.35, seed=1)

# Compute the medial axis (skeleton) and the distance transform
skel, distance = medial_axis(data, return_distance=True)

# Compare with other skeletonization algorithms
skeleton = skeletonize(data)
skeleton3d = skeletonize_3d(data)

# Distance to the background for pixels of the skeleton
dist_on_skel = distance * skel

fig, axes = plt.subplots(2, 2, figsize=(8, 8), sharex=True, sharey=True,
                         subplot_kw={'adjustable': 'box-forced'})
ax = axes.ravel()

ax[0].imshow(data, cmap=plt.cm.gray, interpolation='nearest')
ax[0].set_title('original')
ax[0].axis('off')
Example #53
0
import numpy as np
from skimage import io
from skimage import morphology
import matplotlib.pyplot as plt

io.use_plugin('matplotlib')

# test image from Matlab bwmorph('skel') demo
bw = io.imread('circles.png') > 0

sk0 = morphology.skeletonize(bw)
sk1 = morphology.medial_axis(bw).astype(np.uint8)

f, (ax0, ax1) = plt.subplots(1, 2)

ax0.imshow(bw + sk0, cmap=plt.cm.gray, interpolation='nearest')
ax0.set_title('Skeletonize')

ax1.imshow(bw + sk1, cmap=plt.cm.gray, interpolation='nearest')
ax1.set_title('Medial axis')

plt.show()
Example #54
0
def process_file(img_id, par, par2, vgg_big_path, vgg_small_path, linknet_small_path, small_res_file_path, inc_file_path, 
                 vgg_smallest_file_path, inc_smallest_file_path, res_smallest_file_path, inc3_520_file_path, inc_v2_520_file_path,
                 linknet_big_file_path, linknet_520_file_path,
                 vgg_big_path_1, vgg_smallest_file_path_1, 
                 inc_smallest_file_path_1, res_smallest_file_path_1, inc3_520_file_path_1, inc_v2_520_file_path_1, 
                  linknet_big_file_path_1, linknet_520_file_path_1, save_to=None):
    res_rows = []
    
    if vgg_small_path is None:
        msk = np.zeros((1300, 1300))
    else:
        msk = cv2.imread(vgg_small_path, cv2.IMREAD_UNCHANGED)
        msk = cv2.resize(msk, (1300, 1300))
    if linknet_small_path is None:
        msk2 = np.zeros((1300, 1300))
    else:
        msk2 = cv2.imread(linknet_small_path, cv2.IMREAD_UNCHANGED)
        msk2 = cv2.resize(msk2, (1300, 1300))
    if vgg_big_path is None:
        msk3 = np.zeros((1300, 1300))
        msk3_1 = np.zeros((1300, 1300))
    else:
        msk3 =  cv2.imread(vgg_big_path, cv2.IMREAD_UNCHANGED)
        msk3_1 =  cv2.imread(vgg_big_path_1, cv2.IMREAD_UNCHANGED)
    if small_res_file_path is None:
        res_msk = np.zeros((1300, 1300))
    else:
        res_msk = cv2.imread(small_res_file_path, cv2.IMREAD_UNCHANGED)
        res_msk = cv2.resize(res_msk, (1300, 1300))
    if inc_file_path is None:
        inc_msk = np.zeros((1300, 1300))
    else:
        inc_msk = cv2.imread(inc_file_path, cv2.IMREAD_UNCHANGED)
        inc_msk = cv2.resize(inc_msk, (1300, 1300))
    if vgg_smallest_file_path is None:
        vgg_smlst_msk = np.zeros((1300, 1300))
        vgg_smlst_msk_1 = np.zeros((1300, 1300))
    else:
        vgg_smlst_msk = cv2.imread(vgg_smallest_file_path, cv2.IMREAD_UNCHANGED)
        vgg_smlst_msk = cv2.resize(vgg_smlst_msk, (1300, 1300))
        vgg_smlst_msk_1 = cv2.imread(vgg_smallest_file_path_1, cv2.IMREAD_UNCHANGED)
        vgg_smlst_msk_1 = cv2.resize(vgg_smlst_msk_1, (1300, 1300))
    if inc_smallest_file_path is None:
        inc_smlst_msk = np.zeros((1300, 1300))
        inc_smlst_msk_1 = np.zeros((1300, 1300))
    else:
        inc_smlst_msk = cv2.imread(inc_smallest_file_path, cv2.IMREAD_UNCHANGED)
        inc_smlst_msk = cv2.resize(inc_smlst_msk, (1300, 1300))
        inc_smlst_msk_1 = cv2.imread(inc_smallest_file_path_1, cv2.IMREAD_UNCHANGED)
        inc_smlst_msk_1 = cv2.resize(inc_smlst_msk_1, (1300, 1300))
    if res_smallest_file_path is None:
        res_smlst_msk = np.zeros((1300, 1300))
        res_smlst_msk_1 = np.zeros((1300, 1300))
    else:
        res_smlst_msk = cv2.imread(res_smallest_file_path, cv2.IMREAD_UNCHANGED)
        res_smlst_msk = cv2.resize(res_smlst_msk, (1300, 1300))
        res_smlst_msk_1 = cv2.imread(res_smallest_file_path_1, cv2.IMREAD_UNCHANGED)
        res_smlst_msk_1 = cv2.resize(res_smlst_msk_1, (1300, 1300))
    if inc3_520_file_path is None:
        inc3_520_msk = np.zeros((1300, 1300))
        inc3_520_msk_1 = np.zeros((1300, 1300))
    else:
        inc3_520_msk = cv2.imread(inc3_520_file_path, cv2.IMREAD_UNCHANGED)
        inc3_520_msk = cv2.resize(inc3_520_msk, (1300, 1300))
        inc3_520_msk_1 = cv2.imread(inc3_520_file_path_1, cv2.IMREAD_UNCHANGED)
        inc3_520_msk_1 = cv2.resize(inc3_520_msk_1, (1300, 1300))
    if inc_v2_520_file_path is None:
        inc_v2_520_msk = np.zeros((1300, 1300))
        inc_v2_520_msk_1 = np.zeros((1300, 1300))
    else:
        inc_v2_520_msk = cv2.imread(inc_v2_520_file_path, cv2.IMREAD_UNCHANGED)
        inc_v2_520_msk = cv2.resize(inc_v2_520_msk, (1300, 1300))
        inc_v2_520_msk_1 = cv2.imread(inc_v2_520_file_path_1, cv2.IMREAD_UNCHANGED)
        inc_v2_520_msk_1 = cv2.resize(inc_v2_520_msk_1, (1300, 1300))
    if linknet_big_file_path is None:
        link_big_msk = np.zeros((1300, 1300))
        link_big_msk_1 = np.zeros((1300, 1300))
    else:
        link_big_msk = cv2.imread(linknet_big_file_path, cv2.IMREAD_UNCHANGED)
        link_big_msk_1 = cv2.imread(linknet_big_file_path_1, cv2.IMREAD_UNCHANGED)
    if linknet_520_file_path is None:
        link_520_msk = np.zeros((1300, 1300))
        link_520_msk_1 = np.zeros((1300, 1300))
    else:
        link_520_msk = cv2.imread(linknet_520_file_path, cv2.IMREAD_UNCHANGED)
        link_520_msk = cv2.resize(link_520_msk, (1300, 1300))
        link_520_msk_1 = cv2.imread(linknet_520_file_path_1, cv2.IMREAD_UNCHANGED)
        link_520_msk_1 = cv2.resize(link_520_msk_1, (1300, 1300))
    
    msk3 = (msk3 * 0.5 + msk3_1 * 0.5)
    inc_smlst_msk = (inc_smlst_msk * 0.5 + inc_smlst_msk_1 * 0.5)
    vgg_smlst_msk = (vgg_smlst_msk * 0.5 + vgg_smlst_msk_1 * 0.5)
    res_smlst_msk = (res_smlst_msk * 0.5 + res_smlst_msk_1 * 0.5)
    inc3_520_msk = (inc3_520_msk * 0.5 + inc3_520_msk_1 * 0.5)
    inc_v2_520_msk = (inc_v2_520_msk * 0.5 + inc_v2_520_msk_1 * 0.5)
    link_big_msk = (link_big_msk * 0.5 + link_big_msk_1 * 0.5)
    link_520_msk = (link_520_msk * 0.5 + link_520_msk_1 * 0.5)
    
    coef = []
    tot_sum = par[:12].sum()
    for i in range(12):
        coef.append(par[i] / tot_sum)
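    # blend all model masks into one probability map using the normalized weights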
    msk = (msk * coef[0] + msk2 * coef[1] + msk3 * coef[2] + res_msk * coef[3] + inc_msk * coef[4]
             + vgg_smlst_msk * coef[5]  + inc_smlst_msk * coef[6] + res_smlst_msk * coef[7] 
             + inc3_520_msk * coef[8] + inc_v2_520_msk * coef[9] + link_big_msk * coef[10] + link_520_msk * coef[11])
    msk = msk.astype('uint8')
    if save_to is not None:
        cv2.imwrite(save_to, msk, [cv2.IMWRITE_PNG_COMPRESSION, 9])

    msk2 = np.lib.pad(msk, ((22, 22), (22, 22)), 'symmetric')
    
    thr = par[12]
        
    msk2 = 1 * (msk2 > thr)
    msk2 = msk2.astype(np.uint8)
    
    if par2[0] > 0:
        msk2 = dilation(msk2, square(par2[0]))
    if par2[1] > 0:
        msk2 = erosion(msk2, square(par2[1]))
        
    if 'Shanghai' in img_id:
        skeleton = medial_axis(msk2)
    else:
        skeleton = skeletonize_3d(msk2)
    skeleton = skeleton[22:1322, 22:1322]
    
    lbl0 = label(skeleton)
    props0 = regionprops(lbl0)
    
    cnt = 0
    crosses = []
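    # scan the skeleton for junction pixels: a skeleton pixel whose 3x3 neighbourhood
    # holds more than 3 skeleton pixels (itself included) touches at least 3 branches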
    for x in range(1300):
        for y in range(1300):
            if skeleton[y, x] == 1:
                if skeleton[max(0, y-1):min(1300, y+2), max(0, x-1):min(1300, x+2)].sum() > 3:
                    cnt += 1
                    crss = []
                    crss.append((x, y))
                    for y0 in range(max(0, y-1), min(1300, y+2)):
                        for x0 in range(max(0, x-1), min(1300, x+2)):
                            if x == x0 and y == y0:
                                continue
                            if skeleton[max(0, y0-1):min(1300, y0+2), max(0, x0-1):min(1300, x0+2)].sum() > 3:
                                crss.append((x0, y0))
                    crosses.append(crss)
    cross_hashes = []
    for crss in crosses:
        crss_hash = set([])
        for x0, y0 in crss:
            crss_hash.add(point_hash(x0, y0))
            skeleton[y0, x0] = 0
        cross_hashes.append(crss_hash)
 
    new_crosses = []
    i = 0
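    # merge junction clusters that share pixels and anchor each merged cross at the
    # member point closest to the cluster centroid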
    while i < len(crosses):
        new_hashes = set([])
        new_hashes.update(cross_hashes[i])
        new_crss = crosses[i][:]
        fl = True
        while fl:
            fl = False
            j = i + 1
            while j < len(crosses):
                if len(new_hashes.intersection(cross_hashes[j])) > 0:
                    new_hashes.update(cross_hashes[j])
                    new_crss.extend(crosses[j])
                    cross_hashes.pop(j)
                    crosses.pop(j)
                    fl = True
                    break
                j += 1
        mean_p = np.asarray(new_crss).mean(axis=0).astype('int')
        if len(new_crss) > 1:
            t = KDTree(new_crss)
            mean_p = new_crss[t.query(mean_p[np.newaxis, :])[1][0][0]]
        new_crosses.append([(mean_p[0], mean_p[1])] + new_crss)
        i += 1
    crosses = new_crosses
    
    lbl = label(skeleton)
    props = regionprops(lbl)
    
    connected_roads = []
    connected_crosses = [set([]) for p in props]
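    # link every labelled skeleton segment (road) to the crosses touching it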
    for i in range(len(crosses)):
        rds = set([])
        for j in range(len(crosses[i])):
            x, y = crosses[i][j]
            for y0 in range(max(0, y-1), min(1300, y+2)):
                for x0 in range(max(0, x-1), min(1300, x+2)):
                    if lbl[y0, x0] > 0:
                        rds.add(lbl[y0, x0])
                        connected_crosses[lbl[y0, x0]-1].add(i)
        connected_roads.append(rds)
    
    res_roads = []
    
    tot_dist_min = par2[2]
    coords_min = par2[3]
        
    for i in range(len(props)):
        coords = props[i].coords
        crss = list(connected_crosses[i])
        tot_dist = props0[lbl0[coords[0][0], coords[0][1]]-1].area

        if (tot_dist < tot_dist_min) or (coords.shape[0] < coords_min and len(crss) < 2):
            continue
        if coords.shape[0] == 1:
            coords = np.asarray([coords[0], coords[0]])
        else:
            coords = get_ordered_coords(lbl, i+1, coords)
        for j in range(len(crss)):
            x, y = crosses[crss[j]][0]
            d1 = abs(coords[0][0] - y) + abs(coords[0][1] - x)
            d2 = abs(coords[-1][0] - y) + abs(coords[-1][1] - x)
            if d1 < d2:
                coords[0][0] = y
                coords[0][1] = x
            else:
                coords[-1][0] = y
                coords[-1][1] = x
        coords_approx = approximate_polygon(coords, 1.5)
        res_roads.append(coords_approx)
        
    hashes = set([])
    final_res_roads = []
    for r in res_roads:
        if r.shape[0] > 2:
            final_res_roads.append(r)
            for i in range(1, r.shape[0]):
                p1 = r[i-1]
                p2 = r[i]
                h1 = pair_hash(p1, p2)
                h2 = pair_hash(p2, p1)
                hashes.add(h1)
                hashes.add(h2)
                            
    for r in res_roads:
        if r.shape[0] == 2:
            p1 = r[0]
            p2 = r[1]
            h1 = pair_hash(p1, p2)
            h2 = pair_hash(p2, p1)
            if not (h1 in hashes or h2 in hashes):
                final_res_roads.append(r)
                hashes.add(h1)
                hashes.add(h2)
        
    end_points = {}
    for r in res_roads:
        h = point_hash(r[0, 0], r[0, 1])
        if not (h in end_points.keys()):
            end_points[h] = 0
        end_points[h] = end_points[h] + 1
        h = point_hash(r[-1, 0], r[-1, 1])
        if not (h in end_points.keys()):
            end_points[h] = 0
        end_points[h] = end_points[h] + 1
    
    road_msk = np.zeros((1300, 1300), dtype=np.int32)
    road_msk = road_msk.copy()
    thickness = 1
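    # rasterize the kept polylines into road_msk, labelling pixels with road index + 1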
    for j in range(len(final_res_roads)):
        l = final_res_roads[j]
        for i in range(len(l) - 1):
            cv2.line(road_msk, (int(l[i, 1]), int(l[i, 0])), (int(l[i+1, 1]), int(l[i+1, 0])), j+1, thickness)
            
    connect_dist = par2[4]

    min_prob = par2[5]
    angles_to_check = [0, radians(5), radians(-5), radians(10), radians(-10), radians(15), radians(-15)]
    if 'Paris' in img_id or 'Vegas' in img_id:
        angles_to_check += [radians(20), radians(-20), radians(25), radians(-25)]
    
    add_dist = par2[6]
    add_dist2 = par2[7]
    
    con_r = par2[8]
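    # first pass: try to connect each dangling road end straight ahead (angle 0) to another road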

    for i in range(len(final_res_roads)):
        h = point_hash(final_res_roads[i][0, 0], final_res_roads[i][0, 1])
        if end_points[h] == 1:
            p1 = final_res_roads[i][1]
            p2 = final_res_roads[i][0]            
            p3 = try_connect(p1, p2, 0, connect_dist, road_msk, min_prob, msk, final_res_roads, con_r)
            if p3 is not None:
                h1 = pair_hash(p2, p3)
                h2 = pair_hash(p3, p2)
                if not (h1 in hashes or h2 in hashes):
                    r_id = road_msk[p3[0], p3[1]] - 1
                    final_res_roads[r_id], new_hashes = inject_point(final_res_roads[r_id], p3)
                    hashes.update(new_hashes)          
                    tmp_road_msk = np.zeros((1300, 1300), dtype=np.int32)
                    tmp_road_msk = tmp_road_msk.copy()
                    cv2.line(tmp_road_msk, (p2[1], p2[0]), (p3[1], p3[0]), i+1, thickness)
                    road_msk[road_msk == 0] = tmp_road_msk[road_msk == 0]
                    road_msk = road_msk.copy()
                    final_res_roads[i] = np.vstack((p3, final_res_roads[i]))
                    hashes.add(h1)
                    hashes.add(h2)
                    end_points[point_hash(p3[0], p3[1])] = 2
        h = point_hash(final_res_roads[i][-1, 0], final_res_roads[i][-1, 1])
        if end_points[h] == 1:
            p1 = final_res_roads[i][-2]
            p2 = final_res_roads[i][-1]
            p3 = try_connect(p1, p2, 0, connect_dist, road_msk, min_prob, msk, final_res_roads, con_r)
            if p3 is not None:
                h1 = pair_hash(p2, p3)
                h2 = pair_hash(p3, p2)
                if not (h1 in hashes or h2 in hashes):
                    r_id = road_msk[p3[0], p3[1]] - 1
                    final_res_roads[r_id], new_hashes = inject_point(final_res_roads[r_id], p3)
                    hashes.update(new_hashes)
                    tmp_road_msk = np.zeros((1300, 1300), dtype=np.int32)
                    tmp_road_msk = tmp_road_msk.copy()
                    cv2.line(tmp_road_msk, (p2[1], p2[0]), (p3[1], p3[0]), i+1, thickness)
                    road_msk[road_msk == 0] = tmp_road_msk[road_msk == 0]
                    road_msk = road_msk.copy()
                    final_res_roads[i] = np.vstack((final_res_roads[i], p3))
                    hashes.add(h1)
                    hashes.add(h2)
                    end_points[point_hash(p3[0], p3[1])] = 2
                        
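    # second pass: retry dangling ends with small angular offsets; if that still fails,
    # extend the road outward by a fixed step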
    for i in range(len(final_res_roads)):
        h = point_hash(final_res_roads[i][0, 0], final_res_roads[i][0, 1])
        if end_points[h] == 1:
            p1 = final_res_roads[i][1]
            p2 = final_res_roads[i][0]
            p3 = None
            for a in angles_to_check:
                p3 = try_connect(p1, p2, a, connect_dist, road_msk, min_prob, msk, final_res_roads, con_r)
                if p3 is not None:
                    break
            if p3 is not None:
                h1 = pair_hash(p2, p3)
                h2 = pair_hash(p3, p2)
                if not (h1 in hashes or h2 in hashes):
                    r_id = road_msk[p3[0], p3[1]] - 1
                    final_res_roads[r_id], new_hashes = inject_point(final_res_roads[r_id], p3)
                    hashes.update(new_hashes)          
                    tmp_road_msk = np.zeros((1300, 1300), dtype=np.int32)
                    tmp_road_msk = tmp_road_msk.copy()
                    cv2.line(tmp_road_msk, (p2[1], p2[0]), (p3[1], p3[0]), i+1, thickness)
                    road_msk[road_msk == 0] = tmp_road_msk[road_msk == 0]
                    road_msk = road_msk.copy()
                    final_res_roads[i] = np.vstack((p3, final_res_roads[i]))
                    hashes.add(h1)
                    hashes.add(h2)
                    end_points[point_hash(p3[0], p3[1])] = 2
            else:
                p3 = get_next_point(p1, p2, add_dist)
                if not (p3[0] < 2 or p3[1] < 2 or p3[0] > 1297 or p3[1] > 1297):
                    p3 = get_next_point(p1, p2, add_dist2)
                if (p3[0] != p2[0] or p3[1] != p2[1]) and (road_msk[p3[0], p3[1]] == 0):
                    h1 = pair_hash(p2, p3)
                    h2 = pair_hash(p3, p2)
                    if not (h1 in hashes or h2 in hashes):
                        final_res_roads[i] = np.vstack((p3, final_res_roads[i]))
                        hashes.add(h1)
                        hashes.add(h2)
                        tmp_road_msk = np.zeros((1300, 1300), dtype=np.int32)
                        tmp_road_msk = tmp_road_msk.copy()
                        cv2.line(tmp_road_msk, (p2[1], p2[0]), (p3[1], p3[0]), i+1, thickness)
                        road_msk[road_msk == 0] = tmp_road_msk[road_msk == 0]
                        road_msk = road_msk.copy()
                        end_points[point_hash(p3[0], p3[1])] = 2
                        
        h = point_hash(final_res_roads[i][-1, 0], final_res_roads[i][-1, 1])
        if end_points[h] == 1:
            p1 = final_res_roads[i][-2]
            p2 = final_res_roads[i][-1]
            p3 = None
            for a in angles_to_check:
                p3 = try_connect(p1, p2, a, connect_dist, road_msk, min_prob, msk, final_res_roads, con_r)
                if p3 is not None:
                    break
            if p3 is not None:
                h1 = pair_hash(p2, p3)
                h2 = pair_hash(p3, p2)
                if not (h1 in hashes or h2 in hashes):
                    r_id = road_msk[p3[0], p3[1]] - 1
                    final_res_roads[r_id], new_hashes = inject_point(final_res_roads[r_id], p3)
                    hashes.update(new_hashes)
                    tmp_road_msk = np.zeros((1300, 1300), dtype=np.int32)
                    tmp_road_msk = tmp_road_msk.copy()
                    cv2.line(tmp_road_msk, (p2[1], p2[0]), (p3[1], p3[0]), i+1, thickness)
                    road_msk[road_msk == 0] = tmp_road_msk[road_msk == 0]
                    road_msk = road_msk.copy()
                    final_res_roads[i] = np.vstack((final_res_roads[i], p3))
                    hashes.add(h1)
                    hashes.add(h2)
                    end_points[point_hash(p3[0], p3[1])] = 2
            else:
                p3 = get_next_point(p1, p2, add_dist)
                if not (p3[0] < 2 or p3[1] < 2 or p3[0] > 1297 or p3[1] > 1297):
                    p3 = get_next_point(p1, p2, add_dist2)
                if (p3[0] != p2[0] or p3[1] != p2[1]) and (road_msk[p3[0], p3[1]] == 0):
                    h1 = pair_hash(p2, p3)
                    h2 = pair_hash(p3, p2)
                    if not (h1 in hashes or h2 in hashes):
                        final_res_roads[i] = np.vstack((final_res_roads[i], p3))
                        hashes.add(h1)
                        hashes.add(h2)
                        tmp_road_msk = np.zeros((1300, 1300), dtype=np.int32)
                        tmp_road_msk = tmp_road_msk.copy()
                        cv2.line(tmp_road_msk, (p2[1], p2[0]), (p3[1], p3[0]), i+1, thickness)
                        road_msk[road_msk == 0] = tmp_road_msk[road_msk == 0]
                        road_msk = road_msk.copy()
                        end_points[point_hash(p3[0], p3[1])] = 2
            
    lines = [LineString(r[:, ::-1]) for r in final_res_roads]

    if len(lines) == 0:
        res_rows.append({'ImageId': img_id, 'WKT_Pix': 'LINESTRING EMPTY'})
    else:
        for l in lines:
            res_rows.append({'ImageId': img_id, 'WKT_Pix': dumps(l, rounding_precision=0)})   
    return res_rows
def distance_transform(image):
    from skimage.morphology import medial_axis

    _, dist = medial_axis(image, return_distance=True)

    return dist
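
A hedged usage sketch (the mask below is illustrative): the distance map returned above can be used to read off the local half-width of an object, for example along a bar of known width.

import numpy as np

bar = np.zeros((50, 50), dtype=bool)
bar[20:30, 5:45] = True        # a bar roughly 10 pixels wide
dist = distance_transform(bar)
print(dist.max())              # about 5, i.e. half the bar width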
def cell_boundaries_detector(data_iterator,
                             metadata,
                             show_progress=False,
                             parameters={}):
    """
    Find cell boundary in bright field microscopy image.

    Parameters
    ----------
    data_iterator : python iterator
        To iterate over data.
    metadata : dict
        Metadata to scale detected peaks and parameters.
    show_progress : bool (default: False)
        Print progress bar during detection.
    verbose : bool (default: True)
        Display information during detection.
    parameters : dict
        object_height : float
            Typical size of the object in um.
        minimal_area : float
            Typical area of the object in um^2.

    Returns
    -------
    shapes : :class:`pd.DataFrame`
        Contains cell boundary properties for each time_stamp
    """

    _parameters = DEFAULT_PARAMETERS.copy()
    _parameters.update(parameters)
    parameters = _parameters

    # Load parameters
    sigma = parameters['object_height'] / metadata['PhysicalSizeZ']
    minimal_area = parameters['minimal_area'] / metadata['PhysicalSizeX']

    sizeX = metadata['SizeX']
    sizeY = metadata['SizeY']

    # calculate the correlation image from the z-stack
    cellprop = []
    t_tot = metadata['SizeT']
    for t, imt in enumerate(data_iterator):

        if show_progress:
            p = int(float(t + 1) / t_tot * 100.)
            print_progress(p)

        if np.any(imt) != 0:
            corr = np.zeros((sizeY, sizeX))

            for y, x in np.ndindex(sizeY, sizeX):
                Iz = imt[:, y, x]
                z = np.array(range(len(Iz)))
                zf = len(Iz) / 2
                corr[y, x] = integrate.simps(Iz[z] * (z - zf) * np.exp(-(zf - z) ** 2 /
                                             (2 * sigma ** 2)), z)

            # create binary mask of the correlation image
            thresh = threshold_otsu(corr)
            mask = corr > thresh

            area = 0
            n = 2

            prevarea = None
            prevcellprop = None

            # use a threshold that is not too small, in case small objects
            # remain in the image
            while area < minimal_area and prevarea != area:
                tophat = binary_closing(mask, square(n))
                n += 1
                skel = medial_axis(tophat)
                skel = 1 - skel  # invert the skeleton so that skeleton pixels become 0
                cleared = clear_border(skel)
                # connectivity=2 is the modern skimage equivalent of the old neighbors=8 argument
                labelized = label(cleared, connectivity=2, background=0) + 1

                # add cell characteristic in the cellprop list
                if np.any(labelized):
                    prevcellprop = regionprops(
                        labelized, intensity_image=corr)[0]

                prevarea = area
                if prevcellprop:
                    area = prevcellprop['area']

            if prevcellprop:
                cellprop.append(prevcellprop)

            else:
                if len(cellprop) >= 1:
                    cellprop.append(cellprop[-1])
                else:
                    cellprop.append(None)
        else:
            if len(cellprop) >= 1:
                cellprop.append(cellprop[-1])
            else:
                cellprop.append(None)

    print_progress(-1)

    # class cell morphology in time in the props Dataframe (time, centroid X,
    # centroid Y, ...)
    listprop = ['centroid_x', 'centroid_y', 'orientation', 'major_axis', 'minor_axis']
    cell_Prop = np.zeros((len(listprop) + 1, metadata["SizeT"]))

    for i in range(metadata["SizeT"]):
        if cellprop[i]:
            cell_Prop[0, i] = i
            cell_Prop[1, i] = cellprop[i]['centroid'][0] * metadata['PhysicalSizeX']
            cell_Prop[2, i] = cellprop[i]['centroid'][1] * metadata['PhysicalSizeX']
            cell_Prop[3, i] = cellprop[i]['orientation']
            cell_Prop[4, i] = cellprop[i]['major_axis_length'] * metadata['PhysicalSizeX']
            cell_Prop[5, i] = cellprop[i]['minor_axis_length'] * metadata['PhysicalSizeX']

    cell_Prop = cell_Prop.T
    props = pd.DataFrame(cell_Prop, columns=['t_stamp'] + listprop)
    props = props.set_index('t_stamp')
    props['t'] = props.index.get_level_values('t_stamp') * metadata['TimeIncrement']
    props = props.astype(float)

    if np.all(props == 0):
        return pd.DataFrame([])
    else:
        return props
Example #57
0
def get_skeletons(image):
    # Compute the medial axis (skeleton) and the distance transform
    skel, distance = medial_axis(image, return_distance=True)
    dist_on_skel = distance * skel
    return dist_on_skel
Example #58
0
def test_00_01_zeros_masked(self):
    '''Test skeletonize on an array that is completely masked'''
    result = medial_axis(np.zeros((10, 10), bool),
                         np.zeros((10, 10), bool))
    assert np.all(result == False)
Example #59
0
def test_00_00_zeros(self):
    '''Test skeletonize on an array of all zeros'''
    result = medial_axis(np.zeros((10, 10), bool))
    assert np.all(result == False)