Example No. 1
def evaluate(img_out, img_GT, do_map: bool = False):
    """
    Measure how well the image has been segmented. The criterion is the one
    imposed by the exercise.

    Parameters
    ----------
    img_out: np.ndarray<float||int> (square)
        The input image to evaluate.
    img_GT: np.ndarray<float||int> (square - same size as img_out)
        The ground-truth image to compare to img_out.
    do_map: bool, default False
        If True, also return the false-positive and false-negative maps.

    Returns
    -------
    float
        The precision (called "accuracy" here): the fraction of detected
        positives that are true positives.
    float
        The recall: the fraction of real positives that were detected.
    float
        The F1 score, the harmonic mean 2 / (1/precision + 1/recall).
    np.ndarray<int>
        The skeleton of the proposed image.
    np.ndarray<int>
        The skeleton of the ground truth image.
    np.ndarray<int>
        The map of false positives. Only if do_map is True.
    np.ndarray<int>
        The map of false negatives. Only if do_map is True.
    """
    GT_skel = thin(img_GT, max_iter=15)
    img_out_skel = thin(img_out, max_iter=15)

    TP = np.sum(img_GT & img_out)  # True positives
    if not do_map:
        FP = np.sum(img_out_skel & ~img_GT)  # False positives (relaxed)
        FN = np.sum(GT_skel & ~img_out)  # False negatives (relaxed)
    else:
        FP_map = img_out_skel & ~img_GT
        FN_map = GT_skel & ~img_out
        FP = np.sum(FP_map)
        FN = np.sum(FN_map)

    ACCU = TP / (TP + FP)  # Precision
    RECALL = TP / (TP + FN)  # Recall

    if TP != 0:
        # F1 score - same weight for both measures
        F1 = 2 / (1/RECALL + 1/ACCU)
    else:
        F1 = 0
        print("Erreur lors du script. Résultat incohérent.")

    if not do_map:
        return ACCU, RECALL, F1, img_out_skel, GT_skel
    else:
        return ACCU, RECALL, F1, img_out_skel, GT_skel, FP_map, FN_map
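A minimal usage sketch for evaluate, assuming numpy is available as np, thin comes from skimage.morphology (a version that still accepts max_iter), and using two made-up 8x8 binary masks:

import numpy as np
from skimage.morphology import thin

# Hypothetical ground truth and prediction: two overlapping squares.
img_GT = np.zeros((8, 8), dtype=bool)
img_GT[2:6, 2:6] = True
img_out = np.zeros((8, 8), dtype=bool)
img_out[2:6, 3:7] = True  # prediction shifted one column to the right

precision, recall, f1, out_skel, gt_skel = evaluate(img_out, img_GT)
print(precision, recall, f1)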
def thin_fringes(image, inverse=False):
    if inverse:
        # To thin the dark fringes, invert the binary image first (1 - image).
        print("Thinning dark fringes...")
        return thin(1 - image, max_iter=100)
    else:
        print("Thinning bright fringes...")
        return thin(image, max_iter=100)
Example No. 3
    def step_last(self):
        # Assign attributes to local variables for convenience.
        u = self._u

        if u is None:
            raise ValueError(
                "the levelset function is not set (use set_levelset)")

        # Thinned boundaries between the generalized Voronoi regions of the labelled levelset.
        mask = thin(
            find_boundaries(gvoronoi(label(u, connectivity=1)), mode='inner'))

        data = self.data

        # Determine c0 and c1.
        inside = u > 0
        outside = u <= 0
        c0 = data[outside].sum() / float(outside.sum())
        c1 = data[inside].sum() / float(inside.sum())

        # Image attachment.
        dres = np.array(np.gradient(u))
        abs_dres = np.abs(dres).sum(0)
        aux = abs_dres * (self.lambda1 * (data - c1)**2 - self.lambda2 *
                          (data - c0)**2)

        res = np.copy(u)
        res[aux < 0] = 1
        res[aux > 0] = 0

        # Smoothing.
        for i in range(self.smoothing):
            res = curvop(res)
        res[mask] = 0
        self._u = res
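The core of the update above is the pair of region means c0/c1 and the sign test on aux; here is a minimal NumPy-only sketch of that arithmetic on a toy image, leaving out the gvoronoi/curvop/find_boundaries machinery and assuming lambda1 = lambda2 = 1:

import numpy as np

np.random.seed(0)
data = np.random.rand(32, 32)          # toy image
u = np.zeros((32, 32))
u[8:24, 8:24] = 1.0                    # initial levelset: 1 inside, 0 outside

inside, outside = u > 0, u <= 0
c1 = data[inside].sum() / float(inside.sum())    # mean intensity inside
c0 = data[outside].sum() / float(outside.sum())  # mean intensity outside

dres = np.array(np.gradient(u))
abs_dres = np.abs(dres).sum(0)
aux = abs_dres * ((data - c1) ** 2 - (data - c0) ** 2)   # lambda1 = lambda2 = 1

res = np.copy(u)
res[aux < 0] = 1    # pixel better explained by the inside mean
res[aux > 0] = 0    # pixel better explained by the outside mean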
Example No. 4
    def processBinarization(self, algorithm=ImageAlgorithm.SAUVOLA):
        if algorithm == ImageAlgorithm.SAUVOLA:
            image = skimage.io.imread(fname=self.pathImg, as_gray=True)
            thresh_sauvola = threshold_sauvola(image, window_size=81)
            self.binary_sauvola = image > thresh_sauvola
            """
			self.binary_sauvola = invert(self.binary_sauvola)
			chull = convex_hull_image(self.binary_sauvola)
			[rows, columns] = np.where(chull)
			EPS = 50
			row1 = min(rows) - EPS
			row2 = max(rows) + EPS
			col1 = min(columns) - EPS
			col2 = max(columns) + EPS
			
			self.binary_sauvola = self.binary_sauvola[row1:row2, col1:col2]
			self.binary_sauvola = invert(self.binary_sauvola)
			"""
            self.binary_sauvola = invert(self.binary_sauvola)
            #selem = disk(6)

            self.binary_sauvola = thin(self.binary_sauvola, 15)
            self.binary_sauvola = np.invert(self.binary_sauvola)

            #thresh_sauvola = threshold_sauvola(self.binary_sauvola, window_size=21)
            #self.binary_sauvola = self.binary_sauvola > thresh_sauvola

            self.binary_sauvola = erosion(self.binary_sauvola)

            #self.binary_sauvola = gaussian(self.binary_sauvola)

        elif algorithm == ImageAlgorithm.OTSU:
            self.otsuBinarization()
Example No. 5
 def extract_graph(self, img):
     img = self._to_numpy(img)
     vpp = self._vertical_pp(img)
     segments = self._profile_segment(img, vpp, True)
     v_sgmts = []
     for sgmt in segments:
         v_sgmts += self._segment_equidistant(sgmt[2], self.d_v, True,
                                              sgmt[0], sgmt[1])
     final_sgmts = []
     for sgmt in v_sgmts:
         hpp = self._horizontal_pp(sgmt[2])
         segments = self._profile_segment(sgmt[2], hpp, False, sgmt[0],
                                          sgmt[1])
         for s in segments:
             final_sgmts += self._segment_equidistant(
                 s[2], self.d_h, False, s[0], s[1])
     nodes = []
     locations = []
     for sgmt in final_sgmts:
         x, y = self._center_of_mass(sgmt[2])
         node = np.array([x + sgmt[0], y + sgmt[1]])
         nodes.append(node)
         locations.append((node, sgmt))
     thinned = morph.thin(img)
     edges = []
     for loc in locations:
         neighbors = self._find_neighbors(loc, locations, thinned)
         for n in neighbors:
             if loc[0][0] <= n[0][0]:
                 edges.append((loc[0], n[0]))
     assert len(nodes) > 1, 'only one node found'
     return self._build_graph(nodes, edges)
    def wallExtraction(self, img):

        if len(img.shape) > 2:  # if image is not grayscale
            # Converting to grayscale if it is not already
            img = sl.rgb2gray(img)

        # Thresholding it to binary just in case it is not already
        bwImage = np.array((img > (np.max(np.max(img)) / 2)), dtype=bool)

        # Make sure structures are black (False) and the background is white (True)
        if (np.sum(bwImage) > np.sum(~bwImage)):
            bwImage = ~bwImage
            print('Image complemented')

        # Pre-processing the image to make it nice and clean for the job :)
        bwImage = thin(bwImage)  # thin to a 1-pixel skeleton (converted to uint8 below)
        bwImage = sl.bwmorph().diag(bwImage)
        bwImage = np.array(bwImage * 255, dtype=np.uint8)

        ############################################################################
        (labledImage, wallsCenter, _, numWalls) = sl.preciseHough(bwImage)
        labledImage = np.asarray(labledImage, dtype=np.int64)
        wallsCenter = np.asarray(wallsCenter, dtype=np.int64)

        wallLables = np.unique(labledImage)  # each label is an individual wall
        wallLables = np.delete(
            wallLables,
            0)  # removing the first element as it's for the background

        ###########################################################################

        return labledImage, wallLables, wallsCenter
Example No. 7
def Preprocessing(image):
    blur = cv2.GaussianBlur(image, (1, 1), 0)
    binImage = cv2.threshold(blur, 0, 255,
                             cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]
    # binImage = remove_noise_and_smooth(image)
    processedImage = thin(binImage)  # 1-pixel-wide boolean skeleton
    return processedImage
Example No. 8
def energy_baseline(msk = None,
                    energy = None,
                    threshold = 0.5,
                    thin_labels = False):

    msk_ths = (np.copy(msk)>255*threshold)*1
    energy_ths = (np.copy(energy)>255*0.4)*1

    distance = ndi.distance_transform_edt(msk_ths)
    
    # Marker labelling
    markers = label(energy_ths)    

    labels = watershed(-distance,
                       markers,
                       mask=msk_ths)

    if thin_labels:
        for i,lbl in enumerate(np.unique(labels)):
            if i == 0:
                # pass the background
                pass
            else:
                current_label = (labels==lbl) * 1
                thinned_label = thin(current_label,max_iter=1)
                labels[labels==lbl] = 0
                labels[thinned_label] = lbl

    return labels
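A usage sketch for energy_baseline with synthetic inputs, assuming the names used above are in scope (numpy as np, scipy.ndimage as ndi, label from skimage.measure, watershed from skimage.segmentation, thin from skimage.morphology); the two blobs are invented for illustration:

import numpy as np

# Hypothetical 0-255 mask and "energy" images containing two blobs.
msk = np.zeros((64, 64), dtype=np.uint8)
msk[10:30, 10:30] = 255
msk[35:55, 35:55] = 255
energy = np.zeros_like(msk)
energy[15:25, 15:25] = 255
energy[40:50, 40:50] = 255

labels = energy_baseline(msk=msk, energy=energy, threshold=0.5, thin_labels=True)
print(np.unique(labels))   # expected: background 0 plus one label per blob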
Example No. 9
def erode_segmentation(labels_3d):

    kernel = np.array(
        [[[0, 0, 0], [0, 1, 0], [0, 0, 0]], [[0, 1, 0], [1, 1, 1], [0, 1, 0]],
         [[0, 0, 0], [0, 1, 0], [0, 0, 0]]],
        dtype=bool)

    labels_3d_binary = np.array(labels_3d, dtype=bool)

    closed_seg = morph.binary_closing(labels_3d_binary, structure=kernel)

    eroded_seg = np.zeros(labels_3d.shape)
    for i in range(labels_3d.shape[2]):
        eroded_seg[:, :, i] = mp.thin(closed_seg[:, :, i], max_iter=3)
    dilated_seg = morph.binary_dilation(closed_seg,
                                        structure=kernel,
                                        iterations=3)

    markers = np.zeros(labels_3d.shape)
    fg_markers = (np.logical_and(eroded_seg, dilated_seg)) * 1
    bg_markers = (np.logical_and(np.logical_not(markers),
                                 np.logical_not(dilated_seg))) * 2
    markers = fg_markers + bg_markers

    return markers
Example No. 10
def run(img, **args):
    if len(img.shape) > 2 and img.shape[2] == 4:
        img = color.rgba2rgb(img)
    if len(img.shape) == 2:
        img = color.gray2rgb(img)
    img = color.rgb2gray(img)
    return to_base64(thin(img, **args))
def erode_seg_markers(rw_data):
    
    closing_kernel = np.array([[[0, 0, 0], [0, 1, 0], [0, 0, 0]],
                               [[0, 1, 0], [1, 1, 1], [0, 1, 0]],               
                               [[0, 0, 0], [0, 1, 0], [0, 0, 0]]], dtype=bool)
    
    erosion_kernel = closing_kernel
    dilation_kernel = erosion_kernel
    
    rw_bool = np.array(rw_data, dtype=bool)
    
    closed_seg = morph.binary_closing(rw_bool, structure=closing_kernel)
    # skeletonized_seg = mp.skeletonize_3d(closed_seg)
    # medial_axis_seg = mp.medial_axis(closed_seg)
    
    thinned_seg = np.zeros(rw_data.shape)
    for i in range(rw_data.shape[2]):
        thinned_seg[:,:,i] = mp.thin(closed_seg[:,:,i], max_iter = 5)
    # eroded_seg = morph.binary_erosion(closed_seg,structure=erosion_kernel,iterations=3)
    dilated_seg = morph.binary_dilation(closed_seg, structure=dilation_kernel, iterations=6)
       
    fg_markers = np.zeros(rw_data.shape)   
    bg_markers = np.zeros(rw_data.shape)   
    markers = np.zeros(rw_data.shape)   
    
    # fg_markers = (np.logical_and(eroded_seg,dilated_seg))*1
    fg_markers = (np.logical_and(thinned_seg, dilated_seg))*1
    bg_markers = (np.logical_and(np.logical_not(markers), np.logical_not(dilated_seg)))*2
    
    markers = fg_markers + bg_markers
    
    return markers, fg_markers, bg_markers
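A usage sketch for erode_seg_markers on a small synthetic 3-D volume, assuming morph is scipy.ndimage (providing binary_closing/binary_dilation with a structure= argument), mp is skimage.morphology, and numpy is imported as np, matching the aliases used above:

import numpy as np

# Hypothetical 3-D binary segmentation: a foreground block in a 32^3 volume.
rw_data = np.zeros((32, 32, 32), dtype=np.uint8)
rw_data[8:24, 8:24, 8:24] = 1

markers, fg, bg = erode_seg_markers(rw_data)
print(np.unique(markers))   # expected: 0 = unlabelled, 1 = foreground seed, 2 = background seed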
Example No. 12
def filterset(signal, pics=1, sizesToRemove=700, dilationSquare=30, closingSquare=30, boundary=1):
    grayscale2 = signal
    gaussed = skif.gaussian(grayscale2,3)
    if pics: plt.imshow(gaussed, cmap='gray')
    if pics: skif.thresholding.try_all_threshold(gaussed,figsize=(25, 25))
    thresh = skif.thresholding.threshold_li(gaussed)
    binary = gaussed > thresh
    if pics: plt.imshow(binary, cmap='gray')
    
    label_objects, nb_labels = ndi.label(binary)
    sizes = np.bincount(label_objects.ravel())
    mask_sizes = sizes > sizesToRemove
    mask_sizes[0] = 0
    binary_cleaned = mask_sizes[label_objects]

    if pics: plt.imshow(binary_cleaned, cmap='gray')
    diamclo = skimo.dilation(binary_cleaned, skimo.square(dilationSquare))
    if pics: plt.imshow(diamclo, cmap='gray')
    diamclo2 = skimo.binary_closing(diamclo, skimo.square(closingSquare))
    diamclo2 = addBoundaries(diamclo2, boundary)
    if pics: plt.imshow(diamclo2, cmap='gray', interpolation='none')
    eroded = skimo.thin(diamclo2).astype(int)
    if pics: plt.imshow(eroded, cmap='gray', interpolation='none')
    erodedWide = skimo.dilation(eroded, skimo.square(5))
    if pics: plt.imshow(erodedWide, cmap='gray', interpolation='none')
    diamclo4 = np.where(erodedWide == 0, 1, 0)  # invert the widened skeleton
    all_labels = skim.label(diamclo4)
    blobs_labels = skim.label(diamclo4, background=0)
    return blobs_labels, erodedWide
def convert_to_imgs(traces_data, box_size=int(100)):

    patterns_enc = []
    classes_rejected = []

    for pattern in traces_data:

        trace_group = pattern['trace_group']

        # mid coords needed to shift the pattern
        min_x, min_y, max_x, max_y = get_min_coords(trace_group)

        # traceGroup dimensions
        trace_grp_height, trace_grp_width = max_y - min_y, max_x - min_x

        # shift pattern to its relative position
        shifted_trace_grp = shift_trace_grp(trace_group,
                                            min_x=min_x,
                                            min_y=min_y)

        # Interpolates a pattern so that it fits into a box with specified size
        # method: LINEAR INTERPOLATION
        try:
            interpolated_trace_grp = interpolate(
                shifted_trace_grp,
                trace_grp_height=trace_grp_height,
                trace_grp_width=trace_grp_width,
                box_size=box_size - 1)
        except Exception as e:
            print(e)
            print('This data is corrupted - skipping.')
            classes_rejected.append(pattern.get('label'))

            continue

        # Get min, max coords once again in order to center the scaled pattern inside the box
        min_x, min_y, max_x, max_y = get_min_coords(interpolated_trace_grp)

        centered_trace_grp = center_pattern(interpolated_trace_grp,
                                            max_x=max_x,
                                            max_y=max_y,
                                            box_size=box_size)

        # Center scaled pattern so it fits a box with specified size
        pattern_drawn = draw_pattern(centered_trace_grp, box_size=box_size)
        # Make sure that patterns are thinned (1 pixel thick)
        pat_thinned = 1.0 - thin(1.0 - np.asarray(pattern_drawn))
        plt.imshow(pat_thinned, cmap='gray')
        plt.show()
        pattern_enc = dict({
            'features': pat_thinned,
            'label': pattern.get('label')
        })

        # Filter classes that belong to categories selected by the user
        #             if pattern_enc.get('label') in self.classes:

        patterns_enc.append(pattern_enc)

    return patterns_enc, classes_rejected
Example No. 14
def wallExtraction(img):

    if len(img.shape) > 2: # if image is not grayscale
        # Converting to grayscale if it is not already
        img = skc.rgb2gray(img)

    # Thresholding it to binary just in case it is not already
    bwImage = np.array((img > (img.max()/2)), dtype=bool)
    #bwImage = np.array(bwImage*255,dtype=np.uint8) # converting to a uint8 image
    # bwImage = cv2.cvtColor(bwImage,cv2.COLOR_BRG2GRAY) # figure is loaded with mpl so it's RGB, not BGR

    # Make sure structures are black (False) and the background is white (True)
    if (np.sum(bwImage) > np.sum(~bwImage)):
        bwImage = ~bwImage
        print('Image complemented')

    ############################################################################
    ###                 END OF INITIALIZATION
    ############################################################################

    # Pre-processing the image to make it nice and clean for the job :)
    bwImage = thin(bwImage) # thin to a 1-pixel skeleton (converted to uint8 below)
    bwImage = sl.bwmorph().diag(bwImage)
    bwImage = np.array(bwImage*255,dtype=np.uint8)

    ############################################################################
    (labledImage,wallsCenter,_,numWalls) = sl.preciseHough(bwImage)
    wallLables = np.unique(labledImage) # each label is an individual wall
    wallLables = np.delete(wallLables, 0) # remove the first element as it's the background

    ###########################################################################


    return labledImage,wallLables,wallsCenter
def thinning_img_new(img, median=False, resize=False):
    """
    No use opencv-contrib(for kernel competition)
    But little slow and results changed from Opencv
    TODO: optimizer when resize
    """
    height = img.shape[0]
    width = img.shape[1]
    img2 = img.copy()

    if resize:
        # For speed
        r = 3
        img2 = cv2.resize(img2, (width // r, height // r))

    img2 = img2.mean(axis=2)
    img3 = ((img2 < 220) * 255).astype(np.uint8)
    img4 = img3.copy()

    if median:
        img4 = cv2.medianBlur(img4, 5)
    kernel = np.ones((5, 5), np.uint8)
    img5 = cv2.erode(img4, kernel, iterations=1)
    img5 = cv2.dilate(img5, kernel, iterations=4)
    img5 = morphology.thin(img5 // 255)
    img5 = (img5 * 255).astype(np.uint8)

    if resize:
        img5 = cv2.resize(img5, (width, height))
        _, img5 = cv2.threshold(img5, 127, 255, cv2.THRESH_BINARY)
    return img5
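A usage sketch for thinning_img_new, assuming cv2, numpy as np and from skimage import morphology are imported as in the snippet; the input is a made-up white RGB image with one dark stroke:

import numpy as np

# Hypothetical 3-channel image: white background with a dark horizontal bar.
img = np.full((120, 160, 3), 255, dtype=np.uint8)
img[50:70, 20:140, :] = 0

skel = thinning_img_new(img, median=False, resize=False)
print(skel.shape, skel.dtype, skel.max())   # uint8 skeleton image, values 0 or 255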
Example No. 16
def extract_contours(image_abs_path):

    max_intensity = 1
    # Here we define the size of the square box that will contain a single pattern
    box_size = 32

    binary_img = binarize(image_abs_path)

    # Apply erosion step - make patterns thicker
    eroded_img = erosion(binary_img, selem=square(3))

    # Inverse colors: black --> white | white --> black
    binary_inv_img = max_intensity - eroded_img

    # Apply thinning algorithm
    thinned_img = thin(binary_inv_img)

    # Before we apply opencv method, we need to convert scikit image to opencv image
    thinned_img_cv = img_as_ubyte(thinned_img)

    # Find contours (OpenCV 3.x signature; OpenCV 4.x returns only contours and hierarchy)
    _, contours, _ = cv2.findContours(thinned_img_cv,
                                      mode=cv2.RETR_EXTERNAL,
                                      method=cv2.CHAIN_APPROX_SIMPLE)

    # Sort contours from left to right (sort by bounding rectangle's X coordinate)
    contours = sorted(contours, key=lambda cont: cv2.boundingRect(cont)[0])

    # Initialize patterns array
    patterns = []
    return contours
Example No. 17
File: utils.py  Project: z0322/CONTA
def weighted_masked_embeddings(fmap_shape, label, fconv_norm, n_classes):
    label = label.unsqueeze(0).unsqueeze(0).cpu()
    thinned = thin(label[0][0])

    dt = ndimage.distance_transform_edt(np.logical_not(thinned))
    dt = torch.tensor(dt)
    dt[label[0][0] == 0] = 0
    dt = dt.max() - dt
    dt[label[0][0] == 0] = 0
    dt = dt / dt.sum()

    fconv_norm = nn.functional.interpolate(fconv_norm,
                                           size=(int(label.shape[2]),
                                                 int(label.shape[3])),
                                           mode='nearest')
    dt = dt.unsqueeze(0).float().cuda()

    fconv_pooled = torch.zeros(fmap_shape[0], n_classes + 1, fmap_shape[1], 1,
                               1)
    for i in range(int(fconv_norm.shape[1])):
        temp = fconv_norm[:, i, ...]
        for c in range(n_classes + 1):
            if len(temp[label[0] == c]) == 0:
                tempv = 0
            else:
                tempv = torch.sum(temp[label[0] == c] * dt[label[0] == c])
            fconv_pooled[:, c, i, 0, 0] = tempv
    return fconv_pooled
Example No. 18
def walk_neighbourhood(image):
    """Walk the neighbourhood of an edge detected image"""
    thinned_image = morphology.thin(np.where(image < 0.1, 0, 1))
    thinned_image = np.where(thinned_image < 0.1, 0, 1)
    centre = get_centre(thinned_image)

    try:
        totalPixels = exposure.histogram(thinned_image, nbins=2)[0][1]
    #return an empty list if no pixels were found to draw
    except IndexError:
        return []

    currentPixel = np.array([-1, -1])
    walkingOrder = []
    index = -1
    while totalPixels > 0:
        if np.array_equal(currentPixel, np.array([-1, -1])):
            currentPixel = get_unwalked_coords(thinned_image)[0]
            walkingOrder.append([])
            index += 1
        else:
            thinned_image[currentPixel[0], currentPixel[1]] = 0
            walkingOrder[index].append(currentPixel)
            currentPixel = next_neighbour(currentPixel, thinned_image, centre)
            totalPixels -= 1

    return walkingOrder
Example No. 19
    def _apply_morph_thinning(self,
                              img: np.ndarray,
                              niter: int = 3) -> np.ndarray:
        """Apply morphological thinning."""
        thin_img = thin(img, niter)

        return thin_img
Example No. 20
    def _preprocess_lung_mask(self, msk, voxel_dimensions):
        print('Eroding')
        start_time = time()
        selem = self._make_structure_element(voxel_dimensions)
        inner = binary_erosion(msk, selem=selem)
        outer = binary_erosion(np.logical_not(msk), selem=selem)
        print('Eroding took %.3f sec' % (time() - start_time))

        print('Thinning')
        start_time = time()
        for k in range(msk.shape[2]):
            inner[..., k] = thin(inner[..., k], max_iter=self.thinnings)
            outer[..., k] = thin(outer[..., k], max_iter=self.thinnings)
        print('Thinning took %.3f sec' % (time() - start_time))

        return inner, outer
Example No. 21
 def test_noiter(self):
     result = thin(self.input_image).astype(np.uint8)
     expected = np.array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0],
                          [0, 1, 0, 1, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0],
                          [0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0],
                          [0, 0, 0, 0, 0, 0, 0]],
                         dtype=np.uint8)
     assert_array_equal(result, expected)
Example No. 22
def extract_skeleton(image):
    # get thinned image (skeleton)
    image = morphology.remove_small_holes(image, 2)
    image = morphology.thin(image)
    # convert skeleton into network of junction nodes (undirected graph)
    graph = build_sknw(image.astype(np.uint16))

    return graph
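A usage sketch for extract_skeleton, assuming from skimage import morphology, numpy as np, and that build_sknw comes from the third-party sknw package (so the returned graph is a networkx graph); the cross-shaped mask is invented:

import numpy as np

# Hypothetical binary mask: a thick cross, which should reduce to a small graph
# with one junction and four end nodes.
mask = np.zeros((64, 64), dtype=bool)
mask[30:34, 8:56] = True   # horizontal bar
mask[8:56, 30:34] = True   # vertical bar

graph = extract_skeleton(mask)
print(graph.number_of_nodes(), graph.number_of_edges())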
Example No. 23
 def test_iter_1(self):
     result = thin(self.input_image, 1).astype(np.uint8)
     expected = np.array([[0, 0, 0, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0],
                          [0, 1, 0, 1, 1, 0, 0], [0, 0, 1, 1, 1, 0, 0],
                          [0, 0, 1, 1, 1, 0, 0], [0, 0, 0, 0, 0, 0, 0],
                          [0, 0, 0, 0, 0, 0, 0]],
                         dtype=np.uint8)
     numpy.testing.assert_array_equal(result, expected)
Example No. 24
def get_best_skeleton(img):
    s1 = morphology.skeletonize(img)
    s2 = morphology.medial_axis(img)
    s3 = morphology.thin(img)

    values = np.asarray([s1, s2, s3])
    sums = np.asarray([s1.sum(), s2.sum(), s3.sum()])
    return values[np.argmin(sums)]
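A quick usage check for get_best_skeleton, assuming numpy as np and from skimage import morphology as in the snippet; the rectangular blob is synthetic:

import numpy as np

# Hypothetical filled rectangle; the candidate skeletons are compared by pixel count.
blob = np.zeros((40, 60), dtype=bool)
blob[10:30, 10:50] = True

best = get_best_skeleton(blob)
print(best.sum())   # pixel count of the sparsest candidate skeleton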
Example No. 25
    def convert_to_img(self, trace_grps, req_classes=None, box_size=100):
        classes = set()
        patterns_enc = []
        classes_rejected = []
        for pattern in trace_grps:
            if req_classes and pattern["label"] not in req_classes:
                continue
            if "label" in pattern and pattern["label"] not in classes:
                classes.add(pattern["label"])
            trace_group = pattern['traces']
            # mid coords needed to shift the pattern
            min_x, min_y, max_x, max_y = self.get_min_coords(trace_group)

            # traceGroup dimensions
            trace_grp_height, trace_grp_width = max_y - min_y, max_x - min_x

            # shift pattern to its relative position
            shifted_trace_grp = self.shift_trace_grp(trace_group,
                                                     min_x=min_x,
                                                     min_y=min_y)

            # Interpolates a pattern so that it fits into a box with specified size
            # method: LINEAR INTERPOLATION
            try:
                interpolated_trace_grp = self.interpolate(
                    shifted_trace_grp,
                    trace_grp_height=trace_grp_height,
                    trace_grp_width=trace_grp_width,
                    box_size=box_size - 1)
            except Exception as e:
                print(e)
                print('This data is corrupted - skipping.')
                classes_rejected.append(pattern.get('label'))
                continue

            # Get min, max coords once again in order to center the scaled pattern inside the box
            min_x, min_y, max_x, max_y = self.get_min_coords(
                interpolated_trace_grp)

            centered_trace_grp = self.center_pattern(interpolated_trace_grp,
                                                     max_x=max_x,
                                                     max_y=max_y,
                                                     box_size=box_size)

            # Center scaled pattern so it fits a box with specified size
            pattern_drawn = self.draw_pattern(centered_trace_grp,
                                              box_size=box_size)
            # Make sure that patterns are thinned (1 pixel thick)
            pat_thinned = 1.0 - thin(1.0 - np.asarray(pattern_drawn))
            # plt.imshow(pat_thinned, cmap='gray')
            # plt.show()
            pattern_enc = {
                'features': pat_thinned,
                'label': pattern.get('label')
            }

            patterns_enc.append(pattern_enc)
        return patterns_enc, classes_rejected, classes
Example No. 26
def skeletonize_edge(src):
    # Process sketch to fit input. Only used for test input
    src = np.asarray(src * 255, np.uint8)
    # Crop the sketch and minimize white padding.
    cropped = crop_and_resize(src, return_gray=True)
    # Skeletonize the lines
    skeleton = thin(cv2.bitwise_not(cropped))
    final = np.asarray(1 - np.float32(skeleton))
    return cv2.cvtColor(final, cv2.COLOR_GRAY2BGR)
Example No. 27
def preprocessing(data):  # Convert the data to a Tensor.
    a = []
    for i in range(len(data)):
        a.append(np.asarray(thin(stretch(
            data[i]))))  # apply centering, stretching and thinning
    a1 = np.asarray(a)
    return torch.Tensor(a1).view(
        len(data), 1, 32,
        32)  # add one dimension to make it suitable to put in dataloader
Example No. 28
def QuitarLineaRef(Im_BW, grosor_linea):
    # "QuitarLineaRef" = remove the reference line: find the row with the largest
    # value in the thinned profile and zero a band of +/- grosor_linea rows around it.
    suma_y = profile_y(thin(Im_BW))
    linea = np.argmax(suma_y)
    Im_BW_sin_linea = 1 * Im_BW
    Im_BW_sin_linea[linea - grosor_linea:linea + grosor_linea + 1,
                    0:Im_BW.shape[1]] = 0 * Im_BW[linea - grosor_linea:linea +
                                                  grosor_linea + 1,
                                                  0:Im_BW.shape[1]]
    return Im_BW_sin_linea
Example No. 29
def ridge_thinning(image: np.ndarray) -> np.ndarray:
    """
    Ridge thinning or image skeletonization
    :param image: the original image
    :return: the skeleton of the image
    """
    thinned = thin(image)
    thinned = thinned.astype(float)  # np.float was removed from NumPy; use the builtin float
    return thinned
Example No. 30
def thinning2(name):
    image = img_as_float(color.rgb2gray(io.imread(name)))
    image_binary = image < 0.5
    out_skeletonize = morphology.skeletonize(image_binary)
    out_thin = morphology.thin(image_binary)

    plt.imsave('gaps.jpg', out_skeletonize, cmap='gray')
    img = cv2.imread("gaps.jpg")
    cv2.imshow("Thinning2", img)
    cv2.waitKey(0)
Example No. 33
def preprocess(path, info_fn):
    with open(info_fn) as f:
        content = f.readlines()
    content = [x.strip() for x in content]
    for person, signum in zip(content[::2], content[1::2]):
        signum = signum.split(' ')
        ngenuines, nforgeries = signum
        ngenuines = int(ngenuines.replace("g:", ""))
        nforgeries = int(nforgeries.replace("f:", ""))
        genuines = []
        forgeries = []
        for gn in range(1, ngenuines+1):
            f = "g-%03d.png" % gn
            fn = P.join(P.join(path, person), f)
            im = cv2.imread(fn, 0)
            im = 255 - im  # invert: make the dark strokes the foreground
            skel = thin(im)

            plt.imshow(skel, 'gray')

            plt.show()
            plt.figure()
Example No. 34
 def test_baddim(self):
     for ii in [np.zeros((3)), np.zeros((3, 3, 3))]:
         with testing.raises(ValueError):
             thin(ii)
Example No. 35
 def test_zeros(self):
     assert np.all(thin(np.zeros((10, 10))) == False)
Example No. 36
# **Morphological thinning**
#
# Morphological thinning, implemented in the `thin` function, works on the
# same principle as `skeletonize`: remove pixels from the borders at each
# iteration until none can be removed without altering the connectivity. The
# different rules of removal can speed up skeletonization and result in
# different final skeletons.
#
# The `thin` function also takes an optional `max_iter` keyword argument to
# limit the number of thinning iterations, and thus produce a relatively
# thicker skeleton.

from skimage.morphology import skeletonize, thin

skeleton = skeletonize(image)
thinned = thin(image)
thinned_partial = thin(image, max_iter=25)

fig, axes = plt.subplots(2, 2, figsize=(8, 8), sharex=True, sharey=True,
                         subplot_kw={'adjustable': 'box'})
ax = axes.ravel()

ax[0].imshow(image, cmap=plt.cm.gray, interpolation='nearest')
ax[0].set_title('original')
ax[0].axis('off')

ax[1].imshow(skeleton, cmap=plt.cm.gray, interpolation='nearest')
ax[1].set_title('skeleton')
ax[1].axis('off')

ax[2].imshow(thinned, cmap=plt.cm.gray, interpolation='nearest')