Example no. 1
    def compute_mask(self, params):
        """Creates the mask for the base image.
        Needs the base image, an instance of imageloaderparams
        and the clip area, which should be already defined
        by the load_base_image method.
        Creates the mask by improving the base mask created by the
        compute_base_mask method. Applies the mask closing, dilation and
        fill holes parameters.
        """
        self.compute_base_mask(params)

        mask = np.copy(self.base_mask)
        closing_matrix = np.ones((params.mask_closing, params.mask_closing))

        if params.mask_closing > 0:
            # removes small dark spots and then small white spots
            mask = img_as_float(morphology.closing(
                mask, closing_matrix))
            mask = 1 - \
                img_as_float(morphology.closing(
                    1 - mask, closing_matrix))

        for f in range(params.mask_dilation):
            mask = morphology.erosion(mask, np.ones((3, 3)))

        if params.mask_fill_holes:
            # mask is inverted
            mask = 1 - img_as_float(ndimage.binary_fill_holes(1.0 - mask))

        self.mask = mask

        self.overlay_mask_base_image()
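The docstring above describes the closing and fill-holes steps on an inverted mask (0 = object, 1 = background). Below is a minimal, self-contained sketch of the same idea on a synthetic mask, with made-up placeholder values standing in for the imageloaderparams fields; it is an illustration, not the class's actual pipeline.

import numpy as np
from scipy import ndimage
from skimage import morphology
from skimage.util import img_as_float

# synthetic inverted mask: background is 1, a 32x32 object with a small hole is 0
mask = np.ones((64, 64))
mask[16:48, 16:48] = 0
mask[30:34, 30:34] = 1            # a 4x4 hole inside the object

mask_closing = 3                  # placeholder for params.mask_closing
mask_fill_holes = True            # placeholder for params.mask_fill_holes

if mask_closing > 0:
    selem = np.ones((mask_closing, mask_closing))
    mask = img_as_float(morphology.closing(mask, selem))            # remove small dark spots
    mask = 1 - img_as_float(morphology.closing(1 - mask, selem))    # then small white spots

if mask_fill_holes:
    # the mask is inverted, so fill holes on the negated image
    mask = 1 - img_as_float(ndimage.binary_fill_holes(1.0 - mask))

print(mask[31, 31])               # 0.0: the hole has been absorbed into the object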
Example no. 2
def close_image(img, mask_length):
    # Morphological closing on greyscale/binary image
    
    img = img.astype(np.uint8)
    img = closing(img, rectangle(mask_length,1))
    
    return(img)
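A hedged usage sketch for close_image above: closing with a mask_length x 1 rectangle bridges small vertical gaps in a binary image. It assumes numpy and skimage.morphology's closing/rectangle are imported at module level, as the snippet requires.

import numpy as np
from skimage.morphology import closing, rectangle

col = np.zeros((30, 5), dtype=np.uint8)
col[5:12, 2] = 1
col[14:21, 2] = 1                      # two vertical runs separated by a 2-pixel gap
closed = close_image(col, mask_length=5)
print(closed[:, 2])                    # the gap (rows 12-13) is bridged; the run ends stay put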
Example no. 3
def getRegions():
    """Geocode address and retreive image centered
    around lat/long"""
    address = request.args.get('address')
    results = Geocoder.geocode(address)
    lat, lng = results[0].coordinates
    zip_code = results[0].postal_code

    map_url = 'https://maps.googleapis.com/maps/api/staticmap?center={0},{1}&size=640x640&zoom=19&sensor=false&maptype=roadmap&&style=visibility:simplified|gamma:0.1'
    request_url = map_url.format(lat, lng)
    req = urllib.urlopen(request_url)
    img = io.imread(req.geturl(),flatten=True)
    labels, numobjects = ndimage.label(img)
    image = filter.canny(img, sigma=3)
    thresh = threshold_otsu(image)
    bw = closing(image > thresh, square(3))

    # remove artifacts connected to image border
    cleared = bw.copy()
    clear_border(cleared)

    # label image regions
    label_image = label(cleared)
    borders = np.logical_xor(bw, cleared)
    label_image[borders] = -1
    image_label_overlay = label2rgb(label_image, image=image)

    fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
    ax.imshow(image_label_overlay)
Example no. 4
def scikit_example_plot_label():
    image = data.coins()[50:-50, 50:-50]
    
    # apply threshold
    thresh = threshold_otsu(image)
    bw = closing(image > thresh, square(3))
    
    # remove artifacts connected to image border
    cleared = bw.copy()
    clear_border(cleared)
    
    # label image regions
    label_image = label(cleared)
    borders = np.logical_xor(bw, cleared)
    label_image[borders] = -1
    
    fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
    ax.imshow(label_image, cmap='jet')
    
    for region in regionprops(label_image, ['Area', 'BoundingBox']):
    
        # skip small images
        if region['Area'] < 100:
            continue
    
        # draw rectangle around segmented coins
        minr, minc, maxr, maxc = region['BoundingBox']
        rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                  fill=False, edgecolor='red', linewidth=2)
        ax.add_patch(rect)
    
    plt.show()
Example no. 5
def threshold_image(image, threshold=0):
	"""
	This function zeroes out any values in an image's single-channel matrix
	that are below the threshold value.

	Inputs:
	- image: a matrix describing an image with only one channel represented.
	- threshold: a value between 0 and 1. Matrix values below the threshold
	  are set to 0, and values above it are set to 1.

	  If the threshold is set to 0, an Otsu threshold is computed and used
	  instead.

	Outputs:
	- thresholded_image: a matrix representation of the thresholded image;
	  this is essentially a black-and-white image.
	- thresh: the threshold value

	To screen: the black-and-white image representation.
	"""
	if threshold == 0:
		thresh = threshold_otsu(image)
	else:
		thresh = threshold

	thresholded_image = closing(image > thresh, square(3), out=None)
	imshow(thresholded_image)

	return thresholded_image, thresh
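A hedged usage sketch for threshold_image above, supplying the imports the snippet leaves implicit (matplotlib's imshow standing in for the imshow it calls) and a scikit-image sample scaled to [0, 1]:

from matplotlib.pyplot import imshow
from skimage.data import camera
from skimage.filters import threshold_otsu
from skimage.morphology import closing, square

image = camera() / 255.0               # single-channel image scaled to [0, 1]
bw, thresh = threshold_image(image)    # threshold=0, so an Otsu threshold is used
print("Otsu threshold:", thresh)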
Example no. 6
def segment_out_cells(base):
    # TODO: try using OTSU for GFP thresholding

    sel_elem = disk(2)
    gfp_collector = np.sum(base, axis=0)
    gfp_clustering_markers = np.zeros(gfp_collector.shape, dtype=np.uint8)
    # random walker segment
    gfp_clustering_markers[gfp_collector > np.mean(gfp_collector) * 2] = 2
    gfp_clustering_markers[gfp_collector < np.mean(gfp_collector) * 0.20] = 1
    labels = random_walker(gfp_collector, gfp_clustering_markers, beta=10, mode='bf')
    # round up the labels and set the background to 0 from 1
    labels = closing(labels, sel_elem)
    labels -= 1
    # prepare distances for the watershed
    distance = ndi.distance_transform_edt(labels)
    local_maxi = peak_local_max(distance,
                                indices=False,  # we want the image mask, not peak position
                                min_distance=10,  # about half of a bud with our size
                                threshold_abs=10,  # allows to clear the noise
                                labels=labels)
    # we fuse the labels that are close together that escaped the min distance in local_maxi
    local_maxi = ndi.convolve(local_maxi, np.ones((5, 5)), mode='constant', cval=0.0)
    # finish the watershed
    expanded_maxi_markers = ndi.label(local_maxi, structure=np.ones((3, 3)))[0]
    segmented_cells_labels = watershed(-distance, expanded_maxi_markers, mask=labels)

    # log debugging data
    running_debug_frame.gfp_collector = gfp_collector
    running_debug_frame.gfp_clustering_markers = gfp_clustering_markers
    running_debug_frame.labels = labels
    running_debug_frame.segmented_cells_labels = segmented_cells_labels

    return gfp_collector, segmented_cells_labels
Example no. 7
def smooth(image):
  filename_split = os.path.splitext(image)
  filename_zero, fileext = filename_split
  basename = os.path.basename(filename_zero)
  im = np.array(Image.open(image))
  with rasterio.open(image) as r:
    im = r.read()
    p = r.profile
  im = im.squeeze()
  selem = disk(1)
  print("image min and max: ", im.min(),im.max())
  dilated = skimage.morphology.dilation(im, selem)
  print("dilated image min and max: ", dilated.min(),dilated.max())
  eroded = skimage.morphology.erosion(im, selem)
  print("eroded image min and max: ", eroded.min(),eroded.max())
  opened = opening(im, selem)
  print("opened image min and max: ", opened.min(),opened.max())
  closed = closing(im, selem)
  print("closed image min and max: ", closed.min(),closed.max())
  #im[im==1]=0
  #im[im==2]=1
  median = cv2.medianBlur(im,9)
  average = cv2.blur(im,(9,9))
  #gaussian = cv2.GaussianBlur(im,(9,9),0)
  gaussian = cv2.GaussianBlur(dilated,(9,9),0)
  #bilateral = cv2.bilateralFilter(im,9,75,75)
  bilateral = cv2.bilateralFilter(gaussian,9,75,75)
  with rasterio.open(outPath+basename+fileext, 'w', **p) as dst:
      dst.write(bilateral, 1)
  color_outPath = outPath+'color/'
  if not os.path.exists(color_outPath):
        os.mkdir(color_outPath) 
  colored_image = color_outPath+basename+'.png'
  os.system("gdaldem color-relief", bilateral, colorfile, colored_image)
  return im, dilated, eroded, opened, closed, median, average, gaussian, bilateral
Example no. 8
    def plot_preprocessed_image(self):
        """
        plots pre-processed image. The plotted image is the same as obtained at the end
        of the get_text_candidates method.
        """
        image = restoration.denoise_tv_chambolle(self.image, weight=0.1)
        thresh = threshold_otsu(image)
        bw = closing(image > thresh, square(2))
        cleared = bw.copy()

        label_image = measure.label(cleared)
        borders = np.logical_xor(bw, cleared)

        label_image[borders] = -1
        image_label_overlay = label2rgb(label_image, image=image)

        fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(12, 12))
        ax.imshow(image_label_overlay)

        for region in regionprops(label_image):
            if region.area < 10:
                continue

            minr, minc, maxr, maxc = region.bbox
            rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                      fill=False, edgecolor='red', linewidth=2)
            ax.add_patch(rect)

        plt.show()
Example no. 9
def detectOpticDisc(image):
    kernel = octagon(10, 10)
    thresh = threshold_otsu(image[:,:,1])
    binary = image > thresh
    print(binary.dtype)
    luminance = convertToHLS(image)[:,:,2]
    t = threshold_otsu(luminance)
    t = erosion(luminance, kernel)
    
    
    labels = segmentation.slic(image[:,:,1], n_segments = 3)
    out = color.label2rgb(labels, image[:,:,1], kind='avg')
    skio.imshow(out)
    
    x, y = computeCentroid(t)
    print(x, y)
    rows, cols, _ = image.shape
    p1 = closing(image[:,:,1],kernel)
    p2 = opening(p1, kernel)
    p3 = reconstruction(p2, p1, 'dilation')
    p3 = p3.astype(np.uint8)
    #g = dilation(p3, kernel)-erosion(p3, kernel)
    #g = rank.gradient(p3, disk(5))
    g = cv2.morphologyEx(p3, cv2.MORPH_GRADIENT, kernel)
    #markers = rank.gradient(p3, disk(5)) < 10
    markers = drawCircle(rows, cols, x, y, 85)
    #markers = ndimage.label(markers)[0]
    #skio.imshow(markers)
    g = g.astype(np.uint8)
    #g = cv2.cvtColor(g, cv2.COLOR_GRAY2RGB)
    w = watershed(g, markers)
    print(np.max(w), np.min(w))
    w = w.astype(np.uint8)
    #skio.imshow(w)
    return w
Example no. 10
def closing(gray_img, kernel=None):
    """Wrapper for scikit-image closing functions. Opening can remove small dark spots (i.e. pepper).

    Inputs:
    gray_img = input image (grayscale or binary)
    kernel   = optional neighborhood, expressed as an array of 1s and 0s. If None, use cross-shaped structuring element.

    :param gray_img: ndarray
    :param kernel: ndarray
    :return filtered_img: ndarray
    """

    params.device += 1

    # Make sure the image is binary/grayscale
    if len(np.shape(gray_img)) != 2:
        fatal_error("Input image must be grayscale or binary")

    # If image is binary use the faster method
    if len(np.unique(gray_img)) == 2:
        bool_img = morphology.binary_closing(image=gray_img, selem=kernel)
        filtered_img = np.copy(bool_img.astype(np.uint8) * 255)
    # Otherwise use method appropriate for grayscale images
    else:
        filtered_img = morphology.closing(gray_img, kernel)

    if params.debug == 'print':
        print_image(filtered_img, os.path.join(params.debug_outdir, str(params.device) + '_closing' + '.png'))
    elif params.debug == 'plot':
        plot_image(filtered_img, cmap='gray')

    return filtered_img
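The wrapper above chooses binary_closing for two-valued images and grey-level closing otherwise. A minimal standalone sketch of that distinction using plain scikit-image, with none of the plantcv params/debug machinery assumed:

import numpy as np
from skimage import morphology

# binary case: a bright square with a small dark "pepper" hole
binary_img = np.zeros((32, 32), dtype=np.uint8)
binary_img[8:24, 8:24] = 255
binary_img[15:17, 15:17] = 0

footprint = morphology.disk(2)

# binary path: work on booleans, then scale back to 0/255
closed_binary = morphology.binary_closing(binary_img > 0, footprint).astype(np.uint8) * 255

# greyscale path: grey-level closing keeps the original intensity range
grey_img = np.random.randint(0, 256, (32, 32), dtype=np.uint8)
closed_grey = morphology.closing(grey_img, footprint)

print(closed_binary[16, 16], closed_grey.dtype)   # 255: the pepper hole is closed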
Example no. 11
def Mask_ROI_cl(im,disk_size,thresh=None,black_spots=None,with_morph=False):
    l=np.array([0,0])
    if not isinstance(im,l.__class__):
        numpy_array=np.array(im)
    else:
        numpy_array=im 
    if len(numpy_array.shape)==3:
        numpy_array=numpy_array[:,:,0:3].mean(axis=2)

    selem = disk(disk_size)
    closed = closing(numpy_array, selem)
    if thresh is None:
        thresh = threshold_otsu(closed)
    binary = closed > thresh
    if binary.dtype=='bool':
        binary=binary+0
    if black_spots is not None:    
        binary2 = closed > black_spots
        binary2 = binary2 + 0
        binary = binary - binary2 
    else:
        binary -=1
    binary=binary * -1
    if with_morph:
        return(binary,closed)
    else:
        return(binary)
Example no. 12
def test_large_radius():
    ''' Compare execution time against scikit: single closing case.
    Here, our implementation does not take advantage of smaller-radius results,
    so it is slower than scikit, but it uses significantly less memory.
    '''
    base_dir = '/home/omar/data/DATA_NeoBrainS12/'
    neo_subject = '30wCoronal/example2/'

    # Read subject files
    t2CurrentSubjectName  = base_dir + 'trainingDataNeoBrainS12/'+neo_subject+'T2_1-1.nii.gz'
    t2CurrentSubject_data = nib.load(t2CurrentSubjectName).get_data()
    affineT2CS            = nib.load(t2CurrentSubjectName).get_affine()
    zoomsT2CS             = nib.load(t2CurrentSubjectName).get_header().get_zooms()[:3]
    # Step 1.4 - Resampling for isotropic voxels

    n_zooms = (zoomsT2CS[0],zoomsT2CS[0],zoomsT2CS[0])
    t2CurrentSubject_data,affineT2CS = reslice(t2CurrentSubject_data,affineT2CS,zoomsT2CS,n_zooms)

    S = t2CurrentSubject_data.astype(np.float64)

    ###########compare times#########
    # in-house
    radius = 15
    start = time.time()
    d = isotropic_dilation(S, radius)
    c = isotropic_erosion(d, radius)
    end = time.time()
    print('Elapsed (in-house): %f'%(end-start,))

    # scikit
    start = time.time()
    expected = closing(S, ball(radius))
    end = time.time()
    print('Elapsed (scikit): %f'%(end-start,))
Example no. 13
def removeChessboard(img):

    # Get the major lines in the image
    edges, dilatedEdges, (h, theta, d) = findLines(img)

    # Create image with ones to fill inn lines
    lines = np.ones(img.shape[:2])

    # Add lines to image as zeroes
    for _, angle, dist in zip(*hough_line_peaks(h, theta, d)):
        y0 = (dist - 0 * np.cos(angle)) / np.sin(angle)
        y1 = (dist - img.shape[1] * np.cos(angle)) / np.sin(angle)
        x, y = line(int(y1), 0, int(y0), img.shape[1] - 1)
        x = np.clip(x, 0, img.shape[0] - 1)
        y = np.clip(y, 0, img.shape[1] - 1)
        lines[x, y] = 0

    # Remove border edges from image with all edges
    w = 4
    edges = np.pad(edges[w:img.shape[0] - w, w:img.shape[1] - w], w, mode='constant')

    # Erode the lines bigger, such that they cover the original lines
    lines = erosion(lines, square(13))

    # Remove major lines and close shape paths
    removedChessboard = closing(edges * lines, square(8))

    return removedChessboard
Example no. 14
def roofRegion(image):
    """Estimate roof regions in an image by thresholding and labelling connected regions.
    """
    # apply threshold
    thresh = threshold_otsu(image)
    bw = closing(image > thresh, square(3))

    # remove artifacts connected to image border
    cleared = bw.copy()
    clear_border(cleared)

    # label image regions
    label_image = label(cleared)
    borders = np.logical_xor(bw, cleared)
    label_image[borders] = -1
    image_label_overlay = label2rgb(label_image, image=image)

    fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
    ax.imshow(image_label_overlay)

    for region in regionprops(label_image):

        # skip small images
        if region.area < 100:
            continue

        # draw rectangle around segmented coins
        minr, minc, maxr, maxc = region.bbox
        rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                  fill=False, edgecolor='red', linewidth=2)
        ax.add_patch(rect)

    plt.show()
Example no. 15
    def get_valley_image(self, image):
        valley_img = np.zeros_like(image)
        for z in range(0, image.shape[0]):
            valley_img[z, :, :] = closing(image[z, :, :], disk(5))
        valley_img -= image

        return valley_img
Example no. 16
def Image_ws_tranche(image):
    
    laser = Detect_laser(image)
    laser_tranche = tranche_image(laser,60)
    
    image_g = skimage.color.rgb2gray(image)
    image_g = image_g * laser_tranche
    
    image_med = rank2.median((image_g*255).astype('uint8'),disk(8))
    
    image_clahe = exposure.equalize_adapthist(image_med, clip_limit=0.03)
    image_clahe_stretch = exposure.rescale_intensity(image_clahe, out_range=(0, 256))

    image_grad = rank2.gradient(image_clahe_stretch,disk(3))
    
    image_grad_mark = image_grad<20
    image_grad_forws = rank2.gradient(image_clahe_stretch,disk(1))
    
    image_grad_mark_closed = closing(image_grad_mark,disk(1))
    
    Labelised = (skimage.measure.label(image_grad_mark_closed,8,0))+1
    Watersheded  = watershed(image_grad_forws,Labelised)
    
    cooc = coocurence_liste(Watersheded,laser,3)
    
    x,y = compte_occurences(cooc)
    
    return x,y
Example no. 17
 def morphOps( imgIn, sizeSE, sizeCC ):
     imgOut = imgIn.astype(bool) #boolean image
     imgOut = ~imgOut #img negative
     imgOut = morphology.remove_small_objects( imgOut, sizeCC ) #cclargest
     SE = morphology.selem.disk( sizeSE ) #structuring element
     imgOut = morphology.closing(imgOut, SE)
     return imgOut
Example no. 18
def main():
    for file_path in glob.glob("/home/lucas/Downloads/Lucas/GSK 10uM/*.JPG"):

        img = data.imread(file_path, as_grey=True)

        img = transform.resize(img, [600, 600])
        img_color = transform.resize(data.imread(file_path), [600, 600])

        img[img >img.mean()-0.1] = 0

        # io.imshow(img)
        # io.show()
        #
        edges = canny(img)
        bordas_fechadas = closing(img > 0.1, square(15))  # closing gaps
        fill_cells = ndi.binary_fill_holes(bordas_fechadas)
        # io.imshow(fill_cells)
        # io.show()
        img_label = label(fill_cells, background=0)
        n= 0
        for  x in regionprops(img_label):
            if x.area < 2000 and x.area > 300:
                n +=1
                print(x.area)
                minr, minc, maxr, maxc = x.bbox
                try:
                    out_path_name = file_path.split("/")[-1].rstrip(".JPG")
                    io.imsave("out/cell_{}_pic_{}_area_{}.png".format(n, out_path_name, str(round(x.area))),img_color[minr-3: maxr+3, minc-3: maxc+3])
                    #io.show()
                except:
                    pass
Example no. 19
def im_proc(im):
    """Apply series of morphological procedures on image."""
    th = threshold_otsu(im)
    im_bin = im > th
    return(ndi.binary_fill_holes(
                morphology.closing(
                im_bin,np.ones((3,3)))))
Example no. 20
def test_accuracy():
    ''' Verify that our implementation returns exactly the same result as scikit
    '''
    base_dir = '/home/omar/data/DATA_NeoBrainS12/'
    neo_subject = '30wCoronal/example2/'

    # Read subject files
    t2CurrentSubjectName  = base_dir + 'trainingDataNeoBrainS12/'+neo_subject+'T2_1-1.nii.gz'
    t2CurrentSubject_data = nib.load(t2CurrentSubjectName).get_data()
    affineT2CS            = nib.load(t2CurrentSubjectName).get_affine()
    zoomsT2CS             = nib.load(t2CurrentSubjectName).get_header().get_zooms()[:3]

    n_zooms = (zoomsT2CS[0],zoomsT2CS[0],zoomsT2CS[0])
    t2CurrentSubject_data,affineT2CS = reslice(t2CurrentSubject_data,affineT2CS,zoomsT2CS,n_zooms)

    S = t2CurrentSubject_data.astype(np.float64)

    max_radius = 4
    D = SequencialSphereDilation(S)
    for r in range(1, 1+max_radius):
        D.expand(S)
        expected = dilation(S, ball(r))
        actual = D.get_current_dilation()
        assert_array_equal(expected, actual)
        expected = closing(S, ball(r))
        actual = D.get_current_closing()
        assert_array_equal(expected, actual)
Example no. 21
def upsample_smooth(image):
  filename_split = os.path.splitext(image)
  filename_zero, fileext = filename_split
  basename = os.path.basename(filename_zero)
  upsample_res = int(upsampleRes)  # upsampleRes is assumed to be a module-level setting
  upsampled_image = outPath+basename+'_cubicSpline.png'
  os.system("gdalwarp -tr {0} {0} -r cubicspline {1} {2}".format(upsample_res, image, upsampled_image))
  im = np.array(Image.open(upsampled_image))
  with rasterio.open(image) as r:
    im = r.read()
    p = r.profile
  im = im.squeeze()
  selem = disk(1)
  print("image min and max: ", im.min(),im.max())
  dilated = skimage.morphology.dilation(im, selem)
  print("dilated image min and max: ", dilated.min(),dilated.max())
  eroded = skimage.morphology.erosion(im, selem)
  print("eroded image min and max: ", eroded.min(),eroded.max())
  opened = opening(im, selem)
  print("opened image min and max: ", opened.min(),opened.max())
  closed = closing(im, selem)
  print("closed image min and max: ", closed.min(),closed.max())
  Image.fromarray(dilated).save(outPath+basename+'.png')
  with rasterio.open(outPath+basename+fileext, 'w', **p) as dst:
      dst.write(dilated, 1)  # write the numpy array, not the PIL copy
  color_outPath = outPath+'color/'
  if not os.path.exists(color_outPath):
        os.mkdir(color_outPath) 
  colored_image = color_outPath+basename+'.png'
  os.system("gdaldem color-relief", dilated, colorfile, colored_image)
  return im, dilated, eroded, opened, closed
Example no. 22
def processOneImage(inputPath, outputPath):
    image = io.imread(inputPath)
    greyImage = rgb2grey(image)
    threshold = threshold_otsu(greyImage)
    imgout = closing(greyImage > threshold, square(1))
    imgout = crop(imgout)
    imgout = transform.resize(imgout, (max(imgout.shape), max(imgout.shape)))
    io.imsave(outputPath, imgout)
Example no. 23
def cloud_mask_kmeans(image, k=3):
    image = np.array(image)
    km = KMeans(k).fit(image.reshape(-1, 1))
    labels, centers = km.labels_, km.cluster_centers_
    cloud_center = np.argmax(centers)
    mask = ((labels == cloud_center) & 1).reshape(image.shape)
    mask = closing(mask, square(5))
    del km, labels, centers, cloud_center
    return np.array(mask)
Example no. 24
 def preprocess_image(self):
     # Total-variation denoising
     image = restoration.denoise_tv_chambolle(self.image, weight=0.1)
     # Return threshold value based on Otsu's method
     thresh = threshold_otsu(image)
     # Increases contrast
     self.bw = closing(image <= thresh, square(1))
     self.cleared = self.bw.copy()
     return self.cleared
Example no. 25
def RemoveSpeckles(image,displayImages):
    # opening and closing to remove speckles in the background and fill small holes in the foreground
    radius = 5
    selem = disk(radius)
    opened = opening(image,selem=selem) 
    img_speckless = closing(opened,selem=disk(5))
    if displayImages == 1 :
        plt.figure(),plt.title('Opened and Closed Image'),plt.imshow(img_speckless,cmap='Greys')
    return img_as_ubyte(img_speckless)
Example no. 26
 def crop_to_object(self, img):
     img = img_as_ubyte(closing(img, square(5)))
     ret,thresh = cv2.threshold(img,20,255,0)
     _, lc, _ = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
     assert len(lc) == 1
     rect = cv2.minAreaRect(lc[0])
     img = rotate(img, rect[2], resize = True)
     img = img_as_ubyte(img)
     ret,thresh = cv2.threshold(img,20,255,0)
     thresh = img_as_ubyte(closing(thresh, square(5)))
     _, lc, _ = cv2.findContours(thresh,cv2.RETR_TREE,cv2.CHAIN_APPROX_SIMPLE)
     assert len(lc) == 1
     rect = cv2.minAreaRect(lc[0])
     box = cv2.boxPoints(rect)
     box = np.int0(box)
     min_y, max_y = min(box[:,1]), max(box[:,1])
     min_x, max_x = min(box[:,0]), max(box[:,0])
     return img[min_y:max_y,min_x:max_x]
Example no. 27
def process_image(image,
                  net,
                  get_rough_background,
                  get_accurate_background,
                  patch_size=128,
                  n_input_chanels=1,
                  binarization_threshold=0.3,
                  closing_filter_size=8,
                  minimal_crack_size=100):
    image_h = image.shape[0]
    image_w = image.shape[1]

    plt.figure(figsize=(16, 24))
    plt.subplot(3, 2, 1)
    plt.imshow(image, cmap='gray')

    rough_background = get_rough_background(image[np.newaxis,
                                                  np.newaxis, :, :] + 0.5)
    accurate_background = get_accurate_background(rough_background)
    plt.subplot(3, 2, 2)
    plt.imshow(accurate_background[0, 0], cmap='gray')

    prediction = predict_image(image[np.newaxis, :, :], accurate_background[0,
                                                                            0],
                               net, patch_size, n_input_chanels)
    plt.subplot(3, 2, 3)
    plt.imshow(prediction, cmap='gray')

    binarized = np.where(prediction > binarization_threshold, 1, 0)
    plt.subplot(3, 2, 4)
    plt.imshow(binarized, cmap='gray')

    filtered = closing(binarized, square(closing_filter_size))
    plt.subplot(3, 2, 5)
    plt.imshow(filtered, cmap='gray')

    blobs_labels = label(filtered, background=0)
    labels, counts = np.unique(blobs_labels, return_counts=True)
    final = np.where(
        np.in1d(blobs_labels,
                np.where(counts <= minimal_crack_size)[0]).reshape(
                    (image_h, image_w)), 0, filtered)
    plt.subplot(3, 2, 6)
    plt.imshow(np.where(
        np.logical_and(accurate_background[0, 0] == 1, final == 0), 0.4,
        final),
               cmap='gray')

    plt.show()

    bins = np.arange(0, 180, 5)
    area, length, weights = get_statistics(final, bins.size)
    plt.hist(bins, bins=bins.size, weights=weights)
    plt.show()
    print('area: ', area)
    print('length: ', length)
    return final, (bins, weights)
Example no. 28
def asf_n(I, order=3):
    """

    """
    F = I.copy()
    for r in np.arange(1, order + 1):
        se = m.disk(r)
        F = m.opening(m.closing(F, selem=se), selem=se)
    return F
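asf_n above is an alternating sequential filter: at each radius r = 1..order it applies a closing followed by an opening with a disk of radius r. A hedged usage sketch, assuming asf_n is in scope, numpy is imported as np, skimage.morphology as m, and an skimage version that still accepts the selem keyword:

import numpy as np
from skimage import data
from skimage import morphology as m

I = data.camera()
F = asf_n(I, order=3)        # progressively smooths bright and dark details up to radius 3
print(I.dtype, F.dtype, int(np.abs(I.astype(int) - F.astype(int)).mean()))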
Example no. 29
def morphological_filter(img):
    thresh = threshold_otsu(img)
    print(thresh, len(img[img >= thresh].ravel()) / len(img.ravel()))
    binary = img >= thresh
    make_figure(binary).show()
    o_binary = opening(binary)
    make_figure(o_binary).show()
    c_binary = closing(o_binary)
    make_figure(c_binary).show()
Example no. 30
def getArea(address):
    """Geocode address and retreive image centered
    around lat/long"""
    address = address
    results = Geocoder.geocode(address)
    lat, lng = results[0].coordinates
    zip_code = results[0].postal_code

    map_url = 'https://maps.googleapis.com/maps/api/staticmap?center={0},{1}&size=640x640&zoom=19&sensor=false&maptype=roadmap&&style=visibility:simplified|gamma:0.1'
    request_url = map_url.format(lat, lng)
    req = urllib.urlopen(request_url)
    img = io.imread(req.geturl(),flatten=True)
    labels, numobjects = ndimage.label(img)
    image = filter.canny(img, sigma=3)
    thresh = threshold_otsu(image)
    bw = closing(image > thresh, square(3))

    # remove artifacts connected to image border
    cleared = bw.copy()
    clear_border(cleared)

    # label image regions
    label_image = label(cleared)
    borders = np.logical_xor(bw, cleared)
    label_image[borders] = -1
    image_label_overlay = label2rgb(label_image, image=image)

    fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
    ax.imshow(image_label_overlay)
    dist = []
    rp = regionprops(label_image)
    rp = [x for x in rp if 100 < x.area <= 900]

    for region in rp:

        # skip small images
        #if region.area < 100:
        #    continue
        dist.append(sqrt( ( 320-region.centroid[0] )**2 + ( 320-region.centroid[1] )**2 ))
        # draw rectangle around segmented coins
        #minr, minc, maxr, maxc = region.bbox
        #rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
        #                      fill=False, edgecolor='red', linewidth=2)
        #ax.add_patch(rect)

    roof_index = dist.index(min(dist))
    minr, minc, maxr, maxc = rp[roof_index].bbox
    rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                          fill=False, edgecolor='red', linewidth=2)
    ax.add_patch(rect)

    img = StringIO()
    fig.savefig(img)
    img.seek(0)
    session['roof_area'] = rp[roof_index].area
    roof_area = (rp[roof_index].area)*12
    return(roof_area)
Example no. 31
def applyMorph(result):
    ## Image morphological operations
    # The morphological opening on an image is defined as an erosion followed by a dilation.
    # Opening can remove small bright spots (i.e. "salt") and connect small dark cracks.
    # This tends to "open" up (dark) gaps between (bright) features.
    result = sm.opening(result, sm.disk(2))           # opening with a disk of radius 2
    result = sm.dilation(result, sm.disk(tolerance))  # dilation with a disk of radius `tolerance`
    result = sm.closing(result, sm.disk(2))           # closing with a disk of radius 2
    return result
Example no. 32
    def clicker(self, event, x, y, flags, param):
        """
        Function to store the upper and lower color bounds for the mask at the
        clicked location.
        """
        # parse the parameters
        bgr = param[0]
        hsv = param[1]
        resize_factor = param[2]
        if event == cv2.EVENT_LBUTTONDOWN:
            hsv = cv2.cvtColor(bgr, cv2.COLOR_BGR2HSV_FULL)
            pixel = hsv[y, x]
            # mask the following HSV (hue, saturation, brightness) ranges
            upper = np.array([pixel[0] + 6, pixel[1] + 6, pixel[2] + 30])
            lower = np.array([pixel[0] - 6, pixel[1] - 6, pixel[2] - 30])
            # add the clicked value
            self.lower = lower
            self.upper = upper
            # how does this mask look overlaid on the original image?
            image_mask = cv2.inRange(hsv, lower, upper)

            image_mask = np.invert(image_mask).astype(bool)
            # clean it up
            image_mask = morph.remove_small_holes(image_mask,
                                                  area_threshold=10,
                                                  connectivity=2)
            image_mask = morph.opening(image_mask, selem=morph.selem.disk(1))
            image_mask = morph.closing(image_mask, selem=morph.selem.disk(1))
            image_mask = image_mask.astype(np.uint8)
            image_mask[image_mask == 0] = 255
            image_mask[image_mask == 1] = 0
            image_mask = np.dstack((image_mask, image_mask, image_mask))
            image_mask = cv2.addWeighted(image_mask, 0.6, bgr, 1, 0)
            win_name = "Otsu Shadow Mask + Color Mask ('y' keep, 'n' to try another)"
            cv2.namedWindow(win_name, cv2.WINDOW_NORMAL)
            while cv2.getWindowProperty(win_name, 0) >= 0:
                cv2.imshow(win_name, image_mask)
                cv2.moveWindow(win_name, 0, 0)
                cv2.resizeWindow(win_name,
                                 resizeWin(image_mask, resize_factor)[0],
                                 resizeWin(image_mask, resize_factor)[1])
                k = cv2.waitKey(1)
                # only keep the bounds if the 'y' key is pressed
                if k == ord('y') & 0xFF:
                    cv2.destroyWindow(win_name)
                    self.closeWin = True
                    break
                # ignore the clicked bounds if 'n' or window is closed
                elif k == ord('n') & 0xFF:
                    self.lower = None
                    self.upper = None
                    cv2.destroyWindow(win_name)
                    break
                elif cv2.getWindowProperty(win_name, 0) == -1:
                    self.lower = None
                    self.upper = None
                    break
Example no. 33
 def func(frame):
     _dtype = frame.dtype
     kernel = mor.disk(3)
     frameWP = frame - mor.white_tophat(frame, kernel) * (mor.white_tophat(frame, kernel) > 1000).astype(float)
     kernel = mor.rectangle(25, 1)
     closed = mor.closing(frameWP, kernel)
     opened = mor.opening(closed, kernel)
     result = ((frameWP.astype(float) / opened.astype(float)) * 3000.0)
     return result.astype(_dtype)
Example no. 34
def apply_morphology(img):
    selem = square(kernel_size)
    return erosion(img,
                   selem), dilation(img, selem), opening(img, selem), closing(
                       img,
                       selem), ndimage.gaussian_filter(img,
                                                       sigma=(kernel_size,
                                                              kernel_size),
                                                       order=0)
Example no. 35
 def preprocess_image(self):
     """
     Denoises and increases contrast. 
     """
     #image = restoration.denoise_tv_chambolle(self.image, weight=0.1)
     thresh = filter.threshold_adaptive(self.image, 21)
     self.bw = closing(self.image > thresh, square(2))
     self.cleared = self.bw.copy()
     return self.cleared
Example no. 36
 def preprocess_image(self):
     """
     Denoises and increases contrast.
     """
     image = restoration.denoise_tv_chambolle(self.image, weight=0.1)
     thresh = threshold_otsu(image)
     self.bw = closing(image > thresh, square(2))
     self.cleared = self.bw.copy()
     return self.cleared
Example no. 37
def processOneImage(inputPath, outputPaths):
    image = io.imread(inputPath)
    greyImage = rgb2grey(image)
    threshold = threshold_otsu(greyImage)
    imgout = closing(greyImage > threshold, square(1))
    imgout = crop(imgout)
    imgout = transform.resize(imgout, (max(imgout.shape), max(imgout.shape)))
    for outputPath in outputPaths:
        io.imsave(outputPath, imgout)
Example no. 38
def get_open_close_info(im, sz):
    o_area = img_area(im)
    a = np.zeros((2 * sz))
    for i in range(sz):
        im_c = morpho.opening(im, morpho.disk(i + 1))
        a[i] = (o_area - img_area(im_c)) / o_area
        im_c = morpho.closing(im, morpho.disk(i + 1))
        a[i + sz] = (o_area - img_area(im_c)) / o_area
    return a
Example no. 39
def morph_version(grey_im, im):
    Thresh = filters.threshold_otsu(grey_im)

    io.imsave("test1.png", grey_im)
    print(Thresh)
    print(grey_im)
    picBW = grey_im < Thresh
    print(picBW)
    Strel = morph.disk(2)
    Strel2 = morph.disk(5)
    BWimg_dil = morph.dilation(picBW, Strel)
    BWimg_close = morph.closing(BWimg_dil, Strel2)
    foreground = morph.closing(grey_im > Thresh, morph.square(3))
    io.imsave("test.png", img_as_uint(foreground))

    L = label(BWimg_close)
    half_length = int(np.floor(np.size(L, 1) / 2))
    L_cntr = L[half_length, half_length]
    print("Label of blob that contains the center pixel: {}".format(L_cntr))

    cropped_images = []
    pad = 20

    top_ten_areas = sorted(regionprops(L), key=lambda x: x.area,
                           reverse=True)[:10]

    for region_index, region in enumerate(top_ten_areas):
        print("area: ", region.area)

        # draw a rectangle around the segmented articles
        # bbox describes: min_row, min_col, max_row, max_col
        minr, minc, maxr, maxc = region.bbox

        # use those bounding box coordinates to crop the image
        cropped_images.append(im[minr - pad:maxr + pad, minc - pad:maxc + pad])

    # create a directory in which to store cropped images
    out_dir = "segmented_articles/"
    if not os.path.exists(out_dir):
        os.makedirs(out_dir)

    # save each cropped image by its index number
    for c, cropped_image in enumerate(cropped_images):
        io.imsave(out_dir + str(c) + ".png", cropped_image)
Example no. 40
def get_label_image(image=None,
                    min_pixels=50,
                    square_width=3,
                    thresh=None,
                    connectivity=None,
                    semantic_segmentation=None,
                    min_thresh=0.1,
                    label_size_max_portion=0.5,
                    plot=False,
                    figsize=(20, 10)):
    """Use skimage to filter background and get labels
    
    Args:
        image: 2-D numpy array or torch.Tensor
        
    Returns:
        label_image: same size as image
        regions: skimage.measure.regionprops(image)
    """
    from skimage.measure import label, regionprops
    from skimage.morphology import closing, square
    from skimage.filters import threshold_otsu
    if semantic_segmentation is None:
        if isinstance(image, torch.Tensor):
            image = image.detach().cpu().numpy()
        if thresh is None:
            # apply threshold
            thresh = threshold_otsu(image)
        if min_thresh is not None:
            thresh = max(thresh, min_thresh)
        semantic_segmentation = closing(image > thresh,
                                        square(width=square_width))
    if plot:
        plt.figure(figsize=figsize)
        plt.imshow(semantic_segmentation)
        plt.title(f'Semantic segmentation with threshold={thresh:.2f}')
        plt.show()
    # label image regions
    label_image = label(semantic_segmentation, connectivity=connectivity)
    if plot:
        plt.figure(figsize=figsize)
        plt.imshow(label_image)
        plt.title(
            f'label image ({collections.Counter(label_image.reshape(-1))} before post-processing)'
        )
        plt.show()
    for k in np.unique(label_image):
        loc = label_image == k
        if loc.sum() < min_pixels or loc.sum(
        ) > label_image.size * label_size_max_portion:
            label_image[loc] = 0
    for i, k in enumerate(sorted(np.unique(label_image))):
        if i < k:
            label_image[label_image == k] = i
    regions = regionprops(label_image)
    return label_image, regions
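A hedged usage sketch of get_label_image on a scikit-image sample. The function references torch.Tensor (and collections/matplotlib when plotting), so torch is assumed to be importable even though the tensor branch is not exercised here:

import torch
import numpy as np
from skimage import data

image = data.coins()[50:-50, 50:-50] / 255.0         # float image in [0, 1]
label_image, regions = get_label_image(image=image, min_pixels=50, plot=False)
print(len(regions), "regions kept after size filtering")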
Example no. 41
    def get_bounding_box(self, img):
        '''
			Find the bounding box of the blue barrel
			call other functions in this class if needed
			
			Inputs:
				img - original image
			Outputs:
				boxes - a list of lists of bounding boxes. Each nested list is a bounding box in the form of [x1, y1, x2, y2] 
				where (x1, y1) and (x2, y2) are the top left and bottom right coordinate respectively. The order of bounding boxes in the list
				is from left to right in the image.
				
			Our solution uses xy-coordinate instead of rc-coordinate. More information: http://scikit-image.org/docs/dev/user_guide/numpy_images.html#coordinate-conventions
		'''
        boxes = []
        threshold = 0.65
        binary_img = self.segment_image(img)
        binary_img_cp = np.copy(binary_img)
        mean_b = np.mean(binary_img_cp, dtype=float)
        std_b = np.std(binary_img_cp, dtype=float)
        while not boxes:
            threshold = threshold + 0.08 * np.log(mean_b / std_b)
            idx = (binary_img_cp >= threshold)
            binary_img = np.uint8(idx)
            selem = disk(10)
            binary_img = closing(binary_img, selem=selem)
            #clean up image
            #find connected components
            contours, hierarchy = cv2.findContours(binary_img,
                                                   cv2.RETR_EXTERNAL,
                                                   cv2.CHAIN_APPROX_SIMPLE)
            #iterate through all the top-level contours,
            #draw each connected component with its own random color
            for idx in range(len(contours)):
                color = 255 * np.random.random([3])
                cv2.drawContours(binary_img, contours, idx, color, -1)

            #go through each region
            #find apply shape statistic to include or exclude as barrel
            props = regionprops(binary_img)
            for reg in props:
                print(reg.area)
                #make sure area seen is sizable enough
                if reg.area > 400:
                    major = reg.major_axis_length
                    minor = reg.minor_axis_length + 0.001
                    ratio = major / minor
                    print(ratio)
                    #make sure area is shaped like barrel (longer than wider)
                    if ratio <= 4.2 and ratio >= 1.1:
                        y1, x1, y2, x2 = reg.bbox
                        boxes.append([x1 - 7, y1 + 5, x2 + 7, y2 + 5])
                        boxes.sort(key=itemgetter(1))

        boxes = boxes[:2]
        return boxes
Example no. 42
    def plot_preprocessed_image(self):
        '''Plots the pre-processed image and returns bounding rectangles of character candidates.'''
        
        rects = []
        image = restoration.denoise_tv_chambolle(self.image, weight=0.2)
        thresh = threshold_otsu(image)
        bw = closing(image > thresh, square(2))
        cleared = bw.copy()
        
        label_image = measure.label(cleared)
        #borders = np.logical_xor(bw, cleared)
       
        #label_image[borders] = -1
        #image_label_overlay = label2rgb(label_image, image=image)
        
        #fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(12, 12))
        #ax.imshow(image_label_overlay)
        
        for region in regionprops(label_image):
            if region.area < 10:
                continue
        
            minr, minc, maxr, maxc = region.bbox

            if minr >= 2:
                minr -= 2
            if minc >= 2:
                minc -= 3
            maxr += 1
            maxc += 2
            
            rects.append([minc, minr, maxc, maxr]) #EXTREMELY IMPORTANT LINE, [x1, y1, x2, y2]
            
            #rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                      #fill=False, edgecolor='red', linewidth=2)
            #ax.add_patch(rect)
        
        #plt.show()

        def is_nested(rects, i, j):
            return (rects[j][0] >= rects[i][0] and rects[j][2] <= rects[i][2] and rects[j][1] >= rects[i][1] and rects[j][3] <= rects[i][3]) or (rects[j][0] <= rects[i][0] and rects[j][2] >= rects[i][2] and rects[j][1] <= rects[i][1] and rects[j][3] >= rects[i][3])
        
        # makes list of indices of nested rectangles in rects
        deletes = []
        for i in range(len(rects)-1):
            for j in range(i+1, len(rects)):
                if (is_nested(rects, i, j)):
                    deletes.append(j)
        # deletes nested rectangles in rects
        num_dels = 0
        for d in deletes:
            del rects[d-num_dels]
            num_dels += 1
            print('successful deletion')
        
        return rects
Example no. 43
def widens_mask(img):
    """ Morphological operators on img """
    img = closing(img, disk(2))
    img = opening(img, disk(1))
    img = remove_little_holes(img,
                              destructive=False)  # remove little black holes
    neg = ~img
    img = ~remove_little_holes(
        neg, destructive=False)  # remove little white elements
    return dilation(img, disk(1))
Example no. 44
def post_process(output_tensor,no_closing): #b,c,h,w->h,w
    out = torch.sigmoid(output_tensor[0])
    out_numpy = out.max(0)[1].cpu().numpy()
    out_numpy = np.array(out_numpy).astype('uint8')
    if not no_closing:
        out_numpy=sm.closing(out_numpy,sm.square(3))
    out_numpy_ske=sm.skeletonize(out_numpy)  # skeletonize the binary prediction
    out_numpy_ske=out_numpy_ske.astype('uint8')
    results={'out':out_numpy,'out_ske':out_numpy_ske}
    return results
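The closing-then-skeletonize step in post_process above, as a standalone hedged sketch on a synthetic binary band (sm standing for skimage.morphology, as in the function):

import numpy as np
import skimage.morphology as sm

out = np.zeros((32, 32), dtype='uint8')
out[14:18, 4:28] = 1
out[15, 12] = 0                                   # a one-pixel hole the closing should fill
closed = sm.closing(out, sm.square(3))
ske = sm.skeletonize(closed).astype('uint8')      # thin the band to a one-pixel line
print(int(closed[15, 12]), int(ske.sum()))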
Example no. 45
    def preprocess_image(self):
        '''
        Denoise and increase contrast
        '''
        image = restoration.denoise_tv_chambolle(self.gray, weight=0.1)
        thresh = threshold_otsu(image)
        self.bw = closing(image > thresh, square(2))
        self.cleared = self.bw.copy()

        return self.cleared
Example no. 46
 def prefilter(self, img, rec_size=20, se_size=3):
 
     se = morphology.disk(se_size)
     
     im1 = self.morpho_rec(img, rec_size)
     im2 = self.morpho_rec2(im1, int(rec_size / 2))
     
     im3 = morphology.closing(im2, se)
     
     return im3
Example no. 47
    def get_enhanced_image_(self, raw_data: RawData, img: array) -> array:
        image = sobel(img)
        image = threshold(image, threshold_otsu(image))
        image = closing(image)

        if self.debug_level >= DebugLevel.ALL:
            imshow(image, cmap="gray")
            self.savers_['enhanced'].save(raw_data.name)

        return image
Example no. 48
    def close(self, alpha=ALPHA):
        '''performs an opening followed by a closing, with a disk sized from the image'''
        try:
            seed = (min(len(self.altered_image), len(self.altered_image[0]))**
                    2) / 10000
            print(seed)
            assert seed < 50  # to prevent lags

            self.altered_image = morphology.opening(
                self.altered_image, morphology.disk(seed / alpha))
            self.altered_image = morphology.closing(
                self.altered_image, morphology.disk(seed / alpha))
        except AssertionError as e:
            self.altered_image = morphology.opening(
                self.altered_image, morphology.disk(seed / (alpha**2)))
            self.altered_image = morphology.closing(
                self.altered_image, morphology.disk(seed / (alpha**2)))
            print("seed size too big: {}! \nresize image please!".format(seed))
            print(e)
Example no. 49
def refine_mask(mask, type="calc"):
    if type == "calc":
        #mask = dilation(mask, square(9))
        return mask
    elif type == "mass":
        mask = closing(mask, square(9))
    else:
        raise TypeError("Unknown lesion type!")

    return mask
Example no. 50
def delsmall( img0 ):
    img =color.rgb2gray(img0)
    thresh = filters.threshold_otsu(img)  # determine the best segmentation threshold with Otsu's method

    bwimg = (img >= thresh)  # threshold the image to produce a binary image
    labels = measure.label(bwimg)  # label connected regions
    bw = morphology.closing(img < thresh, morphology.square(2))
    #img1 = morphology.remove_small_objects(labels, min_size=5, connectivity=1)
    img2= morphology.remove_small_objects(bw, min_size=200, connectivity=1)
    return img2
Example no. 51
def processing(path):
    img = imread(path)
    gray = rgb2gray(img)

    binary = 1 - (gray > 0.5)
    binary = closing(binary)
    binary = opening(binary)

    labeled, nr_objects = ndimage.label(binary)
    return nr_objects
Example no. 52
    def apply(self, img, param=None):
        """
        see Filter.apply()
        """
        if param['bin'] is not None and param['bin'] == 'y':
            result = binary_closing(img, selem=disk(param['size']))
        else:
            result = closing(img, selem=disk(param['size']))

        return result
Example no. 53
def post_process_my(Test_predicted):
    struc_element = disk(4)
    total = Test_predicted.shape[0]
    post_predicted = np.zeros((total, 128, 128, 1), dtype=np.uint8)
    for j in range(total):
        myimage = Test_predicted[j]  #to uint8
        opened = closing(myimage[:, :, 0], struc_element)
        openedd = np.expand_dims(opened, axis=-1)
        post_predicted[j, :, :, :] = openedd
    return post_predicted
Example no. 54
    def prefilter(self, img, rec_size=20, se_size=3):

        se = morphology.disk(se_size)

        im1 = self.morpho_rec(img, rec_size)
        im2 = self.morpho_rec2(im1, int(rec_size / 2))

        im3 = morphology.closing(im2, se)

        return im3
Example no. 55
 def _get_membrane_cube_data(self):
     self._bdr_cube_data = np.zeros(self._cube_shape, dtype='bool')
     for i in range(self._cube_shape[0]):
         bdr_slice = mask_and_boundary_related.get_all_cell_boundary(
             utils.label_to_rgb_ndarray(self._label_cube_data[i, :, :]))
         self._bdr_cube_data[
             i, :, :] = self._bdr_cube_data[i, :, :] | closing(bdr_slice)
     for i in range(self._cube_shape[1]):
         bdr_slice = mask_and_boundary_related.get_all_cell_boundary(
             utils.label_to_rgb_ndarray(self._label_cube_data[:, i, :]))
         self._bdr_cube_data[:,
                             i, :] = self._bdr_cube_data[:, i, :] | closing(
                                 bdr_slice)
     for i in range(self._cube_shape[2]):
         bdr_slice = mask_and_boundary_related.get_all_cell_boundary(
             utils.label_to_rgb_ndarray(self._label_cube_data[:, :, i]))
         self._bdr_cube_data[:, :,
                             i] = self._bdr_cube_data[:, :, i] | closing(
                                 bdr_slice)
Example no. 56
def morph(X, strel, operation="opening"):
    if operation == "erosion":
        X_out = erosion(X, strel)
    elif operation == "dilation":
        X_out = dilation(X, strel)
    elif operation == "closing":
        X_out = closing(X, strel)
    else:
        X_out = opening(X, strel)
    return X_out
Example no. 57
def postprocess_bladder_mask(mask):
    selem = morphology.ball(3)
    mask = morphology.opening(mask, selem)
    labels = measure.label(mask)
    l_max = largest_label_volume(labels, bg=0)
    if l_max is not None:
        mask[labels != l_max] = 0

    mask = morphology.closing(mask, selem)
    return mask
Example no. 58
def ApplyThresholdToImageRegion(image2, Tb, Bb, Lb, Rb,shouldThresholdImage):

    image = rgb2gray(image2)

    global foregroundPixelValue
    global backgroundPixelValue

    thresholdValue = threshold_otsu(image)

    NumberOfRows = image.shape[0]
    NumberOfColumns = image.shape[1]

    numberOfBlackPixels = 0
    numberOfWhitePixels = 0
    selem = disk(3)

    # simple thresholding
    for y in range(NumberOfRows):
        for x in range(NumberOfColumns):

            isWithinBoundary = IsWithinBoundary(y,x,image2, Tb, Bb, Lb, Rb,shouldThresholdImage)

            if (isWithinBoundary):
                if image[y,x] > thresholdValue:
                    #black
                    image[y,x] = 0
                    numberOfBlackPixels += 1
                else:
                    #white
                    image[y,x] = 1
                    numberOfWhitePixels += 1

    # assume foreground has more pixels in face region
    if (numberOfWhitePixels > numberOfBlackPixels):
        foregroundPixelValue = 1
        backgroundPixelValue = 0
        #print("foreground color is white")
    else:
        foregroundPixelValue = 0
        backgroundPixelValue = 1
        #print("foreground color is black")

    image = opening(image,selem)
    if (foregroundPixelValue == 0):
        image = opening(image,selem)
    else:
        image = closing(image,selem)


    if drawFaceImages:
        io.imshow(image)
        io.show()

    return image
Example no. 59
	def parse_image(self):
		self.m_print("Parsing panel image",0)
		thresh = threshold_otsu(self.i_file)
		bw = closing(self.i_file > thresh, square(3))
		# remove artifacts connected to image border
		cleared = bw.copy()
		clear_border(cleared)
		# label image regions
		label_image = label(cleared)
		borders = np.logical_xor(bw, cleared)
		label_image[borders] = -1
		return label2rgb(label_image, image=self.i_file),label_image
Example no. 60
    def prefilter_new(self, img, rec_size=20, se_size=3):
    
        img_cc = ccore.numpy_to_image(img, copy=True)        
        im1 = ccore.diameter_open(img_cc, rec_size, 8)        
        im2 = ccore.diameter_close(im1, int(rec_size / 2), 8)        

        #im1 = self.morpho_rec(img, rec_size)
        #im2 = self.morpho_rec2(im1, int(rec_size / 2))

        se = morphology.disk(se_size)        
        im3 = morphology.closing(im2.toArray(), se)
        
        return im3