def polylinesFromBinImage(img, minimum_cluster_size=6,
                          remove_small_obj_size=3,
                          reconnect_size=3,
                          max_n_contours=None, max_len_contour=None,
                          copy=True):
    '''
    return a list of arrays of un-branching contours

    img -> (boolean) array 

    optional:
    ---------
    minimum_cluster_size -> minimum number of pixels connected together to build a contour

    ##search_kernel_size -> TODO
    ##min_search_kernel_moment -> TODO

    numeric:
    -------------
    max_n_contours -> maximum number of possible contours in img
    max_len_contour -> maximum contour length

    '''
    assert minimum_cluster_size > 1
    assert reconnect_size % 2, 'reconnect_size needs to be odd'

    # assert search_kernel_size == 0 or search_kernel_size > 2 and search_kernel_size%2, 'kernel size needs to be odd'
    # assume array size parameters, is not given:
    if max_n_contours is None:
        max_n_contours = max(img.shape)
    if max_len_contour is None:
        max_len_contour = sum(img.shape[:2])
    # array containing coord. of all contours:
    contours = np.zeros(shape=(max_n_contours, max_len_contour, 2),
                        dtype=np.uint16)  # if not search_kernel_size else np.float32)

    if img.dtype != bool:
        img = img.astype(bool)
    elif copy:
        img = img.copy()

    if remove_small_obj_size:
        remove_small_objects(img, remove_small_obj_size,
                             connectivity=2, in_place=True)
    if reconnect_size:
        # remove gaps
        maximum_filter(img, reconnect_size, output=img)
        # reduce contour width to 1
        img = skeletonize(img)

    n_contours = _populateContoursArray(img, contours, minimum_cluster_size)
    contours = contours[:n_contours]

    l = []
    for c in contours:
        ind = np.zeros(shape=len(c), dtype=bool)
        _getValidInd(c, ind)
        # remove all empty spaces:
        l.append(c[ind])
    return l
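The preprocessing chain inside polylinesFromBinImage (drop small objects, bridge small gaps with a maximum filter, skeletonize back to 1-pixel-wide lines) can be exercised on its own. A minimal sketch on a synthetic boolean image; the imports and toy data below are assumptions, not part of the original module:

import numpy as np
from scipy.ndimage import maximum_filter
from skimage.morphology import remove_small_objects, skeletonize

img = np.zeros((40, 40), dtype=bool)
img[10, 5:18] = True            # a line broken by a small gap
img[10, 20:35] = True
img[30, 30] = True              # isolated speck, removed as a small object

img = remove_small_objects(img, 3, connectivity=2)
img = maximum_filter(img, 3)    # reconnect the gap
img = skeletonize(img)          # thin back to 1-pixel-wide contours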
 def get_rough_detection(self, img, bigsize=40.0, smallsize=4.0, thresh = 0):
     diff = self.difference_of_gaussian(-img, bigsize, smallsize)
     diff[diff>thresh] = 1
     
     se = morphology.square(4)
     ero = morphology.erosion(diff, se)
     
     labimage = label(ero)
     #rec = morphology.reconstruction(ero, img, method='dilation').astype(np.dtype('uint8'))
     
     # connectivity=1 corresponds to 4-connectivity.
     morphology.remove_small_objects(labimage, min_size=600, connectivity=1, in_place=True)
     #res = np.zeros(img.shape)
     ero[labimage==0] = 0
     ero = 1 - ero
     labimage = label(ero)
     morphology.remove_small_objects(labimage, min_size=400, connectivity=1, in_place=True)
     ero[labimage==0] = 0
     res = 1 - ero
     res[res>0] = 255
     
     #temp = 255 - temp
     #temp = morphology.remove_small_objects(temp, min_size=400, connectivity=1, in_place=True)
     #res = 255 - temp
     
     return res
Example #3
	def predict(self, times, frames, output_times):
		nout = len(output_times)
		nf = len(frames)

		last_idx = np.argmax(times)
		zt = frames[last_idx] < self.thresh
		skmorph.remove_small_objects(zt, min_size=self.min_size, in_place=True)
		d = morphology.distance_transform_edt(np.invert(zt))
		return np.tile(d, (nout,1,1))
Example #4
def build_skeleton(frame):
    """
    build a corner tree, skeletonize, dilate
    """
    tree = trees.tree_corners(frame)
    tree = morphology.skeletonize(tree)
    # tree = morphology.binary_dilation(tree)
    morphology.remove_small_objects(tree, min_size=20, connectivity=2, in_place=True)
    tree = morphology.binary_dilation(tree)
    return tree
Example #5
def distance_trend(times, frames, threshold=25, min_size=12):
	nf = len(frames)
	zt = filters.gaussian_filter(frames,1.5) > threshold
	d_outer = np.zeros((nf,) + frames[0].shape)
	d_inner = np.zeros((nf,) + frames[0].shape)

	for i in range(nf):
		skmorph.remove_small_objects(zt, min_size=min_size, in_place=True)
		d_outer[i] = morphology.distance_transform_edt(np.invert(zt[i]))
		d_inner[i] = morphology.distance_transform_edt(zt[i])

	return d_outer, d_inner
Example #6
def nuclei_regions(comp_map):
    """
    NUCLEI_REGIONS: extract "support regions" for nuclei. This function
    expects as input a "tissue components map" (as returned, for example,
    by segm.tissue_components) where values of 1 indicate pixels having
    a color corresponding to nuclei.
    It returns a set of compact support regions corresponding to the
    nuclei.


    :param comp_map: numpy.ndarray
       A mask identifying different tissue components, as obtained
       by classification in RGB space. The value 0

       See segm.tissue.tissue_components()

    :return:
    """
    # Deprecated:...
    # img_hem, _ = rgb2he(img0, normalize=True)

    # img_hem = denoise_tv_bregman(img_hem, HE_OPTS['bregm'])

    # Get a mask of nuclei regions by unsupervised clustering:
    # Vector Quantization: background, mid-intensity Hem and high intensity Hem
    # -train the quantizer for 3 levels
    # vq = KMeans(n_clusters=3)
    # vq.fit(img_hem.reshape((-1,1)))
    # -the level of interest is the brightest:
    # k = np.argsort(vq.cluster_centers_.squeeze())[2]
    # mask_hem = (vq.labels_ == k).reshape(img_hem.shape)
    # ...end deprecated

    # Final mask:
    mask = (comp_map == 1)   # use the components classified by color

    # mask = morph.closing(mask, selem=HE_OPTS['strel1'])
    # mask = morph.opening(mask, selem=HE_OPTS['strel1'])
    # morph.remove_small_objects(mask, in_place=True)
    # mask = (mask > 0)

    mask = mahotas.close_holes(mask)
    morph.remove_small_objects(mask, in_place=True)

    dst  = mahotas.stretch(mahotas.distance(mask))
    Bc=np.ones((9,9))
    lmax = mahotas.regmax(dst, Bc=Bc)
    spots, _ = mahotas.label(lmax, Bc=Bc)
    regions = mahotas.cwatershed(lmax.max() - lmax, spots) * mask

    return regions
# end NUCLEI_REGIONS
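For comparison, the same split-touching-nuclei idea used above (distance transform, regional maxima as seeds, watershed) can be written with scikit-image only. A hedged sketch on a synthetic mask; the mahotas-based code above is the original:

import numpy as np
from scipy import ndimage as ndi
from skimage import measure, morphology, segmentation

mask = np.zeros((64, 64), dtype=bool)
mask[10:30, 10:30] = True
mask[25:45, 25:45] = True                     # two overlapping "nuclei"

mask = ndi.binary_fill_holes(mask)
mask = morphology.remove_small_objects(mask, min_size=64)

dist = ndi.distance_transform_edt(mask)
seeds = measure.label(morphology.local_maxima(dist))
regions = segmentation.watershed(-dist, seeds, mask=mask)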
def cleanImageOld(img, min_size, scale_factor, img_otsu=None, saver=lambda n,x: x):
    img, exposure_data = normalize_exposure2(img)
    img = saver("01-exposure", img)
    img_otsu = ski.filter.threshold_otsu(img) if not img_otsu else img_otsu
    
    img = saver("02-zoom", scipy.ndimage.zoom(img, scale_factor, order=3))
    print("img:",img.shape,img.dtype)
    
    img_cleaned = saver("03-bw", (img > img_otsu))
    
    dbg = DebugData()   
    
    img_cleaned = morphology.binary_erosion(img_cleaned,
                                            morphology.disk(int(2*scale_factor)))
    
    img_cleaned = saver("04-erosion", img_cleaned, dbg=dbg)
    
    img_cleaned = morphology.remove_small_objects(
                    img_cleaned, min_size=int(min_size*scale_factor), connectivity=2)
                    
    img_cleaned = saver("05-remove", img_cleaned)

    
    cleaned_sum = np.sum(img_cleaned)
    print("img_cleaned size:",cleaned_sum)
    
    return img_cleaned, exposure_data
    def clear_body(self, body, minimum_object_size_px=2400):
        """ Vycisti obraz od stolu a ostatnich veci okolo tela a zanecha pouze a jen telo """
        body = scipy.ndimage.filters.gaussian_filter(copy.copy(body).astype(float), sigma=[15, 0, 0]) > 0.7

        # the following lines are here to suppress the warning "Only one label was provided to `remove_small_objects`."
        blabeled = morphology.label(body)
        if np.max(blabeled) > 1:
            body = morphology.remove_small_objects(morphology.label(blabeled), minimum_object_size_px)
        del blabeled
        
        body[0] = False
        body[-1] = False
        
        body = scipy.ndimage.filters.gaussian_filter(copy.copy(body.astype(float)), sigma=[1, 3, 3]) > 0.2
    
        bodylabel = label(body)
    
        n_of_pixels = [np.count_nonzero(bodylabel==i) for i in range(len(np.unique(bodylabel)))]
        labelsort = np.argsort(n_of_pixels)[::-1]
    
        newbody = np.zeros(body.shape)
        newbody[bodylabel==labelsort[0]] = body[(bodylabel==labelsort[0])]
        newbody[bodylabel==labelsort[1]] = body[(bodylabel==labelsort[1])]
    
        return newbody.astype(bool)
def getPara(predict, true, threshold, resolution, windowsize):
    (TP, FP, TN, FN, class_lable) = perf_measure(true, predict, threshold)
    if((TP + FN) == 0):
        TPR = 0
    else:
        TPR = float(TP) / (TP + FN)

    class_lable = class_lable.astype(bool).reshape(250,  130)
    true = true.astype(bool).reshape((250,  130))

    num = 2
    x = np.arange( -num , num+1, 1)
    xx, yy  = np.meshgrid( x, x )
    struc = (xx * xx + yy * yy)<= num * num
    class_lable = binary_dilation(class_lable, struc)
    class_lable = binary_erosion(class_lable, struc)

    # predict2 = remove_small_objects(class_lable, windowsize * resolution, in_place=False)
    predict2 = remove_small_objects(class_lable, windowsize, in_place=False)
    labeled_array1, num_features1 = label(predict2)
    labeled_array2, num_features2 = label(true)
    FP_num = num_features1 - num_features2
    if FP_num < 0:
        FP_num = 0
    return TPR, FP_num
Example #10
def get_bg_mask(img):
    
    #if img.ndim == 3:
    #    bg_mask = img.any(axis=-1)
    #    bg_mask = np.invert(bg_mask) # consistent with np.ma, True if masked

    #    # make multichannel (is it really this hard?)
    #    bg_mask = np.repeat(bg_mask[:,:,np.newaxis], 3, axis=2) 
    #
    #else:
    #    bg_mask = (img != 0)
    #    bg_mask = np.invert(bg_mask) # see above

    #bound = segmentation.find_boundaries(bg_mask, mode='inner', background=1)
    #bg_mask[bound] = 1
    #min_size = img.shape[0] * img.shape[1] // 4 
    #holes = morphology.remove_small_holes(bg_mask, min_size=min_size)
    #bg_mask[holes] = 1
    
    bg_mask = segmentation.find_boundaries(img)
    bg_mask = morphology.remove_small_objects(bg_mask)
    bg_mask = morphology.remove_small_holes(bg_mask)

    bg_mask = np.invert(bg_mask)
    return bg_mask
Example #11
def label_nuclei(binary, min_size):
    '''Label, watershed and remove small objects'''

    distance = medial_axis(binary, return_distance=True)[1]

    distance_blured = gaussian_filter(distance, 5)

    local_maxi = peak_local_max(distance_blured, indices=False, labels=binary, min_distance = 30)

    markers = measure_label(local_maxi)

#    markers[~binary] = -1

#    labels_rw = segmentation.random_walker(binary, markers)

#    labels_rw[labels_rw == -1] = 0

#    labels_rw = segmentation.relabel_sequential(labels_rw)

    labels_ws = watershed(-distance, markers, mask=binary)

    labels_large = remove_small_objects(labels_ws,min_size)

    labels_clean_border = clear_border(labels_large)

    labels_from_one = relabel_sequential(labels_clean_border)

#    plt.imshow(ndimage.morphology.binary_dilation(markers))
#    plt.show()

    return labels_from_one[0]
def segment_Image(im,depth):
    
    ## Setting lower threshold using Otsu method.
    thresh_l = 0.8*threshold_otsu(im)
    print(thresh_l)
    img = sitk.GetImageFromArray(im)

    ## Smoothing image.  
    imgSmooth = sitk.CurvatureFlow(image1=img,
                                    timeStep=0.125,
                                    numberOfIterations=10)
    lstSeeds=[]

    for i in range(0, im.shape[1]-1, 10 ):
        for j in range (0, im.shape[0]-1, 10 ):
            if im[j][i]>0:
                seed = [int(i), int(j)]
                lstSeeds.append(seed)
    
    ## Segmenting image.
    imgWhiteMatter = sitk.ConnectedThreshold(image1=imgSmooth, 
                                              seedList=lstSeeds, 
                                              lower=thresh_l, 
                                              upper=255,
                                              replaceValue=255)
    nda = sitk.GetArrayFromImage(imgWhiteMatter)
    
    nda = nda.astype(bool)
    nda1 = remove_small_objects(nda, 150, connectivity=3)
    nda1 = nda1.astype(int)
    newdepth = depth
    
    return nda1,newdepth
Example #13
File: Zad_2.py Project: gracz21/KCK
def main():
    plt.figure(figsize=(25, 24))
    planes = ['samolot00.jpg', 'samolot01.jpg', 'samolot03.jpg', 'samolot04.jpg', 'samolot05.jpg','samolot07.jpg',
              'samolot08.jpg', 'samolot09.jpg', 'samolot10.jpg', 'samolot11.jpg', 'samolot12.jpg', 'samolot13.jpg',
              'samolot14.jpg', 'samolot15.jpg', 'samolot16.jpg', 'samolot17.jpg', 'samolot18.jpg', 'samolot20.jpg']
    i = 1
    for file in planes:
        img = data.imread(file, as_grey=True)
        img2 = data.imread(file)
        ax = plt.subplot(6, 3, i)
        ax.axis('off')
        img **= 0.4
        img = filter.canny(img, sigma=3.0)
        img = morphology.dilation(img, morphology.disk(4))
        img = ndimage.binary_fill_holes(img)
        img = morphology.remove_small_objects(img, 1000)
        contours = measure.find_contours(img, 0.8)
        ax.imshow(img2, aspect='auto')
        for n, contour in enumerate(contours):
            ax.plot(contour[:, 1], contour[:, 0], linewidth=1.5)
            center = (sum(contour[:, 1])/len(contour[:, 1]), sum(contour[:, 0])/len(contour[:, 0]))
            ax.scatter(center[0], center[1], color='white')
        i += 1

    plt.savefig('zad2.pdf')
Example #14
def stk_to_rois(stk, threshold, min_size, max_window=8, downscale_factor=2):
    thresholded_stk = stk > threshold
    thresholded_stk = remove_small_objects(thresholded_stk, min_size)
    distance = ndi.distance_transform_edt(thresholded_stk)
    cropped_stk = stk.copy()
    cropped_stk[np.logical_not(thresholded_stk)] = 0
    combined_stk = cropped_stk + distance/distance.max()
    local_max = peak_local_max(combined_stk, indices=False, 
                               footprint=np.ones((max_window, max_window)), 
                               labels=thresholded_stk)
    markers = ndi.label(local_max)[0]
    labels = watershed(-combined_stk, markers, mask=thresholded_stk)
    new_markers = markers.copy()
    for i in set(labels.flatten()):
        if i == 0: continue
        if np.sum(labels==i) < min_size:
            new_markers[markers==i] = 0
    labels = watershed(-combined_stk, new_markers, mask=thresholded_stk)
    labels_set = set(labels.flatten())
    rois = []
    for label in labels_set:
        if label == 0: continue
        if np.sum((labels==label).astype(int)) < min_size: continue
        nroi = np.zeros((stk.shape[0], stk.shape[1]))
        cx,cy = np.where(labels==label)
        cx,cy = int(cx.mean()), int(cy.mean())
        x,y = np.ogrid[0:nroi.shape[0], 0:nroi.shape[1]]
        r = 4
        mask =  (cx-x)**2 + (cy-y)**2 <= r*r
        nroi[mask] = 1
        #nroi[labels==label] = 1
        rois.append(zoom(nroi, downscale_factor, order=0))
    rois = np.array(rois)
    return rois, thresholded_stk, labels
def analyze_image(parent_conn, frame, image, bgnd_im, bgnd_mask):
    image = cv2.cvtColor(image, cv2.cv.CV_RGB2GRAY);
    im_diff = np.abs(bgnd_im-image.astype(np.double));
    #mask = im_diff >20;
    #mask2 = cv2.adaptiveThreshold(image.astype(np.uint8),1,cv2.ADAPTIVE_THRESH_MEAN_C, cv2.THRESH_BINARY,61,35)
    mask2 = cv2.adaptiveThreshold(im_diff.astype(np.uint8),1,cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY,61,-10)
    L = label(mask2 | bgnd_mask)
    L = morphology.remove_small_objects(L, min_size=10, connectivity=2)
    image[L==0] = 0
    
    props = regionprops(L);
    
    coord_x = [x.centroid[0] for x in props];
    coord_y = [x.centroid[1] for x in props];
    area = [x.area for x in props];
    perimeter = [x.perimeter for x in props];
    major_axis = [x.major_axis_length for x in props];
    minor_axis = [x.minor_axis_length for x in props];
    eccentricity = [x.eccentricity for x in props];
    compactness = [x.perimeter**2/x.area for x in props];
    orientation = [x.orientation for x in props];
    solidity = [x.solidity for x in props];
    
    props_list = [coord_x, coord_y, area, perimeter, 
               major_axis, minor_axis, eccentricity, compactness, 
               orientation, solidity]

    
    parent_conn.send({'frame': frame, 'image': image, 'props_list' :props_list})
    parent_conn.close()
Example #16
def segment_roi(roi):
    # step 1. phase congruency (edge detection)
    Mm = phasecong_Mm(roi)
    # step 2. hysteresis thresholding (of edges)
    B = hysthresh(Mm,HT_T1,HT_T2)
    # step 3. trim pixels off border
    B[B[:,1]==0,0]=0
    B[B[:,-2]==0,-1]=0
    B[0,B[1,:]==0]=0
    B[-1,B[-2,:]==0]=0
    # step 4. threshold to find dark areas
    dark = dark_threshold(roi, DARK_THRESHOLD_ADJUSTMENT)
    # step 5. add dark areas back to blob
    B = B | dark
    # step 6. binary closing
    B = binary_closing(B,SE3)
    # step 7. binary dilation
    B = binary_dilation(B,SE2)
    # step 8. thinning
    B = bwmorph_thin(B,3)
    # step 9. fill holes
    B = binary_fill_holes(B)
    # step 10. remove blobs smaller than BLOB_MIN
    B = remove_small_objects(B,BLOB_MIN,connectivity=2)
    # done.
    return B
Example #17
    def load_cell_image(self, sensitivity = 5., min_cell_size = 4000):
        '''Load cell image and add cells to self'''

        pic_nuclei = self.get_source_pic_nuclei()
        self.shape = pic_nuclei.shape

        nuclei = find_nuclei(pic_nuclei, sensitivity, min_cell_size)

        self.cell_detect_params = (sensitivity, min_cell_size)

        labels = measure_label(nuclei)

        labelcount = np.bincount(labels.ravel())

        bg = np.argmax(labelcount)

        labels += 1

        labels[labels == bg + 1] = 0

        labels = remove_small_objects(labels, min_cell_size)

        self.nuclei = labels

        self.create_cells_from_nuclei(pic_nuclei)

        self.rescale_nuclei()
Example #18
def segment_digits(img):

    # binarize it in case it is a grayscale image
    if not img.dtype == 'bool':
        img = img > 0

    min_size = 32
    medfilt_k = 5

    img0 = morphology.remove_small_objects(img, min_size=min_size)

    sum1 = np.sum(img0, 1)
    sum1 = signal.medfilt(sum1, medfilt_k) # smooth the accumulated profile
    bp1 = sum1 > 0

    # Get the y coordinates of the start and end points of the digits (assuming a single line of digits)
    idx_top = [i for i in range(len(bp1)) if bp1[i]>0]
    idx_bottom = [len(bp1)-i+1 for i in range(len(bp1)) if bp1[len(bp1)-i-1]>0]
    if len(idx_top) > 0 and len(idx_bottom) > 0:
        bp1[idx_top[0]:idx_bottom[0]+1] = True

    sum0 = np.sum(img0, 0)
    sum0 = signal.medfilt(sum0, medfilt_k)
    bp0 = sum0 > 0

    # Get the x coordinates of the start and end points of the digits
    idx_01_transition = [i for i in range(1, len(bp0)) if bp0[i-1]==False and bp0[i]==True]
    idx_10_transition = [i for i in range(len(bp0)-1) if bp0[i]==True and bp0[i+1]==False]
    bb=[]
    if len(idx_01_transition)==len(idx_10_transition):
        for i in range(len(idx_01_transition)):
            bb.append([idx_01_transition[i], idx_top[0], idx_10_transition[i], idx_bottom[0]])

    return bb
Example #19
def denoiseMask(mask, denoising_ratio=15):
    """
    Function to denoise a mask represented by a numpy array. The denoising is
    done with binary erosion and propagation.
    Args:
        mask (numpy array): The mask which should be denoised represented by a
            boolean numpy array.
        denoising_ratio (int): The ratio within which pixels the denoising step
            will be executed.
    Returns:
        denoised_mask (numpy array): The denoised mask represented by a boolean
            numpy array.
    """
    mask = ~mask
    # eroded_mask = scipy.ndimage.binary_erosion(
    #     mask, structure=np.ones((denoising_ratio, denoising_ratio)))
    # denoised_mask = scipy.ndimage.binary_propagation(
    #     eroded_mask, structure=np.ones((denoising_ratio, denoising_ratio)),
    #     mask=mask)
    # opened_mask = scipy.ndimage.binary_opening(
    #     mask, structure=np.ones((denoising_ratio, denoising_ratio)))
    # denoised_mask = scipy.ndimage.binary_opening(
    #     opened_mask, structure=np.ones((denoising_ratio, denoising_ratio)))
    denoised_mask = remove_small_objects(mask, denoising_ratio)
    denoised_mask = remove_small_holes(denoised_mask, denoising_ratio)
    denoised_mask = ~denoised_mask
    return denoised_mask
def blobs(image, remove_mb = None, val = 160, size = 100):
    """ Convolve a kernel on the image and a gaussian filter to highligh blobs. Find blobs using the
    Difference of Gaussian. Remove from the list of blobs the blobs that are at the membrane.
    return 3 different list
    """

    thresh = threshold_otsu(image)

    #Find all the blobs in the image using Difference of Gaussian
    blobs_in_image = feature.blob_dog(image, min_sigma=0.01,
                        max_sigma=3, threshold=thresh)
    blob_list = []
    for blob in blobs_in_image:
        y, x, r = blob
        blob_list.append((y, x))



    if remove_mb is None:
        blob_in_image_after_binary = set(blob_list)

    else:
        #Create a mask to remove blobs that are at the membrane and surrounded
        #by bright big object
        binary = image >= val*thresh/100
        binary = dilation(binary, square(3))
        binary = remove_small_objects(binary, min_size=size)
        # Create a list of coordinate with the binary image
        coor_binary = np.nonzero(binary)
        list_blob_masked = zip(*coor_binary)
        #Substract the list of coordinate from the binary image to the list of blobs
        blob_in_image_after_binary = (set(blob_list) - set (list_blob_masked))

    return blob_in_image_after_binary
Example #21
 def morphOps( imgIn, sizeSE, sizeCC ):
     imgOut = imgIn.astype(bool) #boolean image
     imgOut = ~imgOut #img negative
     imgOut = morphology.remove_small_objects( imgOut, sizeCC ) #cclargest
     SE = morphology.selem.disk( sizeSE ) #structuring element
     imgOut = morphology.closing(imgOut, SE)
     return imgOut
def cleanImage(img, min_size, scale_factor, img_otsu=None, saver=lambda n,x: x):
    img, exposure_data = normalize_exposure2(img)
    img = saver("01-exposure", img)
    img_otsu = ski.filter.threshold_otsu(img) if not img_otsu else img_otsu
    print('otsu:',img_otsu, ski.filter.threshold_otsu(img))

    img = saver("02-zoom", scipy.ndimage.zoom(img, scale_factor, order=3))
    print("shape after zoom:", img.shape)
#     img_pil = PIL.Image.fromarray(img).resize((np.array(img.shape)*scale_factor).tolist()[:2], resample=PIL.Image.BICUBIC)
#     img = saver("02-zoom", PIL2array(img_pil))
    print("img:",img.shape,img.dtype)

    img_cleaned = saver("03-bw", (img > img_otsu))
#     img_cleaned = saver("03-bw", (img > 0.2))

    dbg = DebugData()

    img_cleaned = morphology.binary_erosion(img_cleaned,morphology.disk(int(2*scale_factor)))
    img_cleaned = saver("04-erosion", img_cleaned, dbg=dbg)

    img_cleaned = morphology.remove_small_objects(img_cleaned, min_size=int(min_size*scale_factor), connectivity=2)
    img_cleaned = saver("05-remove", img_cleaned)


    cleaned_sum = np.sum(img_cleaned)
    print("img_cleaned size:",cleaned_sum)
#     if cleaned_sum < 1000 or cleaned_sum > 300000:
#         display(Image(str(dbg.saved_path)))
#         raise Exception("Image not cleaned correctly"+str(locals()))

    return img_cleaned, exposure_data
Example #23
File: image.py Project: gracz21/KCK
def load_scenes(filename):
    zipped_scenes = []
    print('Working on: ' + filename)
    img = data.imread('scenes/' + filename, as_grey=True)
    tmp = img
    tmp = filter.canny(tmp, sigma=2.0)
    tmp = ndimage.binary_fill_holes(tmp)
    #tmp = morphology.dilation(tmp, morphology.disk(2))
    tmp = morphology.remove_small_objects(tmp, 2000)
    contours = measure.find_contours(tmp, 0.8)
    ymin, xmin = contours[0].min(axis=0)
    ymax, xmax = contours[0].max(axis=0)
    if xmax - xmin > ymax - ymin:
        xdest = 1000
        ydest = 670
    else:
        xdest = 670
        ydest = 1000
    src = np.array(((0, 0), (0, ydest), (xdest, ydest), (xdest, 0)))
    dst = np.array(((xmin, ymin), (xmin, ymax), (xmax, ymax), (xmax, ymin)))
    tform3 = tf.ProjectiveTransform()
    tform3.estimate(src, dst)
    warped = tf.warp(img, tform3, output_shape=(ydest, xdest))
    tmp = filter.canny(warped, sigma=2.0)
    tmp = morphology.dilation(tmp, morphology.disk(2))
    descriptor_extractor.detect_and_extract(tmp)
    obj_key = descriptor_extractor.keypoints
    scen_desc = descriptor_extractor.descriptors
    zipped_scenes.append([warped, scen_desc, obj_key, filename])
    return zipped_scenes
def extract_yellow(rgb, depth, T_w_k):
    """
    extract yellow points and downsample
    """
        
    hsv = cv2.cvtColor(rgb, cv2.COLOR_BGR2HSV)
    h = hsv[:,:,0]
    s = hsv[:,:,1]
    v = hsv[:,:,2]

    hh = h[220:300, 550:580]
    ss = s[220:300, 550:580]
    vv = v[220:300, 550:580]

    r = rgb[220:300, 550:580, :]
    #cv2.imshow("r", r)
    #cv2.waitKey()

    #IPython.embed()

    h_mask = (h>0) & (h<40)
    s_mask = (s>20) & (s<90)
    v_mask = (v>200) & (v<255)
    yellow_mask = h_mask & s_mask & v_mask
    
    valid_mask = depth > 0
    
    xyz_k = clouds.depth_to_xyz(depth, berkeley_pr2.f)
    xyz_w = xyz_k.dot(T_w_k[:3,:3].T) + T_w_k[:3,3][None,None,:]
    
    z = xyz_w[:,:,2]   
    z0 = xyz_k[:,:,2]

    height_mask = xyz_w[:,:,2] > .50 # TODO pass in parameter
    
    good_mask = yellow_mask & height_mask
    good_mask =   skim.remove_small_objects(good_mask,min_size=64)

    if DEBUG_PLOTS:
        #cv2.imshow("z0",z0/z0.max())
        #cv2.imshow("z",z/z.max())
        cv2.imshow("hue", h_mask.astype('uint8')*255)
        cv2.imshow("sat", s_mask.astype('uint8')*255)
        cv2.imshow("val", v_mask.astype('uint8')*255)
        #cv2.imshow("yellow", yellow_mask.astype('uint8')*255)
        cv2.imshow("final",good_mask.astype('uint8')*255)
        #cv2.imshow("small", small)
        cv2.imshow("rgb", rgb)
        cv2.waitKey()

    good_xyz = xyz_w[good_mask]
    
    return clouds.downsample(good_xyz, .0125)
Example #25
def segment_cells(frame, mask=None):
    """
    Compute the initial segmentation based on ridge detection + watershed.
    This works reasonably well, but is not robust enough to use by itself.
    """
    
    blurred = filters.gaussian_filter(frame, 2)
    ridges = enhance_ridges(frame)
    
    # threshold ridge image
    thresh = filters.threshold_otsu(ridges)
    thresh_factor = 0.6
    prominent_ridges = ridges > thresh_factor*thresh
    prominent_ridges = morphology.remove_small_objects(prominent_ridges, min_size=256)
    prominent_ridges = morphology.binary_closing(prominent_ridges)
    prominent_ridges = morphology.binary_dilation(prominent_ridges)
    
    # skeletonize
    ridge_skeleton = morphology.medial_axis(prominent_ridges)
    ridge_skeleton = morphology.binary_dilation(ridge_skeleton)
    ridge_skeleton *= mask
    ridge_skeleton -= mask
    
    # label
    cell_label_im = measure.label(ridge_skeleton)
    
    # morphological closing to fill in the cracks
    for cell_num in range(1, cell_label_im.max()+1):
        cell_mask = cell_label_im==cell_num
        cell_mask = morphology.binary_closing(cell_mask, disk(3))
        cell_label_im[cell_mask] = cell_num
    
    return cell_label_im 
Example #26
    def __call__(self, image, window_size=10, threshold=0, fill_holes=True,
                 outline_smoothing=2, remove_borderobjects=True, size_min=1,
                 *args, **kw):

        thresh = threshold_adaptive(image, block_size=window_size,
                                    offset=-1*threshold)

        if outline_smoothing >= 1:
            thresh = outlineSmoothing(thresh, outline_smoothing)

        thresh = remove_small_objects(thresh, size_min)

        seeds = ndi.label(clear_border(~thresh))[0]
        thresh = ndi.binary_fill_holes(thresh)
        smask = seeds.astype(bool)

        # objects don't touch the border after outline smoothing
        if remove_borderobjects:
            thresh = clear_border(thresh)

        img = np.zeros(thresh.shape)
        img[~smask] = 1
        edt = ndi.morphology.distance_transform_edt(img)
        edt -= ndi.morphology.distance_transform_edt(seeds)

        labels = watershed(edt, seeds)
        labels[smask] = 0
        labels[~thresh] = 0

        return labels
Example #27
 def func(frame):
     frame = frame.astype(bool)
     binary = remove_small_objects(frame, smooth_size)
     #binary = ndi.binary_fill_holes(binary)
     #opened = binary_opening(frame, disk(smooth_size))
     #opened = opened & frame
     return binary
Example #28
 def detect_tc_in_step(self, nc, i, ecc_th=0.75):
     mask = self.ocean_mask.copy()
     uas = nc.variables["uas"][i].squeeze()
     vas = nc.variables["vas"][i].squeeze()
     wind_speed = numpy.sqrt(uas**2+vas**2)
     wind_mask = logical_and(self.ocean_mask, wind_speed > 20.)
     temp = nc.variables["ts"][i].squeeze()
     temp_mask = logical_and(self.ocean_mask, temp > 298.15)
     ps = nc.variables["ps"][i].squeeze()
     ps_mask = logical_and(self.ocean_mask, ps < 1005)
     mask = logical_or(wind_mask, logical_and(temp_mask, ps_mask))
     mask = remove_small_objects(mask, 20)
     lbl = label(mask)
     props_windspeed = regionprops(lbl, wind_speed)
     props_pressure = regionprops(lbl, ps)
     centroids = []
     for windspeed, pressure in zip(props_windspeed, props_pressure):
         max_wind_speed = windspeed["max_intensity"]
         min_pressure = pressure["min_intensity"]
         if windspeed["eccentricity"] > ecc_th or max_wind_speed<20.:
             lbl[lbl == windspeed["label"]]=0
         else:
             y, x = windspeed["centroid"]
             lon = float(self.idx_to_lon(x, y))
             lat = float(self.idx_to_lat(x, y))
             centroids.append([lon, lat, max_wind_speed, min_pressure])
     mask = lbl>0
     return mask, centroids
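The pattern above, threshold a field, drop small regions, then read per-region statistics of other fields through regionprops' intensity image, also works stand-alone. A hedged sketch with synthetic arrays (not the model output used above):

import numpy as np
from skimage.measure import label, regionprops
from skimage.morphology import remove_small_objects

rng = np.random.default_rng(0)
wind_speed = rng.random((100, 100)) * 40.0
mask = remove_small_objects(wind_speed > 20.0, 20)

for region in regionprops(label(mask), intensity_image=wind_speed):
    print(region.label, region.centroid,
          region.max_intensity, region.eccentricity)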
def pred_f(image, stepSize=stepSize, windowSize=windowSize, param=param, 
           marge=None, marge_cut_off=0, ClearSmallObjects=20, list_f=list_f):
    caffe.set_mode_cpu()
    cn_1 = "FCN_0.01_0.99_0.0005"
    wd_1 = "/share/data40T_v2/Peter/pretrained_models"
    net_1 = GetNet(cn_1, wd_1)
    cn_2 = "DeconvNet_0.01_0.99_0.0005"
    net_2 = GetNet(cn_2, wd_1)
    prob_image, bin_image, thresh = pred_image_from_two_nets(image, net_1, net_2, stepSize, windowSize, 
                                                               param=param, marge=marge, method="avg", 
                                                               ClearBorder="Reconstruction")

    segmentation_mask = DynamicWatershedAlias(prob_image, param)
    segmentation_mask = remove_small_objects(segmentation_mask, ClearSmallObjects)
    table = bin_analyser(image, segmentation_mask, list_f, marge_cut_off)
    segmentation_mask[segmentation_mask > 0] = 1.


    contours = dilation(segmentation_mask, disk(2)) - \
        erosion(segmentation_mask, disk(2))
    x, y = np.where(contours == 1)
    image[x, y] = np.array([0, 0, 0])


    segmentation_mask = img_as_ubyte(segmentation_mask)
    segmentation_mask[segmentation_mask > 0] = 255
    if marge_cut_off != 0:
         c = marge_cut_off
         image = image[c:-c, c:-c]
         segmentation_mask = segmentation_mask[c:-c, c:-c]
         prob_image = prob_image[c:-c, c:-c]
    return image, table, segmentation_mask, prob_image
def calculateTestedClassV2(testedimg, classId, thresholdarea):
    #this version 2 is trying to remove small tips classified as noise
    #img = Image.open(imgfile);
    img = testedimg
    npimg = np.array(img, dtype=np.uint8);
    #npimg = testedimg.copy()
    
    filtereddataclasses = npimg
    
    #make sure to keep only needed class
    filtereddataclasses[npimg != classId] = 0
    
    #find all connected components    
    #smooth the image to remove small objects?
    blurradius = 0.5
    #threshold = 0
    
    if useSmoothed == 1:
        smoothedimg = ndimage.gaussian_filter(filtereddataclasses, blurradius)
    else:
        smoothedimg = filtereddataclasses
    
    morphology.remove_small_objects(smoothedimg, 50, 4, True)
    
    labeled, nr_objects = ndimage.label(smoothedimg > 0)
    
    regionproperties = measure.regionprops(labeled, None, False)
    
    allarea = [r.area for r in regionproperties]
    
    removedindex = [index for index, area in enumerate(allarea) if area <= thresholdarea]
    
    filteredlabeled = np.array(labeled, dtype=np.uint8);
    
    for index in range(0, len(removedindex)):
        filteredlabeled[labeled == removedindex[index] + 1] = 0 # +1 because the background labeled 0
    
    smoothedimg[filteredlabeled == 0] = 0
    
    #label again
    labeled, nr_objects = ndimage.label(smoothedimg > 0)
    
    print('Number of items (classid: ' + str(classId) + ') detected: ' + str(nr_objects))
    
    centreobjects = center_of_mass(labeled, labels=labeled, index=range(1, nr_objects+1))
    
    return centreobjects  # for each item, the format is y, x
Example #31
def threshold_nuclei(img_dapi,
                     t_dapi=None,
                     fiber_mask=None,
                     labeled=True,
                     multiplier=0.75,
                     z_first=False,
                     verbose=False):
    """Segment nuclei in a 3D DAPI image.

    Return a mask containing all nuclei from a 3D DAPI image. If `fiber_mask` 
    is provided, exclude any nuclei that do not intersect the fiber. If 
    `t_dapi` is provided, use that for thresholding; otherwise, determine the 
    threshold value automatically using Otsu's method.

    Parameters:

    img_dapi: np.array, 3D
        A 3D numpy array containing signal from the DAPI channel.

    
    Optional:

    t_dapi: float (default None)
        Threshold to use for binarization of the DAPI image, from 0 to 1. 
        If not set, automatically determine this threshold using Otsu's 
        method. 

    fiber_mask: np.array, 3D (default None)
        A 3D numpy array (`bool` or `int` type) containing a truth mask of the 
        muscle fiber. Used to exclude nuclei outside the fiber.
    
    labeled: bool (default True)
        If True, return a mask with each nucleus labeled as an integer and the 
        background labeled as 0. Otherwise, return a mask with all nuclear 
        pixels labeled as 1 and background as 0.
    
    multiplier: float (default 0.75)
        If using automatic thresholding of DAPI by Otsu's method, multiply the 
        threshold by this value before binarizing the DAPI channel.

    z_first: bool (default False)
        If True, treat `img_dapi` as image with dimensions [z, x, y].

    verbose: bool (default False)
        If True, print progress to stdout.
        
    """

    if verbose:
        print('\nSegmenting nuclei...')

    # image preprocessing
    if z_first:
        sig = (2, 20, 20)
    else:
        sig = (20, 20, 2)

    img_dapi = su.normalize_image(img_dapi)
    img_blur = filters.gaussian(img_dapi, sig)

    # get threshold using method determined from value of t_dapi
    if t_dapi is None:
        thresh = filters.threshold_otsu(img_blur) * multiplier
    elif type(t_dapi) == float:
        thresh = t_dapi
    else:
        raise TypeError('`t_dapi` argument not recognized. \
            Must be either float or None.')

    if verbose:
        print('DAPI threshold = ' + str(thresh))

    # binarize and clean up mask
    bin_dapi = np.where(img_dapi > thresh, 1, 0)
    bin_dapi = morphology.remove_small_objects(bin_dapi.astype(bool), 2048)
    bin_dapi = morphology.remove_small_holes(bin_dapi.astype(bool), 2048)
    nuclei_labeled, n_nuc = morphology.label(bin_dapi, return_num=True)

    if fiber_mask is not None:
        if verbose:
            print(
                'Removing nuclei that are not connected to main fiber segment...'
            )

        for i in range(1, n_nuc + 1):
            overlap = np.logical_and(
                np.where(nuclei_labeled == i, True, False),
                fiber_mask.astype(bool))
            if np.count_nonzero(overlap) == 0:
                nuclei_labeled[nuclei_labeled == i] = 0

    if labeled:
        return nuclei_labeled
    else:
        return np.where(nuclei_labeled > 0, 1, 0)
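The mask clean-up used above (binarize, drop small objects and holes, label the remainder) behaves the same on 3D volumes as on 2D images. A small self-contained sketch on a synthetic random volume (an assumption, not DAPI data):

import numpy as np
from skimage import morphology

vol = np.random.rand(16, 64, 64)
bin_dapi = vol > 0.95
bin_dapi = morphology.remove_small_objects(bin_dapi, 64)
bin_dapi = morphology.remove_small_holes(bin_dapi, 64)
nuclei_labeled, n_nuc = morphology.label(bin_dapi, return_num=True)
print(n_nuc)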
Example #32
def test_single_label_warning():
    image = np.array([[0, 0, 0, 1, 0],
                      [1, 1, 1, 0, 0],
                      [1, 1, 1, 0, 0]], int)
    with expected_warnings(['use a boolean array?']):
        remove_small_objects(image, min_size=6)
Example #33
def test_in_place():
    observed = remove_small_objects(test_image, min_size=6, in_place=True)
    assert_equal(observed is test_image, True,
                 "remove_small_objects in_place argument failed.")
Example #34
def test_float_input():
    float_test = np.random.rand(5, 5)
    with testing.raises(TypeError):
        remove_small_objects(float_test)
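Taken together, these tests pin down the input contract: remove_small_objects expects a boolean mask or an integer label image, warns when only one label is present, and rejects float input. A short hedged illustration (synthetic array):

import numpy as np
from skimage.morphology import remove_small_objects

bool_mask = np.array([[0, 0, 0, 1, 0],
                      [1, 1, 1, 0, 0],
                      [1, 1, 1, 0, 0]], bool)
print(remove_small_objects(bool_mask, min_size=6))
# Float arrays raise TypeError; arrays with negative values raise ValueError.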
Example #35
def proc_np_hv(pred, marker_mode=2, energy_mode=2, rgb=None):
    """
    Process Nuclei Prediction with XY Coordinate Map

    Args:
        pred: prediction output, assuming
                channel 0 contains the probability map of nuclei,
                channel 1 contains the regressed X-map,
                channel 2 contains the regressed Y-map
    """
    assert marker_mode == 2 or marker_mode == 1, 'Only support 1 or 2'
    assert energy_mode == 2 or energy_mode == 1, 'Only support 1 or 2'

    blb_raw = pred[..., 0]
    h_dir_raw = pred[..., 1]
    v_dir_raw = pred[..., 2]

    ##### Processing
    blb = np.copy(blb_raw)
    blb[blb >= 0.5] = 1
    blb[blb < 0.5] = 0

    blb = measurements.label(blb)[0]
    blb = remove_small_objects(blb, min_size=10)
    blb[blb > 0] = 1  # back ground is 0 already
    #####

    if energy_mode == 2 or marker_mode == 2:
        h_dir = cv2.normalize(h_dir_raw,
                              None,
                              alpha=0,
                              beta=1,
                              norm_type=cv2.NORM_MINMAX,
                              dtype=cv2.CV_32F)
        v_dir = cv2.normalize(v_dir_raw,
                              None,
                              alpha=0,
                              beta=1,
                              norm_type=cv2.NORM_MINMAX,
                              dtype=cv2.CV_32F)

        sobelh = cv2.Sobel(h_dir, cv2.CV_64F, 1, 0, ksize=21)
        sobelv = cv2.Sobel(v_dir, cv2.CV_64F, 0, 1, ksize=21)

        sobelh = 1 - (cv2.normalize(sobelh,
                                    None,
                                    alpha=0,
                                    beta=1,
                                    norm_type=cv2.NORM_MINMAX,
                                    dtype=cv2.CV_32F))
        sobelv = 1 - (cv2.normalize(sobelv,
                                    None,
                                    alpha=0,
                                    beta=1,
                                    norm_type=cv2.NORM_MINMAX,
                                    dtype=cv2.CV_32F))

        overall = np.maximum(sobelh, sobelv)
        overall = overall - (1 - blb)
        overall[overall < 0] = 0

        if energy_mode == 2:
            dist = (1.0 - overall) * blb
            ## nuclei values form mountains, so invert to get basins
            dist = -cv2.GaussianBlur(dist, (3, 3), 0)

        if marker_mode == 2:
            overall[overall >= 0.4] = 1
            overall[overall < 0.4] = 0

            marker = blb - overall
            marker[marker < 0] = 0
            marker = binary_fill_holes(marker).astype('uint8')
            kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))
            marker = cv2.morphologyEx(marker, cv2.MORPH_OPEN, kernel)
            marker = measurements.label(marker)[0]
            marker = remove_small_objects(marker, min_size=10)

    if energy_mode == 1:
        dist = h_dir_raw * h_dir_raw + v_dir_raw * v_dir_raw
        dist[blb == 0] = np.amax(dist)
        # nuclei values are already basins
        dist = filters.maximum_filter(dist, 7)
        dist = cv2.GaussianBlur(dist, (3, 3), 0)

    if marker_mode == 1:
        h_marker = np.copy(h_dir_raw)
        v_marker = np.copy(v_dir_raw)
        h_marker = np.logical_and(h_marker < 0.075, h_marker > -0.075)
        v_marker = np.logical_and(v_marker < 0.075, v_marker > -0.075)
        marker = np.logical_and(h_marker > 0, v_marker > 0) * blb
        marker = binary_dilation(marker, iterations=2)
        marker = binary_fill_holes(marker)
        marker = measurements.label(marker)[0]
        marker = remove_small_objects(marker, min_size=10)

    proced_pred = watershed(dist, marker, mask=blb)

    return proced_pred
Example #36
    int_img = f.data.copy()
    int_img[:, 1] /= np.max(int_img[:, 1])
    int_img[:, 2] /= np.max(int_img[:, 2])
    int_img = int_img[:, 1:]
    int_img = np.max(int_img, axis=1)
    int_img *= np.iinfo(np.uint16).max
    int_img = int_img.astype(np.uint16)

    #del f

    from skimage.morphology import binary_closing, binary_opening
    ### Preprocessing
    footprint = get_footprint(3, 2)
    mask = int_img < 1. * mh.otsu(int_img, True)
    from skimage.morphology import remove_small_objects
    mask = remove_small_objects(mask, min_size=200, connectivity=2)
    #mask = binary_closing(mask, footprint)
    #mask = binary_opening(mask, footprint)
    int_img[mask] = 0

    smooth_img = int_img.copy()
    smooth_img = median_filter(smooth_img,
                               size=1,
                               footprint=get_footprint(3, 2))
    #    smooth_img = median_filter(smooth_img, size=1, footprint=get_footprint(3, 2))

    smooth_img = gaussian_filter(smooth_img, sigma=[.5, 1, 1])
    smooth_img = gaussian_filter(smooth_img, sigma=[.5, 1, 1])
    smooth_img = gaussian_filter(smooth_img,
                                 sigma=[.5 * 2 / 3, 1 * 2 / 3, 1 * 2 / 3])
    smooth_img = gaussian_filter(smooth_img,
Example #37
def test_negative_input():
    negative_int = np.random.randint(-4, -1, size=(5, 5))
    with testing.raises(ValueError):
        remove_small_objects(negative_int)
Example #38
File: Stacks.py Project: jungbt/NMB
 def segmentation(self,
                  out_dir,
                  sampling_rate=0.01,
                  clusters=3,
                  overwrite=False):
     """
      Segmentation of RGB histological images using a Gaussian Mixture Model.
      Slices inside the stack will be masked by the relevant cluster and written to out_dir.
      Slices in the Stack object will be replaced with their segmented versions.
      Lower the sampling rate to reduce processing time.
     Defaults to a sampling rate of 1% and 3 clusters.
     """
     out_dir = os.path.abspath(out_dir) + '/'
     if overwrite == False:
         #Check if all relevant files exist.
         for i in range(len(self.slices)):
             if not os.path.isfile(out_dir + self.slices[i].name):
                 break
         else:
             print(
                 ' - All Segmented Files Exist. Utilizing currently existing data.'
             )
             #Alter Stack data to include segmented files.
             for i in range(len(self.slices)):
                 self.slices[i].rename(out_dir, self.slices[i].name)
             self.dir = out_dir
             return
     #Start Gaussian Mixture Models
     gauss = GaussianMixture(clusters)
     gauss_sample = np.zeros([1, 3], 'uint8')
     print('Sampling Image Data')
     #Convert subset of image data from RGB to a 1D LAB array
     for i in range(len(self.slices)):
         print(self.slices[i].name)
         img = io.imread(self.slices[i].path)
         img = img[:, :, :3]
         img_1D = img.reshape(img.shape[0] * img.shape[1], 3)
         img_1D = img_1D[np.random.choice(list(range(len(img_1D))),
                                          size=int(
                                              len(img_1D) * sampling_rate))]
         lab_1D = color.rgb2lab(
             img_1D.reshape(img_1D.shape[0], 1, img_1D.shape[1]))
         gauss_sample = np.r_[gauss_sample,
                              lab_1D.reshape(img_1D.shape[0], 3)]
     print('Fitting Gaussian Mixed Model')
     #Fit model for all Slices in Stack.
     gauss.fit(gauss_sample)
     print('Applying Gaussian Mixed Model')
     #Open images and mask by a specific cluster.
     ##NOTE 12/23/2017:
     ###NEED TO INCLUDE SUPPORT FOR MULTIPLE CLUSTERS AT A LATER DATE.
     ###CLUSTER SELECTION REQUIRES USER INPUT. THIS WILL NOT WORK ON BIOWULF.
     ###WORKAROUND: RUN SEGMENTATION INTERACTIVELY, THEN RUN REST OF PIPELINE
     ###ON SBATCH WITH pipe.input.overwrite = False
     for i in range(len(self.slices)):
         #open and modify image data for GMM fit
         img = io.imread(self.slices[i].path)
         img = img[:, :, :3]
         lab = color.rgb2lab(img[:, :, :3])
         img_1D = lab.reshape(lab.shape[0] * lab.shape[1], 3)
         #Generate mask from clusters
         mask_1D = gauss.predict(img_1D)
         mask = mask_1D.reshape(lab.shape[0], lab.shape[1])
         #First instance: User input to identify cluster containing histology
         if i == 0:
             for m in range(clusters):
                 #Generate boolean mask for each cluster.
                 mask_binary = np.zeros((mask.shape[0], mask.shape[1]),
                                        dtype=bool)
                 mask_binary[mask != m] = False
                 mask_binary[mask == m] = True
                 mask_binary = mask_binary.reshape(mask.shape)
                 mask_binary_d = remove_small_objects(
                     mask_binary.astype(bool), min_size=64)
                 mask_binary_d = binary_dilation(mask_binary_d).astype(
                     img.dtype)
                 #Generate masked images from each cluster.
                 print('Cluster {}'.format(m))
                 masked_rgb = np.array(np.zeros(img.shape))
                 for j in range(clusters):
                     masked_rgb[:, :, j] = mask_binary_d * img[:, :, j]
                 plt.imshow(masked_rgb.astype(img.dtype),
                            cmap=plt.cm.binary)
                 plt.show()
             #Prompt user to select cluster.
             k = input("Which cluster contains histology?")
             k = int(k)
         #Generate boolean mask for selected cluster.
         mask_binary = np.zeros((mask.shape[0], mask.shape[1]), dtype=bool)
         mask_binary[mask != k] = False
         mask_binary[mask == k] = True
         mask_binary = mask_binary.reshape(mask.shape)
         mask_binary_d = remove_small_objects(mask_binary.astype(bool),
                                              min_size=64)
         mask_binary_d = binary_dilation(mask_binary_d).astype(img.dtype)
         #Mask image by boolean mask
         masked_rgb = np.array(np.zeros(img.shape))
         for j in range(3):
             masked_rgb[:, :, j] = mask_binary_d * img[:, :, j]
         plt.imsave(out_dir + self.slices[i].name,
                    masked_rgb.astype(img.dtype))
         #Modify Stack data to incorporate new segmented data.
         self.slices[i].rename(out_dir, self.slices[i].name)
     self.dir = out_dir
Example #39
    def postfilter(self):
        """Tidy the output bec zones by applying several filters:
        - majority
        - noise
        - area closing (fill in 0 areas created by noise filter)
        - majority (again) to tidy edge effects created by area_closing()
        - noise (again) to remove any noise created by 2nd majority
        """
        # shortcuts
        config = self.config
        data = self.data

        # before performing the majority filter, group high elevation
        # labels across rule polygons (alpine, parkland, woodland)
        data["becinit_grouped"] = data["becinit"].copy()

        # define new becvalues for aggregated high elevation labels
        # generate these dynamically based on current max value because using
        # arbitrarily large values decrease performance of scikit-image rank
        # filters (majority)
        if len(self.high_elevation_types) >= 1:

            max_value = data["becmaster"]["becvalue"].max()
            high_elevation_aggregates = {
                "alpine": max_value + 1,
                "parkland": max_value + 2,
                "woodland": max_value + 3,
            }
            for key in high_elevation_aggregates:
                if key in self.high_elevation_types:
                    for becvalue in self.high_elevation_dissolves[key]:
                        data["becinit_grouped"] = np.where(
                            data["becinit_grouped"] == becvalue,
                            high_elevation_aggregates[key],
                            data["becinit_grouped"],
                        )

        # ----------------------------------------------------------------
        # majority filter
        # ----------------------------------------------------------------
        LOG.info("Running majority filter")
        data["majority"] = np.where(
            data["slope"] <
            config["majority_filter_steep_slope_threshold_percent"],
            majority(
                data["becinit_grouped"],
                morphology.rectangle(nrows=self.filtersize_low,
                                     ncols=self.filtersize_low),
            ),
            majority(
                data["becinit_grouped"],
                morphology.rectangle(nrows=self.filtersize_steep,
                                     ncols=self.filtersize_steep),
            ),
        )

        # to ungroup the high elevation values while retaining the result of
        # the majority filter, loop through the rule polygons and re-assign
        # the becvalues
        data["postmajority"] = data["majority"].copy()

        for zone in self.high_elevation_types:
            for lookup in [
                    r for r in self.high_elevation_merges if r["type"] == zone
            ]:
                data["postmajority"][
                    (data["ruleimg"] == lookup["rule"])
                    & (data["majority"] == high_elevation_aggregates[zone]
                       )] = lookup["becvalue"]

        # ----------------------------------------------------------------
        # Basic noise filter
        # Remove holes < the noise_removal_threshold within each zone
        # ----------------------------------------------------------------
        LOG.info("Running noise removal filter")

        # convert noise_removal_threshold value from ha to n cells
        noise_threshold = int((config["noise_removal_threshold_ha"] * 10000) /
                              (config["cell_size_metres"]**2))

        # initialize the output raster for noise filter
        data["noise"] = np.zeros(shape=self.shape, dtype="uint16")

        # process each non zero becvalues
        for becvalue in [v for v in self.beclabel_lookup if v != 0]:

            # extract given becvalue
            X = np.where(data["postmajority"] == becvalue, 1, 0)

            # fill holes, remove small objects
            Y = morphology.remove_small_holes(
                X, noise_threshold, connectivity=config["cell_connectivity"])
            Z = morphology.remove_small_objects(
                Y, noise_threshold, connectivity=config["cell_connectivity"])

            # insert values into output
            data["noise"] = np.where(Z != 0, becvalue, data["noise"])

        # ----------------------------------------------------------------
        # Fill holes introduced by noise filter
        #
        # The noise filter removes small holes / objects surrounded by
        # contiguous zones.
        # When a small area is bordered by more than 1 becvalue, it does not
        # get filled and leaves a hole.
        # Fill these holes using the distance transform (as done with
        # expansion of rule polys). Restrict the expansion to within the rule
        # polys only, otherwise the results bleed to the edges of the extent
        # (note that this removes need for area closing, edges are filled too)
        # ----------------------------------------------------------------
        a = np.where(data["noise"] == 0, 1, 0)
        b, c = ndimage.distance_transform_edt(a, return_indices=True)
        data["noise_fill"] = np.where(
            (data["noise"] == 0) & (data["ruleimg"] != 0),
            data["noise"][c[0], c[1]],
            data["noise"],
        )

        # ----------------------------------------------------------------
        # High elevation noise removal
        # Process alpine / parkland / woodland / high elevation labels
        # and merge them with the label below if not of sufficient size
        # ----------------------------------------------------------------
        # initialize output image
        data["highelev"] = data["noise_fill"].copy()

        # convert high_elevation_removal_threshold value from ha to n cells
        high_elevation_removal_threshold = int(
            (self.config["high_elevation_removal_threshold_ha"] * 10000) /
            (self.config["cell_size_metres"]**2))

        # remove high elevation noise only if high elevation types are present
        if len(self.high_elevation_types) >= 1:

            # Because we are finding noise by aggregating and finding holes,
            # iterate through all but the lowest high elevation type.
            dissolve_types = list(self.high_elevation_dissolves.keys())
            for i, highelev_type in enumerate(dissolve_types[:-1]):
                LOG.info(
                    "Running high_elevation_removal_threshold on {}".format(
                        highelev_type))

                # Extract area of interest
                # eg, Find and aggregate all parkland values - holes within the
                # created patches can be assumed to be alpine, so we can fill
                # holes < area threshold

                # find all becvalues of zone below zone of interest
                # (all parkland becvalues if we are eliminating alpine)
                to_agg = self.high_elevation_dissolves[dissolve_types[i + 1]]

                # aggregate the areas, creating a boolean array
                X = np.isin(data["highelev"], to_agg)

                # remove small holes (below our threshold) within the boolean array
                Y = morphology.remove_small_holes(
                    X,
                    high_elevation_removal_threshold,
                    connectivity=config["cell_connectivity"],
                )

                # find the difference
                # (just fill the holes, don't write the entire zones)
                Z = np.where((X == 0) & (Y == 1), 1, 0)

                # note that for QA, we could add  X/Y/Z arrays to the data dict
                # something like this, - they'll get written to temp
                # data[highelev_type+"_X"] = X
                # data[highelev_type+"_Y"] = Y

                # remove the small areas in the output image by looping through
                # the merges for the given type, this iterates through the
                # rule polygons.
                for merge in [
                        m for m in self.high_elevation_merges
                        if m["type"] == highelev_type
                ]:
                    data["highelev"] = np.where(
                        (Z == 1) & (data["ruleimg"] == merge["rule"]),
                        merge["becvalue_target"],
                        data["highelev"],
                    )

        # ----------------------------------------------------------------
        # Convert to poly
        # ----------------------------------------------------------------
        fc = FeatureCollection([
            Feature(geometry=s, properties={"becvalue": v})
            for i, (s, v) in enumerate(
                shapes(
                    data["highelev"],
                    transform=self.transform,
                    connectivity=(config["cell_connectivity"] * 4),
                ))
        ])
        data["becvalue_polys"] = gpd.GeoDataFrame.from_features(fc)

        # add beclabel column to output polygons
        data["becvalue_polys"]["BGC_LABEL"] = data["becvalue_polys"][
            "becvalue"].map(self.beclabel_lookup)

        # set crs
        data["becvalue_polys"].crs = "EPSG:3005"

        # clip to aggregated rule polygons
        # (buffer the dissolved rules out and in to ensure no small holes
        # are created by dissolve due to precision errors)
        data["rulepolys"]["rules"] = 1
        X = data["rulepolys"].dissolve(by="rules").buffer(0.01).buffer(-0.01)
        Y = gpd.GeoDataFrame(X).rename(columns={
            0: "geometry"
        }).set_geometry("geometry")
        data["becvalue_polys"] = gpd.overlay(data["becvalue_polys"],
                                             Y,
                                             how="intersection")

        # add area_ha column
        data["becvalue_polys"]["AREA_HA"] = (
            data["becvalue_polys"]["geometry"].area / 10000)

        # round to 1 decimal place
        data["becvalue_polys"].AREA_HA = data["becvalue_polys"].AREA_HA.round(
            1)

        # remove rulepoly fields
        data["becvalue_polys"] = data["becvalue_polys"][[
            "BGC_LABEL", "AREA_HA", "becvalue", "geometry"
        ]]

        self.data = data
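# A minimal, self-contained sketch of the hole-filling pattern used above:
# aggregate a zone into a boolean array, fill holes below the size threshold,
# and keep only the cells that were filled (the difference), leaving the rest
# of the raster untouched. The arrays here are toy data, not the project's
# "highelev" raster.
import numpy as np
from skimage import morphology

X = np.zeros((7, 7), dtype=bool)
X[1:6, 1:6] = True
X[3, 3] = False                      # a one-pixel hole inside the patch
Y = morphology.remove_small_holes(X, area_threshold=4, connectivity=1)
Z = Y & ~X                           # only the cells that were filled
assert Z.sum() == 1 and Z[3, 3]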
cov_10cm.values = (tch.values >= 0.1).astype('float')
cov_10cm.values[np.isnan(tch.values)] = np.nan
io.write_xarray_to_GeoTiff(cov_10cm, '%s_cover10cm_1m' % site)

# Identify "trees"
# - define as contiguous regions with canopy cover >2m comprising >8 pixels
#   (~3m diameter)
# - use a two-step procedure:
#       (i)  fill "holes"
#       (ii) remove small objects, using connectivity defined by direct
#            connections in row or column only (ignoring diagonals)
trees = cov_2m.copy(deep=True)
trees.values[trees.values < 0] = 0
trees.values = morphology.remove_small_holes(trees.values.astype(bool),
                                             area_threshold=1)
trees.values = morphology.remove_small_objects(trees.values,
                                               min_size=8).astype('float')
trees.values[np.isnan(tch.values)] = np.nan
io.write_xarray_to_GeoTiff(trees, '%s_trees2m_1m' % site)

trees = None
cov_2m = None
cov_10cm = None
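# A short illustration of the two-step cleanup described above (hole filling
# followed by small-object removal with 4-connectivity); the array and the
# thresholds below are toy values, not the site rasters.
import numpy as np
from skimage import morphology

canopy = np.zeros((10, 10), dtype=bool)
canopy[1:6, 1:6] = True
canopy[3, 3] = False                 # hole inside the "tree"
canopy[8, 8] = True                  # isolated single pixel of noise
step1 = morphology.remove_small_holes(canopy, area_threshold=2)
step2 = morphology.remove_small_objects(step1, min_size=8, connectivity=1)
assert step2[3, 3] and not step2[8, 8]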

# Use gdal to regrid
os.system(
    'gdalwarp -overwrite -r average -te %f %f %f %f -tr %f %f %s_cover2m_1m.tif %s_cover2m_%.0fm.tif'
    % (W, S, E, N, dx, dy, site, site, dx))
os.system(
    'gdalwarp -overwrite -r max -te %f %f %f %f -tr %f %f %s_trees2m_1m.tif %s_trees2m_%.0fm.tif'
    % (W, S, E, N, dx, dy, site, site, dx))
os.system(
Example #41
0
                                   lambda1=1,
                                   lambda2=1,
                                   tol=1e-3,
                                   max_iter=200,
                                   dt=0.5,
                                   init_level_set="checkerboard",
                                   extended_output=True)
    img_f_t_average_cv_1 = img_f_t_average_cv[0]
    img_f_t_average_cv_1 = img_f_t_average_cv_1[extend_window:extend_window +
                                                X + 1,
                                                extend_window:extend_window +
                                                Y + 1]
    #Save_as_tiff('save_path', img_f_t_average_cv_1, 'CR2_LHF_Average_threshold_average_CV')

    img_f_t_average_cv_rm = mor.remove_small_objects(img_f_t_average_cv_1,
                                                     min_size=600,
                                                     connectivity=1,
                                                     in_place=False)
    #Save_as_tiff('save_path', img_f_t_average_cv_rm, 'CR2_LHF_Average_threshold_average')

    fig, axes = plt.subplots(2, 3, figsize=(9, 3), sharex=True, sharey=True)
    ax = axes.ravel()

    ax[0].imshow(img, cmap=plt.cm.gray)
    ax[0].set_title('original image')
    ax[1].imshow(img_f, cmap=plt.cm.gray)
    ax[1].set_title('binary image')
    ax[2].imshow(img_f_t, cmap=plt.cm.gray)
    ax[2].set_title('LHF')
    ax[3].imshow(img_f_t_average, cmap=plt.cm.gray)
    ax[3].set_title('average Filtering')
    ax[4].imshow(img_f_t_average_cv[0], cmap=plt.cm.gray)
Example #42
0
def gen_crop_mask(image):
    '''

    :param image: image data in RGB space (converted to XYZ internally)
    :return: mask data for cropping
    '''
    # #change color space
    # hsv_img_dat = color.rgb2hsv(image)
    # #plot pic relevance
    # imsave("h_space.bmp",hsv_img_dat[:,:,0])
    # imsave("s_space.bmp",hsv_img_dat[:,:,1])
    # imsave("v_space.bmp",hsv_img_dat[:,:,2])

    # #change color space
    # hsv_img_dat = color.rgb2hed(image)
    # #plot pic relevance
    # imsave("h_space.bmp",hsv_img_dat[:,:,0])
    # imsave("e_space.bmp",hsv_img_dat[:,:,1])
    # imsave("d_space.bmp",hsv_img_dat[:,:,2])

    #change color space
    xyz_img_dat = color.rgb2xyz(image)
    #plot pic relevance
    # imsave("x_space.bmp",xyz_img_dat[:,:,0])
    # imsave("y_space.bmp",xyz_img_dat[:,:,1])
    # imsave("z_space.bmp",xyz_img_dat[:,:,2])

    # np.save("xyz.npy", xyz_img_dat[:,:,2])

    mask = (xyz_img_dat[:, :, 2] < 0.9)
    # mask = (hsv_img_dat[:, :, 1] > 0.2)

    mask = mask.astype(np.int32)

    ##get central of label#####
    # imsave("mask1.bmp",mask)
    # print(np.shape(mask))

    filled_image = nd.morphology.binary_fill_holes(mask)
    imsave("mask_fill.bmp", filled_image)

    filled_image_re = morphology.remove_small_objects(filled_image,
                                                      min_size=50,
                                                      connectivity=1)
    imsave("mask_fill_re.bmp", filled_image_re)

    # mask_label = measure.label(filled_image)
    # max_label = np.amax(mask_label)
    # print(max_label)
    #
    # mask_label = measure.label(filled_image_re)
    # max_label = np.amax(mask_label)
    #
    # print(max_label)

    #repet error
    # for i in range(0, max_label):
    #     region = (properties[i].centroid[0]*math.pow(2,3), properties[i].centroid[1]*math.pow(2,3))
    #     cor.append(region)
    #
    # patch_xml.generate_xml_from_coords(cor, "./a.xml", 128)

    return filled_image_re
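# A possible way to call gen_crop_mask on an RGB tile; the file names below are
# hypothetical, and the boolean mask is simply scaled to 0/255 for saving.
from skimage import io

rgb_tile = io.imread("slide_tile.bmp")        # hypothetical RGB input image
crop_mask = gen_crop_mask(rgb_tile)           # boolean mask of tissue regions
io.imsave("crop_mask_out.bmp", crop_mask.astype("uint8") * 255)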
Example #43
0
def test_two_connectivity():
    expected = np.array([[0, 0, 0, 1, 0],
                         [1, 1, 1, 0, 0],
                         [1, 1, 1, 0, 0]], bool)
    observed = remove_small_objects(test_image, min_size=7, connectivity=2)
    assert_array_equal(observed, expected)
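# For contrast, a small sketch of how the connectivity setting changes the
# result on the same image: with the default 1-connectivity the diagonally
# attached pixel does not count towards the cluster, so it is dropped by a
# min_size=6 filter, while 2-connectivity keeps it.
import numpy as np
from skimage.morphology import remove_small_objects

img = np.array([[0, 0, 0, 1, 0],
                [1, 1, 1, 0, 0],
                [1, 1, 1, 0, 0]], bool)
kept_c1 = remove_small_objects(img, min_size=6, connectivity=1)
kept_c2 = remove_small_objects(img, min_size=6, connectivity=2)
assert not kept_c1[0, 3] and kept_c2[0, 3]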
Example #44
0
    """
    assert len(x.shape) == nshape, 'This input array must be a {X}-D \
    array'.format(X=nshape)

    if x.dtype != np.float64:
        x = np.asarray(x, dtype=float)
    return x


seedpoints_final = np.load(TEMP_PATH + 'seedpoints.npy',
                           allow_pickle=True).item()

image = io.imread(IMAGE_PATH, as_gray=True)
thresh = threshold_otsu(image)
binary = image < thresh
binary = morphology.remove_small_objects(binary, min_size=64, connectivity=2)


def frame_image(img, frame_width):
    b = frame_width  # border size in pixel
    ny, nx = img.shape[0], img.shape[
        1]  # resolution / number of pixels in x and y
    if img.ndim == 3:  # rgb or rgba array
        framed_img = np.zeros((b + ny + b, b + nx + b, img.shape[2]))
    elif img.ndim == 2:  # grayscale image
        framed_img = np.zeros((b + ny + b, b + nx + b))
    framed_img[b:-b, b:-b] = img
    return framed_img


binary = frame_image(binary, 15)
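# frame_image above zero-pads an image with a constant border; numpy's np.pad
# does the same in a single call and also handles the channel axis of RGB(A)
# arrays, which may be a simpler alternative.
import numpy as np

def frame_image_pad(img, frame_width):
    pad = [(frame_width, frame_width), (frame_width, frame_width)]
    if img.ndim == 3:
        pad.append((0, 0))           # do not pad the channel axis
    return np.pad(img, pad, mode='constant', constant_values=0)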
Example #45
0
from scipy import ndimage as ndi

fill_img = ndi.binary_fill_holes(edges)

# fig, ax = plt.subplots(figsize=(4, 3))
# ax.imshow(fill_img, cmap=plt.cm.gray, interpolation='nearest')
# ax.set_title('filling the holes')
# ax.axis('off')

######################################################################
# Small spurious objects are easily removed by setting a minimum size for
# valid objects.

from skimage import morphology

img_cleaned = morphology.remove_small_objects(fill_img, 21)

# fig, ax = plt.subplots(figsize=(4, 3))
# ax.imshow(img_cleaned, cmap=plt.cm.gray, interpolation='nearest')
# ax.set_title('removing small objects')
# ax.axis('off')

######################################################################
# However, this method is not very robust, since contours that are not
# perfectly closed are not filled correctly, as is the case for one unfilled
# coin above.
#
# Region-based segmentation
# =========================
#
# We therefore try a region-based method using the watershed transform.
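# A sketch of the region-based approach referred to above, assuming the classic
# skimage "coins" sample image: build an elevation map from the Sobel gradient,
# seed markers from confidently-background and confidently-foreground
# intensities, and flood the elevation map with the watershed transform.
import numpy as np
from skimage import data
from skimage.filters import sobel
from skimage.segmentation import watershed

coins = data.coins()
elevation_map = sobel(coins)
markers = np.zeros_like(coins)
markers[coins < 30] = 1      # background seeds
markers[coins > 150] = 2     # coin seeds
segmentation = watershed(elevation_map, markers)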
Example #46
0
import numpy as np
import scipy.ndimage as ndi
from skimage import morphology
import matplotlib.pyplot as plt

#write a function that generates the original binary image
def microstructure(l=256):
    n = 5
    x, y = np.ogrid[0:l, 0:l]  #generate a coordinate grid
    mask = np.zeros((l, l))
    generator = np.random.RandomState(1)  #seed the random number generator
    points = l * generator.rand(2, n**2)
    mask[(points[0]).astype(int), (points[1]).astype(int)] = 1
    mask = ndi.gaussian_filter(mask, sigma=l/(4.*n)) #Gaussian filtering
    return mask > mask.mean()

data = microstructure(l=128) #generate a test image

dst=morphology.remove_small_objects(data,min_size=300,connectivity=1)

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4))
ax1.imshow(data, plt.cm.gray, interpolation='nearest')
ax2.imshow(dst,plt.cm.gray,interpolation='nearest')

fig.tight_layout()
plt.show()
Example #47
0
def TUBA1B_HiPSC_Pipeline(struct_img,rescale_ratio, output_type, output_path, fn, output_func=None):
    ##########################################################################
    ##########################################################################
    # PARAMETERS:
    #   note that these parameters are supposed to be fixed for the structure
    #   and work well across different datasets

    intensity_norm_param = [1.5, 8.0] 
    vesselness_sigma = [1]
    vesselness_cutoff = 0.01
    minArea = 20
    ##########################################################################

    out_img_list = []
    out_name_list = []

    ###################
    # PRE_PROCESSING
    ###################
    # intensity normalization (min/max)
    struct_img = intensity_normalization(struct_img, scaling_param=intensity_norm_param)
    
    out_img_list.append(struct_img.copy())
    out_name_list.append('im_norm')

    # rescale if needed
    if rescale_ratio>0:
        struct_img = processing.resize(struct_img, [1, rescale_ratio, rescale_ratio], method="cubic")
        struct_img = (struct_img - struct_img.min() + 1e-8)/(struct_img.max() - struct_img.min() + 1e-8)

    # smoothing with boundary preserving smoothing
    structure_img_smooth = boundary_preserving_smoothing_3d(struct_img)

    out_img_list.append(structure_img_smooth.copy())
    out_name_list.append('im_smooth')

    ###################
    # core algorithm
    ###################

    # vesselness 3d 
    response = vesselness3D(structure_img_smooth, sigmas=vesselness_sigma,  tau=1, whiteonblack=True)
    bw = response > vesselness_cutoff
    
    ###################
    # POST-PROCESSING
    ###################
    seg = remove_small_objects(bw>0, min_size=minArea, connectivity=1, in_place=False)

    # output
    seg = seg>0
    seg = seg.astype(np.uint8)
    seg[seg>0]=255

    out_img_list.append(seg.copy())
    out_name_list.append('bw_final')

    if output_type == 'default': 
        # the default final output
        save_segmentation(seg, False, output_path, fn)
    elif output_type == 'AICS_pipeline':
        # pre-defined output function for pipeline data
        save_segmentation(seg, True, output_path, fn)
    elif output_type == 'customize':
        # the hook for passing in a customized output function
        output_func(out_img_list, out_name_list, output_path, fn)
    else:
        # the hook for other pre-defined RnD output functions (AICS internal)
        TUBA1B_output(out_img_list, out_name_list, output_type, output_path, fn)
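# A possible invocation of the pipeline above, assuming the aicssegmentation
# helpers imported by the original module are available; the stack file name
# and output directory are hypothetical.
import numpy as np

struct_img0 = np.load('tuba1b_stack.npy')     # hypothetical ZYX intensity stack
TUBA1B_HiPSC_Pipeline(struct_img0,
                      rescale_ratio=0,        # <=0 skips the rescaling step
                      output_type='default',
                      output_path='./output',
                      fn='tuba1b_stack')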
Example #48
0
def show_groundtruth(uid, x, y, y_c, y_m, gt, gt_s, gt_c, gt_m, save=False):
    threshold = config['param'].getfloat('threshold')
    threshold_edge = config['param'].getfloat('threshold_edge')
    threshold_mark = config['param'].getfloat('threshold_mark')
    segmentation = config['post'].getboolean('segmentation')
    remove_objects = config['post'].getboolean('remove_objects')
    remove_fiber = config['post'].getboolean('filter_fiber')
    min_object_size = config['post'].getint('min_object_size')
    only_contour = config['contour'].getboolean('exclusive')
    view_color_equalize = config['valid'].getboolean('view_color_equalize')
    print_table = config['valid'].getboolean('print_table')

    fig, (ax1, ax2, ax3) = plt.subplots(3, 4, sharey=True, figsize=(12, 8))
    fig.suptitle(uid, y=1)

    y_s = y  # to show pure semantic predict later

    if view_color_equalize:
        x = clahe(x)
    ax1[0].set_title('Image')
    ax1[0].imshow(x, aspect='auto')
    if segmentation:
        y, markers = partition_instances(y, y_m, y_c)
    if remove_objects:
        y = remove_small_objects(y, min_size=min_object_size)
    if remove_fiber:
        y = filter_fiber(y)
    _, count = label(y, return_num=True)
    ax1[1].set_title('Final Pred, #={}'.format(count))
    ax1[1].imshow(y, cmap='gray', aspect='auto')
    # overlay contours on the semantic ground truth (an alternative visualization of the instance ground truth, i.e. gt)
    _, count = label(gt, return_num=True)
    ax1[2].set_title('Instance Lbls, #={}'.format(count))
    ax1[2].imshow(gt_s, cmap='gray', aspect='auto')
    gt_c2, cmap = _make_overlay(gt_c)
    ax1[2].imshow(gt_c2, cmap=cmap, alpha=0.7, aspect='auto')
    if only_contour:  # can not tell from instances in this case
        iou = iou_metric(y, label(gt > 0), print_table)
    else:
        iou = iou_metric(y, gt, print_table)
    ax1[3].set_title('Overlay, IoU={:.3f}'.format(iou))
    ax1[3].imshow(gt_s, cmap='gray', aspect='auto')
    y, cmap = _make_overlay(y)
    ax1[3].imshow(y, cmap=cmap, alpha=0.3, aspect='auto')

    y_s = y_s > threshold
    _, count = label(y_s, return_num=True)
    ax2[0].set_title('Semantic Predict, #={}'.format(count))
    ax2[0].imshow(y_s, cmap='gray', aspect='auto')
    _, count = label(gt_s, return_num=True)
    ax2[1].set_title('Semantic Lbls, #={}'.format(count))
    ax2[1].imshow(gt_s, cmap='gray', aspect='auto')

    if y_c is not None:
        y_c = y_c > threshold_edge
        _, count = label(y_c, return_num=True)
        ax2[2].set_title('Contour Predict, #={}'.format(count))
        ax2[2].imshow(y_c, cmap='gray', aspect='auto')
        _, count = label(gt_c, return_num=True)
        ax2[3].set_title('Contour Lbls, #={}'.format(count))
        ax2[3].imshow(gt_c, cmap='gray', aspect='auto')

    _, count = label(markers, return_num=True)
    ax3[0].set_title('Final Markers, #={}'.format(count))
    ax3[0].imshow(markers, cmap='gray', aspect='auto')
    if y_m is not None:
        y_m = y_m > threshold_mark
        _, count = label(y_m, return_num=True)
        ax3[1].set_title('Marker Predict, #={}'.format(count))
        ax3[1].imshow(y_m, cmap='gray', aspect='auto')
        _, count = label(gt_m, return_num=True)
        ax3[2].set_title('Marker Lbls, #={}'.format(count))
        ax3[2].imshow(gt_m, cmap='gray', aspect='auto')

    plt.tight_layout()

    if save:
        dir = predict_save_folder()
        fp = os.path.join(dir, uid + '.png')
        plt.savefig(fp)
    else:
        show_figure()
    import time
    import os
    import cv2
    import utils

    time1 = time.time()
    from skimage import morphology

    path = "perspective"
    sum = len(os.listdir(path))
    write_path = "edge"
    for c in range(1, sum + 1):
        item = path + '//frame' + str(c) + '.jpg'
        bird = cv2.imread(item)
        mask = np.ones_like(bird)
        filter = np.max(bird) * 0.2
        mask[bird < filter] = 0

        edge = utils.thresholding(bird)
        bird = cv2.medianBlur(bird, 9)
        # remove small connected components
        edge = morphology.remove_small_objects(edge.astype('bool'),
                                               min_size=80,
                                               connectivity=2,
                                               in_place=True)
        edge = np.multiply(edge, mask)

        cv2.imwrite(write_path + '//frame' + str(c) + '.jpg', edge * 255)

    time2 = time.time()
    print("Elapsed time:", np.round((time2 - time1) / 60, 2), "min")
        os.makedirs(PostPath)
    # if not os.path.exists(CRFFigPath):
    #     os.makedirs(CRFFigPath)
    print("remove background")
    rmbackground(test_img_pth,
                 OldMatPath,
                 MatPath,
                 ref_extent=ref_extent,
                 ref_area=ref_area)
    SavePatchMap(MatPath, FigPath, slidename + "_Map.npz")
    pred_img_pth = os.path.join(FigPath, slidename + "_Map.png")
    # crf_img_name = slidename+"_Map.png"
    # crf_img_pth = os.path.join(CRFFigPath, crf_img_name)
    # CRFs(test_img_pth, pred_img_pth, crf_img_pth)
    post_img_pth = os.path.join(PostPath, slidename + "_Map.png")
    img = imread(pred_img_pth)
    img = im2vl(img)
    img_close = closing(img, square(3))
    labeled_img = label(img_close, connectivity=2)
    new_labeled_img = remove_small_objects(labeled_img,
                                           min_size=rm_rf_area,
                                           connectivity=2)
    # remove non-circle region
    props = regionprops(new_labeled_img)
    for i, reg in enumerate(props):
        if reg.eccentricity > ref_ecc:
            new_labeled_img[new_labeled_img == reg.label] = 0
    new_img = np.asarray(new_labeled_img != 0, dtype=np.uint8)
    new_img = vl2im(new_img)
    imsave(post_img_pth, new_img)
Example #51
0
def test(tesample, model):
    if not os.path.exists(output):
        os.makedirs(output)
    for itr in range(len(tesample['ID'])):
        teim = tesample['Image'][itr]
        # print(np.shape(teim))
        teid = tesample['ID'][itr]
        Da = teim.shape[2]
        Db = teim.shape[3]
        if teim.shape[2] != 1024 or teim.shape[3] != 1024:
            qq = np.empty((teim.shape[0], teim.shape[1], 1024, 1024))
            for j in range(teim.shape[0]):
                for k in range(3):
                    qq[j,
                       k, :, :] = scipy.misc.imresize(teim[j, k, :, :],
                                                      (1024, 1024))
            teim = qq
        ott = np.empty((teim.shape[0], Da, Db))
        stk = np.zeros((Da, Db))
        for itt in range(teim.shape[0]):
            xx = teim[itt:itt + 1, :, :, :]
            # num = 1 - (xx>0).sum()/(xx.shape[1]*xx.shape[2] * xx.shape[3])
            # print(num)
            xt = Cuda(
                Variable(
                    torch.from_numpy(teim[itt:itt + 1, :, :, :]).type(
                        torch.FloatTensor)))
            # print(xt)
            pred_mask = model(xt)
            # raw = scipy.misc.imresize(F.sigmoid(pred_mask).cpu().data.numpy()[0,0,:,:],(Da, Db))
            # raw = (raw/raw.max()*255).astype(np.uint8)
            pred_np = F.sigmoid(pred_mask).cpu().data.numpy()
            # print(np.shape(pred_np))
            pred_np = scipy.misc.imresize(pred_np[0, 0, :, :], (Da, Db))

            # pred_np = mph.remove_small_objects(pred_np.astype(bool), min_size=600, connectivity=2)

            # pred_np = mph.remove_small_holes(pred_np, min_size=1000, connectivity=2)
            stk = stk + pred_np
            ott[itt, :, :] = pred_np
        # pred_np = np.reshape(pred_np, [pred_np.shape[-4], pred_np.shape[-2], pred_np.shape[-1]])
        io.imsave(output + '/' + teid + '_raw.tif',
                  ((ott / ott.max()) * 255).astype(np.uint8))
        markers = np.zeros(ott.shape, dtype=np.uint)
        markers[ott < 125] = 1
        markers[ott > 175] = 2
        io.imsave(output + '/' + teid + '_mk.tif',
                  ((markers / markers.max()) * 255).astype(np.uint8))
        ott = seg.random_walker(ott, markers, beta=10, mode='cg')
        ott = ott - 1
        io.imsave(output + '/' + teid + '_raw2.tif',
                  ((ott / ott.max()) * 255).astype(np.uint8))
        for m in range(ott.shape[0]):
            im = ott[m, :, :]
            im = mph.remove_small_objects(im.astype(bool),
                                          min_size=600,
                                          connectivity=2).astype(np.uint8)
            im = mph.remove_small_holes(im, min_size=1000,
                                        connectivity=2).astype(np.uint8)
            ott[m, :, :] = im
        ott = mph.remove_small_objects(ott.astype(bool),
                                       min_size=600,
                                       connectivity=2).astype(np.uint8)
        ott = mph.remove_small_holes(ott, min_size=1000000,
                                     connectivity=2).astype(np.uint8)
        io.imsave(output + '/' + teid + '_pred.tif',
                  ((ott / ott.max()) * 255).astype(np.uint8))
        io.imsave(output + '/' + teid + '_stk.tif',
                  ((stk / stk.max()) * 255).astype(np.uint8))
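# A minimal 2D illustration of the marker scheme used above: pixels well below
# the low threshold are seeded as background (1), pixels well above the high
# threshold as foreground (2), and the random walker labels the uncertain band
# in between. The toy image and thresholds are illustrative only.
import numpy as np
from scipy import ndimage as ndi
from skimage import segmentation as sks

blob = np.zeros((64, 64))
blob[20:44, 20:44] = 200.0
noisy = ndi.gaussian_filter(blob, 3) + 30 * np.random.rand(64, 64)
markers = np.zeros(noisy.shape, dtype=np.uint8)
markers[noisy < 60] = 1      # confident background
markers[noisy > 170] = 2     # confident foreground
labels = sks.random_walker(noisy, markers, beta=10, mode='cg') - 1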
Example #52
0
# Next, we create three different segmentations with different characteristics.
# The first one uses :func:`skimage.segmentation.watershed` with
# *compactness*, which is a useful initial segmentation but too fine as a
# final result. We will see how this causes the oversegmentation metrics to
# shoot up.

edges = sobel(image)
im_test1 = watershed(edges, markers=468, compactness=0.001)

###############################################################################
# The next approach uses the Canny edge filter, :func:`skimage.filters.canny`.
# This is a very good edge finder, and gives balanced results.

edges = canny(image)
fill_coins = ndi.binary_fill_holes(edges)
im_test2 = ndi.label(remove_small_objects(fill_coins, 21))[0]

###############################################################################
# Finally, we use morphological geodesic active contours,
# :func:`skimage.segmentation.morphological_geodesic_active_contour`, a method
# that generally produces good results, but requires a long time to converge on
# a good answer. We purposefully cut short the procedure at 100 iterations, so
# that the final result is *undersegmented*, meaning that many regions are
# merged into one segment. We will see the corresponding effect on the
# segmentation metrics.

image = img_as_float(image)
gradient = inverse_gaussian_gradient(image)
init_ls = np.zeros(image.shape, dtype=np.int8)
init_ls[10:-10, 10:-10] = 1
im_test3 = morphological_geodesic_active_contour(gradient,
def main(argv):

    parser = ArgumentParser(description='...')

    parser.add_argument('datadir', help='...')
    parser.add_argument('-S', '--scale', default=3, help='...')
    parser.add_argument('-s',
                        '--zyxSpacing',
                        default=(1, 1, 1),
                        type=float,
                        nargs=3,
                        help='...')
    parser.add_argument('-o',
                        '--zyxOffset',
                        default=(0, 0, 0),
                        type=int,
                        nargs=3,
                        help='...')
    parser.add_argument('-l',
                        '--zyxLength',
                        default=(100, 100, 100),
                        type=int,
                        nargs=3,
                        help='...')
    parser.add_argument('-r',
                        '--rsfac',
                        default=(1, 1, 1),
                        type=float,
                        nargs=3,
                        help='...')
    parser.add_argument('-L',
                        '--labelimages',
                        default=['segments'],
                        nargs='+',
                        help='...')
    parser.add_argument('-M',
                        '--maskimages',
                        default=['mask'],
                        nargs='+',
                        help='...')
    parser.add_argument('-w',
                        '--ws',
                        action='store_true',
                        help='use watershed to fill volume')
    parser.add_argument('-m',
                        '--usempi',
                        action='store_true',
                        help='use mpi4py')

    args = parser.parse_args()

    datadir = args.datadir
    scale = args.scale
    #     cutout = args.cutout
    zyxSpacingOrig = args.zyxSpacing
    zyxOffset = args.zyxOffset
    zyxLength = args.zyxLength
    rsfac = args.rsfac
    labelimages = args.labelimages
    maskimages = args.maskimages
    ws = args.ws
    usempi = args.usempi

    cu_scale = '-{0}-'.format(scale)
    cu = [(o, zyxLength[i]) for i, o in enumerate(zyxOffset)]
    cus = ['{0}_{1}-'.format(o, o + l) for o, l in cu]
    cutout = '-default-hdf5' + cu_scale + cus[2] + cus[1] + cus[
        0] + 'ocpcutout.h5'

    res = {}
    if all(np.array(rsfac) == 1):
        res = {'op': 'orig', 'rs': rsfac}
    elif any(np.array(rsfac) < 1):
        res = {'op': 'us', 'rs': (1 / rsfac[0], 1 / rsfac[1], 1 / rsfac[1])}
    elif any(np.array(rsfac) > 1):
        res = {'op': 'ds', 'rs': rsfac}

    ### load the data
    data = loadh5(datadir, 'kasthuri11cc' + cutout, '/default/CUTOUT')
    data, zyxSpacing = resample_volume(data, False, zyxSpacingOrig, res)
    writeh5(data, datadir, 'data_' + res['op'] + '.h5', dtype='uint8')
    data_smooth = gaussian_filter(data, [zyxSpacing[2] / zyxSpacing[0], 1, 1])
    writeh5(data_smooth,
            datadir,
            'data_smooth_' + res['op'] + '.h5',
            dtype='uint8')

    ### load the mask
    mask = np.zeros_like(data, dtype='bool')
    for m in maskimages:
        newmask = loadh5(datadir, 'kat11' + m + cutout, '/default/CUTOUT')
        mask = mask | np.array(newmask, dtype='bool')
    mask, _ = resample_volume(mask, True, zyxSpacingOrig, res)

    ### process the labelimages
    for l in labelimages:
        labeldata = loadh5(datadir, 'kat11' + l + cutout, '/default/CUTOUT')
        labeldata[~mask] = 0
        labeldata, _ = resample_volume(labeldata, True, zyxSpacingOrig, res)
        writeh5(labeldata,
                datadir,
                l + '_' + res['op'] + '.h5',
                dtype='uint32')

        compdict = {}
        if l == 'segments':
            ### load the label-to-compartment mappings
            compdict['DD'] = np.loadtxt(os.path.join(datadir, os.pardir,
                                                     'dendrites.txt'),
                                        dtype='int')
            compdict['UA'] = np.loadtxt(os.path.join(datadir, os.pardir,
                                                     'axons.txt'),
                                        dtype='int')
            compdict['MA'] = np.loadtxt(os.path.join(datadir, os.pardir,
                                                     'MA.txt'),
                                        dtype='int')
            compdict['MM'] = np.loadtxt(os.path.join(datadir, os.pardir,
                                                     'MM.txt'),
                                        dtype='int')

            labeldata = remove_small_objects(labeldata, 100)

            L = {}
            ### separate the nested components and fill holes with parent label
            # define the object hierarchies  # TODO: automate detection of parent/child
            # NOTE: MA not fully encapsulated by MM; TODO: does this cause problems for DifSim?
            # NOTE: 6640 is myelinated but parent is not segmented; TODO: adapt segmentation
            # NOTE: still one myelinated axon missing (the small one running along the green central dendrite) (count is 7 in the videos)
            nested_labels_MM_MA = [{
                'parent': 2064,
                'children': [4247]
            }, {
                'parent': 4387,
                'children': [4143]
            }, {
                'parent': 5004,
                'children': [4142]
            }, {
                'parent': 5006,
                'children': [3962]
            }, {
                'parent': 5105,
                'children': [4249]
            }]
            L['segments'], L['MA'] = dissolve_nesting(
                labeldata, nested_labels_MM_MA)  # first level
            # NOTE: it might be better to create separate images for top level objects and nested objects
            # and then infer a single object number from the overlap of the nested objects
            # top-level objects are: DD, UA, MM; nested objects are: mito, vesicles, MA; TODO: what are synapses???
            # TODO: check if nested objects are fully contained in a single object
            # (and do not touch boundary; and do not extend into ECS) (e.g., not the case for synapses)

            ### watershed the segments to label every voxel in the volume
            if ws:
                L['segments'] = watershed(-data_smooth, L['segments'])
                writeh5(L['segments'], datadir,
                        'segments_ws_' + res['op'] + '.h5')
                L_ECS = enforce_ECS(
                    L['segments'])  # TODO: create flag to control this
                writeh5(L_ECS, datadir, 'L_ECS_' + res['op'] + '.h5')

            # create the new labelclasses from 'segments' and remove the old one
            # NB better to keep them as the parent classes as this saves processing in vtk-meshing
            # NB not for distributed processing
            ### update the compartment class volume
    #         Lclass = np.zeros_like(data, dtype='S2')
    #         Lclass.fill('NN')
    #         for labelclass, labels in compdict.items():
    #             for label in labels:
    #                 Lclass[L['segments']==label] = labelclass
    #         newlabels = ['NN', 'DD', 'UA', 'MM']
    #         for nl in newlabels:
    #             L[nl] = np.copy(L['segments'])
    #             L[nl][Lclass!=nl] = 0
    #         L.pop("segments", None)
            for _, labeldata in L.items():
                labels2meshes_vtk(datadir,
                                  compdict,
                                  np.transpose(labeldata),
                                  spacing=zyxSpacing[::-1],
                                  offset=zyxOffset[::-1])
        else:
            compdict[l] = np.unique(labeldata)
            labeldata = remove_small_objects(labeldata, 100)
            labels2meshes_vtk(datadir,
                              compdict,
                              np.transpose(labeldata),
                              spacing=zyxSpacing[::-1],
                              offset=zyxOffset[::-1])
def segment_images(inpDir, outDir, config_data):
    """ Workflow for data with filamentous structures
    such as ZO1, Beta Actin, Titin, Troponin 1.

    Args:
        inpDir : path to the input directory
        outDir : path to the output directory
        config_data : configuration dictionary (parsed from the configuration file)
    """

    logging.basicConfig(
        format='%(asctime)s - %(name)-8s - %(levelname)-8s - %(message)s',
        datefmt='%d-%b-%y %H:%M:%S')
    logger = logging.getLogger("main")
    logger.setLevel(logging.INFO)

    inpDir_files = os.listdir(inpDir)
    for i, f in enumerate(inpDir_files):
        logger.info('Segmenting image : {}'.format(f))

        # Load image
        br = BioReader(os.path.join(inpDir, f))
        image = br.read_image()
        structure_channel = 0
        struct_img0 = image[:, :, :, structure_channel, 0]
        struct_img0 = struct_img0.transpose(2, 0, 1).astype(np.float32)

        # main algorithm
        intensity_scaling_param = config_data['intensity_scaling_param']
        struct_img = intensity_normalization(
            struct_img0, scaling_param=intensity_scaling_param)
        gaussian_smoothing_sigma = config_data['gaussian_smoothing_sigma']

        if config_data[
                'preprocessing_function'] == 'image_smoothing_gaussian_3d':
            structure_img_smooth = image_smoothing_gaussian_3d(
                struct_img, sigma=gaussian_smoothing_sigma)
        elif config_data[
                'preprocessing_function'] == 'edge_preserving_smoothing_3d':
            structure_img_smooth = edge_preserving_smoothing_3d(struct_img)

        f3_param = config_data['f3_param']
        bw = filament_3d_wrapper(structure_img_smooth, f3_param)
        minArea = config_data['minArea']
        seg = remove_small_objects(bw > 0,
                                   min_size=minArea,
                                   connectivity=1,
                                   in_place=False)
        seg = seg > 0
        out_img = seg.astype(np.uint8)
        out_img[out_img > 0] = 255

        # create output image
        out_img = out_img.transpose(1, 2, 0)
        out_img = out_img.reshape(
            (out_img.shape[0], out_img.shape[1], out_img.shape[2], 1, 1))

        # write image using BFIO
        bw = BioWriter(os.path.join(outDir, f), metadata=br.read_metadata())
        bw.num_x(out_img.shape[1])
        bw.num_y(out_img.shape[0])
        bw.num_z(out_img.shape[2])
        bw.num_c(out_img.shape[3])
        bw.num_t(out_img.shape[4])
        bw.pixel_type(dtype='uint8')
        bw.write_image(out_img)
        bw.close_image()
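# A hedged example of the configuration dictionary this workflow expects,
# together with a possible call; the directory names and parameter values
# below are illustrative, not the plugin's defaults.
config_data = {
    'intensity_scaling_param': [1.0, 18.0],
    'gaussian_smoothing_sigma': 1,
    'preprocessing_function': 'image_smoothing_gaussian_3d',
    'f3_param': [[1.5, 0.16]],
    'minArea': 5,
}
segment_images('/data/input_tiles', '/data/segmented_tiles', config_data)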
Example #55
0
def extract_line_segments(image, 
                          grid_size, 
                          loc, 
                          R, 
                          line_gap, 
                          search_range, 
                          p_removal,
                          display=False):
    """
        Extract line segments from an image file
            Args:
                - image: ndarray image data
                - grid_size: size of each pixel
                - loc: center of the actual region
                - R: radius of the actual region
                - line_gap: maximum gap in meters
                - search_range: used in pixel removal
                - p_removal: probability to remove a pixel
    """
    line_gap_in_pixel = int(line_gap/grid_size+0.5)

    all_lines = []
    lines = probabilistic_hough_line(image, 
                                     line_length=100,
                                     line_gap=line_gap_in_pixel)
   
    if display:
        fig = plt.figure(figsize=const.figsize)
        ax = fig.add_subplot(111)
        ax.imshow(image.T, cmap='gray')
        for line in lines:
            ax.plot([line[0][1], line[1][1]],
                    [line[0][0], line[1][0]], 'r-', linewidth=2)

        ax.set_xlim([0, image.shape[0]])
        ax.set_ylim([0, image.shape[1]])
        plt.show()

    all_lines.extend(lines)
    modified_img1 = remove_pixels(image, 
                                  lines, 
                                  p_removal=p_removal,
                                  search_range=search_range)
    modified_img1 = morphology.remove_small_objects(modified_img1, 10)

    new_lines1 = probabilistic_hough_line(modified_img1, 
                                          line_length=50,
                                          line_gap=line_gap_in_pixel)

    if display:
        fig = plt.figure(figsize=const.figsize)
        ax = fig.add_subplot(111)
        ax.imshow(modified_img1.T, cmap='gray')
        for line in new_lines1:
            ax.plot([line[0][1], line[1][1]],
                    [line[0][0], line[1][0]], 'r-', linewidth=2)

        ax.set_xlim([0, image.shape[0]])
        ax.set_ylim([0, image.shape[1]])
        plt.show()
 
    all_lines.extend(new_lines1)
    modified_img2 = remove_pixels(modified_img1,
                                  new_lines1,
                                  p_removal=p_removal,
                                  search_range=search_range)
    modified_img2 = morphology.remove_small_objects(modified_img2, 20)

    new_lines2 = probabilistic_hough_line(modified_img2,
                                          line_length=20,
                                          line_gap=line_gap_in_pixel)
    all_lines.extend(new_lines2)

    if display:
        fig = plt.figure(figsize=const.figsize)
        ax = fig.add_subplot(111)
        ax.imshow(modified_img2.T, cmap='gray')
        for line in new_lines2:
            ax.plot([line[0][1], line[1][1]],
                    [line[0][0], line[1][0]], 'r-', linewidth=2)

        ax.set_xlim([0, image.shape[0]])
        ax.set_ylim([0, image.shape[1]])
        plt.show()
 
    orig_lines = []
    for line in all_lines:
        line_start = line[0]
        line_end = line[1]
        start_e = line_start[1]*grid_size + loc[0] - R
        start_n = line_start[0]*grid_size + loc[1] - R

        end_e = line_end[1]*grid_size + loc[0] - R
        end_n = line_end[0]*grid_size + loc[1] - R
        
        orig_line1 = [(start_e, start_n), (end_e, end_n)]
        orig_lines.append(orig_line1)
        orig_line2 = [(end_e, end_n), (start_e, start_n)]
        orig_lines.append(orig_line2)

    return np.array(orig_lines)
Example #56
0
import cv2
from skimage.morphology import remove_small_objects

im1 = cv2.imread('222.jpg')
b, g, r = np.double(cv2.split(im1))
shadow_ratio = (4 / np.pi) * np.arctan2(
    (b - g), (b + g))  # multiply by 4/pi to keep values within [0, 1]
shadow_mask = shadow_ratio > 0.2
#cv2.imshow("shadow_mask",np.uint8(shadow_mask*255))
shadow_mask[:5, :] = 0
shadow_mask[-5:, :] = 0
shadow_mask[:, :5] = 0
shadow_mask[:, -5:] = 0  # zero out the border values
#print(shadow_mask)
#cv2.imshow("shadow_mask1",np.uint8(shadow_mask*255))
shadow_mask = remove_small_objects(shadow_mask, min_size=100, connectivity=3)
# OpenCV has no direct equivalent of MATLAB's bwareaopen (binary area opening)
#cv2.imshow("shadow_mask2",np.uint8(shadow_mask*255))
kernel = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (5, 5))

kernel[1, 0] = 0
kernel[3, 0] = 0
kernel[1, 4] = 0
kernel[3, 4] = 0

shadow_mask1 = np.uint8(shadow_mask * 1)
#print(shadow_mask1)
mask = cv2.dilate(shadow_mask1, kernel) - shadow_mask1
#cv2.imshow("boundary",np.uint8(mask*255))
#substarct shadow_mask is to get boundary
#get boundary
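# The comment above notes that OpenCV has no bwareaopen equivalent; a
# plain-OpenCV alternative to skimage's remove_small_objects is to label the
# connected components and zero out the small ones. Sketch only, reusing the
# 100-pixel minimum from above.
import cv2
import numpy as np

def bwareaopen_cv(binary_u8, min_size=100, connectivity=8):
    num, labels, stats, _ = cv2.connectedComponentsWithStats(
        binary_u8, connectivity=connectivity)
    out = np.zeros_like(binary_u8)
    for lbl in range(1, num):                 # label 0 is the background
        if stats[lbl, cv2.CC_STAT_AREA] >= min_size:
            out[labels == lbl] = 1
    return out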
Example #57
0
def isolate_character(image, file, file2):
    image = np.where(image > 0.5, 0, 1)
    lb_image = measure.label(image)
    lb_image = morphology.remove_small_objects(lb_image,
                                               min_size=50,
                                               connectivity=1)

    regions = measure.regionprops(lb_image)
    count = 0
    for region in regions:
        min_row, min_col, max_row, max_col = region.bbox
        count = count + (max_col - min_col)
    count = count / len(regions)

    i = 0
    DPI = 100
    tem = 0
    rows, cols = image.shape
    figsize = cols / DPI, rows / DPI
    # fig = plt.figure(figsize=figsize)
    # ax = fig.add_axes([0, 0, 1, 1])
    # # regions[0][0] = regions[0][0] + regions[0][1]
    # for region in regions:
    #     min_row, min_col, max_row, max_col = region.bbox
    #     if (max_col - min_col) < count * 6:
    #         # plt.imsave(path2 + str(i) + ".png", region.image,cmap = "gray")
    #         # print(path2 + str(i) + ".png")
    #         rec = Rectangle((min_col - 1, min_row - 1), max_col - min_col, max_row - min_row, fill=False,
    #                         edgecolor="red")
    #         ax.add_patch(rec)
    #
    #         # io.imsave(file2 + str(i) + ".png", region.image.astype(np.uint8) * 255, cmap="gray")
    #         # print(file2 + str(i) + ".png")
    #     # i = i + 1
    # ax.imshow(image,cmap = "gray")

    totallist = []
    for region in regions:
        min_row, min_col, max_row, max_col = region.bbox
        # if ((max_col - min_col) > count * 1.6 and (max_col - min_col) / (max_row - min_row) < 1.8 )or ((max_col - min_col) / (max_row - min_row)  > count * 2.5):
        totallist.append([region.bbox, region.image.astype(np.int32), 0])
    tem_image = 0
    # for i in range(len(totallist)):
    #     for j in range(i + 1,(len(totallist)),1):
    #         if totallist[i][0][1] < totallist[j][0][1] and totallist[i][0][3] < totallist[j][0][3]:
    #             min_x1,mid_x1,mid_x2,max_x2 = totallist[i][0][1],totallist[j][0][1],totallist[i][0][3],totallist[j][0][3]
    #         elif (totallist[i][0][1] > totallist[j][0][1] and totallist[i][0][3] > totallist[j][0][3]):
    #             min_x1, mid_x1, mid_x2, max_x2 = totallist[j][0][1],totallist[i][0][1],totallist[j][0][3],totallist[i][0][3]
    #         else:
    #             min_x1, mid_x1, mid_x2, max_x2 = np.sort([totallist[j][0][1], totallist[i][0][1], totallist[j][0][3],totallist[i][0][3]])
    #         if totallist[i][2] == 2:
    #             break
    #         if totallist[j][2] == 2:
    #             continue
    #         min_width = np.min([totallist[i][0][3] - totallist[i][0][1], totallist[j][0][3] - totallist[j][0][1]])
    #         confidence_value = (mid_x2 - mid_x1) / min_width
    for i in range(len(totallist)):
        for j in range((len(totallist))):
            confidence_value = nmovlp(totallist[i][0], totallist[j][0])

            if totallist[i][0][1] < totallist[j][0][1] and totallist[i][0][
                    3] < totallist[j][0][3]:
                min_x1, mid_x1, mid_x2, max_x2 = totallist[i][0][1], totallist[
                    j][0][1], totallist[i][0][3], totallist[j][0][3]
            elif (totallist[i][0][1] > totallist[j][0][1]
                  and totallist[i][0][3] > totallist[j][0][3]):
                min_x1, mid_x1, mid_x2, max_x2 = totallist[j][0][1], totallist[
                    i][0][1], totallist[j][0][3], totallist[i][0][3]
            else:
                min_x1, mid_x1, mid_x2, max_x2 = np.sort([
                    totallist[j][0][1], totallist[i][0][1], totallist[j][0][3],
                    totallist[i][0][3]
                ])
            if totallist[i][2] == 2:
                break
            if totallist[j][2] == 2:
                continue
            if i == j:
                continue
            print(totallist[i][0], totallist[j][0], "confidence",
                  confidence_value)
            if (confidence_value > 0):
                totallist[i][2] = 1
                if (totallist[j][2] == 1):
                    totallist[j][2] = 2
                if (totallist[i][0][1] < totallist[j][0][1]
                        and totallist[i][0][3] > totallist[j][0][3]) or (
                            totallist[j][0][1] < totallist[i][0][1]
                            and totallist[j][0][3] > totallist[i][0][3]):
                    min_x1, mid_x1, mid_x2, max_x2 = np.sort([
                        totallist[i][0][1], totallist[j][0][1],
                        totallist[i][0][3], totallist[j][0][3]
                    ])
                    min_y1, a, b, max_y2 = np.sort([
                        totallist[i][0][0], totallist[j][0][0],
                        totallist[i][0][2], totallist[j][0][2]
                    ])
                    a1_y1, a1_y2, a1_x1, a1_x2 = totallist[i][0][
                        1] - min_x1, totallist[i][0][3] - min_x1, totallist[i][
                            0][0] - min_y1, totallist[i][0][2] - min_y1
                    b1_y1, b1_y2, b1_x1, b1_x2 = totallist[j][0][
                        1] - min_x1, totallist[j][0][3] - min_x1, totallist[j][
                            0][0] - min_y1, totallist[j][0][2] - min_y1
                    tem_image1 = np.zeros((max_y2 - min_y1, max_x2 - min_x1))
                    tem_image2 = np.zeros((max_y2 - min_y1, max_x2 - min_x1))

                    a1_y2 = a1_y2 - 1
                    b1_y2 = b1_y2 - 1
                    # print(totallist[i][1].shape)
                    # print(tem_image.shape)
                    # tem_image1[0:19,0:32] = totallist[i][1]
                    row, col = totallist[i][1].shape
                    for ii in range(row):
                        for jj in range(col):
                            tem_image1[a1_x1 +
                                       ii][a1_y1 +
                                           jj] = totallist[i][1][ii][jj]
                    row, col = totallist[j][1].shape
                    for iii in range(row):
                        for jjj in range(col):
                            tem_image2[b1_x1 +
                                       iii][b1_y1 +
                                            jjj] = totallist[j][1][iii][jjj]

                    tem_image = tem_image1 + tem_image2
                    tem = np.where(tem_image > 0.6, 1, 0)
                    # print(tem_image.shape)
                    totallist[i][1] = tem
                    totallist[i][0] = [min_y1, min_x1, max_y2, max_x2]
                    totallist[j][2] = 2

                else:
                    min_y1, a, b, max_y2 = np.sort([
                        totallist[i][0][0], totallist[j][0][0],
                        totallist[i][0][2], totallist[j][0][2]
                    ])
                    a1_y1, a1_y2, a1_x1, a1_x2 = totallist[i][0][
                        1] - min_x1, totallist[i][0][3] - min_x1, totallist[i][
                            0][0] - min_y1, totallist[i][0][2] - min_y1
                    b1_y1, b1_y2, b1_x1, b1_x2 = totallist[j][0][
                        1] - min_x1, totallist[j][0][3] - min_x1, totallist[j][
                            0][0] - min_y1, totallist[j][0][2] - min_y1
                    tem_image1 = np.zeros((max_y2 - min_y1, max_x2 - min_x1))
                    tem_image2 = np.zeros((max_y2 - min_y1, max_x2 - min_x1))
                    tem_image = np.zeros((max_y2 - min_y1, max_x2 - min_x1))
                    # dd = plt.figure()
                    # dd.add_axes([0, 0, 1, 1]).imshow(totallist[i][1])
                    # plt.show()

                    tem_image1[a1_x1:a1_x2, a1_y1:a1_y2] = totallist[i][1]
                    tem_image2[b1_x1:b1_x2, b1_y1:b1_y2] = totallist[j][1]
                    tem_image = tem_image1 + tem_image2
                    tem = np.where(tem_image > 0.6, 1, 0)
                    # print(tem_image.shape)
                    totallist[i][1] = tem
                    totallist[i][0] = [min_y1, min_x1, max_y2, max_x2]
                    totallist[j][2] = 2
    for i in range(len(totallist)):
        if (totallist[i][2] != 2):
            min_row, min_col, max_row, max_col = totallist[i][0][0], totallist[
                i][0][1], totallist[i][0][2], totallist[i][0][3]
            if (max_col - min_col) <= 1.3 * count:
                io.imsave("D:\\datebase\\character\\isolate_character\\" +
                          file.split(".")[0] + str(i) + ".png",
                          totallist[i][1] * 255,
                          cmap="gray")
                print("D:\\datebase\\character\\isolate_character\\" +
                      file.split(".")[0] + str(i) + ".png")
            elif ((max_col - min_col) > 1.3 * count
                  and (max_col - min_col) <= 1.7 * count):
                io.imsave(
                    "D:\\datebase\\character\\touch_character\\two_touch_character\\"
                    + file.split(".")[0] + str(i) + ".png",
                    totallist[i][1] * 255,
                    cmap="gray")
                print(
                    "D:\\datebase\\character\\touch_character\\two_touch_character\\"
                    + file.split(".")[0] + str(i) + ".png")
            else:
                io.imsave(
                    "D:\\datebase\\character\\touch_character\\three_touch_character\\"
                    + file.split(".")[0] + str(i) + ".png",
                    totallist[i][1] * 255,
                    cmap="gray")
                print(
                    "D:\\datebase\\character\\touch_character\\three_touch_character\\"
                    + file.split(".")[0] + str(i) + ".png")
def remove_small_regions(img, size):
    """Morphologically removes small (less than size) connected regions of 0s or 1s."""
    img = morphology.remove_small_objects(img, size)
    img = morphology.remove_small_holes(img, size)
    return img
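# A possible usage of the helper above, assuming skimage.morphology is imported
# as "morphology" in the original module; the 2% size threshold is an
# illustrative choice, not a value taken from the source.
import numpy as np

pred_mask = np.random.rand(256, 256) > 0.5          # stand-in for a predicted mask
cleaned = remove_small_regions(pred_mask, 0.02 * np.prod(pred_mask.shape))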
Example #59
0
    def get_seeds_using_prob_class1(self,
                                    data,
                                    class1,
                                    roi=None,
                                    dens_min=20,
                                    dens_max=255,
                                    thresholdType='percOfMaxDist',
                                    percT=0.5):
        # calculates probability based on similarity of intensities
        probs, mu = tools.intensity_probability(data, std=10)
        # sed3.sed3(data).show()
        # sed3.sed3(probs).show()
        # normalizing and calculating reciprocal values
        # weights_ints = skexp.rescale_intensity(probs,
        # in_range=(0,probs.max()), out_range=(1,0))
        weights_ints = np.exp(-probs)

        weights_h = np.where(data > mu, 1 - probs, 0)
        weights_l = np.where(data < mu, 1 - probs, 0)
        # sed3.sed3(1 - probs).show()
        sed3.sed3(weights_h).show()
        sed3.sed3(weights_l).show()

        if roi is None:
            roi = np.logical_and(data >= dens_min, data <= dens_max)
        dist_data = np.where(class1 == 1, False, True)
        dist_data *= roi > 0
        # dists = distance_transform_edt(dist_data)
        # sed3.sed3(dists).show()

        # print 'dists max = %i' % dists.max()
        # print 'dists min = %i' % dists.min()
        # print 'weights_ints max = %.4f' % weights_ints.max()
        # print 'weights_ints min = %.4f' % weights_ints.min()
        # print 'probs max = %.4f' % probs.max()
        # print 'probs min = %.4f' % probs.min()

        # energy = dists * weights_ints
        energy = weights_ints
        # sed3.sed3(energy).show()

        seeds = np.zeros(data.shape, dtype=bool)
        if thresholdType == 'percOfMaxDist':
            seeds = energy > (percT * energy.max())
        elif thresholdType == 'mean':
            seeds = energy > 2 * (energy[np.nonzero(energy)]).mean()

        # TODO: the problem here is that energy lies in the interval <0.961, 1>,
        # which is a very narrow range
        print('energy max = %.4f' % energy.max())
        print('energy min = %.4f' % energy.min())
        print('thresh = %.4f' % (percT * energy.max()))
        print(seeds.min())
        print(seeds.max())
        print(
            'seed perc = %.2f' %
            ((energy > percT * energy.max()).sum() / float(energy.nbytes)))
        sed3.sed3(seeds).show()

        # removing to small objects
        min_size_of_seed_area = 60
        print('before removing: %i' % seeds.sum())
        seeds = skimor.remove_small_objects(seeds,
                                            min_size=min_size_of_seed_area,
                                            connectivity=1,
                                            in_place=False)
        print('after removing: %i' % seeds.sum())

        all_seeds = np.zeros(data.shape, dtype=int)
        # allSeeds[np.nonzero(self.segmentation)] = 80
        all_seeds[np.nonzero(class1)] = 1  # healthy tissue
        all_seeds[np.nonzero(seeds)] = 2  # outliers
        # for segmentation with the random walker, restrict the image to the
        # segmented liver and vessels
        all_seeds = np.where(roi == 0, -1, all_seeds)

        sed3.sed3(all_seeds).show()

        return all_seeds
Example #60
0
def grid_img(point_cloud,
             grid_size,
             loc,
             R,
             threshold,
             sigma=5):
    """ Sample the input point cloud using a uniform grid. 
        Args:
            - point_cloud: an object of PointCloud class
            - grid_size: in meters
            - loc: center
            - R: radius of the region
            - sigma: gaussian distribution variance, in meters
            - threshold: minimum count for a grid cell to be kept
        Return:
            - results: ndarray, rectangular image.
    """
    sample_points = []
    sample_directions = []
    
    min_easting = loc[0]-R
    max_easting = loc[0]+R
    min_northing = loc[1]-R
    max_northing = loc[1]+R

    n_grid_x = int((max_easting - min_easting)/grid_size + 0.5)
    n_grid_y = int((max_northing - min_northing)/grid_size + 0.5)

    print "Will generate image of size (%d, %d)"%(n_grid_x, n_grid_y)

    results = np.zeros((n_grid_x+1, n_grid_y+1))

    if n_grid_x > 1E4 or n_grid_y > 1E4:
        print "ERROR! The sampling grid is too small!"
        sys.exit(1)
   
    three_sigma = 3*sigma/grid_size
    
    geo_hash = {}
    for pt_idx in range(0, len(point_cloud.locations)):
        pt = point_cloud.locations[pt_idx]

        px = int((pt[0] - min_easting) / grid_size)
        py = int((pt[1] - min_northing) / grid_size)

        if px<0 or px>=n_grid_x or py<0 or py>=n_grid_y:
            continue

        # Expand around neighbor 
        pt_dir = point_cloud.directions[pt_idx]
        if np.linalg.norm(pt_dir) > 0.1:
            delta_x = np.dot(three_sigma*pt_dir, np.array([1.0, 0.0]))
            delta_y = np.sqrt(three_sigma**2 - delta_x**2)
            larger_one = max(abs(delta_x), abs(delta_y))
            n_pt_to_add = int(larger_one*2 + 1.5)

            tmp_i = np.linspace(px-delta_x, px+delta_x, n_pt_to_add)
            tmp_j = np.linspace(py-delta_y, py+delta_y, n_pt_to_add)

            for s in range(0, n_pt_to_add):
                i = int(tmp_i[s])
                j = int(tmp_j[s])

                if i<0 or i>=n_grid_x or j<0 or j>n_grid_y:
                    continue
                if (i, j) in geo_hash:
                    geo_hash[(i, j)] += 1.0
                else:
                    geo_hash[(i, j)] = 1.0
        else:
            if (px, py) in geo_hash:
                geo_hash[(px, py)] += 1.0
            else:
                geo_hash[(px, py)] = 1.0

    for key in geo_hash.keys():
        if geo_hash[key] >= threshold:
            results[key[0], key[1]] = geo_hash[key]

    filtered_img = results>0.9
    
    filtered_img = morphology.dilation(filtered_img, morphology.square(3))
    filtered_img = morphology.erosion(filtered_img, morphology.square(3))
    filtered_img = morphology.remove_small_objects(filtered_img, 10)

    results = filtered_img>0.9

    return results
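# Design note: the dilation followed by erosion with the same square(3) element
# above is equivalent to a morphological closing, so the two calls could be
# collapsed into one; a small sketch on a toy mask.
import numpy as np
from skimage import morphology

toy_mask = np.random.rand(100, 100) > 0.97
closed = morphology.closing(toy_mask, morphology.square(3))
cleaned = morphology.remove_small_objects(closed, 10)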