Example #1
def human_afterloop(output_directory, pre_time, fle_name, buffer_directory):
    start2 = time()

    d_c = import_edited(buffer_directory)
    rebw = load(open(buffer_directory+'-'+'DO_NOT_TOUCH_ME.dmp','rb'))
    seg_dc = (label(d_c,neighbors=4)+1)*d_c
    if np.max(seg_dc)<4:
        return 'FAILED: mask for %s looks unsegmented' % fle_name

    colormap = repaint_culsters(int(np.max(seg_dc)))

    segs = len(set(seg_dc.flatten().tolist()))-1

    # show the result before saving the clustering and report the number of clusters to the user
    plt.subplot(1,2,1)
    plt.title(fle_name)
    plt.imshow(rebw, cmap='gray', interpolation='nearest')

    plt.subplot(1,2,2)
    plt.title('Segmentation - clusters: %s'%str(segs))
    plt.imshow(mark_boundaries(rebw, d_c))
    plt.imshow(seg_dc, cmap=colormap, interpolation='nearest', alpha=0.3)

    plt.show()

    plt.imshow(mark_boundaries(rebw, d_c))
    plt.imshow(seg_dc, cmap=colormap, interpolation='nearest', alpha=0.3)

    plt.savefig(path.join(output_directory, fle_name+'_%s_clusters.png'%str(segs)), dpi=500, bbox_inches='tight', pad_inches=0.0)

    return fle_name+'\t clusters: %s,\t total time : %s'%(segs, "{0:.2f}".format(time()-start2+pre_time))
def watershed(base_image, seed_image=None, threshold_distance=80):
    """ execute watershed with chosen seeds
    """
    from scipy import ndimage as ndi
    
    from skimage.morphology import watershed
    from skimage.feature import peak_local_max
    from skimage.morphology import label
    import matplotlib.pyplot as plt
    
    
    distance = ndi.distance_transform_edt(base_image)
    #    imgplot = plt.imshow(distance)
    fig = plt.figure()  # a new figure window
    ax = fig.add_subplot(1, 1, 1)  # specify (nrows, ncols, axnum)
    #    ax.imshow(distance>threshold_distance, cmap='Greys')
    
    thresh = distance > threshold_distance
    ax.imshow(thresh, cmap='Greys') 
    
    
    
    #local_maxi = peak_local_max(distance, labels=jac, footprint=np.ones((100, 100)), indices=False)
    if seed_image is None: 
        markers = label(thresh)
    else:
        markers = label(seed_image)

    #    imgplot = plt.imshow(markers)
     
      
    watersh = watershed(-distance, markers, mask=base_image) 
    #    plt.imshow(watersh, cmap=plt.cm.viridis, interpolation='nearest')
    
    return watersh
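
A minimal usage sketch for the watershed helper above (the toy binary image and the threshold value are illustrative and not part of the original code):

import numpy as np
# two overlapping disks as a toy binary image
yy, xx = np.mgrid[0:300, 0:300]
blobs = (((xx - 110) ** 2 + (yy - 150) ** 2) < 90 ** 2) | (((xx - 200) ** 2 + (yy - 150) ** 2) < 90 ** 2)
# seeds are derived inside the function from the thresholded distance transform
segmented = watershed(blobs.astype(int), threshold_distance=40)
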
def segmentationize(imageSe):
    """
    Divides coherent shapes in an image into smaller, integer-labeled groups.
    """
    
    # create a matrix of distances to the nearest surrounding background
    distance = ndimage.distance_transform_edt(imageSe, sampling=3)
    erosed = ndimage.binary_erosion(imageSe, iterations=8).astype(imageSe.dtype)
    distanceE = ndimage.distance_transform_edt(erosed, sampling=3)
    distance += (2 * distanceE)
    labels, num = label(imageSe, background=0, return_num=True)
    sizes_image = ndimage.sum(imageSe, labels, range(num))
    sizes_image = np.sort(sizes_image, axis=None)
    pos = int(0.4 * num)
    areal = int(sizes_image[pos] ** 0.5)
    if areal <= 10:
        areal = 10
    elif (areal % 2) != 0:
        areal += 1
    footer = circarea(areal) # draw circle area
    
    # find the positions of the maxima from the distances
    local_maxi = peak_local_max(distance, indices=False, footprint=footer, labels=imageSe)
    markers = label(local_maxi)
    
    # watershed algorithm starts at the maxima and returns labels of particles
    simplefilter("ignore", FutureWarning)   # avoid warning in watershed method
    labels_ws = watershed(-distance, markers, mask=imageSe)
    simplefilter("default", FutureWarning)
    
    return labels, labels_ws, local_maxi
Example #4
    def test_return_num(self):
        x = np.array([[1, 0, 6],
                      [0, 0, 6],
                      [5, 5, 5]])

        assert_array_equal(label(x, return_num=True)[1], 4)
        assert_array_equal(label(x, background=0, return_num=True)[1], 3)
    def clear_body(self, body, minimum_object_size_px=2400):
        """ Vycisti obraz od stolu a ostatnich veci okolo tela a zanecha pouze a jen telo """
        body = scipy.ndimage.filters.gaussian_filter(copy.copy(body).astype(float), sigma=[15, 0, 0]) > 0.7

        # the following lines are here to suppress the warning "Only one label was provided to `remove_small_objects`."
        blabeled = morphology.label(body)
        if np.max(blabeled) > 1:
            body = morphology.remove_small_objects(morphology.label(blabeled), minimum_object_size_px)
        del blabeled
        
        body[0] = False
        body[-1] = False
        
        body = scipy.ndimage.filters.gaussian_filter(copy.copy(body.astype(float)), sigma=[1, 3, 3]) > 0.2
    
        bodylabel = label(body)
    
        n_of_pixels = [np.count_nonzero(bodylabel==i) for i in range(len(np.unique(bodylabel)))]
        labelsort = np.argsort(n_of_pixels)[::-1]
    
        newbody = np.zeros(body.shape)
        newbody[bodylabel==labelsort[0]] = body[(bodylabel==labelsort[0])]
        newbody[bodylabel==labelsort[1]] = body[(bodylabel==labelsort[1])]
    
        return newbody.astype(bool)
Example #6
 def test_4_vs_8(self):
     x = np.array([[0, 1],
                   [1, 0]], dtype=int)
     assert_array_equal(label(x, 4),
                        [[0, 1],
                         [2, 3]])
     assert_array_equal(label(x, 8),
                        [[0, 1],
                         [1, 0]])
def analyseClusters(binary, newlabels):
    """
    Calculates the sizes and porosities of the clusters.
    """
    
    # dilate particles to find cluster
    dilated = ndimage.binary_dilation(binary, iterations=_DILATIONFACTOR_TO_FIND_CLUSTER)
    labels, num = label(dilated, background=0, return_num=True)
    pxArea = (_CONVERSIONFACTOR_FOR_PIXEL) ** 2
    outputImage = labels.copy()
    clusterAreas = np.zeros(num)
    porosities = np.zeros(num)
    circumference = np.zeros(num)
    fcirc = np.zeros(num)
    particlesPerCluster = np.zeros(num)
    illegalIndex = []
    
    for i in range(num):
        cluster = labels == i
        cluster = ndimage.binary_fill_holes(cluster)
        helper = np.zeros_like(newlabels)
        helper[cluster] = newlabels[cluster]
        newLabel, particleNum = label(helper, background=0, return_num=True)
        particlesPerCluster[i] = particleNum
        particleArea = float(np.sum(binary[cluster].astype(np.int)))
        
        # cluster area and porosity
        outputImage[cluster] = i
        helper = ndimage.binary_erosion(cluster, iterations=_DILATIONFACTOR_TO_FIND_CLUSTER-3, border_value=1)        
        helper = ndimage.binary_erosion(helper, iterations=3, border_value=0)
        fl = float(np.sum(helper[cluster].astype(np.int)))
        clusterAreas[i] = fl * pxArea
        porosity = (fl - particleArea)/ fl
        porosity = porosity if porosity >= 0 else 0.0  # porosity can not be less than 0
        porosities[i] = porosity
        
        # circumference
        new = np.zeros((helper.shape[0],helper.shape[1],3), dtype=np.uint8)
        new[:,:,1] = helper
        gray = cv2.cvtColor(new, cv2.COLOR_RGB2GRAY)
        gray[gray > 0] = 255
        blur = cv2.GaussianBlur(gray,(5,5),0)
        gray = cv2.Canny(blur, 10, 200)
        contours, hierarchy = cv2.findContours(gray, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        arclength = 0
        for con in contours:
            arclength += cv2.arcLength(con,True)
        circumference[i] = arclength * _CONVERSIONFACTOR_FOR_PIXEL
        fcirc[i] = (4. * np.pi * fl) / arclength**2
        
        if fcirc[i] > 1.0:  # fcirc can not be greater than 1
            illegalIndex.append(i)
    
    fcirc = np.delete(fcirc, illegalIndex)
    clusterData = {'areas':clusterAreas,'circ':circumference,'ppc':particlesPerCluster,'fcirc':fcirc,'porosities':porosities}
    return outputImage, clusterData, num
Example #8
 def test_4_vs_8(self):
     x = np.array([[0, 1],
                   [1, 0]], dtype=int)
     with catch_warnings():
         assert_array_equal(label(x, 4),
                            [[0, 1],
                             [2, 3]])
         assert_array_equal(label(x, 8),
                            [[0, 1],
                             [1, 0]])
def label_img(img):
    # Labelling the nests is done using connected components
    img = create_bin(img)

    labeled_img = label(input=img, connectivity=2, background=0)
    # min size of holes in a nest
    rem_holes = remove_small_holes(labeled_img, min_size=100, connectivity=2)
    #min size of a nest
    labeled_img1 = remove_small_objects(rem_holes, min_size=70, connectivity=2)
    labeled = label(labeled_img1, connectivity=2, background=0)

    print(labeled)
    return labeled
Example #10
    def test_background(self):
        x = np.array([[1, 0, 0],
                      [1, 1, 5],
                      [0, 0, 0]])

        assert_array_equal(label(x), [[0, 1, 1],
                                      [0, 0, 2],
                                      [3, 3, 3]])

        assert_array_equal(label(x, background=0),
                           [[0, -1, -1],
                            [0,  0,  1],
                            [-1, -1, -1]])
    def run(self, workspace):
        cell_object = workspace.object_set.get_objects(self.primary_objects.value)
        cell_labeled = cell_object.get_segmented()
        cell_image = cell_object.get_parent_image()

        cell_image = (cell_image * 1000).astype(np.uint16)
        #        object_count = cell_labeled.max()
        maxi = skr.maximum(cell_image.astype(np.uint8), skm.disk(10))
        local_max = maxi - cell_image < 10

        local_max_labelize, object_count = scipy.ndimage.label(local_max, np.ones((3, 3), bool))
        histo_local_max, not_use = np.histogram(local_max_labelize, range(object_count + 2))
        old = local_max_labelize.copy()

        # filter in intensity mean
        # =======================================================================
        #
        # regionprops_result = skmes.regionprops(local_max_labelize, intensity_image=cell_image)
        #
        # for region in regionprops_result:
        #     if region["mean_intensity"]
        # =======================================================================

        # filter on size
        for i in range(object_count + 1):
            value = histo_local_max[i]
            if value > self.range_size.max or value < self.range_size.min:
                local_max_labelize[local_max_labelize == i] = 0

        # split granule for each cell
        cell_labeled = skm.label(cell_labeled)
        cell_count = np.max(cell_labeled)

        for cell_object_value in range(1, cell_count):
            cell_object_mask = cell_labeled == cell_object_value
            granule_in_cell = np.logical_and(cell_object_mask, local_max_labelize)
            granule_in_cell = skm.label(granule_in_cell)
            # ===================================================================
            # plt.imshow(granule_in_cell + cell_object_mask)
            # plt.show()
            # ===================================================================
        #
        # get the filename
        #
        measurements = workspace.measurements
        file_name_feature = self.source_file_name_feature
        filename = measurements.get_current_measurement("Image", file_name_feature)
        print "filename = ", filename
Example #12
def process_img (timestamp,img, point):
    img_gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
    thr = cv2.adaptiveThreshold(img_gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,\
            cv2.THRESH_BINARY,11,2)
    
    thr = 255 - thr
    kernel = np.ones((1,7),np.uint8)
    dilated_img = cv2.dilate(thr, kernel,iterations = 1)
    L = morphology.label(dilated_img)
    #cv2.circle(img,point,5,(255,0,255),3)
    #cv2.imshow('img',img)
    
    cnt = 1
    for region in regionprops(L):
        minr, minc, maxr, maxc = region.bbox
        
        if maxr - minr > 200:
            continue

        if point[1]>=minr and point[1]<=maxr and point[0]>=minc and point[0]<=maxc:
            subimg = img[minr:maxr,minc:maxc]
            
            imgName = 'sub/' + timestamp + '_' + str(cnt) + '.png'
            cnt = cnt + 1
            cv2.imwrite(imgName,subimg)
Example #13
 def findAllRegions(self):
    cleared = self.filtered.copy()
    #clear_border(cleared)
    label_image = label(cleared)
    #borders = numpy.logical_xor(self.filtered, cleared)
    #label_image[borders] = 0
    return regionprops(label_image, ['Area', 'BoundingBox', 'Centroid'])
Example #14
def scikit_example_plot_label():
    image = data.coins()[50:-50, 50:-50]
    
    # apply threshold
    thresh = threshold_otsu(image)
    bw = closing(image > thresh, square(3))
    
    # remove artifacts connected to image border
    cleared = bw.copy()
    clear_border(cleared)
    
    # label image regions
    label_image = label(cleared)
    borders = np.logical_xor(bw, cleared)
    label_image[borders] = -1
    
    fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
    ax.imshow(label_image, cmap='jet')
    
    for region in regionprops(label_image, ['Area', 'BoundingBox']):
    
        # skip small images
        if region['Area'] < 100:
            continue
    
        # draw rectangle around segmented coins
        minr, minc, maxr, maxc = region['BoundingBox']
        rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                  fill=False, edgecolor='red', linewidth=2)
        ax.add_patch(rect)
    
    plt.show()
def large_contiguous_regions(thresholded_image, min_area):
	"""
	NOTE: THIS FUNCTION IS SPECIALLY WRITTEN FOR TRACHEAL SECTIONS.

	This function returns the contiguous regions of a thresholded image whose area
	exceeds min_area (given as a fraction of the image area), based on its labels.
	"""

	labelled_image = label(thresholded_image)

	regions = np.zeros((np.shape(thresholded_image)[0], np.shape(thresholded_image)[1]))
	maxarea = 0

	for region in np.unique(labelled_image):
		image_area = np.shape(labelled_image)[0] ** 2
		
		region_matrix = labelled_image == region
		region_in_thresholded = np.logical_and(region_matrix, thresholded_image)
		region_in_thresholded_area = np.sum(region_in_thresholded)

		if region_in_thresholded_area > image_area * min_area:

			regions = regions + region_in_thresholded

	return regions		
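
A minimal sketch of calling large_contiguous_regions above (it assumes numpy as np and skimage's label are imported in the surrounding module, as the body implies; the toy mask is illustrative):

toy = np.zeros((100, 100), dtype=bool)
toy[10:40, 10:40] = True   # roughly 9% of the image area
toy[80:83, 80:83] = True   # well below min_area
kept = large_contiguous_regions(toy, min_area=0.05)  # keeps only the large block
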
Example #16
    def separate_segments(self):
        """
        Perform image segmentation on the "segment image", and remove any
        segments that aren't the right part of the track.
        """

        # binary image
        binary_segment_image = (
            self.end_segment_image > self.options.low_threshold_kev)
        # segmentation: labeled regions, 8-connectivity
        labels = morph.label(binary_segment_image, connectivity=2)
        x1 = self.end_coordinates[0] - self.end_segment_offsets[0]
        y1 = self.end_coordinates[1] - self.end_segment_offsets[1]
        x2 = self.start_coordinates[0] - self.end_segment_offsets[0]
        y2 = self.start_coordinates[1] - self.end_segment_offsets[1]
        chosen_label = labels[x1, y1]
        if labels[x2, y2] != chosen_label:
            # this happens with 4-connectivity. need to use 8-connectivity
            raise RuntimeError('What the heck happened?')
        binary_again = (labels == chosen_label)
        # dilate this region, in order to capture information below threshold
        #  (it won't include the other regions, because there must be a gap
        #   between)
        pix_to_keep = morph.binary_dilation(binary_again)
        self.end_segment_image[np.logical_not(pix_to_keep)] = 0
def single_out_annotation(base_image, small_cc_image):
    """ extracting individual annotations :
    starting from potential annotation + noise, we remove the noise and 
     consolidate annotation area, then return the coordinates of center of 
     potential annotations""" 
     
    #  remove small stuff
    filtered_small_cc, removed_small_cc_small = remove_small_ccomponents(small_cc_image, size_closing=5, hist_thres=120)
    #plot_image(removed_small_cc_small)
    
    # dilate 
    from skimage.morphology import binary_dilation, disk
    dilation_radius = 10
    small_cc_cleaned_mask = binary_dilation(filtered_small_cc, disk(dilation_radius))
    #plot_image(small_cc_cleaned_mask) 
    
    # label connected components
    from skimage.morphology import label
    from skimage.measure import regionprops
    from skimage.io import imsave
    markers, n_label = label(small_cc_cleaned_mask, connectivity=1, background=0, return_num=True)
    
    #for each cc, defines a region    
    region_prop = regionprops(markers, (base_image*255).astype(np.uint8))
    
    #for each region, do something
    
    base_path = '/media/sf_RemiCura/PROJETS/belleepoque/extract_data_from_old_paris_map/jacoubet/results/annotations/'
    for region in region_prop: 
        #print(region.bbox, region.area)
        imsave(base_path+str(region.bbox)+'.png', region.intensity_image) 
        
    return region_prop
Example #18
def roofRegion(image):
    """Estimate roof regions from a grayscale image via thresholding and labeling.
    """
    # apply threshold
    thresh = threshold_otsu(image)
    bw = closing(image > thresh, square(3))

    # remove artifacts connected to image border
    cleared = bw.copy()
    clear_border(cleared)

    # label image regions
    label_image = label(cleared)
    borders = np.logical_xor(bw, cleared)
    label_image[borders] = -1
    image_label_overlay = label2rgb(label_image, image=image)

    fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
    ax.imshow(image_label_overlay)

    for region in regionprops(label_image):

        # skip small images
        if region.area < 100:
            continue

        # draw rectangle around segmented coins
        minr, minc, maxr, maxc = region.bbox
        rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                  fill=False, edgecolor='red', linewidth=2)
        ax.add_patch(rect)

    plt.show()
def get_cells(image):
    '''
    Get cells from the polygon.
    '''
    new_image=np.ones([3,image.shape[0],image.shape[1]],dtype=float)
    # apply threshold
    thresh = threshold_otsu(image)
    bw=image

    # remove artifacts connected to image border
    cleared = bw.copy()
    clear_border(cleared)

    # label image regions
    label_image = label(cleared)
    #skimage.measure.label
    #find_contours
    borders = np.logical_xor(bw, cleared)
    label_image[borders] = -1
    image_label_overlay = label2rgb(label_image, image=image)

    #extract the regions and get a polygon per region
    polygons=[]
    for i,region in enumerate(regionprops(label_image)):
        # skip small images
        if region.area < 100:
            continue
        #polygons.append(matplotlib.path.Path(region.coords))
        print (region.coords.shape)
        a=np.zeros(region.coords.shape)
        a[:,0]=region.coords[:,1]
        a[:,1]=region.coords[:,0]
        polygons.append(a)   
    return polygons
Example #20
File: data.py  Project: irbdavid/celsius
def remove_none_edge_intersecting(img, edge=0, width=1):

    mask = np.zeros(img.shape,dtype=int)
    out = np.zeros(img.shape,dtype=int)
    # print '--->', img.sum()

    if edge == 0:
        mask[:,0:0+width] = 1
    elif edge == 1:
        mask[:,-1-width:-1] = 1
    elif edge == 2:
        mask[0:width,:] = 1
    elif edge == 3:
        mask[-1-width:-1,:] = 1
    else:
        raise ValueError('Edge is duff')

    s = label(img.astype(int))
    s_set = np.unique(s * mask)
    if s_set.sum() > 0:
        for v in s_set:
            q = (s == v)
            if np.all(img[q]):
                out[s == v] = 1

    return out
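
A minimal sketch exercising remove_none_edge_intersecting above (it assumes numpy as np and a label function such as skimage.measure.label are in scope in data.py; the toy array is illustrative):

img = np.zeros((6, 6), dtype=int)
img[0, 0:3] = 1    # component touching the left edge (edge=0)
img[4, 4] = 1      # interior component, away from that edge
kept = remove_none_edge_intersecting(img, edge=0)
# kept marks only the component that intersects the chosen edge
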
def single_out_annotation(base_image, small_cc_image):
    """ extracting individual annotations :
    starting from potential annotation + noise, we remove the noise and
     consolidate annotation area, then return the coordinates of center of
     potential annotations"""
    import numpy as np

    # remove small stuff
    filtered_small_cc, removed_small_cc_small = remove_small_ccomponents(
        small_cc_image, size_closing=5, hist_thres=120)
    # plot_image(removed_small_cc_small)

    # dilate
    from skimage.morphology import binary_dilation, disk
    dilation_radius = 10
    small_cc_cleaned_mask = binary_dilation(filtered_small_cc, disk(dilation_radius))
    # plot_image(small_cc_cleaned_mask)

    # label connected components
    from skimage.morphology import label
    from skimage.measure import regionprops

    markers, n_label = label(small_cc_cleaned_mask, connectivity=1, background=0, return_num=True)

    # for each cc, defines a region
    image_for_region = (base_image*255).astype(np.uint8)
    region_prop = regionprops(markers, image_for_region)

    # for each region, do something

    return region_prop
Example #22
def getRegions():
    """Geocode address and retreive image centered
    around lat/long"""
    address = request.args.get('address')
    results = Geocoder.geocode(address)
    lat, lng = results[0].coordinates
    zip_code = results[0].postal_code

    map_url = 'https://maps.googleapis.com/maps/api/staticmap?center={0},{1}&size=640x640&zoom=19&sensor=false&maptype=roadmap&&style=visibility:simplified|gamma:0.1'
    request_url = map_url.format(lat, lng)
    req = urllib.urlopen(request_url)
    img = io.imread(req.geturl(),flatten=True)
    labels, numobjects = ndimage.label(img)
    image = filter.canny(img, sigma=3)
    thresh = threshold_otsu(image)
    bw = closing(image > thresh, square(3))

    # remove artifacts connected to image border
    cleared = bw.copy()
    clear_border(cleared)

    # label image regions
    label_image = label(cleared)
    borders = np.logical_xor(bw, cleared)
    label_image[borders] = -1
    image_label_overlay = label2rgb(label_image, image=image)

    fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
    ax.imshow(image_label_overlay)
Example #23
 def FilterChangeByArea(self,area):
     size = np.round(area / (0.4**2))  # divide by the square of the image resolution
     
     
     
     BinArray = gfh.ReadData(self.chng)
     
     BigChng = np.zeros(np.shape(BinArray))
     
     LabelArray = morphology.label(np.squeeze(BinArray))        
     
     N_chng = np.max(LabelArray)
     if N_chng > 0:
         k = 1
         for i in range(N_chng):
             indx,indy = np.where(LabelArray == i)    
             #larger than x m² and smaller than half the image
             if (len(indx) >= size) & (len(indx) < (self.Ndata/2)): 
                 BigChng[indx,indy] = k
                 k += 1     
         
         print(str(k))
         ROI = gfh.ReadData(self.ROI)
         xROI,yROI,_ = np.where(ROI == 0)
         BigChng[xROI,yROI] = -9999
         
         gfh.WriteToFile(self.chng,BigChng)
         
         gfh.TIFF2PNG(self.chng,self.png_path + 'chng_AreaSelect' +
         self.png_fn_tag,'Fit')
     else:
         print('no changes found, check threshold values!')
     return 0
Example #24
    def test_random(self):
        x = (np.random.random((20, 30)) * 5).astype(np.int)

        labels = label(x)
        n = labels.max()
        for i in range(n):
            values = x[labels == i]
            assert np.all(values == values[0])
Example #25
 def treshold(self, value):
     value = 2 * value
     self.image_tresholded = (self.image_array > value)
     self.image_labeled = label(self.image_tresholded)
     self.image_labl.setImage(self.image_labeled)
     self.image_orig.setImage(np.ma.masked_array(self.image_tresholded,
                              mask=(self.image_array > value)))
     self.text.setText('value {}'.format(value))
     self.tresh = value
Example #26
    def test_background_two_regions(self):
        x = np.array([[0, 0, 6],
                      [0, 0, 6],
                      [5, 5, 5]])

        assert_array_equal(label(x, background=0),
                           [[-1, -1, 0],
                            [-1, -1, 0],
                            [ 1,  1, 1]])
Example #27
def ProcessImage(im, targetDim = 250, doDenoiseOpening = True):

	#Resize to specified pixels max edge size
	scaling = 1.
	if im.shape[0] > im.shape[1]:
		if im.shape[0] != targetDim:
			scaling = float(targetDim) / im.shape[0]
			im = misc.imresize(im, (targetDim, int(round(im.shape[1] * scaling))))
	else:
		if im.shape[1] != targetDim:
			scaling = float(targetDim) / im.shape[1]
			im = misc.imresize(im, (int(round(im.shape[0] * scaling)), targetDim))
	#print "scaling", scaling

	greyim = 0.2126 * im[:,:,0] + 0.7152 * im[:,:,1] + 0.0722 * im[:,:,2]

	#Highlight number plate
	imnorm = np.array(greyim, dtype=np.uint8)
	se = np.ones((3, 30), dtype=np.uint8)
	opim = morph.opening(imnorm, se)
	diff = greyim - opim + 128.

	misc.imsave("diff.png", diff)

	#Binarize image
	vals = diff.copy()
	vals = vals.reshape((vals.size))

	meanVal = vals.mean()
	stdVal = vals.std()
	threshold = meanVal + stdVal

	#print "Threshold", threshold

	binIm = diff > threshold
	misc.imsave("threshold.png", binIm)
	#print vals.shape
	#plt.plot(vals)
	#plt.show()

	#Denoise
	diamond = morph.diamond(2)
	if doDenoiseOpening:
		currentIm = morph.binary_opening(binIm, diamond)
	else:
		currentIm = binIm
	denoiseIm2 = morph.binary_closing(currentIm, np.ones((3, 13)))

	#print "currentIm", currentIm.min(), currentIm.max(), currentIm.mean()
	#print "denoiseIm2", denoiseIm2.min(), denoiseIm2.max(), currentIm.mean()
	#misc.imsave("denoised1.png", currentIm * 255)
	#misc.imsave("denoised2.png", denoiseIm2 * 255)

	#Number candidate regions
	#print "Numbering regions"
	numberedRegions, maxRegionNum = morph.label(denoiseIm2, 4, 0, return_num = True)
	return numberedRegions, scaling
Example #28
def getArea(address):
    """Geocode address and retreive image centered
    around lat/long"""
    address = address
    results = Geocoder.geocode(address)
    lat, lng = results[0].coordinates
    zip_code = results[0].postal_code

    map_url = 'https://maps.googleapis.com/maps/api/staticmap?center={0},{1}&size=640x640&zoom=19&sensor=false&maptype=roadmap&&style=visibility:simplified|gamma:0.1'
    request_url = map_url.format(lat, lng)
    req = urllib.urlopen(request_url)
    img = io.imread(req.geturl(),flatten=True)
    labels, numobjects = ndimage.label(img)
    image = filter.canny(img, sigma=3)
    thresh = threshold_otsu(image)
    bw = closing(image > thresh, square(3))

    # remove artifacts connected to image border
    cleared = bw.copy()
    clear_border(cleared)

    # label image regions
    label_image = label(cleared)
    borders = np.logical_xor(bw, cleared)
    label_image[borders] = -1
    image_label_overlay = label2rgb(label_image, image=image)

    fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
    ax.imshow(image_label_overlay)
    dist = []
    rp = regionprops(label_image)
    rp = [x for x in rp if 100 < x.area <= 900]

    for region in rp:

        # skip small images
        #if region.area < 100:
        #    continue
        dist.append(sqrt( ( 320-region.centroid[0] )**2 + ( 320-region.centroid[1] )**2 ))
        # draw rectangle around segmented coins
        #minr, minc, maxr, maxc = region.bbox
        #rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
        #                      fill=False, edgecolor='red', linewidth=2)
        #ax.add_patch(rect)

    roof_index = dist.index(min(dist))
    minr, minc, maxr, maxc = rp[roof_index].bbox
    rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                          fill=False, edgecolor='red', linewidth=2)
    ax.add_patch(rect)

    img = StringIO()
    fig.savefig(img)
    img.seek(0)
    session['roof_area'] = rp[roof_index].area
    roof_area = (rp[roof_index].area)*12
    return(roof_area)
def mixing_region(filename):
    white = io.imread(filename)
    val = filter.threshold_otsu(white)
    light_mask = white > val
    regions = morphology.label(light_mask)
    index_large_region = np.argmax(np.bincount(regions.ravel()))
    fluid_mask = regions == index_large_region
    fluid_mask = morphology.binary_erosion(fluid_mask, selem=np.ones((3, 3)))
    return fluid_mask
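
A minimal call sketch for mixing_region above (the filename is hypothetical; io, filter and morphology are assumed to be the skimage modules imported elsewhere in the source file):

fluid_mask = mixing_region('white_light_frame.png')
# boolean mask of the largest bright region, slightly eroded
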
Example #30
    def test_background_one_region_center(self):
        x = np.array([[0, 0, 0],
                      [0, 1, 0],
                      [0, 0, 0]])

        assert_array_equal(label(x, neighbors=4, background=0),
                           [[-1, -1, -1],
                            [-1,  0, -1],
                            [-1, -1, -1]])
Example #31
def quick_features(img,save_to_disk=False,abs_path='',file_prefix='',cfg = []):

    # Pull out some settings from cfg if available
    if cfg:
        min_obj_area = cfg.get('MinObjectArea',100)
        objs_per_roi = cfg.get('ObjectsPerROI',1)
        deconv = cfg.get("Deconvolve").lower() == 'true'
        edge_thresh = cfg.get('EdgeThreshold',2.5)
        use_jpeg = cfg.get("UseJpeg").lower() == 'true'
        raw_color = cfg.get("SaveRawColor").lower() == 'true'
    else:
        min_obj_area = 100
        objs_per_roi = 1
        deconv = False
        use_jpeg = False
        raw_color = True
        edge_thresh = 2.5

    # Define an empty dictionary to hold all features
    features = {}
    
    features['rawcolor'] = np.copy(img)
    # compute features from gray image
    gray = np.uint8(np.mean(img,2))

    # threshold-based segmentation
    #med_val = np.median(gray)
    #std_val = np.std(gray)
    #thresh1 = threshold_otsu(gray)
    #thresh3 = med_val + 1.6*std_val
    #binary = (gray >= thresh1) | (gray >= thresh3)
    #bw_img1 = morphology.closing(binary,morphology.square(3))

    # edge-based segmentation
    edges_mag = scharr(gray)
    edges_med = np.median(edges_mag)
    edges_thresh = edge_thresh*edges_med
    edges = edges_mag >= edges_thresh
    edges = morphology.closing(edges,morphology.square(3))
    filled_edges = ndimage.binary_fill_holes(edges)
    edges = morphology.erosion(filled_edges,morphology.square(3))
    #edges = morphology.erosion(edges,morphology.square(3))

    # combine threshold and edge based segmentations
    bw_img2 = edges
    #bw_img = np.pad(bw_img2,1, 'constant')
    bw_img = bw_img2

    # Compute morphological descriptors
    label_img = morphology.label(bw_img,neighbors=8,background=0)
    props = measure.regionprops(label_img,gray)
    
    # clear bw_img
    bw_img = 0*bw_img
    
    props = sorted(props, reverse=True, key=lambda k: k.area) 

    if len(props) > 0:

        # Init mask with the largest area object in the roi
        bw_img = (label_img)== props[0].label
        
        base_area = props[0].area
    
        # use only the features from the object with the largest area
        max_area = 0
        max_area_ind = 0
        avg_area = 0.0
        avg_maj = 0.0
        avg_min = 0.0
        avg_or = 0.0
        avg_count = 0
        
        if len(props) > objs_per_roi:
            n_objs = objs_per_roi
        else:
            n_objs = len(props)
        
        for f in range(0,n_objs):
        
            if props[f].area > min_obj_area:
                bw_img = bw_img + ((label_img)== props[f].label)
                avg_count = avg_count + 1
            
            if f >= objs_per_roi:
                break
        
        # Take the largest object area as the roi area
        # no average
        avg_area = props[0].area
        avg_maj = props[0].major_axis_length
        avg_min = props[0].minor_axis_length
        avg_or = props[0].orientation
        
        
        # Check for clipped image
        if np.max(bw_img) == 0:
            bw = bw_img
        else:
            bw = bw_img/np.max(bw_img)
            
        clip_frac = float(np.sum(bw[:,1]) +
                np.sum(bw[:,-2]) +
                np.sum(bw[1,:]) +
                np.sum(bw[-2,:]))/(2*bw.shape[0]+2*bw.shape[1])
        features['clipped_fraction'] = clip_frac

        # Save simple features of the object
        features['area'] = avg_area
        features['minor_axis_length'] = avg_min
        features['major_axis_length'] = avg_maj
        if avg_maj == 0:
            features['aspect_ratio'] = 1
        else:
            features['aspect_ratio'] = avg_min/avg_maj
        features['orientation'] = avg_or
        
        # print "Foreground Objects: " + str(avg_count)

    else:

        features['clipped_fraction'] = 0.0

        # Save simple features of the object
        features['area'] = 0.0
        features['minor_axis_length'] = 0.0
        features['major_axis_length'] = 0.0
        features['aspect_ratio'] = 1
        features['orientation'] = 0.0
    
    # Masked background with Gaussian smoothing, image sharpening, and
    # reduction of chromatic aberration

    # mask the raw image with smoothed foreground mask
    blurd_bw_img = gaussian(bw_img,3)
    img[:,:,0] = img[:,:,0]*blurd_bw_img
    img[:,:,1] = img[:,:,1]*blurd_bw_img
    img[:,:,2] = img[:,:,2]*blurd_bw_img

    # Make a guess of the PSF for sharpening
    psf = make_gaussian(5, 3, center=None)

    # sharpen each color channel and then recombine
    
    
    if np.max(img) == 0:
        img = np.float32(img)
    else:
        img = np.float32(img)/np.max(img)
    
    if deconv:
            
        img[img == 0] = 0.0001
        img[:,:,0] = restoration.richardson_lucy(img[:,:,0], psf, 7)
        img[:,:,1] = restoration.richardson_lucy(img[:,:,1], psf, 7)
        img[:,:,2] = restoration.richardson_lucy(img[:,:,2], psf, 7)

    # Estimate color channel shifts and try to align.
    # this works for most images but some still retain an offset.
    # need to figure out why...
    # r_shift, r_error, r_diffphase = register_translation(img[:,:,1], img[:,:,2],1)
    # b_shift, b_error, b_diffphase = register_translation(img[:,:,1], img[:,:,0],1)

    # # this swap of values is needed for some reason
    # if r_shift[0] < 0 and r_shift[1] < 0:
       # r_shift = -r_shift

    # if b_shift[0] < 0 and b_shift[1] < 0:
       # b_shift = -b_shift

    # r_tform = transform.SimilarityTransform(scale=1,rotation=0,translation=r_shift)
    # img[:,:,2] = transform.warp(img[:,:,2],r_tform)

    # b_tform = transform.SimilarityTransform(scale=1,rotation=0,translation=b_shift)
    # img[:,:,0] = transform.warp(img[:,:,0],b_tform)

    # Rescale image to uint8 0-255
    img[img < 0] = 0
    
    if np.max(img) == 0:
        img = np.uint8(255*img)
    else:
        img = np.uint8(255*img/np.max(img))

    features['image'] = img
    features['binary'] = 255*bw_img
        
    # Save the binary image and also color image if requested
    if save_to_disk:

        #try:

        # convert and save images

        # Raw color (no background removal)
        if use_jpeg:
            if raw_color:
                cv2.imwrite(os.path.join(abs_path,file_prefix+"_rawcolor.jpeg"),features['rawcolor'])
            # Save the processed image and binary mask
            cv2.imwrite(os.path.join(abs_path,file_prefix+".jpeg"),features['image'])
        else:
            if raw_color:
                cv2.imwrite(os.path.join(abs_path,file_prefix+"_rawcolor.png"),features['rawcolor'])
            # Save the processed image and binary mask
            cv2.imwrite(os.path.join(abs_path,file_prefix+".png"),features['image'])
        
        # Binary should also be saved png
        cv2.imwrite(os.path.join(abs_path,file_prefix+"_binary.png"),features['binary'])


    return features
Example #32
    image[ii, jj] = 0

binary_image = image > local_otsu

print "Distance Transform"
distance = ndimage.distance_transform_edt(binary_image)

print "Extract local maxima"
local_maxi = peak_local_max(distance,
                            indices=False,
                            footprint=np.ones((55, 55)),
                            threshold_abs=10,
                            labels=binary_image)

print "Markers for WS"
markers = morphology.label(local_maxi)

print "Watershed"
start_ws = time.clock()
labels_ws = watershed(-distance, markers, mask=binary_image)
end_ws = time.clock()
time_ws = end_ws - start_ws

imsave("labelsws.png", labels_ws)

print "Threshold to extract circles"
rest, ground_truth = cv2.threshold(ground_truth, 7, 255, cv2.THRESH_BINARY)

print "HoughCircles"
circles = cv2.HoughCircles(ground_truth,
                           cv.CV_HOUGH_GRADIENT,
Example #33
def segment_image(pic_array, gaussian, blurred, equalized, cwhite):
    ###Blur image with the specified gaussian kernel
    if (cwhite):
        mean = np.mean(pic_array)
        pic_array[pic_array > mean] = pic_array[pic_array > mean] - mean
    if (blurred):
        pic_convolved = scipy.signal.fftconvolve(pic_array,
                                                 gaussian[0],
                                                 mode='same')
    else:
        pic_convolved = pic_array
    if (equalized):
        pic_convolved = exposure.equalize_hist(pic_convolved)

    ###Segment middle layer
    mean = np.mean(pic_convolved) + np.std(pic_convolved)
    pic_layer1 = mp.dilation(pic_convolved > mean, mp.square(4))

    ###Segment top layer
    mean = np.mean(pic_convolved[pic_layer1 == 0]) + np.std(
        pic_convolved[pic_layer1 == 0]) / 2
    pic_layer2 = np.invert(mp.erosion(pic_convolved < mean, mp.square(4)))

    ###Merge layers
    final_picture = pic_layer1 * 2 + pic_layer2

    ###Split layers into connected components
    conn_comps_layer0 = mp.label(final_picture < 1, connectivity=1)
    conn_comps_layer1 = mp.label(final_picture == 1, connectivity=1)
    conn_comps_layer2 = mp.label(final_picture > 1, connectivity=1)

    ###Create labelled output image (print preview)
    layered_output_image = np.zeros(conn_comps_layer0.shape)

    ###Split segments into separate entities for future slicing
    bottom_imgs = []
    middle_imgs = []
    top_imgs = []

    threshold = -1  #conn_comps_layer0.shape[0]*conn_comps_layer0.shape[1]/64
    for i in range(0, np.max(conn_comps_layer0))[:1]:
        new_layer = (conn_comps_layer0 == i)
        if np.sum(new_layer) > threshold:
            #new_layer = np.invert(new_layer)
            bottom_imgs.append(new_layer)
            layered_output_image = layered_output_image + new_layer * (1 + i)

    #threshold = -1
    for i in range(0, np.max(conn_comps_layer1))[:1]:
        new_layer = (conn_comps_layer1 == i)
        if np.sum(new_layer) > threshold:
            #new_layer = np.invert(new_layer)
            middle_imgs.append(new_layer)
            layered_output_image = layered_output_image + new_layer * (11 + i)

    for i in range(0, np.max(conn_comps_layer2))[:1]:
        new_layer = (conn_comps_layer2 == i)
        if np.sum(new_layer) > threshold:
            #new_layer = np.invert(new_layer)
            top_imgs.append(new_layer)
            layered_output_image = layered_output_image + new_layer * (21 + i)

    ###Return all values
    return final_picture, layered_output_image, bottom_imgs, middle_imgs, top_imgs
Example #34
        img_nuc_dilated)  # expand x, y, z
for n in range(20):  # ~ 1 um in x, y
    for z in range(img_nuc_dilated.shape[2]):
        img_nuc_dilated[:, :, z] = morphology.binary_dilation(
            img_nuc_dilated[:, :, z])  # expand x, y only

region_peri = np.where(
    np.logical_and(
        img_nuc_dilated > region_nuc,
        np.logical_or(img_fiber_only.astype(bool),
                      nuclei_binary.astype(bool))), 1, 0)
region_cyt = np.where(img_fiber_only > img_nuc_dilated, 1, 0)
regions = {'nuc': region_nuc, 'cyt': region_cyt, 'peri': region_peri}

region_nuc_peri = np.where(region_peri + region_nuc >= 1, 1, 0)
labeled_nuc_peri, n_nucperi = morphology.label(region_nuc_peri,
                                               return_num=True)

if should_plot:
    su.animate_zstacks(
        [
            region_nuc, region_peri, region_cyt,
            region_cyt + 2 * region_peri + 3 * region_nuc
        ],
        titles=['nuclear', 'perinuclear', 'sarcoplasmic', 'all compartments'],
        cmaps=['binary_r', 'binary_r', 'binary_r', 'gnuplot2'],
        gif_name=os.path.join(outdir, 'anim', img_name + '_regions.gif'))

#-- RNA DETECTION AND ANALYSIS  -----------------------------------------------#

# iterate over RNAs in image
for chan, gene in enumerate(genes):
Example #35
import math
import matplotlib.pyplot as plt
import numpy as np

from skimage.draw import ellipse
from skimage.morphology import label
from skimage.measure import regionprops
from skimage.transform import rotate

image = np.zeros((600, 600))

rr, cc = ellipse(300, 350, 100, 220)
image[rr, cc] = 1

image = rotate(image, angle=15, order=0)

label_img = label(image)
regions = regionprops(label_img)

fig, ax = plt.subplots()
ax.imshow(image, cmap=plt.cm.gray)

for props in regions:
    y0, x0 = props.centroid
    orientation = props.orientation
    x1 = x0 + math.cos(orientation) * 0.5 * props.major_axis_length
    y1 = y0 - math.sin(orientation) * 0.5 * props.major_axis_length
    x2 = x0 - math.sin(orientation) * 0.5 * props.minor_axis_length
    y2 = y0 - math.cos(orientation) * 0.5 * props.minor_axis_length

    ax.plot((x0, x1), (y0, y1), '-r', linewidth=2.5)
    ax.plot((x0, x2), (y0, y2), '-r', linewidth=2.5)
Example #36
        dice_scores_i.append(
            T.constant(2) * intersect[i] / (denominator[i] + T.constant(1e-6)))
    dice_scores = (T.constant(w[0])*dice_scores_i[0]+T.constant(w[1])*dice_scores_i[1]\
                  +T.constant(w[2])*dice_scores_i[2]+T.constant(w[3])*dice_scores_i[3])
    x = T.matrix('total')
    z = x / 8
    divide = theano.function([x], z)
    dice_scores = divide(dice_scores)

    return dice_scores


x = np.array([[[0, 0, 0], [0, 0, 0], [0, 0, 0]],
              [[1, 0, 1], [2, 0, 0], [3, 1, 0]]])
mask = np.array(x != 0).astype(int)
lbls = label(mask, 4)
lbls_sizes = [np.sum(lbls == i) for i in np.unique(lbls)]
largest_region = np.argmax(
    lbls_sizes[1:]) + 1  # from 1 because need excluding the background
x[lbls != largest_region] = 0
print(x)
print(np.argmax(x))
print(x)
print(label(x, 4))

# data = np.array([1,2,3])
# print(data)
# data = np.vstack([data]*3)
#
#
# w = np.zeros(4, dtype=np.float32)
Example #37
def topo_metric(gt, pred, thresh, n_paths):

    # 0, 1 and 2 mean, respectively, that path is infeasible, shorter/larger and correct
    result = []

    # binarize pred according to thresh
    pred_bw = (pred > thresh).astype(int)
    pred_cc = morphology.label(pred_bw)

    # get centerlines of gt and pred
    gt_cent = morphology.skeletonize(gt > 0.5)
    gt_cent_cc = morphology.label(gt_cent)
    pred_cent = morphology.skeletonize(pred_bw)
    pred_cent_cc = morphology.label(pred_cent)

    # costs matrices
    gt_cost = np.ones(gt_cent.shape)
    gt_cost[gt_cent == 0] = 10000
    pred_cost = np.ones(pred_cent.shape)
    pred_cost[pred_cent == 0] = 10000

    # build graph and find shortest paths
    for i in range(n_paths):

        # pick randomly a first point in the centerline
        R_gt_cent, C_gt_cent = np.where(gt_cent == 1)
        idx1 = randint(0, len(R_gt_cent) - 1)
        label = gt_cent_cc[R_gt_cent[idx1], C_gt_cent[idx1]]
        ptx1 = (R_gt_cent[idx1], C_gt_cent[idx1])

        # pick a second point that is connected to the first one
        R_gt_cent_label, C_gt_cent_label = np.where(gt_cent_cc == label)
        idx2 = randint(0, len(R_gt_cent_label) - 1)
        ptx2 = (R_gt_cent_label[idx2], C_gt_cent_label[idx2])

        # if points have different labels in pred image, no path is feasible
        if (pred_cc[ptx1] != pred_cc[ptx2]) or pred_cc[ptx1] == 0:
            result.append(0)

        else:
            # find corresponding centerline points in pred centerlines
            R_pred_cent, C_pred_cent = np.where(pred_cent == 1)
            poss_corr = np.zeros((len(R_pred_cent), 2))
            poss_corr[:, 0] = R_pred_cent
            poss_corr[:, 1] = C_pred_cent
            poss_corr = np.transpose(np.asarray([R_pred_cent, C_pred_cent]))
            dist2_ptx1 = np.sum((poss_corr - np.asarray(ptx1))**2, axis=1)
            dist2_ptx2 = np.sum((poss_corr - np.asarray(ptx2))**2, axis=1)
            corr1 = poss_corr[np.argmin(dist2_ptx1)]
            corr2 = poss_corr[np.argmin(dist2_ptx2)]

            # find shortest path in gt and pred

            gt_path, cost1 = graph.route_through_array(gt_cost, ptx1, ptx2)
            gt_path = np.asarray(gt_path)

            pred_path, cost2 = graph.route_through_array(
                pred_cost, corr1, corr2)
            pred_path = np.asarray(pred_path)

            # compare paths length
            path_gt_length = np.sum(
                np.sqrt(np.sum(np.diff(gt_path, axis=0)**2, axis=1)))
            path_pred_length = np.sum(
                np.sqrt(np.sum(np.diff(pred_path, axis=0)**2, axis=1)))
            if pred_path.shape[0] < 2:
                result.append(2)
            else:
                if ((path_gt_length / path_pred_length) < 0.9) or (
                    (path_gt_length / path_pred_length) > 1.1):
                    result.append(1)
                else:
                    result.append(2)

    return result.count(0), result.count(1), result.count(2)
Example #38
def partition_instances(raw_bodies, raw_markers=None, raw_edges=None):
    threshold=config['param'].getfloat('threshold')
    threshold_edge = config['param'].getfloat('threshold_edge')
    threshold_marker = config['param'].getfloat('threshold_mark')
    policy = config['post']['policy']
    min_object_size = config['post'].getint('min_object_size')

    # Random Walker fails for a 1-pixel seed, which is exactly on top of a 1-pixel semantic mask.
    # https://github.com/scikit-image/scikit-image/issues/1875
    # Workaround by eliminating 1-pixel semantic mask first.
    bodies = raw_bodies > threshold
    bodies = drop_small_blobs(bodies, 2) # bodies must be larger than 1-pixel
    markers = None if raw_markers is None else (raw_markers > threshold_marker)
    edges = None if raw_edges is None else (raw_edges > threshold_edge)

    if markers is not None and edges is not None:
        markers = (markers & ~edges) & bodies
        # remove artifacts caused by non-perfect (mask - contour)
        markers = drop_small_blobs(markers, min_object_size)
        markers = label(markers)
    elif markers is not None:
        markers = markers & bodies
        markers = label(markers)
    elif edges is not None:
        # to remedy error-dropped edges around the image border (1 or 2 pixels holes)
        box_bodies = bodies.copy()
        h, w = box_bodies.shape
        box_bodies[0:2, :] = box_bodies[h-2:, :] = box_bodies[:, 0:2] = box_bodies[:, w-2:] = 0
        markers = box_bodies & ~edges
        markers = drop_small_blobs(markers, min_object_size)
        markers = label(markers)
    else:
        threshold=config['param'].getfloat('threshold')
        size_scale=config['post'].getfloat('seg_scale')
        ratio=config['post'].getfloat('seg_ratio')
        size_index = mean_blob_size(bodies, ratio)
        """
        Add noise to fix min_distance bug:
        If multiple peaks in the specified region have identical intensities,
        the coordinates of all such pixels are returned.
        """
        noise = np.random.randn(bodies.shape[0], bodies.shape[1]) * 0.1
        distance = ndi.distance_transform_edt(bodies)+noise
        # 2*min_distance+1 is the minimum distance between two peaks.
        local_maxi = peak_local_max(distance, min_distance=(size_index*size_scale), exclude_border=False,
                                    indices=False, labels=bodies)
        markers = label(local_maxi)

    if policy == 'ws':
        seg_labels = watershed(-ndi.distance_transform_edt(bodies), markers, mask=bodies)
    elif policy == 'rw':
        markers[bodies == 0] = -1
        if np.sum(markers > 0) > 0:
            seg_labels = random_walker(bodies, markers)
        else:
            seg_labels = np.zeros_like(markers, dtype=np.int32)
        seg_labels[seg_labels <= 0] = 0
        markers[markers <= 0] = 0
    else:
        raise NotImplementedError("Policy not implemented")
    final_labels = add_missed_blobs(bodies, seg_labels, edges)
    return final_labels, markers
Example #39
    def step(self, gradient, step_size):
        mask_out_restrictions = gradient * self.restrictions

        self.w[0] = self.proposed_design_step(mask_out_restrictions, step_size)

        self.current_iteration += 1

        if (self.current_iteration %
                self.num_free_iterations_between_patches) > 0:
            # Update the variable stack including getting the permittivity at the w[-1] position
            self.update_permittivity()
            return

        #
        # How far do we have to move? Should we include the gradient information in there as well (i.e. - weight also by the gradient?)?
        #
        costs = self.topological_correction_value - self.w[0]

        #
        # For now, let's assume the density does not vary over each layer and that we can just patch up one sublayer in a layer
        # and use that solution for the whole layer
        #

        topological_patch_start = time.time()

        get_layer_idxs = self.layering_z_1.get_layer_idxs(self.w[0].shape)
        for layer in range(0, self.layering_z_1.num_layers):
            get_layer_idx = get_layer_idxs[layer]
            next_layer_idx = self.w[0].shape[2]

            if layer < (self.layering_z_1.num_layers - 1):
                next_layer_idx = get_layer_idxs[layer + 1]

            bridges(self.w[0][:, :, get_layer_idx],
                    self.restrictions[:, :, get_layer_idx],
                    costs[:, :,
                          get_layer_idx], self.topological_correction_value)

            for sublayer_idx in range(1 + get_layer_idx, next_layer_idx):
                self.w[0][:, :, sublayer_idx] = self.w[0][:, :, get_layer_idx]
                self.restrictions[:, :, sublayer_idx] = self.restrictions[:, :, get_layer_idx]

        topological_patch_elapsed = time.time() - topological_patch_start
        # Update the variable stack including getting the permittivity at the w[-1] position
        self.update_permittivity()

        # The substrate on the top changes this padding
        cur_fabrication_target = self.fabricate_mask()
        pad_cur_fabrication_target = np.pad(cur_fabrication_target,
                                            ((1, 1), (1, 1), (1, 1)),
                                            mode='constant')
        pad_cur_fabrication_target[:, :,
                                   pad_cur_fabrication_target.shape[2] - 1] = 1

        [solid_labels,
         num_solid_labels] = skim.label(pad_cur_fabrication_target,
                                        neighbors=4,
                                        return_num=True)
        [void_labels,
         num_void_labels] = skim.label(1 - pad_cur_fabrication_target,
                                       neighbors=8,
                                       return_num=True)
        print("Topology Information:")
        print("To patch all of the topology took " +
              str(topological_patch_elapsed) + " seconds")
        print("The current number of total solid components is " +
              str(num_solid_labels))
        print("The current number of total void components is " +
              str(num_void_labels))

        for layer in range(0, self.layering_z_1.num_layers):
            get_layer_idx = get_layer_idxs[layer]
            [solid_labels, num_solid_labels
             ] = skim.label(pad_cur_fabrication_target[:, :,
                                                       1 + get_layer_idx],
                            neighbors=4,
                            return_num=True)
            [void_labels, num_void_labels] = skim.label(
                1 - pad_cur_fabrication_target[:, :, 1 + get_layer_idx],
                neighbors=8,
                return_num=True)

            print("The current number of solid components on layer " +
                  str(layer) + " is " + str(num_solid_labels))
            print("The current number of void components on layer " +
                  str(layer) + " is " + str(num_void_labels))
        print("\n\n")

        self.restrictions[0:blur_half_width_voxels, :, :] = 0
        self.restrictions[:, 0:blur_half_width_voxels, :] = 0
        self.restrictions[(self.restrictions.shape[0] -
                           blur_half_width_voxels):(
                               self.restrictions.shape[0]), :, :] = 0
        self.restrictions[:, (self.restrictions.shape[1] -
                              blur_half_width_voxels):(
                                  self.restrictions.shape[1]), :] = 0
Example #40
def crop_cell(image, min_size=20000, max_size=90000, ar_thres=0.6):

    max_proj = np.max(image, axis=0)

    mask_image = max_proj > np.mean(max_proj) + 0.5 * np.std(max_proj)

    clean_mask = morphology.remove_small_objects(mask_image, min_size=min_size)

    label_mask = morphology.label(clean_mask)

    fig, ax = plt.subplots(figsize=(10, 10))
    ax.imshow(max_proj, cmap=plt.cm.gray)

    bounding_box = []
    num_1 = 0  # number of cells that are too close to the image borders
    num_2 = 0  # number of cells that are too elongated
    num_3 = 0  # number of cells that are too big
    num_4 = 0  # number of cells that are being analyzed

    for aCell in measure.regionprops(label_mask):

        min_row, min_col, max_row, max_col = aCell.bbox

        aspect_ratio = aCell.minor_axis_length / aCell.major_axis_length

        if min_row - 30 <= 0 or min_col - 30 <= 0 or max_row + 30 >= max_proj.shape[
                0] or max_col + 30 >= max_proj.shape[1]:
            num_1 = num_1 + 1
            rect_r = mpatches.Rectangle((min_col, min_row),
                                        max_col - min_col,
                                        max_row - min_row,
                                        fill=False,
                                        edgecolor='#989898',
                                        linewidth=3)
            ax.add_patch(rect_r)

        elif aspect_ratio < ar_thres:
            num_2 = num_2 + 1
            rect_r = mpatches.Rectangle((min_col, min_row),
                                        max_col - min_col,
                                        max_row - min_row,
                                        fill=False,
                                        edgecolor='#989898',
                                        linewidth=3)
            ax.add_patch(rect_r)

        else:
            if aCell.area > max_size:
                num_3 = num_3 + 1
                rect_r = mpatches.Rectangle((min_col, min_row),
                                            max_col - min_col,
                                            max_row - min_row,
                                            fill=False,
                                            edgecolor='#989898',
                                            linewidth=3)
                ax.add_patch(rect_r)

            else:
                num_4 = num_4 + 1
                cell_boundaries = [
                    min_row - 30, min_col - 30, max_row + 30, max_col + 30
                ]
                bounding_box.append(cell_boundaries)
                rect = mpatches.Rectangle((min_col, min_row),
                                          max_col - min_col,
                                          max_row - min_row,
                                          fill=False,
                                          edgecolor='green',
                                          linewidth=3)
                ax.add_patch(rect)
                ax.text(max_col - 5,
                        min_row + 5,
                        'Cell num ' + str(num_4),
                        horizontalalignment='right',
                        verticalalignment='top',
                        fontsize=12,
                        family='sans-serif',
                        color='green')
    ax.set_axis_off()
    plt.show()
    print('Summary: ')
    print(str(len(measure.regionprops(label_mask))) + ' cells are identified.')
    if num_1 > 0:
        print(str(num_1) + ' cells are too close to the boundary.')
    if num_2 > 0:
        print(str(num_2) + ' cells are too elongated.')
    if num_3 > 0:
        print(str(num_3) + ' segmented cells are too big.')
    print(str(num_4) + ' cells will be analyzed.')

    return (len(bounding_box), label_mask, bounding_box)
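
A minimal call sketch for crop_cell above (img_stack is assumed to be a 3-D (z, y, x) intensity array; the keyword values simply restate the defaults):

n_cells, label_mask, boxes = crop_cell(img_stack, min_size=20000, max_size=90000, ar_thres=0.6)
# each entry of boxes is [min_row-30, min_col-30, max_row+30, max_col+30] for an accepted cell
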
Example #41
'''
 @Author = 'steven'   @DateTime = '2019/2/23 14:56'
'''
import os
from natsort import natsorted
from glob import glob
from skimage import io as skio
import numpy as np
from skimage.morphology import label

from skimage.measure import regionprops
from skimage.color import label2rgb
suvp=r'E:\pyWorkspace\CAE\res\cp250\0\suv'
imgps=natsorted(glob(os.path.join(suvp,'*.tif')))
suvs=np.stack([skio.imread(_p)  for _p in imgps])
op=r'E:\pyWorkspace\CAE\try0\tryThres2.5'
thresholdV=2.5
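# note: label() on the stacked 3D volume links components across neighbouring slices;
# label each slice separately if independent per-slice components are intended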
ts=label2rgb(label(suvs>thresholdV))
from skimage import color
for i in range(ts.shape[0]):
    skio.imsave(os.path.join(op,str(i)+'.bmp'),np.round(ts[i,:,:,:]*255).astype(np.uint8))

示例#42
0
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches

from skimage import data
from skimage.filters import threshold_otsu
from skimage.morphology import closing, square
from skimage.segmentation import clear_border
from skimage.measure import label, regionprops
from skimage.color import label2rgb

image = data.coins()[50:-50, 50:-50]

# apply threshold
thresh = threshold_otsu(image)
bw = closing(image > thresh, square(3))

# remove artifacts connected to image border
cleared = clear_border(bw)

# label image regions
label_image = label(cleared)
borders = np.logical_xor(bw, cleared)
label_image[borders] = -1
image_label_overlay = label2rgb(label_image, image=image)

fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
ax.imshow(image_label_overlay)

for region in regionprops(label_image):

    # skip small images
    if region.area < 100:
        continue

    # draw rectangle around segmented coins
    minr, minc, maxr, maxc = region.bbox
    rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                              fill=False, edgecolor='red', linewidth=2)
    ax.add_patch(rect)

ax.set_axis_off()
plt.show()
示例#43
0
    def __init__(self, nucs_lbls, spts_ctrs, spts):
        zlen, xlen, ylen = nucs_lbls.shape

        nucs_er = np.zeros(nucs_lbls.shape, dtype=int)
        for z in range(zlen):
            nucs_er[z, :, :] = label(binary_erosion(nucs_lbls[z, :, :],
                                                    iterations=6),
                                     connectivity=1)
            nucs_er[z, :, :] = remove_small_objects(nucs_er[z, :, :], 120)

        j_max = np.argmax(np.sign(nucs_er).sum(2).sum(1))
        nucs_prj = np.copy(nucs_er[j_max, :, :])
        new_idx = 1000
        for z in range(1, zlen):
            idxs = np.unique(nucs_er[z, :, :])[1:]
            for k in idxs:
                smpl = ((nucs_er[z, :, :] == k) * nucs_prj).reshape(
                    (xlen * ylen))

                if smpl.sum() == 0:
                    nucs_prj += new_idx * (nucs_er[z, :, :] == k)
                    new_idx += 1

                else:
                    smpl = np.delete(smpl, np.where(smpl == 0)[0])
                    smpl = np.median(smpl).astype(int)
                    nucs_prj += smpl * (nucs_er[z, :, :]
                                        == k) * (1 - np.sign(nucs_prj))

        nucs_prj = np.copy(nucs_er[j_max, :, :])
        nucs_prj = label(nucs_prj)

        cpu_owe = multiprocessing.cpu_count()
        nucs_dil = np.copy(nucs_prj)

        while (nucs_dil == 0).sum() > 0:
            print((nucs_dil == 0).sum())
            idxs = np.unique(nucs_dil)[1:]
            tags = np.split(idxs, (int(idxs.size / cpu_owe + 1)) *
                            np.arange(1, cpu_owe, dtype=int))
            args = []
            for jj in range(len(tags)):
                args.append([tags[jj], nucs_dil])
            pool = multiprocessing.Pool()
            results = pool.map(DilationFunction.DilationFunction, args)
            pool.close()
            for j in range(len(results)):
                nucs_dil += results[j].msk * (1 - np.sign(nucs_dil))
            nucs_dil = label(nucs_dil)

        ref_spts = np.zeros(spts_ctrs.shape[1])
        for k in range(spts_ctrs.shape[1]):
            ref_spts[k] = nucs_dil[spts_ctrs[1, k], spts_ctrs[0, k]]

        fls_clrd = np.zeros(np.append(nucs_prj.shape, 3))
        for j in range(ref_spts.size):
            fls_clrd[:, :, 0] += (nucs_prj == ref_spts[j]) * 1

        fls_clrd[:, :, 2] = np.sign(nucs_prj) * (
            1 - np.sign(fls_clrd[:, :, 0])) * fls_clrd[:, :, 0].max()
        fls_clrd[:, :, 1] = spts.sum(0) * fls_clrd[:, :, 0].max()

        nucs_dil = RemoveBorderNuclei.RemoveBorderNuclei(nucs_dil).nucs_dil

        self.nucs_lbls = nucs_lbls
        self.nucs_dil = nucs_dil
        self.fls_clrd = fls_clrd
示例#44
0
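# Note: this snippet starts mid-script; `image` is assumed to be a 2D grayscale array
# loaded earlier, and the following imports are assumed:
#   import numpy as np
#   import matplotlib.pyplot as plt
#   from skimage.feature import match_template
#   from skimage.measure import label, regionprops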
patch = image[400:500, 217:317].copy()

image = image[324:824, 87:387]

#image = gaussian_gradient_magnitude(image, sigma=2)
#patch = gaussian_gradient_magnitude(patch, sigma=2)

print('image shape', image.shape)
print('patch shape', patch.shape)

# result = match_template(image, patch, pad_input=True, mode='constant', constant_values=(0,0),)
result = match_template(image, patch)
thresh = 0.7

res = result > thresh
c = label(res, background=0)
reprop = regionprops(c)
print('number', len(reprop))
print('result shape', result.shape)
print('MIN - MAX', np.min(result), np.max(result))
ij = np.unravel_index(np.argmax(result), result.shape)
print(type(ij))
x, y = ij[::-1]
# shift to the patch centre: x indexes columns (width), y indexes rows (height)
x += patch.shape[1] // 2
y += patch.shape[0] // 2

print(x, y)

plt.imshow(np.clip(result, 0, 1), cmap='gray')
plt.show()
示例#45
0
        avH += hs
    print("Average DSC: ", avD / length)
    print("Average MSD: ", avS / length)
    print("Average HS: ", avH / length)


#Driver()
image = io.imread('images/image_0090.png')
I = util.img_as_float(image)
J = util.img_as_bool(io.imread('groundtruth/threshimage_0090.png'))
#print(otsu(I))
A = segleaf(I)
#plt.imshow(A)
#plt.show()
#boundaries = seg.find_boundaries(A, connectivity=2, mode='inner ')
L = morph.label(A, connectivity=2)
boundaries = seg.find_boundaries(L, connectivity=2, mode='inner')
#L = np.transpose(np.vstack(np.where(seg.find_boundaries(A == True))))
img_w_b = seg.mark_boundaries(image, L, color=(1, 1, 0))
plt.imshow(img_w_b)
plt.show()
#Driver()
#print(DSC(A,J))
#print(MSD(A,J))
#Driver()

#print greyscale histogram
#g_img = color.rgb2gray(I)
#hist = plt.hist(g_img, bins='auto')
#plt.plot(hist)
#plt.show()
示例#46
0
    def calculate_ROC(self, slice_dirname, tag, chosen, p_thresh=0.5):
        '''
        Compute the pixel-level ROC for each slide.
        :param slice_dirname: path of the slides; no longer needed, the saved mask files are read directly
        :param tag: input size of Slide Filter
        :param chosen: list of slide IDs
        :param p_thresh: tumor probability threshold
        :return:
        '''

        project_root = self._params.PROJECT_ROOT
        save_path = "{}/results".format(project_root)
        mask_path = "{}/data/true_masks".format(self._params.PROJECT_ROOT)

        result_auc = []
        if tag == 0:
            code = "_history.npz"
        else:
            code = "_history_v{}.npz".format(tag)

        K = len(code)
        print(
            "slice_id, area, count, p_thresh, dice, accu, recall, f1, roc_auc")
        for result_file in os.listdir(save_path):
            ext_name = os.path.splitext(result_file)[1]
            slice_id = result_file[:-K]
            if chosen is not None and slice_id not in chosen:
                continue

            if ext_name == ".npz" and code in result_file:
                print("loading data : {}, {}".format(slice_id, result_file))
                result = np.load("{}/{}".format(save_path, result_file),
                                 allow_pickle=True)
                x1 = result["x1"]
                y1 = result["y1"]
                x2 = result["x2"]
                y2 = result["y2"]
                coordinate_scale = result["scale"]
                assert coordinate_scale == 1.25, "unexpected coordinate scale (expected 1.25)"

                history = result["history"].item()

                cmb = CancerMapBuilder(self._params,
                                       extract_scale=40,
                                       patch_size=256)
                cancer_map = cmb.generating_probability_map(
                    history, x1, y1, x2, y2, 1.25)
                h, w = cancer_map.shape

                mask_filename = "{}/{}_true_mask.npz".format(
                    mask_path, slice_id)
                if os.path.exists(mask_filename):
                    result = np.load(mask_filename, allow_pickle=True)
                    mask_img = result["mask"]
                    mask_img = mask_img[y1:y1 + h, x1:x1 + w]
                    area = np.sum(mask_img)
                    _, count = morphology.label(mask_img,
                                                connectivity=2,
                                                return_num=True)
                else:
                    mask_img = np.zeros((h, w), dtype=bool)
                    area = 0
                    count = 0

                false_positive_rate, true_positive_rate, thresholds = metrics.roc_curve(
                    mask_img.ravel(), cancer_map.ravel())
                roc_auc = metrics.auc(false_positive_rate, true_positive_rate)

                pred = np.array(cancer_map > p_thresh).astype(int)
                mask_img = np.array(mask_img).astype(int)
                dice = Evaluation.calculate_dice_coef(mask_img, pred)
                accu = metrics.accuracy_score(mask_img, pred)
                recall = metrics.recall_score(mask_img, pred, average='micro')
                # print(set(np.unique(mask_img)) - set(np.unique(pred)))
                f1 = metrics.f1_score(
                    mask_img, pred,
                    average='weighted')  # note: with average='micro', f1 would equal dice

                temp = "{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}\t{}".format(
                    slice_id, area, count, p_thresh, dice, accu, recall, f1,
                    roc_auc)
                result_auc.append(temp)
                print(temp)

        print("############################################")
        for item in result_auc:
            print(item)

        return
示例#47
0
cur_design_variable = np.load( projects_directory_location_base + "/cur_design_variable.npy" )
bayer_filter.w[0] = cur_design_variable
bayer_filter.update_filters(num_epochs - 1)
bayer_filter.update_permittivity()

cur_fabrication_target = ( bayer_filter.get_permittivity() > 0.5 )

pad_cur_fabrication_target = np.pad(
	cur_fabrication_target,
	( ( 1, 1 ), ( 1, 1 ), ( 1, 1 ) ),
	mode='constant'
)
pad_cur_fabrication_target[ :, :, pad_cur_fabrication_target.shape[ 2 ] - 1 ] = 1

[solid_labels, num_solid_labels] = skim.label( pad_cur_fabrication_target, connectivity=1, return_num=True )
[void_labels, num_void_labels] = skim.label( 1 - pad_cur_fabrication_target, connectivity=1, return_num=True )

layer_step = int( cur_fabrication_target.shape[ 2 ] / num_vertical_layers )

new_device = np.zeros( cur_fabrication_target.shape )

for layer_idx in range( 0, num_vertical_layers ):
	pull_data = void_labels[ 1 : void_labels.shape[ 0 ] - 1, 1 : void_labels.shape[ 1 ] - 1, layer_idx * layer_step + 1 ]

	for internal_layer in range( 0, layer_step ):
		new_device[ :, :, layer_idx * layer_step + internal_layer ] = 1.0 * np.greater( cur_fabrication_target[ :, :, layer_idx * layer_step + 1 ], 0.5 )
		new_device[ :, :, layer_idx * layer_step + internal_layer ] += 1.0 * np.greater( pull_data, 1 )
new_device[ :, :, new_device.shape[ 2 ] - 1 ] = new_device[ :, :, new_device.shape[ 2 ] - 2 ]

示例#48
0
File: valid.py  Project: xuhuaren/DSB2018
def show_groundtruth(uid, x, y, y_c, y_m, gt, gt_s, gt_c, gt_m, save=False):
    threshold = config['param'].getfloat('threshold')
    threshold_edge = config['param'].getfloat('threshold_edge')
    threshold_mark = config['param'].getfloat('threshold_mark')
    segmentation = config['post'].getboolean('segmentation')
    remove_objects = config['post'].getboolean('remove_objects')
    remove_fiber = config['post'].getboolean('filter_fiber')
    min_object_size = config['post'].getint('min_object_size')
    only_contour = config['contour'].getboolean('exclusive')
    view_color_equalize = config['valid'].getboolean('view_color_equalize')
    print_table = config['valid'].getboolean('print_table')

    fig, (ax1, ax2, ax3) = plt.subplots(3, 4, sharey=True, figsize=(12, 8))
    fig.suptitle(uid, y=1)

    y_s = y  # keep a copy to show the pure semantic prediction later

    if view_color_equalize:
        x = clahe(x)
    ax1[0].set_title('Image')
    ax1[0].imshow(x, aspect='auto')
    if segmentation:
        y, markers = partition_instances(y, y_m, y_c)
    if remove_objects:
        y = remove_small_objects(y, min_size=min_object_size)
    if remove_fiber:
        y = filter_fiber(y)
    _, count = label(y, return_num=True)
    ax1[1].set_title('Final Pred, #={}'.format(count))
    ax1[1].imshow(y, cmap='gray', aspect='auto')
    # overlay contours on the semantic ground truth (an alternative view of the instance ground truth, i.e. gt)
    _, count = label(gt, return_num=True)
    ax1[2].set_title('Instance Lbls, #={}'.format(count))
    ax1[2].imshow(gt_s, cmap='gray', aspect='auto')
    gt_c2, cmap = _make_overlay(gt_c)
    ax1[2].imshow(gt_c2, cmap=cmap, alpha=0.7, aspect='auto')
    if only_contour:  # cannot distinguish individual instances in this case
        iou = iou_metric(y, label(gt > 0), print_table)
    else:
        iou = iou_metric(y, gt, print_table)
    ax1[3].set_title('Overlay, IoU={:.3f}'.format(iou))
    ax1[3].imshow(gt_s, cmap='gray', aspect='auto')
    y, cmap = _make_overlay(y)
    ax1[3].imshow(y, cmap=cmap, alpha=0.3, aspect='auto')

    y_s = y_s > threshold
    _, count = label(y_s, return_num=True)
    ax2[0].set_title('Semantic Predict, #={}'.format(count))
    ax2[0].imshow(y_s, cmap='gray', aspect='auto')
    _, count = label(gt_s, return_num=True)
    ax2[1].set_title('Semantic Lbls, #={}'.format(count))
    ax2[1].imshow(gt_s, cmap='gray', aspect='auto')

    if y_c is not None:
        y_c = y_c > threshold_edge
        _, count = label(y_c, return_num=True)
        ax2[2].set_title('Contour Predict, #={}'.format(count))
        ax2[2].imshow(y_c, cmap='gray', aspect='auto')
        _, count = label(gt_c, return_num=True)
        ax2[3].set_title('Contour Lbls, #={}'.format(count))
        ax2[3].imshow(gt_c, cmap='gray', aspect='auto')

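    # `markers` below comes from partition_instances() above, so this assumes the
    # `segmentation` post-processing option is enabled in the config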
    _, count = label(markers, return_num=True)
    ax3[0].set_title('Final Markers, #={}'.format(count))
    ax3[0].imshow(markers, cmap='gray', aspect='auto')
    if y_m is not None:
        y_m = y_m > threshold_mark
        _, count = label(y_m, return_num=True)
        ax3[1].set_title('Marker Predict, #={}'.format(count))
        ax3[1].imshow(y_m, cmap='gray', aspect='auto')
        _, count = label(gt_m, return_num=True)
        ax3[2].set_title('Marker Lbls, #={}'.format(count))
        ax3[2].imshow(gt_m, cmap='gray', aspect='auto')

    plt.tight_layout()

    if save:
        save_dir = predict_save_folder()
        fp = os.path.join(save_dir, uid + '.png')
        plt.savefig(fp)
    else:
        show_figure()
示例#49
0
    def extractFile(self, filename):
        image = imread(filename, 1)
        # apply threshold in order to make the image binary
        bw = (image < 120).astype(float)

        # remove artifacts connected to image border
        cleared = bw.copy()
        # clear_border(cleared)

        # label image regions
        label_image = label(cleared, connectivity=2)
        borders = np.logical_xor(bw, cleared)
        label_image[borders] = -1

        letters = list()
        order = list()

        for region in regionprops(label_image):
            minr, minc, maxr, maxc = region.bbox
            # skip small images
            if maxr - minr > len(
                    image) / 250:  # better to use height rather than area.
                rect = mpatches.Rectangle((minc, minr),
                                          maxc - minc,
                                          maxr - minr,
                                          fill=False,
                                          edgecolor='red',
                                          linewidth=2)
                order.append(region.bbox)

        # sort the detected characters left->right, top->bottom
        lines = list()
        first_in_line = ''
        counter = 0

        # worst case scenario there can be 1 character per line
        for x in range(len(order)):
            lines.append([])

        for character in order:
            if first_in_line == '':
                first_in_line = character
                lines[counter].append(character)
            elif abs(character[0] - first_in_line[0]) < (first_in_line[2] -
                                                         first_in_line[0]):
                lines[counter].append(character)
            elif abs(character[0] - first_in_line[0]) > (first_in_line[2] -
                                                         first_in_line[0]):
                first_in_line = character
                counter += 1
                lines[counter].append(character)

        for x in range(len(lines)):
            lines[x].sort(key=lambda tup: tup[1])

        final = list()
        prev_tr = 0
        prev_line_br = 0

        for i in range(len(lines)):
            for j in range(len(lines[i])):
                tl_2 = lines[i][j][1]
                bl_2 = lines[i][j][0]
                if tl_2 > prev_tr and bl_2 > prev_line_br:
                    tl, tr, bl, br = lines[i][j]
                    letter_raw = bw[tl:bl, tr:br]
                    letter_norm = resize(letter_raw, (20, 20))
                    final.append(letter_norm)
                    prev_tr = lines[i][j][3]
                if j == (len(lines[i]) - 1):
                    prev_line_br = lines[i][j][2]
            prev_tr = 0
            tl_2 = 0
        # print ('Characters recognized: ' + str(len(final)))
        return final
示例#50
0
def prob_to_rles(x):
    lab_img = label(x > 0.5)
    for i in range(1, lab_img.max() + 1):
        yield rle_encoding(lab_img == i)
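The `rle_encoding` helper called by this and the later prob_to_rles variants is not shown in these snippets; a minimal sketch of the run-length encoder they typically pair with (column-major order, 1-based start positions, as in the DSB2018 submission format) follows as an assumption, not the original author's code:
import numpy as np

def rle_encoding(mask):
    # mask: boolean array for a single labelled object (e.g. lab_img == i)
    pixels = np.where(mask.T.flatten() == 1)[0]  # column-major (Fortran) order
    run_lengths = []
    prev = -2
    for b in pixels:
        if b > prev + 1:                      # a new run starts
            run_lengths.extend((b + 1, 0))    # 1-based start position, zero length so far
        run_lengths[-1] += 1                  # extend the current run
        prev = b
    return run_lengths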
示例#51
0
    def extractFile(self, filename):
        image = imread(filename, 1)

        #apply threshold in order to make the image binary
        bw = image < 120

        # remove artifacts connected to image border
        cleared = bw.copy()
        #clear_border(cleared)

        # label image regions
        label_image = label(cleared, connectivity=2)
        borders = np.logical_xor(bw, cleared)
        label_image[borders] = -1

        # fig = plt.figure()
        #ax = fig.add_subplot(131)
        #ax.imshow(bw, cmap='jet')
        image0 = imread(filename)
        fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(20, 20))
        ax.imshow(image0, cmap='jet')

        letters = list()
        order = list()

        for region in regionprops(label_image):
            minr, minc, maxr, maxc = region.bbox
            # skip small images
            if maxc - minc > len(
                    image) / 250:  # better to use height rather than area.
                rect = mpatches.Rectangle((minc, minr),
                                          maxc - minc,
                                          maxr - minr,
                                          fill=False,
                                          edgecolor='red',
                                          linewidth=2)
                order.append(region.bbox)

                ax.add_patch(rect)

        Character_found_dir = '/'.join(
            filename.replace('/', '/Character_found/').split('/')[0:-1])
        if not os.path.exists(Character_found_dir):
            os.makedirs(Character_found_dir)
        plt.savefig(filename.replace('/', '/Character_found/'))
        #sort the detected characters left->right, top->bottom
        lines = list()
        first_in_line = ''
        counter = 0

        #worst case scenario there can be 1 character per line
        for x in range(len(order)):
            lines.append([])

        for character in order:
            if first_in_line == '':
                first_in_line = character
                lines[counter].append(character)
            elif abs(character[0] - first_in_line[0]) < (first_in_line[2] -
                                                         first_in_line[0]):
                lines[counter].append(character)
            elif abs(character[0] - first_in_line[0]) > (first_in_line[2] -
                                                         first_in_line[0]):
                first_in_line = character
                counter += 1
                lines[counter].append(character)

        for x in range(len(lines)):
            lines[x].sort(key=lambda tup: tup[1])

        final = list()
        prev_tr = 0
        prev_line_br = 0

        for i in range(len(lines)):
            for j in range(len(lines[i])):
                tl_2 = lines[i][j][1]
                bl_2 = lines[i][j][0]
                if tl_2 > prev_tr and bl_2 > prev_line_br:
                    tl, tr, bl, br = lines[i][j]
                    letter_raw = bw[tl:bl, tr:br]
                    letter_norm = resize(letter_raw, (20, 20))
                    final.append(letter_norm)
                    prev_tr = lines[i][j][3]
                if j == (len(lines[i]) - 1):
                    prev_line_br = lines[i][j][2]
            prev_tr = 0
            tl_2 = 0
        print('Characters recognized: ' + str(len(final)))
        return final
def main():
    args = parse_arguments()

    # --------------------------------- 1. Load Plugin for inference engine ---------------------------------
    logger.info("Loading plugin")
    plugin = IEPlugin(args.target_device)

    config = dict()
    if 'CPU' in args.target_device:
        if args.path_to_extension:
            plugin.add_cpu_extension(args.path_to_extension)
        if args.number_threads is not None:
            config.update({'CPU_THREADS_NUM': str(args.number_threads)})
    else:
        raise AttributeError("Device {} do not support of 3D convolution. Please use CPU or HETERO:*CPU*")

    if 'GPU' in args.target_device:
        if args.path_to_cldnn_config:
            config.update({'CONFIG_FILE': args.path_to_cldnn_config})
            logger.info("GPU extensions is loaded %s", args.path_to_cldnn_config)

    plugin.set_config(config)

    logger.info("Device is %s ", plugin.device)
    logger.info("Plugin version is %s", plugin.version)

    # --------------------- 2. Read IR Generated by ModelOptimizer (.xml and .bin files) ---------------------

    xml_filename = os.path.abspath(args.path_to_model)
    bin_filename = os.path.abspath(os.path.splitext(xml_filename)[0] + '.bin')

    ie_network = IENetwork(xml_filename, bin_filename)

    input_info = ie_network.inputs
    if not input_info:
        raise AttributeError("No input info is provided")
    if len(input_info) != 1:
        raise AttributeError("Only networks with a single input layer are supported")

    input_name = next(iter(input_info))
    out_name = next(iter(ie_network.outputs))
    print(input_name, out_name)

    # ---------------------------------------- 4. Preparing input data ----------------------------------------
    logger.info("Preparing inputs")

    if len(input_info[input_name].shape) != 5:
        raise AttributeError("Incorrect shape {} for 3d convolution network".format(args.shape))

    n, _, d, h, w = input_info[input_name].shape
    ie_network.batch_size = n

    # ------------------------------------- 4. Loading model to the plugin -------------------------------------
    # logger.info("Reshape of network from {} to {}".format(input_info[input_name].shape, image_crop_pad.shape))
    #ie_network.reshape({input_name: image_crop_pad.shape})
    #input_info = ie_network.inputs

    # logger.info("Loading model to the plugin")
    executable_network = plugin.load(network=ie_network)
    del ie_network

    files = os.listdir(args.path_to_input_data)
    files = [f for f in files if (f.startswith('Patient') and os.path.isfile(os.path.join(args.path_to_input_data, f)))]
    files.sort()

    for f in files:
        header = read_nii_header(os.path.join(args.path_to_input_data, f))
        image = np.array(header.get_data()).astype(np.float32)
        original_shape = image.shape

        start_time = datetime.now()

        image = median_filter(image, 3)

        bbox = lung_bbox(image)

        image_crop = image[bbox[0, 0]:bbox[1, 0], bbox[0, 1]:bbox[1, 1], bbox[0, 2]:bbox[1, 2]]

        new_shape_pad = (d, h, w)
        diff = np.array(new_shape_pad) - np.array(image_crop.shape)
        pad_left = diff // 2
        pad_right = diff - pad_left

        image_crop_pad = np.pad(image_crop, pad_width=tuple([(pad_left[i], pad_right[i]) for i in range(3)]),
                                mode='reflect')

        # dataset statistics
        mean = -303.0502877950004
        mean2 = 289439.0029958802
        std = np.sqrt(mean2 - mean * mean)

        image_crop_pad = (image_crop_pad - mean) / std

        image_crop_pad = image_crop_pad[None, None]

        preprocess_time = datetime.now() - start_time

        test_im = {input_name: image_crop_pad}

        # ---------------------------------------------- 5. Do inference --------------------------------------------
        start_time = datetime.now()
        res = executable_network.infer(test_im)
        infer_time = datetime.now() - start_time

        # ---------------------------- 6. Processing of the received inference results ------------------------------
        result = res[out_name]

        start_time = datetime.now()

        output_crop = result[0, :, pad_left[0]:-pad_right[0] or None, pad_left[1]:-pad_right[1] or None,
                             pad_left[2]:-pad_right[2] or None]

        new_label = np.zeros(shape=(4,) + image.shape)
        new_label[:, bbox[0, 0]:bbox[1, 0], bbox[0, 1]:bbox[1, 1], bbox[0, 2]:bbox[1, 2]] = output_crop

        scale_factor = np.array(original_shape) / np.array(image.shape)
        old_labels = [zoom(new_label[i], scale_factor, order=1, mode='constant', cval=0)[None] for i in range(4)]

        old_label = np.concatenate(tuple(old_labels), axis=0)
        old_label = ((np.argmax(old_label, axis=0) + 1) *
                     np.max((old_label > np.array([0.5, 0.5, 0.5, 0.5]).reshape((-1, 1, 1, 1))).astype(np.int32),
                            axis=0)).astype(np.int32)

        eso_connectivity = morphology.label(old_label == 1)
        heart_connectivity = morphology.label(old_label == 2)
        trachea_connectivity = morphology.label(old_label == 3)
        aorta_connectivity = morphology.label(old_label == 4)
        eso_connectivity = reject_small_regions(eso_connectivity, ratio=0.2)
        heart_connectivity = leave_biggest_region(heart_connectivity)
        trachea_connectivity = leave_biggest_region(trachea_connectivity)
        aorta_connectivity = leave_biggest_region(aorta_connectivity)

        old_label[np.logical_and(old_label == 1, eso_connectivity == 0)] = 0
        old_label[np.logical_and(old_label == 2, heart_connectivity == 0)] = 0
        old_label[np.logical_and(old_label == 3, trachea_connectivity == 0)] = 0
        old_label[np.logical_and(old_label == 4, aorta_connectivity == 0)] = 0

        postprocess_time = datetime.now() - start_time

        logger.info("Pre-processing time is %s; Inference time is %s; Post-processing time is %s",
                    preprocess_time, infer_time, postprocess_time)

        # --------------------------------------------- 7. Save output -----------------------------------------------
        output_header = nii.Nifti1Image(old_label, header.affine)
        nii.save(output_header, os.path.join(args.path_to_output, f[:-7]+'.nii'))
    def __call__(self, subjects):
        subject_names = [subject['name'] for subject in subjects]
        subject_stats = LabeledTensor(
            dim_names=['subject', 'stat'],
            dim_keys=[subject_names, self.stats_to_output])

        for subject in subjects:
            pred_data = subject[self.prediction_label_map_name].data > 0
            target_data = subject[self.target_label_map_name].data > 0

            label_params = {
                'return_num': True,
                'connectivity': self.connectivity
            }
            pred_components, num_pred_components = label(
                pred_data[0].numpy(), **label_params)
            target_components, num_target_components = label(
                target_data[0].numpy(), **label_params)
            N, M = num_target_components, num_pred_components

            pred_components = torch.from_numpy(pred_components)
            target_components = torch.from_numpy(target_components)

            # Trick: encode the overlap of target component i and predicted component j as (i + j * factor),
            # then use torch.unique to count the occurrences of each overlap pair.
            # An alternative would be to stack the two label maps and call unique on the flattened
            # spatial dimensions, but that is far slower than this encoding.
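            # Toy example: with factor = 10, a pixel in target component 3 and predicted
            # component 2 is encoded as 3 + 2 * 10 = 23; decoding gives 23 % 10 = 3 and 23 // 10 = 2.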
            factor = 1000000
            overlap_components = target_components + (pred_components * factor)
            unique_overlap, overlap_counts = torch.unique(overlap_components,
                                                          sorted=True,
                                                          return_counts=True)

            overlap_histogram = torch.zeros(N + 1, M + 1)
            for overlap_component, count in zip(unique_overlap,
                                                overlap_counts):
                i = overlap_component % factor
                j = overlap_component // factor
                overlap_histogram[i, j] = count

            target_detected = self.detection_test(overlap_histogram,
                                                  **self.detection_test_params)
            prediction_detected = self.detection_test(
                overlap_histogram.T, **self.detection_test_params)

            detection_recall = target_detected.sum() / N
            detection_precision = prediction_detected.sum() / M
            detection_f1 = 2 * (detection_recall * detection_precision) / (
                detection_recall + detection_precision)

            TP = overlap_histogram[1:, 1:].sum()
            FP = overlap_histogram[0, 1:].sum()
            TN = overlap_histogram[0, 0].sum()
            FN = overlap_histogram[1:, 0].sum()

            stats = {
                'target_components': N,
                'predicted_components': M,
                'target_detections': target_detected.sum(),
                'predicted_detections': prediction_detected.sum(),
                'detection_recall': detection_recall,
                'detection_precision': detection_precision,
                'detection_f1': detection_f1,
                'target_volume': TP + FN,
                'prediction_volume': TP + FP,
                'TP': TP,
                'FP': FP,
                'TN': TN,
                'FN': FN,
                'dice': 2 * TP / (2 * TP + FP + FN),
                'jaccard': TP / (TP + FP + FN),
                'precision': TP / (TP + FP),
                'recall': TP / (TP + FN),
            }

            for stat_name in self.stats_to_output:
                value = stats[stat_name]
                if isinstance(value, torch.Tensor):
                    value = value.item()
                subject_stats[subject['name'], stat_name] = value

        summary_stats = subject_stats.compute_summary_stats(
            self.summary_stats_to_output)
        out_dict = {
            'subject_stats': subject_stats.to_dataframe(),
            'summary_stats': summary_stats
        }

        return out_dict
示例#54
0
def prob_to_rles(x, cutoff=0.5):
    lab_img = label(x > cutoff)
    for i in range(1, lab_img.max() + 1):
        yield rle_encoding(lab_img == i)
示例#55
0
def prob_to_rles(x, cut_off=0.5):
    lab_img = label(x > cut_off)
    if lab_img.max() < 1:
        lab_img[0, 0] = 1  # ensure at least one prediction per image
    for i in range(1, lab_img.max() + 1):
        yield rle_encoding(lab_img == i)
示例#56
0
    def segment(self,
                img,
                well_radius=800,
                well_mask_radius=765,
                include_intermediate_results=False,
                **kwargs):
        # Assume image is single plane z-stack and grab first 2D image to process
        assert img.ndim == 3
        assert img.shape[0] == 1
        img = img[0]

        logger.debug(
            'Running 2x segmentation on image with shape %s, type %s (args: well_radius = %s, well_mask_radius = %s, include_intermediate_results=%s)',
            img.shape, img.dtype, well_radius, well_mask_radius,
            include_intermediate_results)

        # Remove outliers, convert to float
        img = ndi.median_filter(img, size=(3, 3))
        img = img_as_float(img)

        # Apply bandpass and compute gradients
        img_bp = ndi.gaussian_filter(img, sigma=6) - ndi.gaussian_filter(
            img, sigma=10)
        img_gr = ndi.generic_gradient_magnitude(img_bp, ndi.sobel)

        # Get and apply well mask translation
        img_well = get_circle_mask(well_radius, img_gr.shape)
        shifts = feature.register_translation(img_gr, img_well)[0]
        img_well = get_circle_mask(well_mask_radius,
                                   img_gr.shape,
                                   translation=shifts)
        img_gm = img_gr * img_well

        # Apply local threshold and cleanup binary result
        img_bm = img_gm > filters.threshold_local(img_gm, 255)
        img_bm = ndi.binary_fill_holes(img_bm, structure=morphology.disk(1))
        img_bm = morphology.binary_opening(img_bm, selem=morphology.disk(8))

        # Run segmentation
        img_dt = ndi.distance_transform_edt(img_bm)
        img_dt = ndi.gaussian_filter(img_dt, sigma=1)
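        # peaks of the smoothed distance transform act as watershed seeds; flooding
        # -img_dt makes object centres the basins of the watershed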
        img_pk = morphology.label(
            feature.peak_local_max(img_dt, indices=False, min_distance=8))
        img_obj = segmentation.watershed(-img_dt, img_pk,
                                         mask=img_bm).astype(np.uint16)
        img_bnd = img_obj * segmentation.find_boundaries(
            img_obj, mode='inner', background=0)

        # Compile list of object image results (and append intermediates if necessary)
        img_seg = [img_obj, img_obj, img_bnd, img_bnd]
        if include_intermediate_results:
            to_uint16 = lambda im: exposure.rescale_intensity(
                im, out_range='uint16').astype(np.uint16)
            img_seg += [
                to_uint16(img_bp),
                segmentation.find_boundaries(img_well,
                                             mode='inner',
                                             background=0).astype(np.uint16),
                to_uint16(img_gm),
                to_uint16(img_dt),
                img_pk.astype(np.uint16)
            ]

        # Stack and add new axis to give to (z, ch, h, w)
        img_seg = np.stack(img_seg)[np.newaxis]
        assert img_seg.dtype == np.uint16, 'Expecting 16bit result, got type {}'.format(
            img_seg.dtype)
        assert img_seg.ndim == 4, 'Expecting 4D result, got shape {}'.format(
            img_seg.shape)
        return img_seg
示例#57
0
def multi_rle_encode(img):
    labels = label(img[:, :, 0])
    return [rle_encode(labels==k) for k in np.unique(labels[labels>0])]
示例#58
0
# apply filters
med = filters.median(image)

# Thresholding using Otsu's method (cv2.THRESH_OTSU); `min` and `max` are assumed to be defined earlier
th6, dst6 = cv2.threshold(med, min, max, cv2.THRESH_OTSU)

contours, hierarchy = cv2.findContours(dst6, cv2.RETR_TREE,
                                       cv2.CHAIN_APPROX_SIMPLE)
cv2.drawContours(imag1, contours, -1, (200, 0, 0), 1)

# clear objects connected to the image border
img_bin = clear_border(closing(imag1 > 120, square(20)))

# label the objects
labels = label(img_bin)

plt.subplot(2, 3, 1)
plt.imshow(image, cmap='gray')
obj_ttl = plt.title('Original Image')
plt.setp(obj_ttl, color='b')
plt.axis("off")

plt.subplot(2, 3, 2)
plt.plot(hist, color='gray')
obj_xl = plt.xlabel('Illumination intensity')
plt.setp(obj_xl, color='g')
obj_yl = plt.ylabel('Number of pixels')
plt.setp(obj_yl, color='g')
obj_ttl = plt.title("Histogram")
plt.setp(obj_ttl, color='b')
示例#59
0
ds_new = xr.Dataset({'chla': chlanew, 'flags': flagnew})
xx = np.zeros((1, 64, 64), dtype=bool)

k = 0
while (k < nombre_images):
    i = np.random.randint(0, np.shape(ds_new.index.values)[0])
    # for each image
    x = np.zeros(np.shape(ds_new.chla[i, :, :]), dtype=bool)  # initialise with a matrix of False
    y = np.argwhere(np.isnan(ds_new.chla.values[i, :, :]))  # coordinates of the NaNs, i.e. the clouds
    x[y[:, 0], y[:, 1]] = True  # replace the NaNs (the clouds) with True
    # label the closed-contour regions (clouds), filling each with a distinct integer
    m = morphology.label(x, connectivity=2)
    print(i, k)
    for j in range(1, np.max(m) + 1):  # iterate over all labelled regions (clouds)
        # for each closed region (a whole cloud)
        c = np.where(m == j)  # select its coordinates
        # keep the cloud only if it is far enough from the borders and within the size limits
        if (((np.shape(c[0])[0]) > surface_min)
                and ((np.shape(c[0])[0]) < surface_max) and (np.min(c) > dx)
                and (np.max(c) < 63 - dx)):
            aa = np.zeros((1, 64, 64), dtype=bool)  # initialise the mask
            aa[0, c[0], c[1]] = True  # set the mask
            xx = np.append(xx, aa, axis=0)  # why not "extend"?
    k = k + 1
data_mask = xr.DataArray(xx[1:])  # save into the data array
示例#60
0
def bridges(density, restrictions, costs, topological_correction_value):
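    """Connect disjoint solid regions of a binary design by painting minimum-cost bridges.

    Components of `density > 0.5` are labelled, a pixel graph weighted by `costs` gives
    shortest paths between component representatives, a minimum spanning tree over those
    path costs selects which bridges to draw, and bridge pixels are set to
    `topological_correction_value`.
    """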
    binary_map = np.greater(density, 0.5)
    save_binary_map = binary_map.copy()

    pad_density = np.pad(density, ((1, 1), (1, 1)), mode='constant')

    pad_binary_map = np.greater(pad_density, 0.5)

    density_shape = density.shape
    width = density_shape[0]
    height = density_shape[1]

    pad_costs = np.pad(costs, ((1, 1), (1, 1)), mode='constant')

    [solid_labels, num_solid_labels] = skim.label(pad_binary_map,
                                                  connectivity=1,
                                                  return_num=True)

    if num_solid_labels <= 1:
        return density

    density_graph = nx.MultiDiGraph()
    for x_idx in range(0, width):
        for y_idx in range(0, height):

            center_node_id = (x_idx + 1) * (pad_density.shape[1]) + (y_idx + 1)

            for x_offset in range(0, 3):
                for y_offset in range(0, 3):

                    if ((x_offset == 1) and (y_offset == 1)) or (
                        (np.abs(x_offset - 1) + np.abs(y_offset - 1)) > 1):
                        continue

                    next_x_idx = x_idx + x_offset
                    next_y_idx = y_idx + y_offset

                    if ((next_x_idx == 0) or (next_y_idx == 0)
                            or (next_x_idx == (pad_density.shape[0] - 1))
                            or (next_y_idx == (pad_density.shape[1] - 1))):
                        continue

                    next_node_id = next_x_idx * (
                        pad_density.shape[1]) + next_y_idx

                    next_density_value = pad_binary_map[next_x_idx, next_y_idx]
                    cost_value = pad_costs[next_x_idx, next_y_idx]

                    if next_density_value:
                        cost_value = 0

                    density_graph.add_edge(center_node_id,
                                           next_node_id,
                                           weight=cost_value)

    label_to_representative_pt = {}

    for x_idx in range(0, width):
        for y_idx in range(0, height):
            density_value = pad_density[1 + x_idx, 1 + y_idx]
            component_label = solid_labels[1 + x_idx, 1 + y_idx]

            if (component_label in label_to_representative_pt.keys()) or (
                    not density_value):
                continue

            label_to_representative_pt[component_label] = [x_idx, y_idx]

    mst_graph = nx.Graph()

    for label_idx_start in range(0, num_solid_labels):
        component_start = 1 + label_idx_start
        source_pt = label_to_representative_pt[component_start]
        source_node_id = (source_pt[0] + 1) * (pad_density.shape[1]) + (
            source_pt[1] + 1)

        min_path_all = nx.shortest_path(density_graph,
                                        source=source_node_id,
                                        weight='weight')

        for label_idx_end in range(1 + label_idx_start, num_solid_labels):

            component_end = 1 + label_idx_end

            target_pt = label_to_representative_pt[component_end]
            target_node_id = (target_pt[0] + 1) * (pad_density.shape[1]) + (
                target_pt[1] + 1)

            min_path = min_path_all[target_node_id]

            min_path_distance = 0

            for path_idx in range(1, (len(min_path) - 1)):
                node_id = min_path[path_idx]

                source_x = int(node_id / pad_density.shape[1]) - 1
                source_y = node_id % pad_density.shape[1] - 1

                min_path_distance += pad_costs[source_x, source_y]

            mst_graph.add_edge(component_start,
                               component_end,
                               weight=min_path_distance)

    mst = nx.minimum_spanning_tree(mst_graph)

    mst_edges = nx.edges(mst)

    for edge in mst.edges():
        edge_start, edge_end = edge

        source_pt = label_to_representative_pt[edge_start]
        target_pt = label_to_representative_pt[edge_end]

        source_node_id = (source_pt[0] + 1) * (pad_density.shape[1]) + (
            source_pt[1] + 1)
        target_node_id = (target_pt[0] + 1) * (pad_density.shape[1]) + (
            target_pt[1] + 1)

        min_path = nx.shortest_path(density_graph,
                                    source=source_node_id,
                                    target=target_node_id,
                                    weight='weight')

        for path_idx in range(1, (len(min_path) - 1)):
            node_id = min_path[path_idx]

            source_x = int(node_id / pad_density.shape[1]) - 1
            source_y = node_id % pad_density.shape[1] - 1

            density[source_x, source_y] = topological_correction_value
            pad_density[1 + source_x,
                        1 + source_y] = topological_correction_value
            binary_map[source_x, source_y] = True
            pad_binary_map[1 + source_x, 1 + source_y] = True

    restrictions = np.logical_not(np.logical_xor(binary_map, save_binary_map))