Example #1
def jeff_coral_finder(im, sand_intensity_threshold, coral_gradient_threshold,
                      maximum_altseqfilt_radius, shadow_discriminant_threshold,
                      shadow_discriminant_scaling):
  im_grey = N.asarray(im.convert("L"))
  im = N.asarray(im)
  dot = N.array([[0,1,0], [1,1,1], [0,1,0]])
  dilated = morphology.grey_dilation(im_grey, dot.shape, structure=dot)
  eroded = morphology.grey_erosion(im_grey, dot.shape, structure=dot)
  gradient = dilated - eroded
  fisher_discriminant = N.dot(im, shadow_discriminant_scaling)

  # Make initial class determinations.
  is_shadow = fisher_discriminant < shadow_discriminant_threshold
  is_sand   = im_grey > sand_intensity_threshold
  is_smooth = gradient < coral_gradient_threshold
  is_coral  = is_smooth & ~is_sand & ~is_shadow

  # Now perform an alternating sequence filter on coral.
  for radius in range(1, maximum_altseqfilt_radius+1):
    se = disk_strel(radius)
    opened = morphology.binary_opening(is_coral, se)
    is_coral = morphology.binary_closing(opened, se)
  # Now perform an alternating sequence filter on sand.
  for radius in range(1, maximum_altseqfilt_radius+1):
    se = disk_strel(radius)
    opened = morphology.binary_opening(is_sand, se)
    is_sand = morphology.binary_closing(opened, se)
  # Use coral mask to exclude sand.
  is_sand = is_sand & ~is_coral
  return is_sand, is_coral
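
The helper disk_strel is not shown in this snippet; a minimal hypothetical sketch, assuming it returns a boolean disk-shaped structuring element of the given radius:

import numpy as N

def disk_strel(radius):
    # Boolean disk: True where the pixel lies within `radius` of the center.
    y, x = N.ogrid[-radius:radius + 1, -radius:radius + 1]
    return x**2 + y**2 <= radius**2
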
Example #2
    def lungs_segmentation(self, lungs_threshold=-360):
        seg_prub = np.array(self.data3d <= lungs_threshold)
        seg_prub = morphology.binary_closing(seg_prub, iterations=self.iteration()).astype(self.segmentation.dtype)
        seg_prub = morphology.binary_opening(seg_prub, iterations=5)
        counts, labeled_seg = self.volume_count(seg_prub)
        #index = np.argmax(counts)  # background
        #counts[index] = 0
        index = np.argmax(counts)  # one or both lungs
        velikost1 = counts[index]
        counts[index] = 0
        index2 = np.argmax(counts)  # the other lung, or something else
        velikost2 = counts[index2]
        if (1.0 - self.maximal_lung_diff) <= float(velikost2) / velikost1:
            print("lungs separated")
        else:
            pocet = 0
            seg_prub = np.array(self.data3d <= lungs_threshold)
            seg_prub = morphology.binary_closing(seg_prub, iterations=self.iteration()).astype(self.segmentation.dtype)
            seg_prub = morphology.binary_opening(seg_prub, iterations=5)

            # Erode until the two largest components reach comparable volumes.
            while not (1.0 - self.maximal_lung_diff) <= float(velikost2) / velikost1:
                seg_prub = morphology.binary_erosion(seg_prub, iterations=1)
                counts, labeled_seg = self.volume_count(seg_prub)
                index = np.argmax(counts)  # one or both lungs
                velikost1 = counts[index]
                counts[index] = 0
                index2 = np.argmax(counts)  # the other lung, or something else
                velikost2 = counts[index2]
                pocet = pocet + 1
                print(pocet)
                print(velikost1, velikost2)
            seg_prub = morphology.binary_dilation(self.segmentation, iterations=pocet).astype(self.segmentation.dtype)
        #self.segmentation = self.segmentation + np.array(labeled_seg == index).astype(np.int8) * self.slab['lungs']
        #self.segmentation = self.segmentation + np.array(labeled_seg == index2).astype(np.int8) * self.slab['lungs']
        # Decide which component is the left lung and which the right.
        plice1 = np.array(labeled_seg == index)
        z, x, y = np.nonzero(plice1)
        m1 = np.max(y)
        if m1 < (self.segmentation.shape[1] / 2):
            self.segmentation = self.segmentation + np.array(labeled_seg == index).astype(np.int8) * self.slab['llung']
            self.segmentation = self.segmentation + np.array(labeled_seg == index2).astype(np.int8) * self.slab['rlung']
        else:
            self.segmentation = self.segmentation + np.array(labeled_seg == index).astype(np.int8) * self.slab['rlung']
            self.segmentation = self.segmentation + np.array(labeled_seg == index2).astype(np.int8) * self.slab['llung']
        self.orientation()
        if self.smer == 1:  # not tested
            self.segmentation[self.segmentation == self.slab['llung']] = 3
            self.segmentation[self.segmentation == self.slab['rlung']] = self.slab['llung']
            self.segmentation[self.segmentation == 3] = self.slab['rlung']
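
The helper volume_count is not shown here; a minimal sketch of what it plausibly does, assuming it labels connected components and returns per-label voxel counts (the background handling is an assumption):

import numpy as np
from scipy.ndimage import label

def volume_count(seg):
    # Label connected components and count voxels per label;
    # zero out the background count so argmax picks a real component.
    labeled_seg, _ = label(seg)
    counts = np.bincount(labeled_seg.ravel())
    counts[0] = 0
    return counts, labeled_seg
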
Example #3
def Fg_extract(frame,type = 1): #extract foreground    

    if type ==1:
        mu[:]       = alpha*frame + (1.0-alpha)*mu_old
        mu_old[:]   = mu
        sig2[:]     = alpha*(1.0*frame-mu)**2 + (1.0-alpha)*sig2_old
        sig2_old[:] = sig2
        
        sig = sig2**0.5
        
        lmcs = lmc*sig
        bmcs = bmc*sig
        
        fg= np.abs(1.0*frame-mu)[:,:,0]-1*sig[:,:,0]>0.0
    elif type == 2:
        try:
            fg = np.abs(1.0*frame.mean(2)-BG)>50.0
        except:
            BG = pickle.load(open("bg13-19.pkl","rb"))
            BG = cv2.resize(BG,(0,0),fx = 0.5,fy=0.5)
            fg = np.abs(1.0*frame.mean(2)-BG)>50.0
            

    fgo = ndm.binary_opening(fg)
    fgf = ndm.binary_fill_holes(fgo)
    right.set_data(fgf)
    plt.draw()
       
    return fgf
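
This function mutates module-level state (mu, sig2, alpha, ...) that the excerpt does not define; a hypothetical initialization, assuming 480x640 RGB frames and a small learning rate (all values illustrative):

import numpy as np

alpha = 0.02                    # learning rate (assumed)
frame_shape = (480, 640, 3)     # assumed frame size
mu = np.zeros(frame_shape)      # running mean per pixel
mu_old = mu.copy()
sig2 = np.ones(frame_shape)     # running variance per pixel
sig2_old = sig2.copy()
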
Example #4
def _sim_mask(in_file):
    import nibabel as nb
    import numpy as np
    import os.path as op
    import scipy.ndimage as sn
    from scipy.ndimage.morphology import (binary_opening, binary_dilation)
    from pyacwereg.misc import ball as gen_ball

    if not isinstance(in_file, str):  # the original used Python 2's basestring
        in_file = in_file[2]

    out_file = op.abspath('sim_mask.nii.gz')

    im = nb.load(in_file)
    data = im.get_data()
    data[data > 1.0e-4] = 1
    data[data < 1] = 0

    ball1 = gen_ball(4, 1.9)
    data = binary_opening(data.astype(np.uint8), structure=ball1,
                          iterations=1).astype(np.uint8)
    # Get largest object
    label_im, nb_labels = sn.label(data)
    sizes = sn.sum(data, label_im, range(nb_labels + 1))
    larger = np.squeeze(np.argwhere(sizes == sizes.max()))
    data[label_im != larger] = 0

    # Dilate
    # data = binary_dilation(data, structure=ball1,
    #                        iterations=1).astype(np.uint8)

    nb.Nifti1Image(
        data, im.get_affine(), im.get_header()).to_filename(out_file)
    return out_file
Example #5
def Fg_extract(frame): #extract foreground    

    mu[:]       = alpha*frame + (1.0-alpha)*mu_old
    mu_old[:]   = mu
    sig2[:]     = alpha*(1.0*frame-mu)**2 + (1.0-alpha)*sig2_old
    sig2_old[:] = sig2

    sig = sig2**0.5

    lmcs = lmc*sig
    bmcs = bmc*sig

    fg= np.abs(1.0*frame-mu)[:,:,0]-2*sig[:,:,0]>0.0
    fgo = ndm.binary_opening(fg)
    fgf = ndm.binary_fill_holes(fgo)
    right.set_data(fgf)
    plt.draw()
    '''
    if imsave:
        im = zeros(frame.shape)
        a = fgf.astype(np.uint8)*255
        im[:,:,0]=a
        im[:,:,1]=a
        im[:,:,2]=a
        
        im = Image.fromarray(im.astype(np.uint8))
        im.save('/home/andyc/image/tracking VIDEO0004/binary/b%.3d.bmp'%vid_idx)
    '''
    return fgf
Example #6
def generateData(self):
    input = self.getInput(0).getData()

    if self.itrs > 0:
        output = morphology.binary_opening(input, structure=self.struct, iterations=self.itrs)
    else:
        output = input

    self.getOutput(0).setData(output)
Example #7
def get_mask(im):
    # use an intensity threshold to roughly find the mask of the body
    th = 32000  # an approximate background intensity value
    mask = im > th
    mask = binary_opening(mask, structure=np.ones((7, 7)))  # roughly remove bed
    # mask = binary_dilation(mask)
    # mask = binary_fill_holes(mask, structure=np.ones((11,11)))  # fill parts like lung

    if mask.sum() == 0:  # maybe atypical intensity
        mask = im * 0 + 1
    return mask.astype(dtype=np.int32)
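
A minimal sketch of why the 7x7 opening "roughly removes the bed": structures thinner than the structuring element are erased while the large body blob survives (the shapes here are illustrative).

import numpy as np
from scipy.ndimage import binary_opening

img = np.zeros((20, 20), dtype=bool)
img[5:15, 5:15] = True   # body-like blob: survives the opening
img[:, 17] = True        # thin bed-like line: erased by the opening
opened = binary_opening(img, structure=np.ones((7, 7)))
assert opened[8, 8] and not opened[:, 17].any()
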
Example #8
File: segment.py Project: wj2/2p
def watershed_segment(M,xM=None,yM=None):
    """Use watershed segmentation on an array. 
    Return an array where regions are given different integer labels"""

    if xM is not None and yM is not None:
        sel = np.ones((int(ceil(23.9*xM)),int(ceil(23.9*yM)))) # for opening
        sel2 = np.ones((int(ceil(127.2*xM)),int(ceil(127.2*yM)))) # for local thresholding
        sel3 = np.ones((int(ceil(11.9*xM)),int(ceil(11.9*yM)))) # for erosion
        ma,mi =(44245.21*xM*yM),(316.037*xM*yM) 
    else:
        selD = np.array([int(M.shape[0]*.012),int(M.shape[1]*.012)])
        selD = np.where(selD!=0,selD,1)
    
        sel2D = np.array([int(M.shape[0]*.12),int(M.shape[1]*.12)])
        sel2D = np.where(sel2D!=0,sel2D,1)

        sel3D = np.array([int(M.shape[0]*.01),int(M.shape[1]*.01)])
        sel3D = np.where(sel3D!=0,sel3D,1)


        sel = np.ones(selD) # for opening
        sel2 = np.ones(sel2D) # for local thresholding
        sel3 = np.ones(sel3D) # for erosion
        ma,mi = (M.shape[0]*M.shape[1]*.0075),(M.shape[0]*M.shape[1]*.0003)

    # get a few points in the center of each blob
    
    # threshold
    bw = ((M>=ndi.percentile_filter(M,80,footprint=sel2)))
    #& (M>=stats.scoreatpercentile(M.flatten(),80)))

    # open and erode
    blobs = snm.binary_opening(bw,structure=sel)
    blobs = snm.binary_erosion(blobs,structure=sel3,iterations=2)
    
    # label
    labels,_ = ndi.label(blobs)
    labels[labels > 0] += 1
    labels[0,0] = 1

    # rescale and cast to int16, then use watershed
    #M2 = rescaled(M,0,65000).astype(np.uint16)
    #newlabels = ndi.watershed_ift(M2,labels)
    newlabels = labels
    
    # get rid of groups unless they have the right number of pixels

    counts = np.bincount(newlabels.flatten())
    old2new = np.arange(len(counts)) 
    old2new[(counts < int(mi)) | (counts > int(ma))] = 0
    newlabels = old2new[newlabels]

    return newlabels
Example #9
def refined_seeding(a, maximum_height=0, grey_close_radius=1, 
    binary_open_radius=1, binary_close_radius=1, minimum_size=0):
    """Perform morphological operations to get good segmentation seeds."""
    if grey_close_radius > 0:
        strel = diamond_se(grey_close_radius, a.ndim)
        a = grey_closing(a, footprint=strel)
    s = (a <= maximum_height)
    if binary_open_radius > 0:
        strel = diamond_se(binary_open_radius, s.ndim)
        s = binary_opening(s, structure=strel)
    if binary_close_radius > 0:
        strel = diamond_se(binary_close_radius, s.ndim)
        s = binary_closing(s, structure=strel)
    s = remove_small_connected_components(s, minimum_size)
    return label(s)[0]
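
The helper diamond_se is not defined in the snippet; a hypothetical sketch, assuming it builds an n-dimensional diamond (city-block ball) structuring element of the given radius:

from scipy.ndimage import generate_binary_structure, iterate_structure

def diamond_se(radius, ndim):
    # A connectivity-1 cross iterated `radius` times yields a diamond
    # of city-block radius `radius`.
    return iterate_structure(generate_binary_structure(ndim, 1), radius)
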
Example #10
def get_surface(source):
	"""Return Image Surface """

	brightness  = ImageEnhance.Brightness(source)
	img         = brightness.enhance(1.2)
	contrast    = ImageEnhance.Contrast(img)
	img         = contrast.enhance(2000)
	img         = ImageOps.grayscale(img)
	img         = ImageOps.invert(img)
	# Use ndimage to detect holes
	ndarray     = np.array(img)
	ndarray     = morphology.binary_fill_holes(ndarray).astype(bool)
	ndarray     = morphology.binary_opening(ndarray,iterations=1)
	img          = Image.fromarray(np.uint8(ndarray*255))
	img          = img.convert("1")
	return img
Example #11
def Fg_extract(frame): #extract foreground

    
    mu[:]       = alpha*frame + (1.0-alpha)*mu_old
    mu_old[:]   = mu
    sig2[:]     = alpha*(1.0*frame-mu)**2 + (1.0-alpha)*sig2_old
    sig2_old[:] = sig2

    sig = sig2**0.5

    lmcs = lmc*sig
    bmcs = bmc*sig

    fg= np.abs(1.0*frame-mu)[:,:,0]-1*sig[:,:,0]>0.0
    fgo = ndm.binary_opening(fg)
    fgf = ndm.binary_fill_holes(fgo)

    return fgf
Example #12
    def post_process(self):

        entropyValue = array(self.entropyValue)
        w = self.modulLen * self.samplerate() / self.blocksize()
        modulentropy = computeModulation(entropyValue, w, False)
        confEntropy = array(modulentropy - self.threshold) / self.threshold
        confEntropy[confEntropy > 1] = 1

        conf = self.new_result(data_mode='value', time_mode='framewise')

        conf.id_metadata.id += '.' + 'confidence'
        conf.id_metadata.name += ' ' + 'Confidence'

        conf.data_object.value = confEntropy
        self.process_pipe.results.add(conf)

        # Binary Entropy
        binaryEntropy = modulentropy > self.threshold
        binaryEntropy = binary_opening(
            binaryEntropy, [1] * (self.smoothLen * 2))

        convert = {False: 0, True: 1}
        label = {0: 'NonSpeech', 1: 'Speech'}
        segList = segmentFromValues(binaryEntropy)

        segs = self.new_result(data_mode='label', time_mode='segment')
        segs.id_metadata.id += '.' + 'segments'
        segs.id_metadata.name += ' ' + 'Segments'

        segs.label_metadata.label = label

        segs.data_object.label = [convert[s[2]] for s in segList]
        segs.data_object.time = [(float(s[0]) * self.blocksize() /
                                 self.samplerate())
                                 for s in segList]
        segs.data_object.duration = [(float(s[1] - s[0] + 1) * self.blocksize() /
                                     self.samplerate())
                                     for s in segList]

        self.process_pipe.results.add(segs)

        return
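
binary_opening also works on 1-D boolean sequences, which is what the smoothing above relies on: activations shorter than the flat structure are dropped. A small illustrative sketch (lengths are arbitrary):

import numpy as np
from scipy.ndimage import binary_opening

decision = np.array([0, 1, 0, 0, 1, 1, 1, 1, 0, 1, 0], dtype=bool)
smoothed = binary_opening(decision, structure=[1, 1, 1])
# Only the run of four consecutive True values survives:
# [F F F F T T T T F F F]
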
Example #13
def get_colony_region(self, ary, bg_cut):
    mary = np.zeros(ary.shape)
    #ary = ary.copy()
    #ary = gaussian_filter(ary, 1)
    #ary = median_filter(ary, 3)
    for xys in self.xyss:
        vs = np.array([ary[x, y] for x, y in xys])
        tmp = np.where(vs >= bg_cut)
        if len(tmp[0]) == 0:
            tmp = np.where(vs == max(vs))
            ind = tmp[0][0]
        else:
            ind = tmp[0][0]
        x, y = xys[ind]
        mary[x, y] = 1

    mary = binary_closing(mary)
    mary = binary_fill_holes(mary)
    mary = binary_opening(mary)
    return mary
Example #14
File: segment.py Project: wj2/2p
def watershed_segment_2(M,click_coords):
    """Use watershed segmentation on an array. 
    Return an array where regions are given different integer labels"""
    
    # todo: choose these structures based on aspect ratio of M and input parameters
    sel = np.ones((4,10)) # for opening
    sel2 = np.ones((15,75)) # for local thresholding
    sel3 = np.ones((2,5)) # for erosion
    # get a few points in the center of each blob
    
    # threshold
    #bw = ((M>=ndi.percentile_filter(M,80,footprint=sel2)) & (M>=scoreatpercentile(M.flatten(),60)))
    
    score = stats.percentileofscore(M.flatten(),M[int(click_coords[0][1]),int(click_coords[0][0])])
    bw = (M>=stats.scoreatpercentile(M.flatten(),score))

    # open and erode
    #bools = sp.zeros((M.shape[0],M.shape[1]),int)
    #bools[int(click_coords[0]),int(click_coords[1])] = 1
    #blobs = sp.where(bools == 1,True,False)
    blobs = snm.binary_opening(bw,structure=sel)
    blobs = snm.binary_dilation(blobs,iterations=3)
    blobs = snm.binary_erosion(blobs,structure=sel3)
    
    
    # label
    labels,_ = ndi.label(blobs)
    labels[labels > 0] += 1
    #labels[0,0] = 1

    # rescale and cast to int16, then use watershed
    M2 = rescaled(M,0,65000).astype(np.uint16)
    newlabels = ndi.watershed_ift(M2,labels)
    
    # get rid of groups unless they have the right number of pixels
    counts = np.bincount(newlabels.flatten())
    old2new = np.arange(len(counts))
    old2new[(counts < 100) | (counts > 600)] = 0
    newlabels = old2new[newlabels]
    
    return newlabels
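
The helper rescaled is not shown; a hypothetical sketch, assuming it linearly maps an array onto the range [lo, hi]:

import numpy as np

def rescaled(M, lo, hi):
    # Linear rescaling of M into [lo, hi]; guards against a flat input.
    M = np.asarray(M, dtype=float)
    span = M.max() - M.min()
    return lo + (M - M.min()) * (hi - lo) / (span if span else 1.0)
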
Example #15
def main():
    i, h = load(sys.argv[1])
    ibo, ibc, ibe, ibd = map(int, sys.argv[3:])

    i = binary_fill_holes(i)

    if ibo != 0: i = binary_opening(i, structure=None, iterations=ibo)
    if ibc != 0: i = binary_closing(i, structure=None, iterations=ibc)

    if ibe != 0: i = binary_erosion(i, structure=None, iterations=ibe)
    if ibd != 0: i = binary_dilation(i, structure=None, iterations=ibd)

    #i = morphology2d(binary_opening, i, structure=1, iterations=1)
    #i = morphology2d(binary_closing, i, structure=1, iterations=1)

    #i = morphology2d(binary_erosion, i, structure=1, iterations=1)
    #i = morphology2d(binary_dilation, i, structure=1, iterations=1)

    if 0 == numpy.count_nonzero(i):
        raise Warning("{}: empty segmentation resulted".format(sys.argv[1]))

    save(i, sys.argv[2], h, True)
Example #16
# Create figure for noise reduction
fig = plt.figure()

# Plot original
a = fig.add_subplot(1, 2, 1)
a.set_title('Original')
plt.imshow(img, cmap=plt.cm.gray)

# Create disk structuring element
r = 7
y, x = np.ogrid[-r:r + 1, -r:r + 1]
mask = x**2 + y**2 <= r**2
mask = mask.astype(int)

# Create noise free image, using morphological opening and closing
img_morphed = morph.binary_closing(morph.binary_opening(img, structure=mask),
                                   structure=mask)

# Plot noise free version
a = fig.add_subplot(1, 2, 2)
a.set_title('Noise free')
plt.imshow(img_morphed, cmap=plt.cm.gray)

#######
#  b) #
#######


def distanceTransform(img):
    # Initialize distance transform image to 'img'
    img_dist = img.astype(float)
Example #17
def norm_process(img_path, roi_path, out_label):
    image_nii = nib.load(img_path)
    label_nii = nib.load(roi_path)
    image = np.copy(image_nii.get_data())
    label = np.copy(label_nii.get_data())
    reso = image_nii.header['pixdim'][1:4]
    assert image.shape == label.shape, 'Image shape != Label shape'
    print('Image size:', image.shape, 'image reso:', reso)

    # Get threshold for boundary
    thresh_min = threshold_minimum(image[label > 0])
    thresh_otsu = threshold_otsu(image[label > 0])

    # ax = plt.hist(image[label>0].ravel(), bins = 64)
    # plt.axvline(thresh_min, color='r')
    # plt.axvline(thresh_otsu, color='g')
    # plt.show()

    region_1 = np.logical_and(image >= thresh_otsu, label > 0).astype(np.int8)
    region_2 = np.logical_and(image < thresh_otsu, label > 0).astype(np.int8)

    new_label = np.zeros_like(label)
    new_label[region_1 > 0] = 2
    new_label[region_2 > 0] = 3

    # slice-wise
    boundaries, slice_indices = get_boundary_3d(new_label, 2)
    print(f'Found {len(slice_indices)} valid slices:', slice_indices)

    margin_label = np.zeros_like(new_label)
    for bound, slice_idx in zip(boundaries, slice_indices):
        new_slice = np.zeros(region_1.shape[:2]).astype(int)
        for pt in bound:
            new_slice[pt[0], pt[1]] = 1

        #plt.imshow(new_slice, vmin=0, vmax=1)

        order = 3
        delta_dist = 5  # 5mm
        coords = np.array(bound).transpose()
        z = np.polyfit(coords[0], coords[1], order)
        p = np.poly1d(z)

        # # judge direction
        # pos_direction = False
        # center_idx = len(coords[0])//2
        # center_pt = (coords[0][center_idx], coords[1][center_idx])
        # center_delta = get_delta_xy(center_pt, reso, p, delta_dist)
        # center_pt_pos = (center_pt[0]+center_delta[0], center_pt[1]+center_delta[1])
        # if intersect(center_pt, center_pt_pos, region_2[...,slice_idx]):
        #     pos_direction = True
        # print('Positive direction:', pos_direction)

        new_line = []
        for x, y in zip(coords[0], coords[1]):
            delta_x_px, delta_y_px = get_delta_xy((x, y), reso, p, delta_dist)
            new_pt_pos = (x + delta_x_px, y + delta_y_px)
            new_pt_neg = (x - delta_x_px, y - delta_y_px)
            if intersect((x, y), new_pt_pos, region_2[..., slice_idx]):
                inter_pts = get_line_segment((x, y), new_pt_pos)
                new_line = new_line + inter_pts
            else:
                inter_pts = get_line_segment((x, y), new_pt_neg)
                new_line = new_line + inter_pts

        for pt in new_line:
            new_slice[pt[0], pt[1]] = out_label + 8
        new_slice = binary_closing(new_slice,
                                   generate_binary_structure(2, 1),
                                   iterations=2).astype(int)
        new_slice = binary_opening(new_slice,
                                   generate_binary_structure(2, 1),
                                   iterations=1).astype(int)
        new_slice[new_slice > 0] = out_label + 8
        margin_label[..., slice_idx] = new_slice

    margin_label[new_label == 2] = out_label
    #return margin_label
    nib.save(nib.Nifti1Image(margin_label, image_nii.affine, image_nii.header),
             os.path.join(out_dir, os.path.basename(roi_path)))
Example #18
    def split_by_class_index(self,
                             i,
                             sigma=2,
                             threshold_split=0.25,
                             expand_mask=1,
                             minimum_pixels=1):
        """
        If class i contains multiple non-contiguous segments in real space, divide these regions
        into distinct classes.

        Algorithm is as described in the docstring for self.split.

        Accepts:
            i               (int) index of the class to split
            sigma           (float) std of gaussian kernel used to smooth the class images before
                            thresholding and splitting.
            threshold_split (float) used to threshold the class image to create a binary mask.
            expand_mask     (int) number of pixels by which to expand the mask before separating
                            into contiguous regions.
            minimum_pixels  (int) if, after splitting, a potential new class contains fewer than
                            this number of pixels, ignore it
        """
        assert isinstance(i, (int, np.integer))
        assert isinstance(expand_mask, (int, np.integer))
        assert isinstance(minimum_pixels, (int, np.integer))
        W_next = np.zeros((self.N_feat, 1))
        H_next = np.zeros((1, self.N_meas))

        # Get the class in real space
        class_image = self.get_class_image(i)

        # Turn into a binary mask
        class_image = gaussian_filter(class_image, sigma)
        mask = class_image > (np.max(class_image) * threshold_split)
        mask = binary_opening(mask, iterations=1)
        mask = binary_closing(mask, iterations=1)
        mask = binary_dilation(mask, iterations=expand_mask)

        # Get connected regions
        labels, nlabels = label(mask,
                                background=0,
                                return_num=True,
                                connectivity=2)

        # Add each region to the new W and H matrices
        for j in range(nlabels):
            mask = (labels == (j + 1))
            mask = binary_erosion(mask, iterations=expand_mask)

            if np.sum(mask) >= minimum_pixels:

                # Leave the Bragg peak weightings the same
                W_next = np.hstack((W_next, self.W[:, i, np.newaxis]))

                # Use the existing real space pixel weightings
                h_i = np.zeros(self.N_meas)
                h_i[mask.ravel()] = self.H[i, :][mask.ravel()]
                H_next = np.vstack((H_next, h_i[np.newaxis, :]))

        W_prev = np.delete(self.W, i, axis=1)
        H_prev = np.delete(self.H, i, axis=0)
        self.W_next = np.concatenate((W_next[:, 1:], W_prev), axis=1)
        self.H_next = np.concatenate((H_next[1:, :], H_prev), axis=0)
        self.N_c_next = self.W_next.shape[1]

        return
Example #19
def make_petal_dm_core(pupImage, pupAngleDegree):
    """
    <pupImage> : image of the pupil

    The function returns petal-shaped influence functions derived from an
    image of the pupil, which is assumed to be segmented.


    influ, i1, j1, smallsize, nbSeg = make_petal_dm_core(pupImage, 0.0)
    """
    # Splits the pupil into connex areas.
    # <segments> is the map of the segments, <nbSeg> is their number.
    # binary_opening() allows us to suppress individual pixels that could
    # be identified as relevant connex areas
    from scipy.ndimage.measurements import label
    from scipy.ndimage.morphology import binary_opening
    s = np.ones((2, 2), dtype=bool)
    segments, nbSeg = label(binary_opening(pupImage, s))

    # We need to find the smallest support common to all the
    # petals: this determines <smallsize>
    smallsize = 0
    i1t = []  # list of starting indexes of influ functions
    j1t = []
    i2t = []  # list of ending indexes of influ functions
    j2t = []
    for i in range(nbSeg):
        petal = (segments == (i + 1))  # identification (boolean) of a given segment
        profil = np.sum(petal, axis=1) != 0
        extent = np.sum(profil).astype(np.int32)
        i1t.append(np.min(np.where(profil)[0]))
        i2t.append(np.max(np.where(profil)[0]))
        if extent > smallsize:
            smallsize = extent

        profil = np.sum(petal, axis=0) != 0
        extent = np.sum(profil).astype(np.int32)
        j1t.append(np.min(np.where(profil)[0]))
        j2t.append(np.max(np.where(profil)[0]))
        if extent > smallsize:
            smallsize = extent

    # extend the minimal area to get a bit of margin
    smallsize += 2

    # Allocate array of influence functions
    influ = np.zeros((smallsize, smallsize, nbSeg), dtype=np.float32)

    npt = pupImage.shape[0]
    i0 = j0 = npt / 2 - 0.5
    petalMap = build_petals(nbSeg, pupAngleDegree, i0, j0, npt)
    ii1 = np.zeros(nbSeg)
    jj1 = np.zeros(nbSeg)
    for i in range(nbSeg):
        ip = (smallsize - i2t[i] + i1t[i] - 1) // 2
        jp = (smallsize - j2t[i] + j1t[i] - 1) // 2
        i1 = np.maximum(i1t[i] - ip, 0)
        j1 = np.maximum(j1t[i] - jp, 0)
        if (j1 + smallsize) > npt:
            j1 = npt - smallsize
        if (i1 + smallsize) > npt:
            i1 = npt - smallsize
        #petal = segments==(i+1)  # identifies the actual pupil segment
        k = petalMap[i1 + smallsize // 2, j1 + smallsize // 2]
        petal = (petalMap == k)
        influ[:, :, k] = petal[i1:i1 + smallsize, j1:j1 + smallsize]
        ii1[k] = i1
        jj1[k] = j1

    return influ, ii1, jj1, int(smallsize), nbSeg
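
A minimal demonstration of the point made in the comment above: without the opening, label() counts a stray pixel as its own connected area.

import numpy as np
from scipy.ndimage import binary_opening, label

pup = np.zeros((8, 8), dtype=bool)
pup[1:4, 1:4] = True   # a genuine segment
pup[6, 6] = True       # an isolated pixel
s = np.ones((2, 2), dtype=bool)
_, n_raw = label(pup)                        # 2 areas
_, n_clean = label(binary_opening(pup, s))   # 1 area: the lone pixel is gone
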
Example #20
def ccs4_map(cfg_set_tds,
             figsize_x=12,
             figsize_y=12,
             hillshade=True,
             radar_loc=True,
             radar_vis=True):
    """Print map of TRT cells."""

    ## Load DEM and Swiss borders
    shp_path_CH = os.path.join(
        cfg_set_tds["root_path"],
        u"data/shapefile/swissBOUNDARIES3D_1_3_TLM_LANDESGEBIET.shp")
    shp_path_Kantone = os.path.join(
        cfg_set_tds["root_path"],
        u"data/shapefile/swissBOUNDARIES3D_1_3_TLM_KANTONSGEBIET.shp")
    shp_path_count = os.path.join(
        cfg_set_tds["root_path"],
        u"data/shapefile/CCS4_merged_proj_clip_G05_countries.shp")
    dem_path = os.path.join(cfg_set_tds["root_path"], u"data/DEM/ccs4.png")
    visi_path = os.path.join(cfg_set_tds["root_path"],
                             u"data/radar/radar_composite_visibility.npy")

    dem = Image.open(dem_path)
    dem = np.array(dem.convert('P'))

    sf_CH = shapefile.Reader(shp_path_CH)
    sf_KT = shapefile.Reader(shp_path_Kantone)
    sf_ct = shapefile.Reader(shp_path_count)

    ## Setup figure
    fig_extent = (255000, 965000, -160000, 480000)
    fig, axes = plt.subplots(1, 1)
    fig.set_size_inches(figsize_x, figsize_y)

    ## Plot altitude / hillshading
    if hillshade:
        ls = colors.LightSource(azdeg=315, altdeg=45)
        axes.imshow(ls.hillshade(-dem, vert_exag=0.05),
                    extent=fig_extent,
                    cmap='gray',
                    alpha=0.5)
    else:
        axes.imshow(dem * 0.6, extent=fig_extent, cmap='gray', alpha=0.5)

    ## Get borders of Cantons
    try:
        shapes_KT = sf_KT.shapes()
    except UnicodeDecodeError:
        print("   *** Warning: No country shape plotted (UnicodeDecodeErrror)")
    else:
        for KT_i, shape in enumerate(shapes_KT):
            x = np.array([i[0] for i in shape.points[:]])
            y = np.array([i[1] for i in shape.points[:]])
            endpoint = np.where(x == x[0])[0][1]
            x = x[:endpoint]
            y = y[:endpoint]
            axes.plot(x, y, color='darkred', linewidth=0.5, zorder=5)

    ## Get borders of neighbouring countries
    try:
        shapes_ct = sf_ct.shapes()
    except UnicodeDecodeError:
        print("   *** Warning: No country shape plotted (UnicodeDecodeErrror)")
    else:
        for ct_i, shape in enumerate(shapes_ct):
            if ct_i in [0, 1]:
                continue
            x = np.array([i[0] for i in shape.points[:]])
            y = np.array([i[1] for i in shape.points[:]])
            x[x <= 255000] = 245000
            x[x >= 965000] = 975000
            y[y <= -159000] = -170000
            y[y >= 480000] = 490000
            if ct_i in [3]:
                axes.plot(x[20:170], y[20:170], color='black', linewidth=0.5)
            if ct_i in [2]:
                ## Delete common border of FR and CH:
                x_south = x[y <= 86000]
                y_south = y[y <= 86000]
                x_north = x[np.logical_and(
                    np.logical_and(y >= 270577, y <= 491000), x > 510444)]
                #x_north = x[np.logical_and(y>=270577,y<=491000)]
                y_north = y[np.logical_and(
                    np.logical_and(y >= 270577, y <= 491000), x > 510444)]
                #y_north = y[np.logical_and(y>=270577,y<=491000)]
                axes.plot(x_south,
                          y_south,
                          color='black',
                          linewidth=0.5,
                          zorder=4)
                axes.plot(x_north,
                          y_north,
                          color='black',
                          linewidth=0.5,
                          zorder=4)
            if ct_i in [4]:
                ## Delete common border of AT and CH:
                x_south = x[np.logical_and(x >= 831155, y < 235000)]
                y_south = y[np.logical_and(x >= 831155, y < 235000)]
                #x_north1 = x[np.logical_and(x>=756622,y>=260466)]
                x_north1 = x[np.logical_and(
                    np.logical_and(x >= 758622, y >= 262466), x <= 794261)]
                #y_north1 = y[np.logical_and(x>=756622,y>=260466)]
                y_north1 = y[np.logical_and(
                    np.logical_and(x >= 758622, y >= 262466), x <= 794261)]
                y_north2 = y[np.logical_and(
                    np.logical_and(x >= 774261, y >= 229333), x <= 967000)]
                x_north2 = x[np.logical_and(
                    np.logical_and(x >= 774261, y >= 229333), x <= 967000)]
                y_north2 = np.concatenate([
                    y_north2[np.argmin(x_north2):],
                    y_north2[:np.argmin(x_north2)]
                ])
                x_north2 = np.concatenate([
                    x_north2[np.argmin(x_north2):],
                    x_north2[:np.argmin(x_north2)]
                ])
                x_LI = x[np.logical_and(
                    np.logical_and(x <= 773555, y >= 214400), y <= 238555)]
                y_LI = y[np.logical_and(
                    np.logical_and(x <= 773555, y >= 214400), y <= 238555)]
                axes.plot(x_south,
                          y_south,
                          color='black',
                          linewidth=0.5,
                          zorder=4)
                axes.plot(x_north1,
                          y_north1,
                          color='black',
                          linewidth=0.5,
                          zorder=4)
                axes.plot(x_north2,
                          y_north2,
                          color='black',
                          linewidth=0.5,
                          zorder=4)
                axes.plot(x_LI, y_LI, color='black', linewidth=0.5, zorder=4)
            else:
                continue
                #axes.plot(x,y,color='black',linewidth=1,zorder=4)

    ## Get Swiss borders
    try:
        #shp_records = sf_CH.shapeRecords()
        shapes_CH = sf_CH.shapes()
    except UnicodeDecodeError:
        print("   *** Warning: No country shape plotted (UnicodeDecodeErrror)")
    else:
        for ct_i, shape in enumerate(shapes_CH):  #sf_CH.shapeRecords():
            if ct_i != 0: continue
            x = np.array([i[0] - 2000000 for i in shape.points[:]])
            y = np.array([i[1] - 1000000 for i in shape.points[:]])
            endpoint = np.where(x == x[0])[0][1]
            x = x[:endpoint]
            y = y[:endpoint]

            ## Convert to swiss coordinates
            #x,y = lonlat2xy(lon, lat)
            axes.plot(x, y, color='darkred', linewidth=1, zorder=3)

    ## Add weather radar locations:
    if radar_loc:
        weather_radar_y = [237000, 142000, 100000, 135000, 190000]
        weather_radar_x = [681000, 497000, 708000, 604000, 780000]
        axes.scatter(
            weather_radar_x,
            weather_radar_y,
            marker="D",  #s=2,
            color='orange',
            edgecolor='black',
            zorder=10)

    ## Add radar visibility:
    if radar_vis:
        arr_visi = np.load(visi_path)
        arr_visi[arr_visi < 9000] = 0
        arr_visi2 = morph.binary_opening(morph.binary_erosion(
            arr_visi, structure=np.ones((4, 4))),
                                         structure=np.ones((4, 4)))
        arr_visi[arr_visi < 9000] = np.nan
        axes.imshow(arr_visi, cmap="gray", alpha=0.2, extent=fig_extent)
        arr_visi[np.isnan(arr_visi)] = 1
    #axes.contour(arr_visi[::-1,:], levels=[2], cmap="gray", linewidths=2,
    #             linestyle="solid", alpha=0.5, extent=fig_extent)
    #arr_visi = arr_visi[::4, ::4]
    #ys, xs = np.mgrid[arr_visi.shape[0]:0:-1,
    #                  0:arr_visi.shape[1]]
    #axes.scatter(xs.flatten(), ys.flatten(), s=4,
    #             c=arr_visi.flatten().reshape(-1, 3), edgecolor='face')

    ## Add further elements:
    axes.set_xlim([255000, 965000])
    axes.set_ylim([-160000, 480000])
    axes.grid()
    axes.set_ylabel("CH1903 Northing")
    axes.set_xlabel("CH1903 Easting")
    axes.get_xaxis().set_major_formatter( \
        ticker.FuncFormatter(lambda x, p: format(int(x), ",").replace(',', "'")))
    axes.get_yaxis().set_major_formatter( \
        ticker.FuncFormatter(lambda x, p: format(int(x), ",").replace(',', "'")))
    plt.yticks(rotation=90, verticalalignment="center")
    return fig, axes, fig_extent
Example #21
def Fg_Extract(frame,type = 1,trun = 100): #extract foreground    
    
    global BG_old
    global F_case
    global M_case
    global B_case


    if type ==1:
        mu[:]       = alpha*frame + (1.0-alpha)*mu_old
        mu_old[:]   = mu
        sig2[:]     = alpha*(1.0*frame-mu)**2 + (1.0-alpha)*sig2_old
        sig2_old[:] = sig2        
        sig = sig2**0.5
        lmcs = lmc*sig
        bmcs = bmc*sig       
        sig_factor = 1
        #pdb.set_trace() 
        fg= (np.abs(1.0*frame-mu)[:,:,0]-sig_factor*sig[:,:,0]>0.0) +\
            (np.abs(1.0*frame-mu)[:,:,1]-sig_factor*sig[:,:,1]>0.0) +\
            (np.abs(1.0*frame-mu)[:,:,2]-sig_factor*sig[:,:,2]>0.0)
    elif type == 2:
        try:
            fg = np.abs(1.0*frame.mean(2)-BG)>50.0
        except:
            BG = pickle.load(open("Feb11_bg.pkl","rb"))
            BG = cv2.resize(BG,(0,0),fx = scale,fy=scale)
            fg = np.abs(1.0*frame.mean(2)-BG)>30.0

    elif type == 3 : #runningmean
        if len(vid)>trun:
            if (vid_idx-round(trun/2))<0:
                if F_case == 1: 
                    BG = BG_old
                else:
                    LB = 0
                    UB = trun-1
                    BG = array(vid[LB:UB+1]).mean(0)    
                    F_case = 1

            elif len(vid)-vid_idx<=(trun-1):
                if B_case == 1:
                    BG = BG_old
                else: 
                    LB = len(vid)-(trun-1)
                    UB = len(vid)
                    BG = array(vid[LB:UB+1]).mean(0)
                    B_case = 1                    
            else:
                if M_case == 1:
                    BG = BG_old + array(vid[vid_idx+int(trun/2)])/trun\
                                - array(vid[vid_idx-int(trun/2)+1])/trun   
                else:
                    LB = vid_idx-int(trun/2)+1
                    UB = vid_idx+int(trun/2)
                    #pdb.set_trace()
                    BG = array(vid[LB:UB+1]).mean(0)
                    M_case = 1

            fg = (np.abs(1.0*frame[:,:,0]-BG[:,:,0])>28.0)+\
                 (np.abs(1.0*frame[:,:,1]-BG[:,:,1])>28.0)+\
                 (np.abs(1.0*frame[:,:,2]-BG[:,:,2])>28.0)
        else:
            print('selected truncation is larger than the sequence....')
            BG = array(vid).mean(0)
            fg = (np.abs(1.0*frame[:,:,0]-BG[:,:,0])>30.0)+\
                 (np.abs(1.0*frame[:,:,1]-BG[:,:,1])>30.0)+\
                 (np.abs(1.0*frame[:,:,2]-BG[:,:,2])>30.0)

    elif type==4: 
        if len(vid)>trun:

            if len(vid)-vid_idx<=(trun-1):
                if B_case == 1:
                    BG = BG_old
                else:
                    LB = len(vid)-(trun-1)
                    UB = len(vid)
                    BG = array(vid[LB:UB+1]).mean(0)
                    B_case = 1
            else:             
                if F_case == 1:
                    BG = BG_old + array(vid[vid_idx+trun-1])/trun\
                                - array(vid[vid_idx-1])/trun
                else:
                    LB = 0
                    UB = trun-1
                    BG = array(vid[LB:UB+1]).mean(0)
                    F_case = 1

            fg = (np.abs(1.0*frame[:,:,0]-BG[:,:,0])>30.0)+\
                 (np.abs(1.0*frame[:,:,1]-BG[:,:,1])>30.0)+\
                 (np.abs(1.0*frame[:,:,2]-BG[:,:,2])>30.0)
        else:
            print('selected truncation is larger than the sequence....')
            BG = array(vid).mean(0)
            fg = (np.abs(1.0*frame[:,:,0]-BG[:,:,0])>30.0)+\
                 (np.abs(1.0*frame[:,:,1]-BG[:,:,1])>30.0)+\
                 (np.abs(1.0*frame[:,:,2]-BG[:,:,2])>30.0)


    if maskon:
       fg = fg*mask

    fgo = ndm.binary_opening(fg)
    fgf = ndm.binary_fill_holes(fgo)
    right.set_data(fgf)
    plt.draw()
    BG_old = BG       
    return fgf
Example #22
def basicProcessing(volume, sigma, order, output, mode, truncate):
    """This function shows an example of processing a volume.

    Parameters
    ----------
    volume : array
        Array in which different processing will be applied.

    sigma : int or sequence of int
        Standard deviation for Gaussian kernel.

    order : int or sequence of int
        An order of 0 corresponds to convolution with a Gaussian kernel. An order of 1, 2, or 3 corresponds to
        convolution with the first, second or third derivatives of a Gaussian. Higher order derivatives are
        not implemented.

    output : array or dtype
        The array in which to place the output, or the dtype of the returned array. By default an array of
        the same dtype as input will be created.

    mode : str
        The mode parameter determines how the input array is extended when the filter overlaps a border.

    truncate : float
        Truncate the filter at this many standard deviations. Default is 4.0.
    """


    #### Filters ###

    result = gaussian_filter(input=volume, sigma=sigma, order=order, output=output, mode=mode, truncate=truncate)

    val = threshold_otsu(result)
    print("val : {}".format(val))

    mask = np.zeros(volume.shape, dtype=np.int8)
    mask[volume > val] = 1
    #mask = mask.astype(int)

    print("mask shape: {}".format(mask.shape))
    print(mask)


    #### Morphological Operation ###

    # Opening removes small objects
    r1 = binary_opening(mask, structure=np.ones((3, 3, 3))).astype(np.int8)

    # Closing removes small holes
    r2 = binary_closing(r1, structure=np.ones((3, 3, 3))).astype(np.int8)


    # 3x3x3 structuring element with connectivity 1 (faces only) or 2 (with edge diagonals)
    struct1 = generate_binary_structure(3, 1)   # no diagonal elements
    #struct1 = generate_binary_structure(3, 2)  # with diagonal elements
    #struct1 = struct1.astype(int)
    print(struct1)


    #r3 = binary_dilation(r2).astype(int)
    r3 = binary_dilation(r2, structure=struct1).astype(int)    # using a structure element

    # Erosion removes objects smaller than the structure
    r4 = binary_erosion(r3, structure=np.ones((3, 3, 3))).astype(np.int8)


    #### Measurements ###

    struct2 = np.ones((3, 3, 3), dtype=np.int8)
    labeled_array, num_features = label(r4, structure=struct2)

    #print(labeled_array)
    print(num_features)

    return labeled_array, num_features
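
A quick sketch of the two structuring elements discussed above (3-D connectivity 1 vs. 2):

from scipy.ndimage import generate_binary_structure

s1 = generate_binary_structure(3, 1)  # 6-connected: center plus face neighbours, 7 True voxels
s2 = generate_binary_structure(3, 2)  # 18-connected: also edge diagonals, 19 True voxels
print(s1.sum(), s2.sum())  # 7 19
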
Example #23
def preprocessing(image, smooth_size, folder):
    """
    'The image low contrast and under segmentation
    problem is not yet addressed by most of the researchers'
    
    'Other researchers also proposed different methods to
    remedy the problem of watershed. Li, El-moataz, Fadili,
    and Ruan, S. (2003) proposed an improved image
    segmentation approach based on level set and
    mathematical morphology'
    
    THE SPHERES MUST BE ALMOST ALONG THE SAME PLANES IN Z DIRECTION
    IF THEY ARE TOUCHING AND OVERLAP, WHILE BEING ALMOST MERGED
    IT IS IMPOSSIBLE TO RESOLVE THEM
    
    ONE IDEA MIGHT BE TO DETECT CENTRES ALONG ONE AXIS AND THEN ANOTHER
    AFTER ALL THE CENTRES WERE FOUND COMBINE THEM SOMEHOW... 
    """
    from skimage.restoration import denoise_tv_chambolle

    dim = int(image.shape[0] / 50.)
    smoothed = rank.median(image, disk(smooth_size))
    #smoothed = denoise_tv_chambolle(image, weight=0.002)
    smoothed = rank.enhance_contrast(smoothed, disk(smooth_size))

    pl.subplot(2, 3, 1)
    pl.title("after median")
    pl.imshow(smoothed)
    pl.gray()
    # If after smoothing the "dot" disappears
    # use the image value

    # TODO: wat do with thresh?
    try:
        im_max = smoothed.max()
        thresh = threshold_otsu(image)
    except:
        im_max = image.max()
        thresh = threshold_otsu(image)

    if im_max < thresh:
        labeled = np.zeros(smoothed.shape, dtype=np.int32)

    else:
        binary = smoothed > thresh

        # TODO: this array size is the fault of errors
        bin_open = binary_opening(binary, np.ones((dim, dim)), iterations=5)
        bin_close = binary_closing(bin_open, np.ones((5, 5)), iterations=5)

        pl.subplot(2, 3, 2)
        pl.title("threshold")
        pl.imshow(binary, interpolation='nearest')
        pl.subplot(2, 3, 3)
        pl.title("opening")
        pl.imshow(bin_open, interpolation='nearest')
        pl.subplot(2, 3, 4)
        pl.title("closing")
        pl.imshow(bin_close, interpolation='nearest')

        distance = ndimage.distance_transform_edt(bin_open)
        local_maxi = peak_local_max(distance, indices=False, labels=bin_open)

        markers = ndimage.label(local_maxi)[0]

        labeled = watershed(-distance, markers, mask=bin_open)
        pl.subplot(2, 3, 5)
        pl.title("label")
        pl.imshow(labeled)
        #pl.show()
        pl.savefig(folder)
        pl.close('all')

        #misc.imsave(folder, labeled)


#         labels_rw = random_walker(bin_close, markers, mode='cg_mg')
#
#         pl.imshow(labels_rw, interpolation='nearest')
#         pl.show()

    return labeled
Example #24
# get data mask from index and volume
if args.index is not None:
    volume = volume_nib.get_data()
    mask = np.zeros_like(volume)
    for index in args.index:
        mask = np.logical_or(mask, volume == index)
else:
    mask = volume_nib.get_data()

# Basic morphology
if args.erosion is not None:
    mask = morphology.binary_erosion(mask, iterations=args.erosion)
if args.dilation is not None:
    mask = morphology.binary_dilation(mask, iterations=args.dilation)
if args.opening is not None:
    mask = morphology.binary_opening(mask, iterations=args.opening)
if args.closing is not None:
    mask = morphology.binary_closing(mask, iterations=args.closing)
    

# Label fill
if args.max_label:
    label_objects, nb_labels = ndi.label(mask)
    sizes = np.bincount(label_objects.ravel())
    sizes[0] = 0 # ignore the zero (background) label
    max_label = np.argmax(sizes)
    max_mask = (label_objects == max_label)
    mask = max_mask

# Extract marching cube surface from mask
vertices, triangles = mcubes.marching_cubes(mask, args.value)
Example #25
def count_items(img1_f, it):
    im_open = morphology.binary_opening( \
      img1_f,ones((9,5)), iterations=it)
    labels_open, nbr_objects_open = measurements.label(im_open)
    return labels_open
Example #26
def bright_field_segmentation(stack, debug=False):
    """Segments cells out of bright field microscopy images
        PARAMS:
            stack (np.array): 
            debug (bool): True if diagnostics plots are needed
        RETURNS:
            mask (np.array): stack of masks where the cells were detected
    """

    stack = enhance_contrast(stack, 'hist-equal')

    # Gradient computation and gaussian filtering
    mask = np.empty_like(stack)
    for i in range(stack.shape[0]):
        mask[i] = skimage.filters.sobel(stack[i])
        mask[i] = ndimage.gaussian_filter(mask[i], 1.5)

    # Morphology
    print("Morphology...")
    # mask = morphology.grey_erosion(mask, structure=np.ones((2, 1,1)))
    print(mask.min(), mask.max())
    mask = morphology.binary_closing((mask < 20 / 255).astype('uint8'),
                                     structure=np.ones((2, 2, 2)))
    mask = morphology.binary_erosion(mask,
                                     structure=structural_element(
                                         'circle', (2, 5, 5)))
    mask = morphology.binary_opening(mask,
                                     structure=structural_element(
                                         'circle', (2, 5, 5)))
    mask = morphology.binary_closing(mask,
                                     structure=structural_element(
                                         'circle', (3, 10, 10)))
    # mask = morphology.binary_closing((mask > 1.35).astype('uint8'), structure=structural_element('square', (3, 10, 10)))
    # mask = morphology.binary_opening(mask, structure=np.ones((1, 5,5)))

    # stack = enhance_contrast(stack, 'hist-equal')

    # # Gradient computation and gaussian filtering
    # mask = np.zeros_like(stack)
    # for i in range(stack.shape[0]):
    #     mask[i] = skimage.filters.sobel(stack[i])
    #     mask[i] = ndimage.gaussian_filter(mask[i], 1.5)

    # # Morphology
    # print("Morphology...")
    # mask = morphology.binary_dilation(mask < 0.01, structure=structural_element('circle', (3,10,10)))
    # mask = morphology.binary_closing(mask, structure=structural_element('cross', (3,13,13)))
    # mask = morphology.binary_opening(mask, structure=structural_element('circle', (1,15,15)))

    # NOTE: debugging leftover; this unconditional dump and the raise below
    # make the debug branch and the return unreachable.
    imageio.mimsave(
        'stack.gif',
        np.concatenate([(stack * 255).astype('uint8'),
                        ((mask) * 255).astype('uint8')],
                       axis=-1))
    raise KeyboardInterrupt

    if debug:
        imageio.mimsave(
            'brigth_field_seg.gif',
            np.concatenate([(stack * 255).astype('uint8'),
                            ((mask) * 255).astype('uint8')],
                           axis=-1))

    return mask.astype('uint8')
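
The helper structural_element is not shown; a hypothetical sketch, assuming 'circle' means a 2-D disk replicated across the z extent of the requested (z, y, x) shape:

import numpy as np

def structural_element(kind, shape):
    # Hypothetical: boolean disk in (y, x), repeated over z.
    nz, ny, nx = shape
    yy, xx = np.mgrid[-1:1:ny * 1j, -1:1:nx * 1j]
    disk2d = yy ** 2 + xx ** 2 <= 1.0
    if kind == 'circle':
        return np.repeat(disk2d[None], nz, axis=0)
    return np.ones(shape, dtype=bool)
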
Example #27
from scipy.ndimage import morphology, measurements
from PIL import Image
import numpy as np
import pylab as plt
import os

parent_dir = os.path.split(os.getcwd())[0]
# im = np.array(Image.open(os.getcwd() + '/images/data/ceramic-houses_t0.png').convert('L'))
im = np.array(Image.open(os.getcwd() + '/images/data/houses.png').convert('L'))
im = 1 * (im < 128)

im2 = morphology.binary_opening(im, np.ones((9, 5)), iterations=2)

labels, num_objects = measurements.label(im2)

plt.gray()
plt.imshow(im2)
plt.title('Number of objects: {}'.format(num_objects))

plt.figure()
plt.imshow(labels)

plt.show()
Example #28
def seg_pose(csvpath, dump):
    THRESHOLD_EYE = 0.8
    THRESHOLD_EAR = 0.8
    THRESHOLD_SHOULDER = 0.75
    THRESHOLD_HIP = 0.2
    THRESHOLD_KNEE = 0.1
    MIN_SEGMENT_LENGTH = 30
    EXPAND_SEGMENT_LENGTH = 9
    MED_WND = 9
    MAX_SEGMENTS_PER_HOUR = 8

    # read csv
    df = dict()
    with open(csvpath) as f:
        columns = f.readline().split(',')
        for col in columns:
            df[col] = []
        for line in f:
            for idx, val in enumerate(line.split(',')):
                df[columns[idx]].append(float(val))
        for col in columns:
            df[col] = np.array(df[col])

    t = df['pts']

    if len(t) <= 10:
        print(f'Too few body pose samples, ignored. {csvpath}',
              file=sys.stderr)
        exit(0)

    intra_frame_interval = np.diff(t).mean()

    print((f'        frames: {len(t)}\n'
           f'      duration: {sec_to_time_repr(t.max())}\n'
           f'frame interval: {round(intra_frame_interval, 3)}/s'),
          file=sys.stderr)

    # eye width
    eye2, eye = smooth(np.abs(df['leftEyeX'] - df['rightEyeX']), MED_WND)
    eye_c2, eye_c = smooth((df['leftEye'] + df['rightEye']) / 2, MED_WND)
    mode_eye = find_mode(eye, t)

    # ear width
    ear2, ear = smooth(np.abs(df['leftEarX'] - df['rightEarX']), MED_WND)
    ear_c2, ear_c = smooth((df['leftEar'] + df['rightEar']) / 2, MED_WND)
    mode_ear = find_mode(ear, t)

    # shoulder width
    sld2, sld = smooth(np.abs(df['leftShoulderX'] - df['rightShoulderX']),
                       MED_WND)
    sld_c2, sld_c = smooth((df['leftShoulder'] + df['rightShoulder']) / 2,
                           MED_WND)
    mode_sld = find_mode(sld, t)

    # knee detection confidence
    knee_c2, knee_c = smooth((df['leftKnee'] + df['rightKnee']) / 2, MED_WND)
    mode_knee_c = find_mode(knee_c, t)

    # hip detection confidence
    hip_c2, hip_c = smooth((df['leftHip'] + df['rightHip']) / 2, MED_WND)
    mode_hip_c = find_mode(hip_c, t)

    # score
    decision_eye = mode_eye * THRESHOLD_EYE
    decision_ear = mode_ear * THRESHOLD_EAR
    decision_sld = mode_sld * THRESHOLD_SHOULDER
    decision_hip = (1 - mode_hip_c) * THRESHOLD_HIP + mode_hip_c
    decision_knee = (1 - mode_knee_c) * THRESHOLD_KNEE + mode_knee_c

    weight = np.array([[0.25], [0.35], [0.4], [0.5], [1]])
    feat_score = [
        eye2 < decision_eye,
        ear2 < decision_ear,
        sld2 < decision_sld,
        hip_c2 > decision_hip,
        knee_c2 > decision_knee,
    ]
    score = np.sum(np.multiply(weight, feat_score), axis=0)

    # make decision
    segment_frames = int(round(MIN_SEGMENT_LENGTH / intra_frame_interval))
    expand_frames = int(round(EXPAND_SEGMENT_LENGTH / intra_frame_interval))
    expand_struct = [True] * expand_frames
    filter_struct = [True] * segment_frames

    decision = score > 0.8
    decision = morphology.binary_closing(decision, filter_struct)
    decision = morphology.binary_opening(decision, filter_struct)
    decision = morphology.binary_dilation(decision, expand_struct)

    # scan through decision score, build segment list
    segments = []
    ignored_segments = []
    cur_start = None

    for i in range(decision.shape[0]):
        # mark start position
        if not cur_start and decision[i]:
            cur_start = t[i], i

        # mark end position, do extra checks
        if cur_start and not decision[i]:
            # check duration is reasonable
            # check segment is constructed with sufficient samples
            # detection in samples are dynamic (e.g. not from a static photo)

            start_t, start_i = cur_start
            end_t, end_i = t[i], i
            duration = end_t - start_t

            cur_start = None  # unmark start position for next iteration

            n_actual_samples = end_i - start_i
            n_expected_samples = floor(
                (end_t - start_t) / intra_frame_interval) + 1
            sample_ratio = n_actual_samples / n_expected_samples

            volatility = np.mean([
                compute_volatility(eye[start_i:end_i + 1]),
                compute_volatility(ear[start_i:end_i + 1]),
                compute_volatility(sld[start_i:end_i + 1]),
            ])

            if duration > 600:
                # most likely misdetection
                # a typical dance should not last more than 10 minutes
                print(
                    f'Ignore segment {round(start_t, 3)} to {round(end_t, 3)}: too long duration, time = {round(duration)} secs',
                    file=sys.stderr)
                ignored_segments.append((start_t, end_t, 'too long', 'T'))
                continue

            if sample_ratio < 0.3:
                # most likely static image
                print(
                    f'Ignore segment {round(start_t, 3)} to {round(end_t, 3)}: too few valid samples, ratio = {round(sample_ratio, 2)}',
                    file=sys.stderr)
                ignored_segments.append((start_t, end_t, 'valid samples', 'S'))
                continue

            if volatility < 0.08:
                print(
                    f'Ignore segment {round(start_t, 3)} to {round(end_t, 3)}, too small volatility, r_vol = {volatility.round(5)}',
                    file=sys.stderr)
                ignored_segments.append((start_t, end_t, 'volatility', 'V'))
                continue

            segments.append((start_t, end_t))

    if len(segments) > (np.max(t) - np.min(t)) / 3600 * MAX_SEGMENTS_PER_HOUR:
        print(f'Too many segments, possibly wrong type. {csvpath}',
              file=sys.stderr)
        print(f'Ignoring all segments for automatic extraction.',
              file=sys.stderr)
        for start_t, end_t in segments:
            print(
                f'    {{"start_t": {round(start_t, 3)}, "end_t": {round(end_t, 3)}}}',
                file=sys.stderr)
        segments = []

    for start_t, end_t in segments:
        print(
            f'{{"start_t": {round(start_t, 3)}, "end_t": {round(end_t, 3)}}}')

    if dump:
        try:
            import matplotlib
            matplotlib.use('Agg')
            matplotlib.rcParams.update({'font.size': 18})

            import matplotlib.pyplot as plt

            fig, ax = plt.subplots(6, 1, figsize=(36, 24), sharex=True)
            fig.suptitle(os.path.basename(csvpath))

            f_eye = plot_yc(ax[0], eye, eye2, eye_c, eye_c2, t, mode_eye,
                            decision_eye, 'eye')
            f_ear = plot_yc(ax[1], ear, ear2, ear_c, ear_c2, t, mode_ear,
                            decision_ear, 'ear')
            f_sld = plot_yc(ax[2], sld, sld2, sld_c, sld_c2, t, mode_sld,
                            decision_sld, 'shoulder')
            f_hip = plot_c(ax[3], hip_c, hip_c2, t, mode_hip_c, decision_hip,
                           'hip')
            f_knee = plot_c(ax[4], knee_c, knee_c2, t, mode_knee_c,
                            decision_knee, 'knee')
            f_decision = plot_c(ax[5], score,
                                decision * score.max() * 1.1, t, None, None,
                                'decision')

            for start_t, end_t, reason, code in ignored_segments:
                m_time = (end_t + start_t) / 2
                ax[5].text(m_time,
                           1.55,
                           code,
                           horizontalalignment='center',
                           verticalalignment='bottom',
                           color='red',
                           fontsize=26)
                ax[5].fill_between([start_t, end_t],
                                   0,
                                   1.5,
                                   color=[1, 0.6, 0.6])

            labels = [sec_to_time_repr(t) for t in ax[5].get_xticks()]
            ax[5].set_xticklabels(labels)
            ax[5].tick_params(axis='x', length=8, width=2, colors='black')
            fig.tight_layout()

            if not dump.endswith('.png'):
                png_path = dump + '.png'
            else:
                png_path = dump

            fig.savefig(png_path,
                        dpi=144,
                        optimize=True,
                        facecolor='w',
                        format='png')
        except:
            print(f'Fail to dump analysis diagram, error:', file=sys.stderr)
            print(traceback.format_exc(), file=sys.stderr)
            try:
                trace_path = dump + '.png'
                with open(trace_path, 'w') as f:
                    print(f'Fail to dump analysis diagram, error:', file=f)
                    print(traceback.format_exc(), file=f)
            except:
                print(f'Fail to write dump trace, error:', file=sys.stderr)
                print(traceback.format_exc(), file=sys.stderr)
Example #29
start = 15225
end = 15240
buf = double(vid[start - trun // 2:start + trun // 2 + 1])
ind = list(range(trun + 1))
ind.pop(trun // 2)
th =40

#build tree filter
Tf = (vid[0:100,:,:,1].mean(0)>100) & (vid[0:100,:,:,0].mean(0)<100)
Tf = ndm.binary_closing(Tf,structure=np.ones((4,4)))
Tf = ~ndm.binary_fill_holes(Tf)


for ii in range(start,end):
    print(ii)
    left.set_data(vid[ii][:,:,::-1])
    #pdb.set_trace()
     
    BG[:] = np.abs(buf[ind] - buf[trun // 2]).mean(3).mean(0) > 40.
    #fg = np.abs(buf[ind].mean(3)-buf[trun/2].mean(2)).mean(0)>40.
    
    fgo = ndm.binary_opening(BG*Tf)
    fgf = ndm.binary_fill_holes(fgo)

    right.set_data(mf(fgf,5))
    plt.draw()
     
    buf = np.roll(buf,-1,0)
    buf[-1] = vid[ii + trun // 2 + 1]

Example #30
0
def open(mask, disk_size, iterations):
    return binary_opening(mask, disk(disk_size), iterations=iterations)
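A minimal usage sketch for the helper above (note that calling it open shadows Python's built-in open). The import sources are assumptions, since the snippet omits them: binary_opening from scipy.ndimage and disk from skimage.morphology.

import numpy as np
from scipy.ndimage import binary_opening
from skimage.morphology import disk

mask = np.zeros((32, 32), dtype=bool)
mask[10:20, 10:20] = True   # a solid blob that should survive the opening
mask[0, 0] = True           # an isolated pixel the opening should remove

cleaned = binary_opening(mask, disk(2), iterations=1)
assert not cleaned[0, 0] and cleaned[14, 14]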
Example #31
0
def cluster_analysis(_imagefile, _anomalyfile, _matfile, write_dir, _junk,
                     file_name, permanent_dir):
    _feature_data = []
    _hist_data = []
    # escape the dot so '.pgm' matches only the literal extension
    prefix = re.split(r'IR_|\.pgm', _imagefile)[0]
    # print(prefix);
    postfix = re.split(r'IR_|\.pgm', _imagefile)[1]
    # print(postfix);
    # _image = imread(_imagefile)
    # Get the region properties of the whole fruit
    # We need these to express the properties of anomaly region as ratios
    # of the whole fruit surface
    (_image, mask) = segment.segment(_imagefile, _matfile)
    _image = np.asarray(_image)
    mask = np.asarray(mask)
    mask = mask.astype(int)
    _props = measure.regionprops(mask, _image)
    plt.imshow(_image, cmap='gray')
    plt.close()
    if len(_props) == 0:
        return None
    else:
        _props = measure.regionprops(mask, _image)[0]
        # To store the sample points (pixel coordinates + pixel value)
        _datapoints = []
        # To store only the pixel coordinates
        _coords = []

        # turn on interactive mode. Required in VS for displaying figures interactively
        # during script execution
        # plt.ion()

        # Read the file
        # 'rU' mode was removed in Python 3; plain 'r' reads text files
        with open(_anomalyfile, 'r') as inp:
            reader = csv.reader(inp)
            for row in reader:
                _datapoints.append([row[1], row[2], row[0]])
                _coords.append([row[1], row[2]])

        # Convert the values from strings to numbers using this hack I found
        # on Stack Overflow (map() must be wrapped in list() on Python 3)
        _datapoints = list(map(myFloat, _datapoints))
        _coords = list(map(myFloat, _coords))
        _coords = list(map(myInt, _coords))

        # Convert the lists into arrays
        _datapoints = np.asarray(_datapoints)
        _coords = np.asarray(_coords)

        # Normalize the data points (0 mean and 1 standard deviation)
        _center_xform = StandardScaler().fit_transform(_datapoints)

        # Do the clustering
        db = DBSCAN(eps=0.3, min_samples=20).fit(_center_xform)

        labels = db.labels_
        labels_set = set(labels)
        # print(labels_set)
        # Remove the anomalies label
        labels_set.discard(-1)

        # Non-empty clusters found
        nclusters = 0

        for k in labels_set:
            # Get points in the current cluster
            members = (labels == k)
            members = _coords[members]

            # Form a binary image representing the cluster as points with value 1
            bw = np.zeros((480, 640), dtype=bool)
            for c in members:
                # Array indexing needs a tuple; lists don't work
                xy = tuple(c)
                bw[xy] = 1

            # Merge the points into one large region
            bw = morphology.binary_closing(bw, np.ones((3, 3)), iterations=6)
            bw = morphology.binary_opening(bw, np.ones((3, 3)), iterations=3)
            bw = morphology.binary_fill_holes(bw)
            # Remove very small regions (the in_place keyword is deprecated
            # in recent scikit-image, so reassign instead)
            bw = skimorph.remove_small_objects(bw)

            # Need to do this to avoid error in latest skimage library
            bw = bw.astype(int)

            # Binary image contains a region?
            if bw.any():
                nclusters += 1

                points = bw.nonzero()
                values = _image[points]
                cluster_props = measure.regionprops(bw, _image)[0]

                features = {}

                # These two are not features; they are only used for plotting
                features['points'] = points
                features['values'] = values

                # Eccentricity of the ellipse
                features['eccentricity'] = cluster_props.eccentricity

                # Diameter of the circle with the same area as the region
                # Normalized using image width
                features[
                    'eq_diameter'] = cluster_props.equivalent_diameter / 640

                # Number of objects - number of holes (8 connectivity)
                features['euler_number'] = cluster_props.euler_number
                # Fraction of area of entire fruit occupied
                features['area'] = 1. * cluster_props.area / _props.area

                # Ratio of pixels in the region to pixels of the convex hull
                features['solidity'] = cluster_props.solidity

                # Ellipse properties
                features[
                    'major_axis'] = 1. * cluster_props.major_axis_length / _props.major_axis_length
                features[
                    'minor_axis'] = 1. * cluster_props.minor_axis_length / _props.minor_axis_length

                # Normalized mean pixel value and standard deviation
                features[
                    'mean_value'] = 1. * cluster_props.mean_intensity / _props.max_intensity
                features['std'] = np.std(values) / _props.max_intensity

                hist = values.copy()
                hist = hist - _props.min_intensity
                hist = 256. * hist / _props.max_intensity
                hist = hist.astype(int)
                bins = np.bincount(hist, minlength=256)
                hist = []
                for i in range(0, 32):
                    start = i * 4
                    end = start + 4
                    v = 1. * sum(bins[start:end]) / values.size
                    hist.append(v)
                    features['hist' + str(i)] = v

                plt.figure()
                plt.bar(np.arange(32), hist)
                #plt.savefig(prefix + postfix + "_Histogram_" + str(nclusters) + ".png")
                print("->->->->->", _junk + file_name + postfix)
                plt.savefig(_junk + file_name + "_" + postfix + "_Histogram_" +
                            str(nclusters) + ".png")
                _feature_data.append(features)

            plt.close()
    # for cluster in _feature_data:
    #    points = cluster['points']
    #    im = np.zeros((480, 640), dtype=int)
    #    im[points] = cluster['values']
    #    plt.figure()
    #    plt.imshow(im, cmap='gray')

    # plt.show()
    n1 = csvwrite(_imagefile, _feature_data, permanent_dir)
    # n2 = csvwrite_histo(_imagefile, _hist_data);
    # mergeCSV(n1, n2);
    return _feature_data
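The 32-bin histogram loop in cluster_analysis above can be collapsed into a reshape; a sketch under the same assumptions (bins is the length-256 result of np.bincount, n_values the cluster pixel count):

import numpy as np

def coarse_hist(bins, n_values, nbins=32, width=4):
    # Sum each run of `width` fine bins and normalize by the pixel
    # count, matching the explicit loop in cluster_analysis.
    grouped = bins[:nbins * width].reshape(nbins, width).sum(axis=1)
    return grouped / float(n_values)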
Example #32
0
    img10 = img99.clone()
    img10.removeDim('t')
    img10.data = np.max(img99.data, axis=0)
    imgG = img10.clone()
    imgG.data = gaussian_filter(imgG.data, [0, sspread, sspread])
    img11 = img10.clone()
    for k in range(50):
        for n in range(img10.data.shape[0]):
            img11.data[n] = morph.binary_closing(
                img10.data[n], iterations=1).astype('uint8') * 255
        if np.all(img11.data == img10.data):
            break
        else:
            img10.data = img11.data.copy()
    for n in range(img11.data.shape[0]):
        img11.data[n] = morph.binary_opening(
            img11.data[n], iterations=1).astype('uint8') * 255
    img11.data[:] = 0
    img11.data[img10.data > 1] = imgG.data[img10.data > 1]
    img11.data = gaussian_filter(img11.data, [0, sspread, sspread])
    img11.data = img11.data / np.percentile(img11.data[img11.data > 0],
                                            99) * 255
    img11.save(savePath + '/' + case + '/segmentation_closing/img')
    img11.mimwrite2D(savePath + '/' + case + '/segmentation_closing',
                     axes=('h', 'y', 'x'))


def combineAndSyncSlices(savePath,
                         focusSlice,
                         guessPeriod,
                         stackstr='',
                         translateToStack=True,
Example #33
0
def mark_orders(
    im,
    min_cluster=500,
    filter_size=120,
    noise=8,
    opower=4,
    border_width=5,
    degree_before_merge=2,
    regularization=0,
    closing_shape=(5, 5),
    opening_shape=(2, 2),
    plot=False,
    manual=True,
    auto_merge_threshold=0.9,
    merge_min_threshold=0.1,
    sigma=2,
):
    """ Identify and trace orders

    Parameters
    ----------
    im : array[nrow, ncol]
        order definition image
    min_cluster : int, optional
        minimum cluster size in pixels (default: 500)
    filter_size : int, optional
        size of the running filter (default: 120)
    noise : float, optional
        noise to filter out (default: 8)
    opower : int, optional
        polynomial degree of the order fit (default: 4)
    border_width : int, optional
        number of pixels at the bottom and top borders of the image to ignore for order tracing (default: 5)
    plot : bool, optional
        whether to plot the final order fits (default: False)
    manual : bool, optional
        whether to manually select clusters to merge (strongly recommended) (default: True)

    Returns
    -------
    orders : array[nord, opower+1]
        order tracing coefficients (in numpy order, i.e. largest exponent first)
    """

    # Convert to signed integer, to avoid underflow problems
    im = np.asarray(im)
    im = im.astype(int)

    if filter_size is None:
        col = im[:, im.shape[0] // 2]
        col = median_filter(col, 5)
        threshold = np.percentile(col, 90)
        npeaks = find_peaks(col, height=threshold)[0].size
        filter_size = im.shape[0] // npeaks
        logging.info("Median filter size, estimated: %i", filter_size)
    elif filter_size <= 0:
        raise ValueError(f"Expected filter size > 0, but got {filter_size}")

    if border_width is None:
        # find width of orders, based on central column
        col = im[:, im.shape[0] // 2]
        col = median_filter(col, 5)
        idx = np.argmax(col)
        width = peak_widths(col, [idx])[0][0]
        border_width = int(np.ceil(width))
        logging.info("Image border width, estimated: %i", border_width)
    elif border_width < 0:
        raise ValueError(f"Expected border width > 0, but got {border_width}")

    if min_cluster is None:
        min_cluster = im.shape[1] // 4
        logging.info("Minimum cluster size, estimated: %i", min_cluster)
    elif not np.isscalar(min_cluster):
        raise TypeError(
            f"Expected scalar minimum cluster size, but got {min_cluster}")

    # blur image along columns, and use the median + blurred + noise as threshold
    blurred = gaussian_filter1d(im, filter_size, axis=0)

    if noise is None:
        tmp = np.abs(blurred.flatten())
        noise = np.percentile(tmp, 5)
        logging.info("Background noise, estimated: %f", noise)
    elif not np.isscalar(noise):
        raise TypeError(f"Expected scalar noise level, but got {noise}")

    threshold = np.ma.median(blurred - im, axis=0)
    mask = im > blurred + noise + np.abs(threshold)
    # remove borders
    if border_width != 0:
        mask[:border_width, :] = mask[-border_width:, :] = False
    # remove masked areas with no clusters
    mask = np.ma.filled(mask, fill_value=False)
    # close gaps in between clusters
    struct = np.full(closing_shape, 1)
    mask = morphology.binary_closing(mask, struct, border_value=1)
    # remove small lonely clusters
    struct = np.full(opening_shape, 1)
    # struct = morphology.generate_binary_structure(2, 1)
    mask = morphology.binary_opening(mask, struct)

    # label clusters
    clusters, _ = label(mask)

    # remove small clusters
    sizes = np.bincount(clusters.ravel())
    mask_sizes = sizes > min_cluster
    mask_sizes[
        0] = True  # This is the background, which we don't need to remove
    for i in np.arange(len(sizes))[~mask_sizes]:
        clusters[clusters == i] = 0

    # Reorganize x, y, clusters into a more convenient "pythonic" format
    # x, y become dictionaries, with an entry for each order
    # n is just a list of all orders (ignore cluster == 0)
    n = np.unique(clusters)
    n = n[n != 0]
    x = {i: np.where(clusters == c)[0] for i, c in enumerate(n)}
    y = {i: np.where(clusters == c)[1] for i, c in enumerate(n)}

    def best_fit_degree(x, y):
        L1 = np.sum((np.polyval(np.polyfit(y, x, 1), y) - x)**2)
        L2 = np.sum((np.polyval(np.polyfit(y, x, 2), y) - x)**2)

        # aic1 = 2 + 2 * np.log(L1) + 4 / (x.size - 2)
        # aic2 = 4 + 2 * np.log(L2) + 12 / (x.size - 3)

        if L1 < L2:
            return 1
        else:
            return 2

    if sigma > 0:
        degree = {i: best_fit_degree(x[i], y[i]) for i in x.keys()}
        bias = {i: np.polyfit(y[i], x[i], deg=degree[i])[-1] for i in x.keys()}
        n = list(x.keys())
        yt = np.concatenate([y[i] for i in n])
        xt = np.concatenate([x[i] - bias[i] for i in n])
        coef = np.polyfit(yt, xt, deg=degree_before_merge)

        res = np.polyval(coef, yt)
        cutoff = sigma * (res - xt).std()

        # DEBUG plot
        # uy = np.unique(yt)
        # mask = np.abs(res - xt) > cutoff
        # plt.plot(yt, xt, ".")
        # plt.plot(yt[mask], xt[mask], "r.")
        # plt.plot(uy, np.polyval(coef, uy))
        # plt.show()
        #

        m = {
            i: np.abs(np.polyval(coef, y[i]) - (x[i] - bias[i])) < cutoff
            for i in x.keys()
        }

        k = max(x.keys()) + 1
        for i in range(1, k):
            new_img = np.zeros(im.shape, dtype=int)
            new_img[x[i][~m[i]], y[i][~m[i]]] = 1
            clusters, _ = label(new_img)

            x[i] = x[i][m[i]]
            y[i] = y[i][m[i]]
            if len(x[i]) == 0:
                del x[i], y[i]

            nnew = np.max(clusters)
            if nnew != 0:
                xidx, yidx = np.indices(im.shape)
                for j in range(1, nnew + 1):
                    xn = xidx[clusters == j]
                    yn = yidx[clusters == j]
                    if xn.size >= min_cluster:
                        x[k] = xn
                        y[k] = yn
                        k += 1
                # plt.imshow(clusters, origin="lower")
                # plt.show()

    if plot:
        plt.title("Identified clusters")
        plt.xlabel("x [pixel]")
        plt.ylabel("y [pixel]")
        clusters = np.ma.zeros(im.shape, dtype=int)
        for i in x.keys():
            clusters[x[i], y[i]] = i + 1
        clusters[clusters == 0] = np.ma.masked

        plt.imshow(clusters, origin="lower", cmap="prism")
        plt.show()

    # Merge clusters, if there are even any possible mergers left
    x, y, n = merge_clusters(
        im,
        x,
        y,
        n,
        manual=manual,
        deg=degree_before_merge,
        auto_merge_threshold=auto_merge_threshold,
        merge_min_threshold=merge_min_threshold,
    )

    orders = fit_polynomials_to_clusters(x, y, n, opower)

    # sort orders from bottom to top, using relative position

    def compare(i, j):
        _, xi, i_left, i_right = i
        _, xj, j_left, j_right = j

        if i_right < j_left or j_right < i_left:
            return xi.mean() - xj.mean()

        left = max(i_left, j_left)
        right = min(i_right, j_right)

        return xi[left:right].mean() - xj[left:right].mean()

    xp = np.arange(im.shape[1])
    keys = [(c, np.polyval(orders[c], xp), y[c].min(), y[c].max())
            for c in x.keys()]
    keys = sorted(keys, key=cmp_to_key(compare))
    key = [k[0] for k in keys]

    n = np.arange(len(n), dtype=int)
    x = {c: x[key[c]] for c in n}
    y = {c: y[key[c]] for c in n}
    orders = np.array([orders[key[c]] for c in n])

    column_range = np.array([[np.min(y[i]), np.max(y[i]) + 1] for i in n])

    if plot:
        plot_orders(im, x, y, n, orders, column_range)

    return orders, column_range
Example #34
0
    def deblendDonut(self, imgToDeblend, iniGuessXY):
        """Deblend the donut image.

        Parameters
        ----------
        imgToDeblend : numpy.ndarray
            Image to deblend.
        iniGuessXY : list[tuple]
            The list contains the initial guess of (x, y) positions of
            neighboring stars as [star 1, star 2, etc.].

        Returns
        -------
        numpy.ndarray
            Deblended donut image.
        float
            Position x of donut in pixel.
        float
            Position y of donut in pixel.

        Raises
        ------
        ValueError
            Only support to deblend single neighboring star.
        """

        # Check the number of neighboring star
        if len(iniGuessXY) != 1:
            raise ValueError("Only support to deblend single neighboring star.")

        # Get the initial guess of the brightest donut
        imgBinary = self._centroidFind.getImgBinary(imgToDeblend)
        realcx, realcy, realR = self._centroidFind.getCenterAndRfromImgBinary(imgBinary)

        # Check the image quality
        if not realcx:
            return np.array([]), realcx, realcy

        # Remove the salt and pepper noise
        imgBinary = binary_opening(imgBinary).astype(float)
        imgBinary = binary_closing(imgBinary).astype(float)

        # Get the binary image by the adaptive threshold method
        imgBinaryAdapt = self._getImgBinaryAdapt(imgToDeblend)

        # Calculate the system error by only taking the background signal
        bg1D = imgToDeblend.flatten()
        bgImgBinary1D = imgBinaryAdapt.flatten()
        background = bg1D[bgImgBinary1D == 0]
        bgPhist, binEdges = np.histogram(background, bins=256)
        sysError = np.mean(binEdges[0:2])

        # Remove the system error
        noSysErrImage = imgToDeblend - sysError
        noSysErrImage[noSysErrImage < 0] = 0

        # Get the residue map
        resImgBinary = imgBinaryAdapt - imgBinary

        # Compensate the zero element for subtraction
        resImgBinary[np.where(resImgBinary < 0)] = 0

        # Remove the salt and pepper noise of resImgBinary
        resImgBinary = binary_opening(resImgBinary).astype(float)

        # Calculate the shifts of x and y
        # Only support to deblend single neighboring star at this moment
        starXyNbr = iniGuessXY[0]
        x0 = int(starXyNbr[0] - realcx)
        y0 = int(starXyNbr[1] - realcy)

        xoptNeighbor = nelderMeadModify(
            self._funcResidue,
            np.array([x0, y0]),
            args=(imgBinary, resImgBinary),
            step=15,
        )

        # Shift the main donut image to fitted position of neighboring star
        fitImgBinary = shift(
            imgBinary, [int(xoptNeighbor[0][1]), int(xoptNeighbor[0][0])]
        )

        # Handle the numerical error of shift. Regenerate a binary image.
        fitImgBinary[fitImgBinary > 0.5] = 1
        fitImgBinary[fitImgBinary < 0.5] = 0

        # Get the overlap region between main donut and neighboring donut
        imgOverlapBinary = imgBinary + fitImgBinary
        imgOverlapBinary[imgOverlapBinary < 1.5] = 0
        imgOverlapBinary[imgOverlapBinary > 1.5] = 1

        # Get the overall binary image
        imgAllBinary = imgBinary + fitImgBinary
        imgAllBinary[imgAllBinary > 1] = 1

        # Get the reference image for the fitting
        imgRef = noSysErrImage * imgAllBinary

        # Calculate the magnitude ratio of image
        imgMainDonut = noSysErrImage * imgBinary
        imgFit = shift(imgMainDonut, [int(xoptNeighbor[0][1]), int(xoptNeighbor[0][0])])

        xoptMagNeighbor = minimize_scalar(
            self._funcMag,
            bounds=(0, 1),
            method="bounded",
            args=(imgMainDonut, imgOverlapBinary, imgFit, imgRef, xoptNeighbor[0]),
        )

        imgDeblend = imgMainDonut - xoptMagNeighbor.x * imgFit * imgOverlapBinary

        # Repair the boundary of image
        imgDeblend = self._repairBoundary(imgOverlapBinary, imgBinary, imgDeblend)

        # Calculate the centroid position of donut
        realcy, realcx = center_of_mass(imgBinary)

        return imgDeblend, realcx, realcy
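The opening/closing pair used above for salt-and-pepper cleanup is a standard idiom; a minimal sketch, assuming scipy.ndimage and its default cross-shaped structuring element:

import numpy as np
from scipy.ndimage import binary_opening, binary_closing

img = np.zeros((16, 16), dtype=bool)
img[4:12, 4:12] = True   # main region
img[8, 8] = False        # "pepper": a one-pixel hole
img[1, 1] = True         # "salt": a one-pixel speck

cleaned = binary_opening(img)      # the opening removes the speck
cleaned = binary_closing(cleaned)  # the closing fills the hole
assert not cleaned[1, 1] and cleaned[8, 8]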
Example #35
0
def accall(accresult, base_index=0, slice_thickness=5.0, more_evaluate=False):
    sum1_1 = accresult.acc1_1_tt * 2 + accresult.acc1_1_tf + accresult.acc1_1_ft
    sum2_1 = accresult.acc2_1_tt * 2 + accresult.acc2_1_tf + accresult.acc2_1_ft
    sum_2 = accresult.acc_2_tt * 2 + accresult.acc_2_tf + accresult.acc_2_ft
    # avoid dividing by zero
    if sum1_1 < 0.01:
        acc1_1 = 0.5
    else:
        acc1_1 = accresult.acc1_1_tt / sum1_1

    acc2_1 = 0
    if sum2_1 < 0.01:
        acc2_1 = 0.5
    else:
        acc2_1 = accresult.acc2_1_tt / sum2_1

    acc_2 = 0
    if sum_2 < 0.01:
        acc_2 = 0.5
    else:
        acc_2 = accresult.acc_2_tt / sum_2

    acc1 = acc1_1 + acc_2
    acc2 = acc2_1 + acc_2
    if more_evaluate:
        offsetslice = int(10.0 // slice_thickness)
        small_index = 0
        big_index = len(accresult.foreground_number) - 1
        for i in range(accresult.center_index - base_index, -1, -1):
            if accresult.foreground_number[
                    i] == 0 and accresult.foreground_number[max(
                        [i - 1, 0])] == 0 and accresult.foreground_number[max(
                            [i - 2, 0])] == 0:
                small_index = max([i - offsetslice, 0])
                break
        for i in range(accresult.center_index - base_index,
                       len(accresult.foreground_number)):
            if accresult.foreground_number[
                    i] == 0 and accresult.foreground_number[min([
                        i + 1, len(accresult.foreground_number) - 1
                    ])] == 0 and accresult.foreground_number[min(
                        [i + 2, len(accresult.foreground_number) - 1])] == 0:
                big_index = min(
                    [i + offsetslice,
                     len(accresult.foreground_number) - 1])
                break
        resultzeroslice = np.ones(len(accresult.foreground_number))
        resultzeroslice[small_index:big_index + 1] = 0
        labelslice = np.ones(len(accresult.foreground_number))
        labelpoint = np.array(accresult.labelforeground_number)
        labelslice[np.equal(labelpoint, 0)] = 0
        incorrect_slice_number = np.sum(labelslice * resultzeroslice)
        resultoneslice = np.ones(len(accresult.foreground_number))
        resultoneslice[np.equal(resultzeroslice, 1)] = 0
        accresult.label_point_number.append(labelpoint)
        key_slice = np.zeros(len(accresult.foreground_number))
        for i in range(16):
            index = (big_index - small_index) * i // 16 + small_index
            key_slice[index] = 1
        accresult.key_slice.append(key_slice)
        accresult.valid_slice.append(labelslice)
        # in fact the valid slice is as follows:
        # accresult.valid_slice.append(resultoneslice)
        # print('invalid slice = %d' % incorrect_slice_number)
        if incorrect_slice_number > 0:
            accresult.num_of_sum += incorrect_slice_number
        im = morphology.binary_opening(accresult.center_image,
                                       np.ones((3, 3)),
                                       iterations=3)
        im2 = morphology.binary_closing(im, np.ones((3, 3)), iterations=4)
        # 4
        labels, nbr_objects = measurements.label(im2)
        label_1 = np.zeros_like(labels)
        label_1[np.equal(labels, 1)] = 1
        label_2 = np.zeros_like(labels)
        label_2[np.equal(labels, 2)] = 1
        # if nbr_objects != 2:
        #     accresult.is_valid.append(False)
        #     label_2 = label_1
        # elif nbr_objects == 2:
        #     accresult.is_valid.append(True)
        if nbr_objects < 2:
            im = morphology.binary_opening(accresult.center_kidney_image,
                                           np.ones((3, 3)),
                                           iterations=3)
            im2 = morphology.binary_closing(im, np.ones((3, 3)), iterations=4)
            labels2, nbr_objects2 = measurements.label(im2)
            if nbr_objects2 == 2:
                label_1[np.equal(labels2, 1)] = 1
                label_2[np.equal(labels2, 2)] = 1
                accresult.is_valid.append(True)
                nbr_objects = nbr_objects2
                # bag_msk_np = PIL.Image.fromarray(labels2)
                # bag_msk_np.save(open('fff{}.png'.format(accresult.volume_index), 'wb'))
            else:
                accresult.is_valid.append(False)
                label_2 = label_1
                # bag_msk_np = PIL.Image.fromarray(labels)
                # bag_msk_np.save(open('ttt{}.png'.format(accresult.volume_index), 'wb'))
        elif nbr_objects == 2:
            accresult.is_valid.append(True)
            # bag_msk_np = PIL.Image.fromarray(labels)
            # bag_msk_np.save(open('fff{}.png'.format(accresult.volume_index), 'wb'))
        else:
            num_label_dict = []
            for i in range(nbr_objects):
                num_label_i = np.sum(np.equal(labels, i + 1))
                num_label_dict.append((num_label_i, i))
            result = sorted(num_label_dict, key=lambda x: x[0])
            accresult.is_valid.append(True)
            label_1[np.equal(labels, result[0][1])] = 1
            label_2[np.equal(labels, result[1][1])] = 1
            # bag_msk_np = PIL.Image.fromarray(labels)
            # bag_msk_np.save(open('fff{}.png'.format(accresult.volume_index), 'wb'))
        y, x = label_1.nonzero()
        bbox1 = Boundbox()
        bbox2 = Boundbox()
        bbox1.centerx = (max(x) + min(x)) // 2
        bbox1.centery = (max(y) + min(y)) // 2
        bbox1.width = max(x) - min(x)
        bbox1.height = max(y) - min(y)
        y, x = label_2.nonzero()
        bbox2.centerx = (max(x) + min(x)) // 2
        bbox2.centery = (max(y) + min(y)) // 2
        bbox2.width = max(x) - min(x)
        bbox2.height = max(y) - min(y)
        if bbox1.centerx < bbox2.centerx:
            accresult.lbbox.append(bbox1)
            accresult.rbbox.append(bbox2)
        else:
            accresult.lbbox.append(bbox2)
            accresult.rbbox.append(bbox1)
        labels = np.uint8(labels * (255 // nbr_objects))
        # bag_msk_np = PIL.Image.fromarray(labels)
        # if nbr_objects != 2:
        #     bag_msk_np.save(open('ttt{}.png'.format(accresult.volume_index), 'wb'))
        #     labels2 = np.zeros_like(labels)
        #     labels2[im] =255
        #     bag_msk_np = PIL.Image.fromarray(labels2)
        #     bag_msk_np.save(open('zzz{}.png'.format(accresult.volume_index), 'wb'))
        # else :
        #     bag_msk_np.save(open('fff{}.png'.format(accresult.volume_index), 'wb'))
        print('object number = %d' % nbr_objects)
    SetZero(accresult)
    accresult.volume_index += 1
    return acc_2 * 2, acc1_1 * 2, acc2_1 * 2
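The repeated centre/width/height arithmetic above can be factored into a small helper; a hypothetical sketch (bbox_from_mask is not part of the original code) returning the same four numbers computed inline for label_1 and label_2:

def bbox_from_mask(mask):
    # centerx, centery, width, height, exactly as computed inline above
    y, x = mask.nonzero()
    return ((x.max() + x.min()) // 2, (y.max() + y.min()) // 2,
            x.max() - x.min(), y.max() - y.min())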
Example #36
0
    def deblendDonut(self, iniGuessXY):
        """Get the deblended donut image.

        Parameters
        ----------
        iniGuessXY : tuple or list
            Initial guess of (x, y) position of neighboring star.

        Returns
        -------
        numpy.ndarray
            Deblended donut image.
        float
            Position x in pixel.
        float
            Position y in pixel.
        """

        # Deblended image
        imgDeblend = []

        # Position of centroid

        # Get the initial guess of brightest donut
        realcx, realcy, realR, imgBinary = self.getCenterAndR_ef(checkEntropy=True)

        # Remove the salt and pepper noise of imgBinary
        imgBinary = binary_opening(imgBinary).astype(float)
        imgBinary = binary_closing(imgBinary).astype(float)

        # Check the image quality
        if (not realcx):
            return imgDeblend, realcx, realcy

        # Get the binary image by adaptive threshold
        adapcx, adapcy, adapR, adapImgBinary = self.getCenterAndR_adap()

        # Calculate the system error by only taking the background signal
        bg1D = self.getImg().flatten()
        bgImgBinary1D = adapImgBinary.flatten()
        background = bg1D[bgImgBinary1D == 0]
        bgPhist, binEdges = np.histogram(background, bins=256)
        sysError = np.mean(binEdges[0:2])

        # Remove the system error
        noSysErrImage = self.getImg() - sysError
        noSysErrImage[noSysErrImage < 0] = 0

        # Get the residue map
        resImgBinary = adapImgBinary - imgBinary

        # Compensate the zero element for subtraction
        resImgBinary[np.where(resImgBinary < 0)] = 0

        # Remove the salt and pepper noise of resImgBinary
        resImgBinary = binary_opening(resImgBinary).astype(float)

        # Calculate the shifts of x and y
        x0 = int(iniGuessXY[0] - realcx)
        y0 = int(iniGuessXY[1] - realcy)

        xoptNeighbor = nelderMeadModify(self._funcResidue, np.array([x0, y0]),
                                        args=(imgBinary, resImgBinary), step=15)

        # Shift the main donut image to fitted position of neighboring star
        fitImgBinary = shift(imgBinary, [int(xoptNeighbor[0][1]), int(xoptNeighbor[0][0])])

        # Handle the numerical error of shift. Regenerate a binary image.
        fitImgBinary[fitImgBinary > 0.5] = 1
        fitImgBinary[fitImgBinary < 0.5] = 0

        # Get the overlap region between main donut and neighboring donut
        imgOverlapBinary = imgBinary + fitImgBinary
        imgOverlapBinary[imgOverlapBinary < 1.5] = 0
        imgOverlapBinary[imgOverlapBinary > 1.5] = 1

        # Get the overall binary image
        imgAllBinary = imgBinary + fitImgBinary
        imgAllBinary[imgAllBinary > 1] = 1

        # Get the reference image for the fitting
        imgRef = noSysErrImage*imgAllBinary

        # Calculate the magnitude ratio of image
        imgMainDonut = noSysErrImage*imgBinary
        imgFit = shift(imgMainDonut, [int(xoptNeighbor[0][1]), int(xoptNeighbor[0][0])])

        xoptMagNeighbor = minimize_scalar(
            self._funcMag, bounds=(0, 1), method="bounded",
            args=(imgMainDonut, imgOverlapBinary, imgFit, imgRef, xoptNeighbor[0]))

        imgDeblend = imgMainDonut - xoptMagNeighbor.x*imgFit*imgOverlapBinary

        # Repair the boundary of image
        imgDeblend = self._repairBoundary(imgOverlapBinary, imgBinary, imgDeblend)

        # Calculate the centroid position of donut
        realcy, realcx = center_of_mass(imgBinary)

        return imgDeblend, realcx, realcy
Example #37
0
    def split(self,
              sigma=2,
              threshold_split=0.25,
              expand_mask=1,
              minimum_pixels=1):
        """
        If any classes contain multiple non-contiguous segments in real space, divide these regions
        into distinct classes.

        Algorithm is as follows:
        First, an image of each class is obtained from its scan position weights.
        Then, the image is convolved with a gaussian of std sigma.
        This is then turned into a binary mask, by thresholding with threshold_split.
        Stray pixels are eliminated by performing a one pixel binary opening, then binary closing.
        The mask is then expanded by expand_mask pixels.
        Finally, the contiguous regions of the resulting mask are found. These become the new class
        components by scan position.

        The splitting itself involves creating two classes - i.e. adding a column to W and a row to
        H.  The new BP classes (W columns) have exactly the same values as the old BP class. The two
        new scan position classes (H rows) divide up the non-zero entries of the old scan position
        class into two or more non-intersecting subsets, each of which becomes its own new class.

        Accepts:
            sigma           (float) std of gaussian kernel used to smooth the class images before
                            thresholding and splitting.
            threshold_split (float) used to threshold the class image to create a binary mask.
            expand_mask     (int) number of pixels by which to expand the mask before separating
                            into contiguous regions.
            minimum_pixels  (int) if, after splitting, a potential new class contains fewer than
                            this number of pixels, ignore it
        """
        assert isinstance(expand_mask, (int, np.integer))
        assert isinstance(minimum_pixels, (int, np.integer))

        W_next = np.zeros((self.N_feat, 1))
        H_next = np.zeros((1, self.N_meas))
        for i in range(self.N_c):
            # Get the class in real space
            class_image = self.get_class_image(i)

            # Turn into a binary mask
            class_image = gaussian_filter(class_image, sigma)
            mask = class_image > (np.max(class_image) * threshold_split)
            mask = binary_opening(mask, iterations=1)
            mask = binary_closing(mask, iterations=1)
            mask = binary_dilation(mask, iterations=expand_mask)

            # Get connected regions
            labels, nlabels = label(mask,
                                    background=0,
                                    return_num=True,
                                    connectivity=2)

            # Add each region to the new W and H matrices
            for j in range(nlabels):
                mask = (labels == (j + 1))
                mask = binary_erosion(mask, iterations=expand_mask)

                if np.sum(mask) >= minimum_pixels:

                    # Leave the Bragg peak weightings the same
                    W_next = np.hstack((W_next, self.W[:, i, np.newaxis]))

                    # Use the existing real space pixel weightings
                    h_i = np.zeros(self.N_meas)
                    h_i[mask.ravel()] = self.H[i, :][mask.ravel()]
                    H_next = np.vstack((H_next, h_i[np.newaxis, :]))

        self.W_next = W_next[:, 1:]
        self.H_next = H_next[1:, :]
        self.N_c_next = self.W_next.shape[1]

        return
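A standalone sketch of the mask pipeline the docstring above describes; illustrative only, with class_image standing in for self.get_class_image(i) and import sources inferred from the call signatures (gaussian_filter and the binary morphology from scipy.ndimage, label from skimage.measure):

from scipy.ndimage import (gaussian_filter, binary_opening,
                           binary_closing, binary_dilation)
from skimage.measure import label

def split_mask(class_image, sigma=2, threshold_split=0.25, expand_mask=1):
    smoothed = gaussian_filter(class_image, sigma)
    mask = smoothed > smoothed.max() * threshold_split
    mask = binary_opening(mask, iterations=1)    # drop stray pixels
    mask = binary_closing(mask, iterations=1)    # fill stray holes
    mask = binary_dilation(mask, iterations=expand_mask)
    # 2-connectivity matches the label() call in split()
    return label(mask, background=0, connectivity=2)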
Example #38
0
  
    for i in range(len(imlist)-1): 
  
        print(i)

        ori = Image.open(imlist[i])
        f = np.array(ori.convert('L')).astype(np.float64)
        diff = (f - BG) * mask
  
        th = 10
        result = np.zeros(diff.shape)
        result[abs(diff)>th]=255

        r_norm = result/255
        rc = ndm.binary_closing(r_norm,structure=np.ones((2,2)))
        ro = ndm.binary_opening(rc,structure=np.ones((2,2)))
        label_im, nb_labels = nd.label(ro)
        sizes = nd.sum(ro, label_im, range(nb_labels + 1))

        mask_size = (sizes < 200) | (sizes > 3000)
        remove_pixel = mask_size[label_im]
        label_im[remove_pixel] = 0

   #     mask_size = sizes > 3000
   #     remove_pixel = mask_size[label_im]
   #     label_im[remove_pixel] = 0

        tmp = np.zeros([1080,1920]) 
        tmp[label_im!=0] = 255

        sx = nd.sobel(tmp, axis=0, mode='constant')
Example #39
0
def preprocessing(image, smooth_size, folder):
    """
    'The image low contrast and under segmentation
    problem is not yet addressed by most of the researchers'
    
    'Other researchers also proposed different method to
    remedy the problem of watershed.  Li, El-
    moataz, Fadili, and Ruan, S. (2003) proposed an improved
    image segmentation approach based 
    on level set and mathematical morphology'
    
    THE SPHERES MUST BE ALMOST ALONG THE SAME PLANES IN Z DIRECTION
    IF THEY ARE TOUCHING AND OVERLAP, WHILE BEING ALMOST MERGED
    IT IS IMPOSSIBLE TO RESOLVE THEM
    
    ONE IDEA MIGHT BE TO DETECT CENTRES ALONG ONE AXIS AND THEN ANOTHER
    AFTER ALL THE CENTRES WERE FOUND COMBINE THEM SOMEHOW... 
    """
    from skimage.restoration import denoise_tv_chambolle
    
    dim = int(image.shape[0] / 50.)
    smoothed = rank.median(image, disk(smooth_size))
    #smoothed = denoise_tv_chambolle(image, weight=0.002)
    smoothed = rank.enhance_contrast(smoothed, disk(smooth_size))
    
    pl.subplot(2, 3, 1)
    pl.title("after median")
    pl.imshow(smoothed)
    pl.gray()
    # If after smoothing the "dot" disappears
    # use the image value
    
    # TODO: wat do with thresh?
    try:
        im_max = smoothed.max()
        thresh = threshold_otsu(image)
    except Exception:
        im_max = image.max()
        thresh = threshold_otsu(image)

    
    if im_max < thresh:
        labeled = np.zeros(smoothed.shape, dtype=np.int32)
        
    else:
        binary = smoothed > thresh
        
        # TODO: this array size is the fault of errors
        bin_open = binary_opening(binary, np.ones((dim, dim)), iterations=5)
        bin_close = binary_closing(bin_open, np.ones((5,5)), iterations=5)
        
        pl.subplot(2, 3, 2)
        pl.title("threshold")
        pl.imshow(binary, interpolation='nearest')
        pl.subplot(2, 3, 3)
        pl.title("opening")
        pl.imshow(bin_open, interpolation='nearest')
        pl.subplot(2, 3, 4)
        pl.title("closing")
        pl.imshow(bin_close, interpolation='nearest')
        
        distance = ndimage.distance_transform_edt(bin_open)
        local_maxi = peak_local_max(distance,
                                    indices=False, labels=bin_open)
        
        markers = ndimage.label(local_maxi)[0]
        
        labeled = watershed(-distance, markers, mask=bin_open)
        pl.subplot(2, 3, 5)
        pl.title("label")
        pl.imshow(labeled)
        #pl.show()
        pl.savefig(folder)
        pl.close('all')

        #misc.imsave(folder, labeled)
#         labels_rw = random_walker(bin_close, markers, mode='cg_mg')
#          
#         pl.imshow(labels_rw, interpolation='nearest')
#         pl.show()

    return labeled
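Stripped of the plotting, the segmentation core of preprocessing is the classic distance-transform watershed. A plot-free sketch, assuming a recent scikit-image where watershed lives in skimage.segmentation and peak_local_max returns peak coordinates (the indices=False form used above was removed):

import numpy as np
from scipy import ndimage
from skimage.feature import peak_local_max
from skimage.segmentation import watershed

def watershed_labels(binary):
    # Seed one marker per local maximum of the distance transform,
    # then flood the inverted distance map within the binary mask.
    distance = ndimage.distance_transform_edt(binary)
    coords = peak_local_max(distance, labels=binary)
    seeds = np.zeros(distance.shape, dtype=bool)
    seeds[tuple(coords.T)] = True
    markers = ndimage.label(seeds)[0]
    return watershed(-distance, markers, mask=binary)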
Example #40
0
def main():
    plt.close("all")
    plt.figure(figsize=(20, 12))

    n_ig = 6
    igs = []
    count = 0
    while count < n_ig:
        orbit = np.random.randint(6000) + 2000

        file_name = ais.find_file(orbit)
        nsweeps = 0
        try:
            stats = os.stat(file_name)
            nsweeps = stats[stat.ST_SIZE] / 400
        except OSError as e:
            print(e)

        if nsweeps > (180 * 150):
            tmp = ais.read_ais(orbit)
            if len(tmp) > 150:
                ignumber = np.random.randint(len(tmp))
                igs.append(tmp[ignumber])
                count = count + 1
                print("ORBIT %d, Ionogram %d" % (orbit, ignumber))
                continue

        print("REJECTING")

    n = len(igs)
    igs = igs[0:n_ig]

    g = mpl.gridspec.GridSpec(6, 6, wspace=0.16, hspace=0.1,
        left=0.05, right=0.95, bottom=0.08, top=0.95)
    # plt.hot()
    vmin, vmax = -16, -13

    threshold = -15

    fp_structure = np.ones((10,1))
    td_structure = np.ones((1,4))

    for i, ig in enumerate(igs):
        ig.interpolate_frequencies()
        data = ig.data
        plt.subplot(g[i,0])
        plt.imshow(np.log10(data), interpolation='nearest', aspect='auto', vmin=vmin, vmax=vmax)
        if i == 0:
            plt.title("Data")

        plt.subplot(g[i,1])
        bdata = np.zeros(data.shape, dtype=bool)
        bdata[data > (10.**threshold)] = True
        plt.imshow(bdata, interpolation='nearest', aspect='auto')
        if i == 0:
            plt.title("Threshold @ %f" % threshold)

        plt.subplot(g[i,2])
        dfp = morphology.binary_opening(bdata, structure=fp_structure)
        plt.imshow(dfp, interpolation='nearest', aspect='auto')
        if i == 0:
            plt.title("FP-Lines: v-opened")

        # plt.subplot(g[i,3])
        # dtd = morphology.binary_opening(bdata, structure=td_structure)
        # plt.imshow(dtd, interpolation='nearest', aspect='auto')
        # if i == 0:
        #     plt.title("CYC LINES?")

        plt.subplot(g[i,3])
        dtd = np.logical_and(bdata, np.logical_not(dfp))
        plt.imshow(dtd, interpolation='nearest', aspect='auto')
        if i == 0:
            plt.title("Residual")

        plt.subplot(g[i,4])
        # dtd = np.logical_and(bdata, np.logical_not(dfp))
        # dtd = morphology.binary_dilation(dtd, structure=morphology.generate_binary_structure(2,1))
        # dtd = morphology.binary_fill_holes(dtd, structure=np.ones((1,3)))
        dtd = morphology.binary_opening(dtd, structure=np.ones((1,4)))

        structure = np.ones((2,1))
        structure[1,0] = 0

        dtd = morphology.binary_hit_or_miss(dtd, structure1=structure)


        plt.imshow(dtd, interpolation='nearest', aspect='auto')
        if i == 0:
            plt.title("Cyc. lines: residual h-opened")

        # plt.subplot(g[i,5])
        # dtd = np.logical_and(bdata, np.logical_not(np.logical_or(dtd, dfp)))
        # plt.imshow(dtd, interpolation='nearest', aspect='auto')
        # if i == 0:
        #     plt.title("RESIDUAL")

        plt.subplot(g[i,5])

        dtd = morphology.binary_closing(dtd)
        plt.imshow(dtd, interpolation='nearest', aspect='auto')
        if i == 0:
            plt.title("Ionosphere - topside edge")

        # plt.subplot(g[i,4])
        # d = np.logical_and(bdata, np.logical_not(dfp))
        # d = morphology.binary_erosion(d, structure=td_structure)
        # d = morphology.binary_opening(d, structure=td_structure)
        # plt.imshow(d, interpolation='nearest', aspect='auto')
        # if i == 0:
        #     plt.title("IONOSPHERE?")

        # plt.subplot(g[i,5])
        # # d = np.logical_and(bdata, np.logical_not(np.logical_or(dfp, d)))
        # # d = morphology.binary_closing(bdata,structure=morphology.generate_binary_structure(2,2), iterations=3)
        # d = morphology.binary_dilation(bdata, structure=np.ones((2,1)))
        # plt.imshow(d, interpolation='nearest', aspect='auto')
        # if i == 0:
        #     plt.title("RESIDUAL?")

    plt.show()
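The tall fp_structure and the wide (1, 4) structure above select features by orientation: opening with a tall element keeps only vertical lines, opening with a wide one keeps only horizontal lines. A minimal sketch of the idea:

import numpy as np
from scipy.ndimage import binary_opening

img = np.zeros((20, 20), dtype=bool)
img[2:14, 5] = True     # a vertical line, 12 pixels tall
img[10, 8:16] = True    # a horizontal line, 8 pixels wide

vertical = binary_opening(img, structure=np.ones((10, 1)))
horizontal = binary_opening(img, structure=np.ones((1, 4)))
assert vertical[8, 5] and not vertical[10, 12]
assert horizontal[10, 12] and not horizontal[8, 5]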
Example #41
0
from PIL import Image
from numpy import *

from scipy.ndimage import measurements,morphology

"""
This is the morphology example in section 1.4.
"""

# load image and threshold to make sure it is binary
im = array(Image.open('houses.png').convert('L'))
im = 1*(im<128)

labels, nbr_objects = measurements.label(im)
print "Number of objects:", nbr_objects

# morphology - opening to separate objects better
im_open = morphology.binary_opening(im,ones((9,5)),iterations=2) 

labels_open, nbr_objects_open = measurements.label(im_open)
print "Number of objects:", nbr_objects_open
Example #42
0
    def PrepareCalculation(self,
                           zero_edge=False,
                           core_discontinuties=[],
                           edge_discontinuties=[],
                           transformation=None,
                           even_fun=True,
                           robust_fit=False,
                           pedestal_rho=None,
                           elm_phase=None):
        if debug:
            TT = time.time()
            print('\nPrepareCalculation')
            #np.savez('discontinuties' ,core_discontinuties,edge_discontinuties,elm_phase)

        #BUG removing large number of points, will change missing_data index!!
        #if debug:

        self.robust_fit = robust_fit
        #embed()
        if transformation is not None and len(
                self.P) > self.n_points and transformation[2](100) != 1:
            print(
                'Only linear transformation can be used with line integrated measurements'
            )
            transformation = None

        if transformation is None:
            transformation = (lambda x: x, ) * 2 + (lambda x: 1, )

        self.trans, self.invtrans, self.deriv_trans = transformation

        #=============== define contribution matrix  ==================
        # it is a sparse matrix representation of bilinear interpolation

        dr = (self.r_max - self.r_min) / (self.nr_new - 1)
        it = (self.T - self.t_min) / self.dt
        ir = (self.R - self.r_min) / dr

        #print('sum(it > self.nt_new0-1)',np.sum(it > self.nt_new0-2),np.sum(it > self.nt_new0-1) ,self.missing_data.shape,  )

        #new grid for output
        r_new = np.linspace(self.r_min, self.r_max, self.nr_new)
        #t_new0 = t_new = np.linspace(self.t_min,self.t_max,self.nt_new0)

        floor_it = np.uint32(it)
        floor_ir = np.uint32(ir)

        weight = np.tile(self.W, (4, 1))
        index_p = np.tile(self.P, (4, 1))
        index_t = np.tile(floor_it, (4, 1))
        index_r = np.tile(floor_ir, (4, 1))
        #print('sum(floor_it > self.nt_new0-2)',np.sum(floor_it > self.nt_new0-2),self.missing_data.shape,  self.nt_new0  )

        index_t[1::2] += 1
        index_r[2:] += 1

        #fast rounding to 3 digits to increase sparsity for regularly spaced data and remove rounding error
        frac_it = np.uint32((it - floor_it) * 1e3 + 0.5) / 1e3
        frac_ir = ir - floor_ir

        #bilinear weights
        weight[::2] *= 1. - frac_it
        weight[1::2] *= frac_it
        weight[:2] *= 1. - frac_ir
        weight[2:] *= frac_ir

        #embed()

        #skip fit of temporal regions without any data
        if elm_phase is None:
            #if elm syncing is not used
            #time regions which are not covered by any measurements
            try:
                self.missing_data[index_t[0]] = False
                self.missing_data[index_t[1]] = False
            except Exception:
                print('error:  self.missing_data[index_t[1]] = False ')
            #weakly constrained timepoints
            weak_data, _ = np.histogram(index_t,
                                        self.nt_new0,
                                        weights=weight,
                                        range=(0, self.nt_new0))

            self.missing_data[weak_data < np.mean(weak_data) *
                              .02] = True  #almost missing data

            weak_data = (weak_data <
                         np.mean(weak_data) / 5.)[~self.missing_data]
            weak_data = weak_data[1:] | weak_data[:-1]

            #correction of dt for regions with a missing or weakly constrained data
            dt = np.ones(self.nt_new0)
            dt = np.ediff1d(np.cumsum(dt)[~self.missing_data])
            dt = (dt / (1 + weak_data)) * self.dt
            self.nt_new = np.sum(~self.missing_data)
            #embed()
            #skipping a fit in regions without the data
            used_times = np.cumsum(~self.missing_data) - 1
            index_t = used_times[index_t]
            t_new = self.t_new0[~self.missing_data]
        else:
            t_new = self.t_new0
            self.nt_new = self.nt_new0
            dt = self.dt * np.ones(self.nt_new - 1)
            used_times = np.arange(len(self.t_new0))

        self.r_new, self.t_new = np.meshgrid(r_new, t_new)

        weight = weight.ravel()
        nonzero = weight != 0  #add only nonzero elements to matrix !
        index_p = index_p.ravel()[nonzero]
        index_rt = (index_r.ravel() * self.nt_new + index_t.ravel())[nonzero]
        npix = self.nr_new * self.nt_new
        # Now, we'll exploit a sparse csc_matrix to build the 2D histogram...
        self.M = sp.csc_matrix((weight[nonzero], (index_p, index_rt)),
                               shape=(self.n_points, npix))

        if debug:
            print('compression', self.M.data.size / (len(self.P) * 4))
            print('prepare V', time.time() - TT)
            TT = time.time()

        #imshow(self.M.sum(0).reshape(self.nr_new,self.nt_new), interpolation='nearest', aspect='auto');colorbar();show()
        #imshow(self.M[25000].todense().reshape(self.nr_new,self.nt_new), interpolation='nearest', aspect='auto');colorbar();show()

        #prepare regularisation matrix

        #calculate (1+c)*d/dr(1/r*dF/dr) + (1-c)*d^2F/dt^2
        rvec = np.linspace(self.r_min, self.r_max, self.nr_new)
        rvec_b = (rvec[1:] + rvec[:-1]) / 2

        #radial weighting function; keeps a zero gradient in the core and allows a pedestal
        diffusion = np.ones(self.nr_new - 1)
        if even_fun:
            #A zero slope constraint is imposed at the magnetic axis
            #diffusion /= (rvec_b*np.arctan(np.pi*rvec_b)-np.log((np.pi*rvec_b)**2+1)/(2*np.pi))/rvec_b
            #diffusion /= np.arctan( 3/2*rvec_b)

            from scipy.special import erf
            #erf = 2/sqrt(pi)*integral(exp(-t**2), t=0..z).
            #diffusion /= erf(rvec_b)

            #Gamma = 1/(r*pi*2*pi*R)*integral r*S
            #for S is a gaussian profile of the source exp(-x^2)
            diffusion /= (1 - np.exp(-rvec_b**2)) / rvec_b

            #plt.plot(erf(rvec_b),':')
            #plt.plot(np.arctan( 3/2*rvec_b))
            #plt.plot((rvec_b*np.arctan(np.pi*rvec_b)-np.log((np.pi*rvec_b)**2+1)/(2*np.pi))/rvec_b,'-.')
            #plt.plot((1-np.exp(-rvec_b**2))/rvec_b*1.5,'--')
            #plt.show()

        #allow large gradients at the pedestal
        if pedestal_rho is not None:

            def gauss(x, x0, s):
                y = np.zeros_like(x)
                ind = np.abs(x - x0) / s < 4
                y[ind] = np.exp(-(x[ind] - x0)**2 / (2 * s**2))
                return y

            diffusion /= 1 + gauss(rvec_b, pedestal_rho, 0.02) * 10 + gauss(
                rvec_b, pedestal_rho + .05, .05) * 5

        tweight = np.exp(-rvec)

        #==================time domain===============
        #prepare 3 matrices, for core, midradius and edge
        DTDT = []

        #discontinuities; regions must cover the whole range and must not overlap
        self.time_breaks = OrderedDict()
        self.time_breaks['core'] = (0., .3), core_discontinuties
        self.time_breaks['middle'] = (.3, .6), []
        self.time_breaks['edge'] = (.6, 2.), edge_discontinuties

        if len(core_discontinuties) == 0:
            self.time_breaks['middle'] = (0, .6), []
            del self.time_breaks['core']
        if len(edge_discontinuties) == 0:
            del self.time_breaks['edge']
            self.time_breaks['middle'] = (self.time_breaks['middle'][0][0],
                                          2), []

        #iterate over all regions
        for region, (rho_range, time_breaks) in self.time_breaks.items():

            break_ind = []
            if time_breaks is not None and len(time_breaks) != 0:
                break_ind = np.unique(self.t_new0.searchsorted(time_breaks))
                break_ind = break_ind[(break_ind < len(self.t_new0) - 2) &
                                      (break_ind > 3)]  #too close to the boundary
                if any(break_ind):
                    break_ind = break_ind[np.ediff1d(break_ind, to_begin=10) >
                                          1]  #discontinuities too close together

            #remove discontinuities where there are no measurements
            if len(break_ind) > 1 and elm_phase is None:
                break_ind = break_ind[~binary_opening(self.missing_data
                                                      )[break_ind]]
                break_ind = np.unique(used_times[break_ind])

            if self.nt_new > 1:
                DT = np.zeros((3, self.nt_new))
                #minimize second derivative
                DT[0, 0:-2] = .5 / (dt[:-1] + dt[1:]) / dt[:-1]
                DT[1, 1:-1] = -.5 / (dt[:-1] + dt[1:]) * (1 / dt[:-1] +
                                                          1 / dt[1:])
                DT[2, 2:] = .5 / (dt[:-1] + dt[1:]) / dt[1:]
                #force zero first derivative at the edge of the discontinuity
                DT[[2, 0], [1, -2]] = 1 / self.dt**2 / 2
                DT[1, [0, -1]] = -1 / self.dt**2 / 2

                #introduce discontinuities into the time derivative matrix
                if len(break_ind) > 0:
                    DT[0, break_ind - 2] = 1 / self.dt
                    DT[1, break_ind - 1] = -1 / self.dt
                    DT[2, break_ind - 0] = 0
                    DT[0, break_ind - 1] = 0
                    DT[1, break_ind - 0] = -1 / self.dt
                    DT[2, break_ind + 1] = 1 / self.dt

                DT *= self.dt
                DT = sp.spdiags(DT, (-1, 0, 1), self.nt_new, self.nt_new)

                #add correlation between timeslices at the same elm phase
                if elm_phase is not None and region == 'edge':
                    phase = np.interp(t_new,
                                      elm_phase[0],
                                      elm_phase[1],
                                      left=0,
                                      right=0)
                    nelm = len(elm_phase[0])
                    elm_start = elm_phase[0][elm_phase[1] == -1]
                    elm_start_ind = np.arange(nelm)[elm_phase[1] == -1]
                    DT_elm_sync = [
                    ]  #indices and weights in the elm synchronisation matrix
                    #iterate over each timeslice of the timegrid
                    for it, (t, p) in enumerate(zip(t_new, phase)):
                        #look at one neighbouring elm on the left and one on the right

                        for side in ['L', 'R']:
                            #find point in the next elm with nearest phase value

                            #check that the current elm is not the first/last elm
                            if len(elm_start) > 2 and (t < elm_start[-1]
                                                       and side == 'R') or (
                                                           t > elm_start[1]
                                                           and side == 'L'):
                                #index of the previous/next elm
                                ielm = elm_start.searchsorted(t)

                                if side == 'L': ielm -= 2
                                assert ielm >= 0, 'ielm > 0'

                                #select the elm
                                ip = elm_start_ind[ielm]
                                elm_beg = elm_phase[0][ip]
                                elm_end = np.inf if ip + 3 > nelm else elm_phase[
                                    0][ip + 2]
                                dt_elm = elm_end - elm_beg
                                #interval inside of range [elm_beg, elm_end]
                                ind_next = slice(
                                    *t_new.searchsorted((elm_beg, elm_end)))

                                #at least 6 time slices within the elm, else skip it
                                if ind_next.stop - ind_next.start > 5:
                                    iphase = phase[ind_next].searchsorted(p)
                                    if 0 < iphase < (
                                            ind_next.stop - ind_next.start
                                    ):  #inside of left and right edge

                                        next_it_r = ind_next.start + iphase
                                        next_it_l = next_it_r - 1

                                        #weight of the left point
                                        w = (phase[next_it_r] -
                                             p) / (phase[next_it_r] -
                                                   phase[next_it_l])
                                        assert 0 <= w <= 1, 'w > 0'

                                        DT_elm_sync.append(
                                            (it, next_it_l, -w * dt_elm))
                                        DT_elm_sync.append(
                                            (it, next_it_r, -(1 - w) * dt_elm))

                                    elif iphase == 0 and ind_next.start != 0:  #if it is not edge of the grid
                                        DT_elm_sync.append(
                                            (it, ind_next.start, -dt_elm))

                                    elif iphase == (
                                            ind_next.stop - ind_next.start
                                    ) and ind_next.stop != len(
                                            t_new
                                    ):  #if it is not edge of the grid
                                        DT_elm_sync.append(
                                            (it, ind_next.stop - 1, -dt_elm))

                    if len(DT_elm_sync) == 0:
                        print('No ELMS for synchronisation')

                    else:
                        #add elm synchronisation to time derivative matrix
                        I, J, W = np.array(DT_elm_sync).T
                        B = sp.coo_matrix(
                            (W / self.dt, (I, J)),
                            (self.nt_new, self.nt_new
                             ))  #normalise it by average length of elms??
                        B = B - sp.spdiags(
                            B.sum(1).T, 0, self.nt_new,
                            self.nt_new)  #diagonal value at it should be +2
                        DT = sp.vstack((B / 4., DT / 4.), format='csr')

            else:
                DT = np.matrix(0)
            #apply DT just in a selected region
            r_range = (r_new >= rho_range[0]) & (r_new < rho_range[1])
            W = sp.diags(tweight[r_range], 0)
            DTDT.append(sp.kron(W**2, DT.T * DT))

        #merge all regions together
        if len(DTDT) > 1:
            self.DTDT = sp.block_diag(DTDT, format='csc')
        else:
            self.DTDT = DTDT[0]

        #==============radial domain===============
        #build operator of the 1st derivative
        DR = np.zeros((2, self.nr_new))
        DR[0, 0:] = 1
        DR[1, 0:] = -1
        DR = sp.spdiags(DR, (1, 0), self.nr_new, self.nr_new)
        DD = sp.spdiags(diffusion, 0, self.nr_new, self.nr_new)
        #grad D grad operator
        DR = DR * DD * DR

        if zero_edge and self.trans(0) == 0:
            #press edge to zero
            DR = DR.tolil()
            DR[-1, -1] = 10
        elif self.trans(0) != 0:
            #zero the 2nd derivative at the edge
            DR[-2, -2:] = 0

        I = sp.eye(self.nt_new)
        self.DRDR = sp.kron(DR.T * DR, I, format='csc')
        #print(self.DRDR.size)

        self.prepared = True

        if debug:
            print('prepare DT', time.time() - TT)
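
The radial-domain block above packs the +1/-1 bands of a first-difference operator into sp.spdiags and then forms the penalty DR.T * DR. A minimal standalone sketch of the same pattern (the names here are illustrative, not from the original):

# Sketch: first-difference operator D and the smoothing penalty D.T * D.
import numpy as np
import scipy.sparse as sp

n = 5
bands = np.zeros((2, n))
bands[0, :] = 1   # superdiagonal (offset +1)
bands[1, :] = -1  # main diagonal (offset 0)
D = sp.spdiags(bands, (1, 0), n, n)  # (D @ f)[i] = f[i+1] - f[i]
penalty = D.T * D                    # penalises non-smooth profiles
f = np.arange(n, dtype=float) ** 2
print(D @ f)  # forward differences; the last row has no right neighbour
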
Example #43
0
def object_count(frame,
                 mf_size=3,
                 thres=0.5,
                 cl_size=2.6,
                 op_size=5.2,
                 lbl_struct=np.ones((3, 3)),
                 mfiltered=None,
                 binarized=None,
                 closed=None,
                 opened=None,
                 labeled=None):
    """Counts visible Objects after processing the frame.

    Processing steps:
        Median-Filter
        Binarization
        Binary-Closing
        Binary-Opening
        Labeling

    Optional parameters:
    mf_size         Size of the square used to median_filter the frame.
                    (default: 3)
    thres           Threshold for binarization.
                    (default: 0.5; meaning: 0.5*(max-min)+min)
    cl_size         Radius (float possible) of the disk used for
                    binary_closing. (default: 2.6)
    op_size         Radius (float possible) of the disk used for
                    binary_opening. (default: 5.2)
    lbl_struct      Symmetric structure indicating neighbor-pixels.
                    (default: np.ones((3,3)))
    mfiltered       ndarray of same size as frame that will hold the median-
                    filtered frame afterwards.
    binarized       ndarray of same size as frame that will hold the binarized
                    frame afterwards.
    closed          ndarray of same size as frame that will hold the frame
                    after morphological closing.
    opened          ndarray of same size as frame that will hold the frame
                    after morphological opening.
    labeled         ndarray of same size as frame that will hold the labeled
                    frame afterwards.
    """

    from scipy.ndimage.morphology import binary_opening, binary_closing
    from scipy.ndimage.measurements import label
    from scipy.ndimage.filters import median_filter
    from math import ceil

    mframe = median_filter(frame, mf_size)
    mi, ma = mframe.min(), mframe.max()
    binary = (mframe > (thres * (ma - mi) + mi))
    c = ceil(cl_size)
    c1, c2 = [c + x for x in frame.shape]
    blobs1 = np.zeros([x + int(2 * ceil(cl_size)) for x in frame.shape])
    #Work with an added border to minimize edge effects during the
    #morphological transformations.
    blobs1[c:c1, c:c2] = binary
    blobs1 = binary_closing(blobs1, disk_2d(cl_size))
    blobs2 = binary_opening(blobs1, disk_2d(op_size))
    blobs1 = blobs1[c:c1, c:c2]
    blobs2 = blobs2[c:c1, c:c2]
    lbl, num_l = label(blobs2, lbl_struct)

    if isinstance(mfiltered, np.ndarray):
        mfiltered.flat[...] = mframe.flat[...]
    if isinstance(binarized, np.ndarray):
        binarized.flat[...] = binary.flat[...]
    if isinstance(closed, np.ndarray):
        closed.flat[...] = blobs1.flat[...]
    if isinstance(opened, np.ndarray):
        opened.flat[...] = blobs2.flat[...]
    if isinstance(labeled, np.ndarray):
        labeled.flat[...] = lbl.flat[...]

    return num_l
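
A minimal usage sketch (the frame here is synthetic; the disk_2d helper used internally is assumed to be available, and the output buffers are optional):

# Hypothetical usage: count blobs in a noisy frame, keeping the binarized
# intermediate for inspection.
import numpy as np

frame = np.random.rand(128, 128)  # stand-in for a real video frame
binarized = np.empty_like(frame)  # receives the thresholded frame
n = object_count(frame, thres=0.6, binarized=binarized)
print('objects found:', n)
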
Example #44
0
def alternating_sequence_filter(data, max_radius):
  # Alternate binary opening and closing with structuring elements of
  # increasing radius.
  for radius in range(1, max_radius+1):
    se = disk_strel(radius)
    opened = morphology.binary_opening(data, se)
    data = morphology.binary_closing(opened, se)
  return data
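
disk_strel is not defined in this excerpt; a plausible minimal helper (an assumption, not the original implementation) builds a boolean disk of the given radius:

# Assumed helper: disk-shaped boolean structuring element.
import numpy as np

def disk_strel(radius):
    y, x = np.ogrid[-radius:radius + 1, -radius:radius + 1]
    return x * x + y * y <= radius * radius
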
    def post_process(self):
        '''

        '''
        # Normalize energy
        self.energy = self._buffer['energy'][:]
        if self.energy.max():
            self.energy = self.energy / self.energy.max()

        silences = [1 if e < self.max_energy else 0 for e in self.energy]
        step = float(self.input_stepsize) / float(self.samplerate())

        models_dir = os.path.join(timeside.__path__[0],
                                  'analyzer', 'trained_models')
        prototype1_file = os.path.join(models_dir,
                                       'irit_noise_startSilences_proto1.dat')
        prototype2_file = os.path.join(models_dir,
                                       'irit_noise_startSilences_proto2.dat')

        prototype = numpy.load(prototype1_file)
        prototype2 = numpy.load(prototype2_file)

        # Smoothing to remove small segments in one direction or the other
        struct = [1] * len(prototype)
        silences = binary_closing(silences, struct)
        silences = binary_opening(silences, struct)

        seg = [0, -1, silences[0]]
        silencesList = []
        for i, v in enumerate(silences):
            if not (v == seg[2]):
                seg[1] = i
                silencesList.append(tuple(seg))
                seg = [i, -1, v]
        seg[1] = i
        silencesList.append(tuple(seg))
        selected_segs = []
        candidates = []

        for s in silencesList:
            if s[2] == 1:
                shape = numpy.array(self.energy[s[0]:s[1]])

                d1, _ = computeDist2(prototype, shape)
                d2, _ = computeDist2(prototype2, shape)
                dist = min([d1, d2])

                candidates.append((s[0], s[1], dist))
                if dist < self.threshold:
                    selected_segs.append(s)

        label = {0: 'Start', 1: 'Session'}

        segs = self.new_result(data_mode='label', time_mode='segment')
        segs.id_metadata.id += '.' + 'segments'
        segs.id_metadata.name += ' ' + 'Segments'
        segs.data_object.label_metadata.label = label
        segs.data_object.label = [s[2] for s in selected_segs]
        segs.data_object.time = [(float(s[0]) * step)
                                 for s in selected_segs]
        segs.data_object.duration = [(float(s[1] - s[0]) * step)
                                     for s in selected_segs]
        self.add_result(segs)
    def deblendDonut(self, iniGuessXY, magRatio):
        """
        
        Get the deblended donut image.
        
        Arguments:
            iniGuessXY {[float]} -- Initial guess of (x, y) position of neighboring star.
            magRatio {[float]} -- Initial guess of magnitude ratio between neighboring star 
                                  and bright star.
        
        Returns:
            [float] -- Deblended donut image and pixel x, y position.
        """

        # Deblended image
        imgDeblend = []

        # Position of centroid

        # Get the initial guess of brightest donut
        realcx, realcy, realR, imgBinary = self.getCenterAndR_ef(checkEntropy=True)

        # Remove the salt-and-pepper noise of imgBinary
        imgBinary = binary_opening(imgBinary).astype(float)
        imgBinary = binary_closing(imgBinary).astype(float)

        # Check the image quality
        if (not realcx):
            return imgDeblend, realcx, realcy

        # Get the binary image by adaptive threshold
        adapcx, adapcy, adapR, adapImgBinary = self.getCenterAndR_adap()

        # Calculate the system error by only taking the background signal
        bg1D = self.image.flatten()
        bgImgBinary1D = adapImgBinary.flatten()
        background = bg1D[bgImgBinary1D==0]
        bgPhist, pgCen = np.histogram(background, bins=256)
        sysError = pgCen[0]

        # Remove the system error
        noSysErrImage = self.image - sysError
        noSysErrImage[noSysErrImage<0] = 0

        # Get the residual map
        resImgBinary = adapImgBinary - imgBinary

        # Compensate the zero element for subtraction
        resImgBinary[np.where(resImgBinary<0)] = 0

        # Remove the salt-and-pepper noise of resImgBinary
        resImgBinary = binary_opening(resImgBinary).astype(float)

        # Calculate the shifts of x and y
        x0 = int(iniGuessXY[0] - realcx)
        y0 = int(iniGuessXY[1] - realcy)

        xoptNeighbor = nelderMeadModify(self.__funcResidue, np.array([x0, y0]), 
                                        args=(imgBinary, resImgBinary), step=15)

        # Shift the main donut image to the fitted position of the neighboring star
        fitImgBinary = shift(imgBinary, [int(xoptNeighbor[0][1]), int(xoptNeighbor[0][0])])

        # Handle the numerical error of shift. Regenerate a binary image.
        fitImgBinary[fitImgBinary > 0.5] = 1
        fitImgBinary[fitImgBinary < 0.5] = 0

        # Get the overlap region between main donut and neighboring donut
        imgOverlapBinary = imgBinary + fitImgBinary
        imgOverlapBinary[imgOverlapBinary < 1.5] = 0
        imgOverlapBinary[imgOverlapBinary > 1.5] = 1

        # Get the overall binary image
        imgAllBinary = imgBinary + fitImgBinary
        imgAllBinary[imgAllBinary > 1] = 1

        # Get the reference image for the fitting
        imgRef = noSysErrImage*imgAllBinary

        # Calculate the magnitude ratio of image
        imgMainDonut = noSysErrImage*imgBinary
        imgFit = shift(imgMainDonut, [int(xoptNeighbor[0][1]), int(xoptNeighbor[0][0])])

        xoptMagNeighbor = minimize_scalar(self.__funcMag, bounds = (0, 1), method="bounded",
                                          args=(imgMainDonut, imgOverlapBinary, imgFit, imgRef, xoptNeighbor[0]))

        imgDeblend = imgMainDonut - xoptMagNeighbor.x*imgFit*imgOverlapBinary

        # Repair the boundary of image
        imgDeblend = self.__repairBoundary(imgOverlapBinary, imgBinary, imgDeblend)

        # Calculate the centroid position of donut
        realcy, realcx = center_of_mass(imgBinary)

        return imgDeblend, realcx, realcy
Example #47
0
def clean_frames(frames, return_mask=False):
    # Threshold at 10.0, open to remove small specks, then dilate once to
    # restore the eroded object borders.
    mask = binary_dilation(binary_opening(frames > 10.0, iterations=2), iterations=1)
    out = frames.copy()
    out[~mask] = 0.0
    return out if not return_mask else (out, mask)
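
A brief usage sketch (the frames array is synthetic; the 10.0 intensity threshold comes from the function above):

# Hypothetical usage: zero out the background and inspect the mask.
import numpy as np

frames = np.random.rand(4, 64, 64) * 20.0  # synthetic stack of frames
cleaned, mask = clean_frames(frames, return_mask=True)
print('foreground fraction:', mask.mean())
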
Example #48
0
def segment_head_to_inner_brain(im, threshold=200):
    mask = im > threshold
    # XOR of the hole-filled mask with the mask itself keeps only the
    # interior cavities, i.e. the region enclosed by the skull.
    inner_mask = binary_fill_holes(mask) ^ mask
    inner_mask = binary_opening(inner_mask, iterations=12)
    inner_mask = binary_erosion(inner_mask, iterations=18)
    return inner_mask
Example #49
0
def nd_opening(bin, size):
    return morphology.binary_opening(bin, np.ones(size))
    def lungs_segmentation(self, lungs_threshold=-360):
        seg_prub = np.array(self.data3d <= lungs_threshold)
        seg_prub = morphology.binary_closing(
            seg_prub,
            iterations=self.iteration()).astype(self.segmentation.dtype)
        seg_prub = morphology.binary_opening(seg_prub, iterations=5)
        counts, labeled_seg = self.volume_count(seg_prub)
        #self.segmentation = seg_prub
        #for x in np.nditer(labeled_seg, op_flags=['readwrite']):
        #    if x[...]!=0:
        #        counts[x[...]]=counts[x[...]]+1
        #index=np.argmax(counts) # background
        #counts[index]=0
        index = np.argmax(counts)  # one lung or both lungs
        velikost1 = counts[index]
        counts[index] = 0
        index2 = np.argmax(counts)  # the other lung, or something else
        velikost2 = counts[index2]
        if (1.0 - self.maximal_lung_diff) <= float(velikost2) / velikost1:
            print("lungs separated")
        else:
            print("lungs not separated")
            pocet = 0
            seg_prub = np.array(self.data3d <= lungs_threshold)
            seg_prub = morphology.binary_closing(
                seg_prub,
                iterations=self.iteration()).astype(self.segmentation.dtype)
            seg_prub = morphology.binary_opening(seg_prub, iterations=5)

            while not (1.0 -
                       self.maximal_lung_diff) <= float(velikost2) / velikost1:
                seg_prub = morphology.binary_erosion(seg_prub, iterations=1)
                counts, labeled_seg = self.volume_count(seg_prub)
                index = np.argmax(counts)  # one lung or both lungs
                velikost1 = counts[index]
                counts[index] = 0
                index2 = np.argmax(counts)  # the other lung, or something else
                velikost2 = counts[index2]
                pocet = pocet + 1
            # dilate back by the number of erosion steps performed
            seg_prub = morphology.binary_dilation(
                self.segmentation,
                iterations=pocet).astype(self.segmentation.dtype)
        #self.segmentation = self.segmentation + np.array(labeled_seg==index).astype(np.int8)*self.slab['lungs']
        #self.segmentation = self.segmentation + np.array(labeled_seg==index2).astype(np.int8)*self.slab['lungs']
        plice1 = np.array(labeled_seg == index)
        z, x, y = np.nonzero(plice1)
        m1 = np.max(y)
        if m1 < (self.segmentation.shape[1] / 2):
            self.segmentation = self.segmentation + np.array(
                labeled_seg == index).astype(np.int8) * self.slab['llung']
            self.segmentation = self.segmentation + np.array(
                labeled_seg == index2).astype(np.int8) * self.slab['rlung']
        else:
            self.segmentation = self.segmentation + np.array(
                labeled_seg == index).astype(np.int8) * self.slab['rlung']
            self.segmentation = self.segmentation + np.array(
                labeled_seg == index2).astype(np.int8) * self.slab['llung']
        self.orientation()
        if self.smer == 1:
            self.segmentation[self.segmentation == self.slab['llung']] = 3
            self.segmentation[self.segmentation ==
                              self.slab['rlung']] = self.slab['llung']
            self.segmentation[self.segmentation == 3] = self.slab['rlung']
        pass
    def heart_segmentation(self, heart_threshold=0, top_threshold=200):
        a = self.convolve_structure_heart()
        seg_prub = np.array(
            self.segmentation == self.slab['rlung']) + np.array(
                self.segmentation == self.slab['llung'])

        logger.debug('before convolution')
        seg_prub = filters.convolve((seg_prub - 0.5), a)
        logger.debug('after convolution')

        # import sed3
        # ed = sed3.sed3(seg_prub)
        # ed.show()
        if self.smer == 0:
            seg_prub = np.array(seg_prub <= -0.3)
        else:
            seg_prub = np.array(seg_prub <= 0.3)
        cc = self.__above_diaphragm_calculation(seg_prub)
        #ipdb.set_trace()
        plice1 = np.array(self.segmentation == self.slab['llung'])
        z, x, y = np.nonzero(plice1)

        x1 = [0, 0, 0, 0]
        y1 = [0, 0, 0, 0]
        z1 = [0, 0, 0, 0]
        x1[0] = np.min(x)
        x1[1] = np.max(x)
        y1[0] = np.min(y)
        y1[1] = np.max(y)
        z1[0] = np.max(z)
        z1[1] = np.min(z)
        plice2 = np.array(self.segmentation == self.slab['rlung'])
        z, x, y = np.nonzero(plice2)
        x1[2] = np.min(x)
        x1[3] = np.max(x)
        y1[2] = np.min(y)
        y1[3] = np.max(y)
        z1[2] = np.max(z)
        z1[3] = np.min(z)
        mp = np.zeros(self.segmentation.shape)
        xmin = np.min(x1)
        xmax = np.max(x1)
        ymin = np.min(y1)
        ymax = np.max(y1)
        zmin = np.min(z1)
        zmax = np.max(z1)
        if self.smer == 0:
            mp[zmin:, xmin:xmax, ymin:ymax] = 1
        else:
            mp[:zmax, xmin:xmax, ymin:ymax] = 1

        bones = np.array(self.data3d >= top_threshold)
        aaa = np.array(self.data3d >= heart_threshold)
        aaa = aaa - bones
        logger.debug('before binary opening')
        aaa = morphology.binary_opening(
            aaa, iterations=self.iteration() + 2).astype(
                self.segmentation.dtype)
        aaa = morphology.binary_erosion(aaa, iterations=self.iteration())
        aaa = cc * aaa * mp

        lab, num = label(aaa)
        counts = [0] * (num + 1)
        for x in range(1, num + 1):
            a = np.sum(np.array(lab == x))
            counts[x] = a
        index = np.argmax(counts)
        aaa = np.array(lab == index)
        logger.debug('before dilation')
        aaa = morphology.binary_dilation(aaa, iterations=self.iteration())
        #self.segmentation = aaa
        self.segmentation = self.segmentation + aaa.astype(
            np.int8) * self.slab['heart']
    def post_process(self):
        '''

        '''
        # Normalize energy
        self.energy = self._buffer['energy'][:]

        # BAD PATCH !!!
        self.energy[-1] = 0
        if self.energy.max():
            self.energy = self.energy / self.energy.max()

        silences = [1 if e < self.max_energy else 0 for e in self.energy]
        step = float(self.input_stepsize) / float(self.samplerate())

        path = os.path.split(__file__)[0]
        models_dir = os.path.join(path, 'trained_models')

        prototype1_file = os.path.join(models_dir,
                                       'irit_noise_startSilences_proto1.dat')
        prototype2_file = os.path.join(models_dir,
                                       'irit_noise_startSilences_proto2.dat')

        prototype = numpy.load(prototype1_file)
        prototype2 = numpy.load(prototype2_file)

        # Smoothing to remove small segments in one direction or the other
        struct = [1] * len(prototype)
        silences = binary_closing(silences, struct)
        silences = binary_opening(silences, struct)

        seg = [0, -1, silences[0]]
        silencesList = []
        for i, v in enumerate(silences):
            if not (v == seg[2]):
                seg[1] = i
                silencesList.append(tuple(seg))
                seg = [i, -1, v]
        seg[1] = i
        silencesList.append(tuple(seg))
        segments = []
        start = 0.0

        for s in silencesList:
            if s[2] == 1:
                shape = numpy.array(self.energy[s[0]:s[1]])

                d1, _ = computeDist2(prototype, shape)
                d2, _ = computeDist2(prototype2, shape)
                dist = min([d1, d2])

                if dist < self.threshold:
                    s = list(map(float, s))
                    segments += [(start, s[0] * step - start, 1),
                                 (s[0] * step, (s[1] - s[0]) * step, 0)]
                    start = s[1] * step

        segments += [(start, len(self.energy) * step - start, 1)]

        label = {0: 'Start', 1: 'Session'}
        segs = self.new_result(data_mode='label', time_mode='segment')
        segs.id_metadata.id += '.' + 'segments'
        segs.id_metadata.name += ' ' + 'Segments'
        segs.data_object.label_metadata.label = label

        segs.data_object.time, segs.data_object.duration, segs.data_object.label = zip(
            *segments)

        self.add_result(segs)
Example #53
0
def watershed(a, seeds=None, connectivity=1, mask=None, smooth_thresh=0.0, 
        smooth_seeds=False, minimum_seed_size=0, dams=False,
        override_skimage=False, show_progress=False):
    """Perform the watershed algorithm of Vincent & Soille (1991).
    
    Parameters
    ----------
    a : np.ndarray, arbitrary shape and type
        The input image on which to perform the watershed transform.
    seeds : np.ndarray, int or bool type, same shape as `a` (optional)
        The seeds for the watershed. If provided, these are the only basins
        allowed, and the algorithm proceeds by flooding from the seeds.
        Otherwise, every local minimum is used as a seed.
    connectivity : int, {1, ..., a.ndim} (optional, default 1)
        The neighborhood of each pixel, defined as in `scipy.ndimage`.
    mask : np.ndarray, type bool, same shape as `a`. (optional)
        If provided, perform watershed only in the parts of `a` that are set
        to `True` in `mask`.
    smooth_thresh : float (optional, default 0.0)
        Local minima that are less deep than this threshold are suppressed,
        using `hminima`.
    smooth_seeds : bool (optional, default False)
        Perform binary opening on the seeds, using the same connectivity as
        the watershed.
    minimum_seed_size : int (optional, default 0)
        Remove seed regions smaller than this size.
    dams : bool (optional, default False)
        Place a dam where two basins meet. Set this to True if you require
        0-labeled boundaries between different regions.
    override_skimage : bool (optional, default False)
        skimage.morphology.watershed is used to implement the main part of the
        algorithm when `dams=False`. Use this flag to use the separate pure
        Python implementation instead.
    show_progress : bool (optional, default False)
        Show a cute little ASCII progress bar (using the progressbar package)

    Returns
    -------
    ws : np.ndarray, same shape as `a`, int type.
        The watershed transform of the input image.
    """
    seeded = seeds is not None
    sel = generate_binary_structure(a.ndim, connectivity)
    # various keyword arguments operate by modifying the input image `a`.
    # However, we operate on a copy of it called `b`, so that `a` can be used
    # to break ties.
    b = a
    if not seeded:
        seeds = regional_minima(a, connectivity)
    if minimum_seed_size > 0:
        seeds = remove_small_connected_components(seeds, minimum_seed_size,
                                                  in_place=True)
        seeds = relabel_from_one(seeds)[0]
    if smooth_seeds:
        seeds = binary_opening(seeds, sel)
    if smooth_thresh > 0.0:
        b = hminima(a, smooth_thresh)
    if seeds.dtype == bool:
        seeds = label(seeds, sel)[0]
    if skimage_available and not override_skimage and not dams:
        return skimage.morphology.watershed(b, seeds, sel, None, mask)
    elif seeded:
        b = impose_minima(a, seeds.astype(bool), connectivity)
    levels = unique(b)
    a = pad(a, a.max()+1)
    b = pad(b, b.max()+1)
    ar = a.ravel()
    br = b.ravel()
    ws = pad(seeds, 0)
    wsr = ws.ravel()
    neighbors = build_neighbors_array(a, connectivity)
    level_pixels = build_levels_dict(b)
    if show_progress: wspbar = ip.StandardProgressBar('Watershed...')
    else: wspbar = ip.NoProgressBar()
    for i, level in ip.with_progress(enumerate(levels), 
                                            pbar=wspbar, length=len(levels)):
        idxs_adjacent_to_labels = queue([idx for idx in level_pixels[level] if
                                            any(wsr[neighbors[idx]])])
        while len(idxs_adjacent_to_labels) > 0:
            idx = idxs_adjacent_to_labels.popleft()
            if wsr[idx] > 0: continue # in case we already processed it
            nidxs = neighbors[idx] # neighbors
            lnidxs = nidxs[(wsr[nidxs] != 0).astype(bool)] # labeled neighbors
            adj_labels = unique(wsr[lnidxs])
            if len(adj_labels) == 1 or len(adj_labels) > 1 and not dams: 
                # assign a label
                wsr[idx] = wsr[lnidxs][ar[lnidxs].argmin()]
                idxs_adjacent_to_labels.extend(nidxs[((wsr[nidxs] == 0) * 
                                    (br[nidxs] == level)).astype(bool) ])
    return juicy_center(ws)
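
A minimal call sketch for the function above (the image and seeds are synthetic; the keyword values are illustrative assumptions):

# Hypothetical usage: seeded watershed on a random elevation map.
import numpy as np

a = np.random.rand(64, 64)
seeds = np.zeros_like(a, dtype=int)
seeds[16, 16], seeds[48, 48] = 1, 2  # two manually placed basin seeds
ws = watershed(a, seeds=seeds, connectivity=1, smooth_thresh=0.05, dams=True)
print(np.unique(ws))  # basin labels; 0 marks dam pixels
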
Example #54
0
        er -= st.rolling_mean2(a, 500)
        err.append(-st.shift_2d(er, vx, vy))
        if len(err) <= 1:  ####secondary-layer processing requires three frames
            continue
        if vy**2 + vx**2 >= 50**2:  ######The motion of the dominant layer is fast, likely low clouds. Do NOT trigger the second layer algorithm
            v2 += [[np.nan, np.nan]]
            err.popleft()
            continue

#####process the secondary layer
        ert = er + err[-2]  ####total error
        scale = red2 / np.nanmean(red2)
        nopen = max(5, int(np.sqrt(vx**2 + vy**2) / 3))
        cm2 = (ert > 15 * scale) & (q[-2].cm)
        cm2 = morphology.binary_opening(cm2, np.ones(
            (nopen, nopen)))  ####remove line-like structures
        cm2 = remove_small_objects(cm2, min_size=500, connectivity=4)
        ####remove small objects
        sec_layer = np.sum(cm2) / len(
            cm2.ravel())  ###the amount of clouds in secondary layer
        if sec_layer < 5e-3:  ###too few pixels, no need to process secondary cloud layer
            v2 += [[np.nan, np.nan]]
            err.popleft()
            continue
        elif sec_layer > 1e-1:  ####there is a significant amount of secondary-layer cloud; we may need to re-run
            pass
            ####the cloud motion algorithm for the dominant cloud layer by masking out the secondary layer

#####obtain the mask for the secondary cloud layer using a watershed-like algorithm
        mred = q[-2].rgb[:, :, 0].astype(np.float32) - st.fill_by_mean2(
            q[-2].rgb[:, :, 0], 200, mask=~cm2)
    def post_process(self):
        '''

        '''
        # Normalize energy
        self.energy = self._buffer['energy'][:]
        
        
        # BAD PATCH !!!
        self.energy[-1] = 0
        if self.energy.max():
            self.energy = self.energy / self.energy.max()

        silences = [1 if e < self.max_energy else 0 for e in self.energy]
        step = float(self.input_stepsize) / float(self.samplerate())

        path = os.path.split(__file__)[0]
        models_dir = os.path.join(path, 'trained_models')

        prototype1_file = os.path.join(models_dir,
                                       'irit_noise_startSilences_proto1.dat')
        prototype2_file = os.path.join(models_dir,
                                       'irit_noise_startSilences_proto2.dat')

        prototype = numpy.load(prototype1_file)
        prototype2 = numpy.load(prototype2_file)

        # Smoothing to remove small segments in one direction or the other
        struct = [1] * len(prototype)
        silences = binary_closing(silences, struct)
        silences = binary_opening(silences, struct)

        seg = [0, -1, silences[0]]
        silencesList = []
        for i, v in enumerate(silences):
            if not (v == seg[2]):
                seg[1] = i
                silencesList.append(tuple(seg))
                seg = [i, -1, v]
        seg[1] = i
        silencesList.append(tuple(seg))
        segments = []
        start = 0.0
        
        for s in silencesList:
            if s[2] == 1:
                shape = numpy.array(self.energy[s[0]:s[1]])

                d1, _ = computeDist2(prototype, shape)
                d2, _ = computeDist2(prototype2, shape)
                dist = min([d1, d2])

                if dist < self.threshold:
                    s = list(map(float, s))
                    segments += [(start, s[0] * step - start, 1),
                                 (s[0] * step, (s[1] - s[0]) * step, 0)]
                    start = s[1] * step

        segments += [(start, len(self.energy)*step-start, 1)]
                
        label = {0: 'Start', 1: 'Session'}
        segs = self.new_result(data_mode='label', time_mode='segment')
        segs.id_metadata.id += '.' + 'segments'
        segs.id_metadata.name += ' ' + 'Segments'
        segs.data_object.label_metadata.label = label
        
        segs.data_object.time, segs.data_object.duration, segs.data_object.label = zip(
            *segments)

        self.add_result(segs)
Example #56
0
# normalize to the range 0-1
pixels /= 65535.000
# confirm the normalization
print('Min: %.3f, Max: %.3f' % (pixels.min(), pixels.max()))

dapi = pixels[0, :, :]
plt.imshow(dapi)
#plt.show()
GFP = pixels[1, :, :]
plt.imshow(GFP)

block_size = 21
nmask = threshold_local(dapi, block_size, offset=0.0004)
nmask2 = dapi > nmask
plt.imshow(nmask2)
nmask3 = binary_opening(nmask2, structure=np.ones((3, 3))).astype(np.float64)
plt.imshow(nmask3)

nmask4 = binary_fill_holes(nmask3)
plt.imshow(nmask4)
perimeter(nmask4, neighbourhood=4)
all_labels = measure.label(nmask4)
plt.imshow(all_labels)

label_objects, nb_labels = label(all_labels)
plt.imshow(label_objects)
sizes = np.bincount(label_objects.ravel())
#np.histogram(sizes, bins=10, range=None)
#plt.hist(sizes, bins='auto')
#plt.show()
mask_sizes = sizes > 250  # True for labels with more than 250 pixels
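
A common follow-up (not part of the original excerpt) applies the size mask back to the label image to discard the small objects:

# Keep only objects larger than 250 pixels, then relabel the cleaned mask.
mask_sizes[0] = False                # drop the background label
cleaned = mask_sizes[label_objects]  # boolean image of the large objects
filtered_labels = measure.label(cleaned)
plt.imshow(filtered_labels)
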
Example #57
0
File: 0424.py Project: ta-oyama/PCV

# 1.4.3 Morphology
# Load the image and binarize it with a threshold
im = np.array(Image.open("houses.png").convert("L"))
im = 1 * (im < 128)

labels, nbr_objects = measurements.label(im)
print "Number of objects:", nbr_objects
plt.figure()
plt.imshow(im)
plt.figure()
plt.imshow(labels)

# Remove the places where separate objects are connected
im_open = morphology.binary_opening(im, np.ones((9, 5)), iterations=2)

labels_open, nbr_objects_open = measurements.label(im_open)
print "Number of objects:", nbr_objects_open
plt.figure()
plt.imshow(im_open)
plt.figure()
plt.imshow(labels_open)


# 1.4.4.1 Reading and writing .mat files
# Save a .mat file
x = [1, 2]
data = {}
data["x"] = x
scipy.io.savemat("test.mat", data)
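
Reading the file back is the natural counterpart (a brief sketch; scipy.io.loadmat returns a dict keyed by variable name):

# Load the saved .mat file; 'x' comes back as a 2-D array([[1, 2]]).
data_in = scipy.io.loadmat("test.mat")
print(data_in["x"])
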
Example #58
0
def watershed(a,
              seeds=None,
              connectivity=1,
              mask=None,
              smooth_thresh=0.0,
              smooth_seeds=False,
              minimum_seed_size=0,
              dams=False,
              override_skimage=False,
              show_progress=False):
    """Perform the watershed algorithm of Vincent & Soille (1991).
    
    Parameters
    ----------
    a : np.ndarray, arbitrary shape and type
        The input image on which to perform the watershed transform.
    seeds : np.ndarray, int or bool type, same shape as `a` (optional)
        The seeds for the watershed. If provided, these are the only basins
        allowed, and the algorithm proceeds by flooding from the seeds.
        Otherwise, every local minimum is used as a seed.
    connectivity : int, {1, ..., a.ndim} (optional, default 1)
        The neighborhood of each pixel, defined as in `scipy.ndimage`.
    mask : np.ndarray, type bool, same shape as `a`. (optional)
        If provided, perform watershed only in the parts of `a` that are set
        to `True` in `mask`.
    smooth_thresh : float (optional, default 0.0)
        Local minima that are less deep than this threshold are suppressed,
        using `hminima`.
    smooth_seeds : bool (optional, default False)
        Perform binary opening on the seeds, using the same connectivity as
        the watershed.
    minimum_seed_size : int (optional, default 0)
        Remove seed regions smaller than this size.
    dams : bool (optional, default False)
        Place a dam where two basins meet. Set this to True if you require
        0-labeled boundaries between different regions.
    override_skimage : bool (optional, default False)
        skimage.morphology.watershed is used to implement the main part of the
        algorithm when `dams=False`. Use this flag to use the separate pure
        Python implementation instead.
    show_progress : bool (optional, default False)
        Show a cute little ASCII progress bar (using the progressbar package)

    Returns
    -------
    ws : np.ndarray, same shape as `a`, int type.
        The watershed transform of the input image.
    """
    seeded = seeds is not None
    sel = generate_binary_structure(a.ndim, connectivity)
    # various keyword arguments operate by modifying the input image `a`.
    # However, we operate on a copy of it called `b`, so that `a` can be used
    # to break ties.
    b = a
    if not seeded:
        seeds = regional_minima(a, connectivity)
    if minimum_seed_size > 0:
        seeds = remove_small_connected_components(seeds,
                                                  minimum_seed_size,
                                                  in_place=True)
        seeds = relabel_from_one(seeds)[0]
    if smooth_seeds:
        seeds = binary_opening(seeds, sel)
    if smooth_thresh > 0.0:
        b = hminima(a, smooth_thresh)
    if seeds.dtype == bool:
        seeds = label(seeds, sel)[0]
    if skimage_available and not override_skimage and not dams:
        return skimage.morphology.watershed(b, seeds, sel, None, mask)
    elif seeded:
        b = impose_minima(a, seeds.astype(bool), connectivity)
    levels = unique(b)
    a = pad(a, a.max() + 1)
    b = pad(b, b.max() + 1)
    ar = a.ravel()
    br = b.ravel()
    ws = pad(seeds, 0)
    wsr = ws.ravel()
    neighbors = build_neighbors_array(a, connectivity)
    level_pixels = build_levels_dict(b)
    if show_progress: wspbar = ip.StandardProgressBar('Watershed...')
    else: wspbar = ip.NoProgressBar()
    for i, level in ip.with_progress(enumerate(levels),
                                     pbar=wspbar,
                                     length=len(levels)):
        idxs_adjacent_to_labels = queue(
            [idx for idx in level_pixels[level] if any(wsr[neighbors[idx]])])
        while len(idxs_adjacent_to_labels) > 0:
            idx = idxs_adjacent_to_labels.popleft()
            if wsr[idx] > 0: continue  # in case we already processed it
            nidxs = neighbors[idx]  # neighbors
            lnidxs = nidxs[(wsr[nidxs] != 0).astype(bool)]  # labeled neighbors
            adj_labels = unique(wsr[lnidxs])
            if len(adj_labels) == 1 or len(adj_labels) > 1 and not dams:
                # assign a label
                wsr[idx] = wsr[lnidxs][ar[lnidxs].argmin()]
                idxs_adjacent_to_labels.extend(
                    nidxs[((wsr[nidxs] == 0) *
                           (br[nidxs] == level)).astype(bool)])
    return juicy_center(ws)
Example #59
0
    def segment(self, fill_holes=False, edt_sampling=(3,1,1),
                edt_smooth=[1,3,3]):
        """Segment objects within the image according to attributes provided.

        Yields: a PexSegmentObj containing the segmented objects, all images
            generated during segmentation (for post-hoc analysis), and
            relevant values, e.g. numbers and names of segmented particles.
            See PexSegmentObj documentation for more details.
        """
        starttime = time.time() # begin timing
        f_directory = os.getcwd()
        pdout = [] # list of PexSegmentObj attributes to pass to pandas for csv output
        # data import
        if self.filename != '':
            print('reading ' + self.filename)
            raw_img = io.imread(self.filename)
        elif self.src_data is not None:
            raw_img = self.src_data
        print('raw image imported.')
        if self.seg_method == 'pre-thresholded':
            gaussian_img = raw_img
        else:
            # gaussian filter
            print('performing gaussian filtering...')
            gaussian_img = gaussian_filter(raw_img,
                                           [self.g_z, self.g_xy, self.g_xy])
            print('Image smoothed.')
        print('preprocessing complete.')
        ## SEGMENTATION BY THRESHOLDING THE GAUSSIAN ##
        if self.seg_method == 'threshold':
            # binary thresholding and cleanup
            print('thresholding...')
            threshold_img = np.copy(gaussian_img)
            if self.mode == 'threshold':
                print('mode = threshold.')
                # make binary image
                threshold_img[threshold_img < self.threshold] = 0
                threshold_img[threshold_img > 0] = 1
                print('thresholding complete.')
                if fill_holes:
                    print('filling holes in objects.')
                    for i in range(0,threshold_img.shape[0]):
                        threshold_img[i, :, :] = binary_fill_holes(
                            threshold_img[i, :, :])
            elif self.mode == 'bg_scaled':
                print('mode = background-scaled.')
                self.thresholds = {}
                threshold_img = np.zeros(shape = raw_img.shape)
                for i in self.cells.obj_nums:
                    if i == 0:
                        pass
                    else:
                        print('thresholding cell ' + str(i))
                        # get median for the cell
                        cell_median = np.median(gaussian_img[self.cells.final_cells == i])
                        # generate the thresholded binary mask for each cell
                        threshold_img[np.logical_and(self.cells.final_cells == i,
                                      gaussian_img > cell_median + self.bg_diff)] = 1
                        self.thresholds[i] = cell_median + self.bg_diff #store val
                print('thresholding complete.')
            else:
                raise ValueError('mode parameter must be bg_scaled or threshold.')
            # distance and maxima transformation to find objects
            # next two steps assume 100x objective and 0.2 um slices
            print('generating distance map...')
            dist_map = distance_transform_edt(threshold_img, sampling = edt_sampling)
            print('distance map complete.')
            print('smoothing distance map...')
            # smooth the distance map
            smooth_dist = gaussian_filter(dist_map, edt_smooth)
            print('distance map smoothed.')
            print('identifying maxima...')
            # find local maxima in the smoothed distance map
            # these will be the watershed seeds
            max_strel = generate_binary_structure(3,2)
            maxima = maximum_filter(smooth_dist,
                                    footprint = max_strel) == smooth_dist
            # clean up background and edges
            bgrd_3d = smooth_dist == 0
            eroded_bgrd = binary_erosion(bgrd_3d, structure = max_strel,
                                         border_value = 1)
            maxima = np.logical_xor(maxima, eroded_bgrd)
            print('maxima identified.')
            # watershed segmentation
            labs = self.watershed_labels(maxima)
            print('watershedding...')
            peroxisomes = watershed(-smooth_dist, labs, mask = threshold_img)
            print('watershedding complete.')
            if self.mode == 'bg_scaled':
                # find cell boundaries and define objects that are on the
                # edges, then assign segmented objects to parent cells
                edge_struct = generate_binary_structure(3,1)
                self.c_edges = {}
                print('finding edges of cells...')
                for i in self.cells.obj_nums:
                    self.c_edges[i] = np.logical_xor(self.cells.final_cells == i,
                                                          binary_erosion(self.cells.final_cells== i,
                                                                         edge_struct))
                print('cell edges found.')
                self.primary_objs = [x for x in np.unique(peroxisomes) if x != 0]
                self.parent = {}
                self.obj_edges = {}
                self.on_edge = {}
                pex_mask = peroxisomes != 0
                for obj in self.primary_objs:
                    self.parent[obj] = self.cells.final_cells[labs == obj][0]
                    obj_mask = peroxisomes == obj
                    obj_edge = np.logical_xor(obj_mask,
                                              binary_erosion(obj_mask,
                                                             edge_struct))
                    self.obj_edges[obj] = obj_edge
                    # test if the object's edge and its cell's edge overlap
                    if np.any(np.logical_and(obj_edge,
                                             self.c_edges[self.parent[obj]])):
                        self.on_edge[obj] = True
                        print('object on the edge: ' + str(obj))
                        print('parent cell: ' + str(self.parent[obj]))
                        new_obj = obj_mask
                        search_obj = obj_mask
                        tester = 0
                        iteration = 1
                        while tester == 0:
                            # TODO: FIX THIS BLOCK OF CODE! GETTING STUCK WITHIN
                            # IT! NOT SURE HOW MANY ITERATIONS ITS DOING, OR FOR
                            # HOW MANY DIFFERENT PEROXISOMES.
                            new_px = binary_dilation(search_obj, edge_struct)
                            new_px[np.logical_or(new_obj, pex_mask)] = False
                            print('iteration: ' + str(iteration))
                            # print('new pixels for iteration ' + str(iteration) + \
                            #      ': ')
                            # print(np.nonzero(new_px))
                            if np.any(gaussian_img[new_px] >
                                      self.thresholds[self.parent[obj]]):
                                to_add = np.logical_and(new_px, gaussian_img >
                                                        self.thresholds[self.parent[obj]])
                                new_obj = np.logical_or(new_obj, to_add)
                            #    print('object pixels after iteration '
                            #          + str(iteration) + ': ')
                            #    print(np.nonzero(new_obj))
                                search_obj = to_add # only search from new pixels
                            else:
                                peroxisomes[new_obj] = obj
                                tester = 1
                            iteration = iteration + 1
                    else:
                        self.on_edge[obj] = False
        elif self.seg_method == 'canny':
            ## EDGE-DETECTION BASED SEGMENTATION ##
            threshold_img = np.empty_like(gaussian_img)
            edge_img = np.empty_like(gaussian_img)
            c_strel = generate_binary_structure(2,1)
            # perform canny edge detection on each slice s
            for s in range(0,gaussian_img.shape[0]):
                if self.mode == 'absolute':
                    c = canny(gaussian_img[s, :, :],
                              sigma=0,
                              low_threshold=self.low_threshold,
                              high_threshold=self.high_threshold)
                elif self.mode == 'scaled':
                    c = canny(gaussian_img[s, :, :],
                              sigma=0,
                              low_threshold=self.low_threshold,
                              high_threshold=self.high_threshold,
                              use_quantiles=True)
                # clean up object edges that have gaps
                c = binary_closing(c,c_strel)
                edge_img[s,:,:] = np.copy(c)
                # fill holes to generate binary mask of objects
                c = binary_fill_holes(c)
                c = binary_opening(c, c_strel) # eliminate incomplete lines
                threshold_img[s,:,:] = c
            print('generating distance map...')
            dist_map = distance_transform_edt(threshold_img, sampling = (3,1,1))
            print('distance map complete.')
            print('smoothing distance map...')
            smooth_dist = gaussian_filter(dist_map, [1,2,2])
            print('distance map smoothed.')
            print('identifying maxima...')
            max_strel = generate_binary_structure(3,2)
            # identify local maxima (these will be the seed points for
            # watershed segmentation)
            maxima = maximum_filter(smooth_dist,
                                    footprint = max_strel) == smooth_dist
            # clean up background and edges
            bgrd_3d = smooth_dist == 0
            eroded_bgrd = binary_erosion(bgrd_3d, structure = max_strel,
                                         border_value = 1)
            maxima = np.logical_xor(maxima, eroded_bgrd)
            print('maxima identified.')
            # watershed segmentation
            labs = self.watershed_labels(maxima)
            print('watershedding...')
            peroxisomes = watershed(-smooth_dist, labs, mask = threshold_img)
            print('watershedding complete.')
            if hasattr(self, 'cells'):
                # assign segmented objects to cells if a CellSegmentObj was
                # included
                self.primary_objs = [x for x in np.unique(peroxisomes) \
                                     if x != 0]
                self.parent = {}
                for obj in self.primary_objs:
                    o_parent = self.cells.final_cells[labs == obj][0]
                    if o_parent == 0:
                        self.primary_objs.remove(obj)
                    else:
                        self.parent[obj] = o_parent
        elif self.seg_method == 'pre-thresholded':
            threshold_img = np.copy(gaussian_img)
            if fill_holes:
                print('filling holes in objects.')
                for i in range(0, threshold_img.shape[0]):
                    threshold_img[i, :, :] = binary_fill_holes(
                        threshold_img[i, :, :])
                print('holes filled.')
            dist_map = distance_transform_edt(threshold_img,
                                              sampling=edt_sampling)
            print('distance map complete.')
            print('smoothing distance map...')
            # smooth the distance map
            smooth_dist = gaussian_filter(dist_map, edt_smooth)
            print('distance map smoothed.')
            print('identifying maxima...')
            # find local maxima in the smoothed distance map
            # these will be the watershed seeds
            max_strel = generate_binary_structure(3, 2)
            maxima = maximum_filter(smooth_dist,
                                    footprint=max_strel) == smooth_dist
            # clean up background and edges
            bgrd_3d = smooth_dist == 0
            eroded_bgrd = binary_erosion(bgrd_3d, structure= max_strel,
                                         border_value=1)
            maxima = np.logical_xor(maxima, eroded_bgrd)
            print('maxima identified.')
            # watershed segmentation
            labs = self.watershed_labels(maxima)
            print('watershedding...')
            peroxisomes = watershed(-smooth_dist, labs, mask=threshold_img)
            print('watershedding complete.')
        # Sometimes the watershedding algorithm inaccurately separates objects
        # on different Z-slices. The next section merges objects with
        # significant overlap
        for s in range(1,peroxisomes.shape[0]):
            cslice = peroxisomes[s,:,:]
            lslice = peroxisomes[s-1,:,:]
            for obj in np.unique(cslice)[np.unique(cslice)!= 0]:
                lslice_vals, cts = np.unique(lslice[cslice == obj],
                                             return_counts = True)
                lslice_vals = lslice_vals.tolist()
                cts = cts.tolist()
                ordered_by_ct = sorted(zip(lslice_vals, cts),
                                       key = itemgetter(1))
                if ordered_by_ct[-1][0] == 0 or ordered_by_ct[-1][0] == obj:
                    continue
                else:
                    # if more than half of the pixels in the slice below obj
                    # are from another object, change obj to that object #
                    if float(ordered_by_ct[-1][1])/cslice[cslice == obj].size>0.5:
                        peroxisomes[s,:,:][cslice == obj] = ordered_by_ct[-1][0]
        obj_nums, volumes = np.unique(peroxisomes, return_counts=True)
        volumes = dict(zip(obj_nums.astype('uint16'), volumes))
        # remove the background
        del volumes[0]
        obj_nums = obj_nums.astype('uint16').tolist()
        obj_nums.remove(0)
        # generate dict of relevant parameters to pass to PexSegmentObj
        mode_params = {}
        if hasattr(self, 'parent'):
            pdout.append('parent')
            mode_params['parent'] = self.parent
        if self.seg_method == 'canny':
            mode_params['high_threshold'] = self.high_threshold
            mode_params['low_threshold'] = self.low_threshold
            mode_params['edges'] = edge_img
            pdout.append('volumes')
        if self.seg_method == 'threshold':
            if self.mode == 'threshold':
                mode_params['threshold'] = self.threshold
                pdout.append('volumes')
            elif self.mode == 'bg_scaled':
                mode_params['thresholds'] = self.thresholds
                mode_params['bg_diff'] = self.bg_diff
                mode_params['cells'] = self.cells
                mode_params['cell_edges'] = self.c_edges
                mode_params['cell_nums'] = self.cells.obj_nums
                mode_params['obj_edges'] = self.obj_edges
                mode_params['on_edge'] = self.on_edge
                for x in ['thresholds','on_edge','parent', 'volumes']:
                    pdout.append(x)
        return PexSegmentObj(f_directory, self.filename, raw_img,
                             gaussian_img, self.seg_method, self.mode,
                             threshold_img, dist_map, smooth_dist, maxima,
                             labs, peroxisomes, obj_nums, volumes,
                             to_pdout=pdout, mode_params=mode_params)
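
The slice-merge loop near the end of segment() relabels an object when most of its pixels sit on a different label in the slice below; a toy standalone sketch of that rule (the arrays are illustrative):

# Sketch: merge object 2 into object 1 because >50% of its pixels overlap 1.
import numpy as np

peroxisomes = np.array([[[1, 1, 0],
                         [1, 1, 0]],
                        [[2, 2, 0],
                         [0, 2, 0]]])
cslice, lslice = peroxisomes[1], peroxisomes[0]
vals, cts = np.unique(lslice[cslice == 2], return_counts=True)
dominant = vals[np.argmax(cts)]
if dominant not in (0, 2) and cts.max() / (cslice == 2).sum() > 0.5:
    cslice[cslice == 2] = dominant  # writes through the view into peroxisomes
print(peroxisomes[1])  # object 2 relabeled as 1
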
    def heart_segmentation(self, heart_threshold=0, top_threshold=200):
        a = self.convolve_structure_heart()
        seg_prub = np.array(
            self.segmentation == self.slab['rlung']) + np.array(
                self.segmentation == self.slab['llung'])

        logger.debug('before convolution')
        seg_prub = filters.convolve((seg_prub - 0.5), a)
        logger.debug('after convolution')

        # import sed3
        # ed = sed3.sed3(seg_prub)
        # ed.show()
        if self.smer == 0:
            seg_prub = np.array(seg_prub <= -0.3)
        else:
            seg_prub = np.array(seg_prub <= 0.3)
        cc = self.__above_diaphragm_calculation(seg_prub)
        #ipdb.set_trace()
        plice1 = np.array(self.segmentation == self.slab['llung'])
        z, x, y = np.nonzero(plice1)

        x1 = [0, 0, 0, 0]
        y1 = [0, 0, 0, 0]
        z1 = [0, 0, 0, 0]
        x1[0] = np.min(x)
        x1[1] = np.max(x)
        y1[0] = np.min(y)
        y1[1] = np.max(y)
        z1[0] = np.max(z)
        z1[1] = np.min(z)
        plice2 = np.array(self.segmentation == self.slab['rlung'])
        z, x, y = np.nonzero(plice2)
        x1[2] = np.min(x)
        x1[3] = np.max(x)
        y1[2] = np.min(y)
        y1[3] = np.max(y)
        z1[2] = np.max(z)
        z1[3] = np.min(z)
        mp = np.zeros(self.segmentation.shape)
        xmin = np.min(x1)
        xmax = np.max(x1)
        ymin = np.min(y1)
        ymax = np.max(y1)
        zmin = np.min(z1)
        zmax = np.max(z1)
        if self.smer == 0:
            mp[zmin:, xmin:xmax, ymin:ymax] = 1
        else:
            mp[:zmax, xmin:xmax, ymin:ymax] = 1

        bones = np.array(self.data3d >= top_threshold)
        aaa = np.array(self.data3d >= heart_threshold)
        aaa = aaa - bones
        logger.debug('before binary opening')
        aaa = morphology.binary_opening(aaa, iterations=self.iteration() +
                                        2).astype(self.segmentation.dtype)
        aaa = morphology.binary_erosion(aaa, iterations=self.iteration())
        aaa = cc * aaa * mp

        lab, num = label(aaa)
        counts = [0] * (num + 1)
        for x in range(1, num + 1):
            a = np.sum(np.array(lab == x))
            counts[x] = a
        index = np.argmax(counts)
        aaa = np.array(lab == index)
        logger.debug('before dilation')
        aaa = morphology.binary_dilation(aaa, iterations=self.iteration())
        #self.segmentation= aaa
        self.segmentation = self.segmentation + aaa.astype(
            np.int8) * self.slab['heart']