def run(self, ips, imgs, para=None):
    lab = WindowsManager.get(para['lab']).ips.get_img()
    if lab.dtype != np.uint8 and lab.dtype != np.uint16:
        IPy.alert('Label image must be 8-bit or 16-bit')
        return
    index = range(1, lab.max() + 1)
    data = [index]

    img = ips.get_img()
    if img is lab:
        img = img > 0
    if para['mode'] == 'Center':
        pos = np.round(ndimage.center_of_mass(img, lab, index), 2)[:, ::-1]
        data.append(pos[:, 0])
        data.append(pos[:, 1])
    if para['mode'] == 'Max':
        pos = np.round(ndimage.maximum_position(img, lab, index), 2)[:, ::-1]
        data.append(pos[:, 0])
        data.append(pos[:, 1])
    if para['mode'] == 'Min':
        pos = np.round(ndimage.minimum_position(img, lab, index), 2)[:, ::-1]
        data.append(pos[:, 0])
        data.append(pos[:, 1])
    body = [tuple(i) for i in pos]
    ips.roi = PointRoi(body)
def test_minimum_position06(): "minimum position 6" labels = [1, 2, 3, 4] for type in types: input = np.array([[5, 4, 2, 5], [3, 7, 0, 2], [1, 5, 1, 1]], type) output = ndimage.minimum_position(input, labels, 2) assert_equal(output, (0, 1))
def run(self, ips, imgs, para=None):
    lab = WindowsManager.get(para['lab']).ips.get_img()
    if lab.dtype != np.uint8 and lab.dtype != np.uint16:
        IPy.alert('Label image must be 8-bit or 16-bit')
        return
    index = range(1, lab.max() + 1)
    titles = ['Center-X', 'Center-Y', 'Max-X', 'Max-Y', 'Min-X', 'Min-Y']
    key = {
        'Max-X': 'max',
        'Max-Y': 'max',
        'Min-X': 'min',
        'Min-Y': 'min',
        'Center-X': 'center',
        'Center-Y': 'center'
    }
    titles = ['value'] + [i for i in titles if para[key[i]]]
    data = [index]

    img = ips.get_img()
    if img is lab:
        img = img > 0
    if para['center']:
        pos = np.round(ndimage.center_of_mass(img, lab, index), 2)
        data.append(pos[:, 0])
        data.append(pos[:, 1])
    if para['max']:
        # the 'Max' columns come from maximum_position (was minimum_position)
        pos = np.round(ndimage.maximum_position(img, lab, index), 2)
        data.append(pos[:, 0])
        data.append(pos[:, 1])
    if para['min']:
        # the 'Min' columns come from minimum_position (was maximum_position)
        pos = np.round(ndimage.minimum_position(img, lab, index), 2)
        data.append(pos[:, 0])
        data.append(pos[:, 1])
    data = list(zip(*data))
    IPy.table(ips.title + '-position', data, titles)
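# A minimal, self-contained sketch (not the ImagePy plugin above) of the
# pattern it relies on: given an intensity image and a label image,
# center_of_mass / maximum_position / minimum_position each return one
# (row, col) coordinate per requested label. All arrays here are toy data.
import numpy as np
from scipy import ndimage

img = np.array([[0, 1, 2, 0],
                [0, 5, 3, 0],
                [0, 0, 0, 7]], dtype=float)
lab = np.array([[0, 1, 1, 0],
                [0, 1, 1, 0],
                [0, 0, 0, 2]])
index = range(1, lab.max() + 1)

centers = ndimage.center_of_mass(img, lab, index)    # one (row, col) float pair per label
maxima = ndimage.maximum_position(img, lab, index)   # one (row, col) int pair per label
minima = ndimage.minimum_position(img, lab, index)   # one (row, col) int pair per label
print(centers, maxima, minima)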
def test_minimum_position04(): "minimum position 4" input = np.array([[5, 4, 2, 5], [3, 7, 1, 2], [1, 5, 1, 1]], bool) output = ndimage.minimum_position(input) assert_equal(output, (0, 0))
def test_minimum_position05(): "minimum position 5" labels = [1, 2, 0, 4] for type in types: input = np.array([[5, 4, 2, 5], [3, 7, 0, 2], [1, 5, 2, 3]], type) output = ndimage.minimum_position(input, labels) assert_equal(output, (2, 0))
def test_minimum_position01(): "minimum position 1" labels = np.array([1, 0], bool) for type in types: input = np.array([[1, 2], [3, 4]], type) output = ndimage.minimum_position(input, labels=labels) assert_equal(output, (0, 0))
def test_minimum_position02(): for type in types: input = np.array([[5, 4, 2, 5], [3, 7, 0, 2], [1, 5, 1, 1]], type) output = ndimage.minimum_position(input) assert_equal(output, (1, 2))
def test_minimum_position07(): labels = [1, 2, 3, 4] for type in types: input = np.array([[5, 4, 2, 5], [3, 7, 0, 2], [1, 5, 1, 1]], type) output = ndimage.minimum_position(input, labels, [2, 3]) assert_equal(output[0], (0, 1)) assert_equal(output[1], (1, 2))
def test_minimum_position06(): labels = [1, 2, 3, 4] for type in types: input = np.array([[5, 4, 2, 5], [3, 7, 0, 2], [1, 5, 1, 1]], type) output = ndimage.minimum_position(input, labels, 2) assert_equal(output, (0, 1))
def test_minimum_position05(): labels = [1, 2, 0, 4] for type in types: input = np.array([[5, 4, 2, 5], [3, 7, 0, 2], [1, 5, 2, 3]], type) output = ndimage.minimum_position(input, labels) assert_equal(output, (2, 0))
def _forwardImplementation(self, inbuf, outbuf):
    """ assigns one of the neurons to the input given in inbuf and writes
        the neuron's coordinates to outbuf. """
    # calculate the winner neuron with lowest error (square difference)
    self.difference = self.neurons - tile(inbuf, (self.nNeurons, self.nNeurons, 1))
    error = sum(self.difference ** 2, 2)
    self.winner = array(minimum_position(error))
    if not self.outputFullMap:
        outbuf[:] = self.winner
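# Hedged sketch of the winner-take-all lookup used above: the squared error
# between the input and every neuron weight is summed over the feature axis,
# and minimum_position returns the grid coordinates of the best-matching unit.
# The names (weights, x) are illustrative, not taken from the class above.
import numpy as np
from scipy.ndimage import minimum_position

n = 4                                    # 4x4 grid of neurons
weights = np.random.rand(n, n, 3)        # 3-dimensional weight vector per neuron
x = np.random.rand(3)                    # input sample

error = ((weights - x) ** 2).sum(axis=2)     # squared distance per neuron
winner = minimum_position(error)             # (row, col) of the closest neuron
print(winner)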
def find_center(img): def diagSum(a, d=1): #sum intensity values across a diagonal w, h = a.shape l = (np.tile(range(h), (w, 1))) * d + (np.tile(range(w), (h, 1))).T return nd.sum(a, l, list(range(np.amin(l), np.amax(l) + 1))) def diagMean(a, d=1, mask=None): #obtain average intensity value across a diagonal if mask is None: mask = np.ones_like(a) num = diagSum(mask, d) return np.divide(diagSum(a, d), num, where=num != 0) #filter definitions gx = np.array([[-1., 0., 1.], [-2., 0., 2.], [-1., 0., 1.]]) gy = np.transpose(gx) r2 = .5**.5 sxy = gx * r2 + gy * r2 syx = gx * r2 - gy * r2 w, h = img.shape #gaussian blur blur = nd.gaussian_filter(img, sigma=1) #set up sobel filters dx = nd.convolve(blur, weights=gx) #horizontal dy = nd.convolve(blur, weights=gy) #vertical dxy = nd.convolve(blur, weights=sxy) # dyx = nd.convolve(blur, weights=syx) # #step 1. sobel filters sob1 = np.hypot(dx, dy) sob2 = np.hypot(dx, -dy) sob3 = np.hypot(dxy, dyx) sob4 = np.hypot(dxy, -dyx) #step 2. blur bm1 = nd.gaussian_filter(np.abs(sob1).astype(img.dtype), sigma=10) bm2 = nd.gaussian_filter(np.abs(sob2).astype(img.dtype), sigma=10) bm3 = nd.gaussian_filter(np.abs(sob3).astype(img.dtype), sigma=10) bm4 = nd.gaussian_filter(np.abs(sob4).astype(img.dtype), sigma=10) #step 3. intensity along line through middle of region a1 = diagMean(bm1, -1) a2 = diagMean(bm2) a3 = bm3.mean(1, keepdims=False) a4 = bm4.mean(0, keepdims=False) #step 4. replace pixels with average intensity along line aa1 = np.tile(a1, (w + h - 2, 1)) aa1 = np.reshape(aa1, (-1, w + h - 2))[-w:, :h] aa2 = np.tile(a2, (w + h, 1)) aa2 = np.reshape(aa2, (-1, w + h))[:w, :h] aa3 = np.tile(a3, (h, 1)).T aa4 = np.tile(a4, (w, 1)) #step 5. sum up images a12 = aa1 * aa2**2 a34 = aa3 * aa4**2 a1234 = a12 + a34 a1234 = nd.gaussian_filter(a1234, sigma=25) p = nd.minimum_position(a1234) #center return p
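# Hedged illustration of the diagonal-labelling trick used by the diagSum
# helper above: each pixel is labelled with its (i + j) diagonal index, and
# ndimage.sum then accumulates intensities per diagonal in a single call.
import numpy as np
from scipy import ndimage as nd

a = np.arange(12, dtype=float).reshape(3, 4)
w, h = a.shape
labels = np.tile(range(h), (w, 1)) + np.tile(range(w), (h, 1)).T   # i + j per pixel
diag_sums = nd.sum(a, labels, list(range(labels.min(), labels.max() + 1)))
print(diag_sums)    # one sum per anti-diagonal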
def test_extrema02(): labels = np.array([1, 2]) for type in types: input = np.array([[1, 2], [3, 4]], type) output1 = ndimage.extrema(input, labels=labels, index=2) output2 = ndimage.minimum(input, labels=labels, index=2) output3 = ndimage.maximum(input, labels=labels, index=2) output4 = ndimage.minimum_position(input, labels=labels, index=2) output5 = ndimage.maximum_position(input, labels=labels, index=2) assert_equal(output1, (output2, output3, output4, output5))
def calculate_nema_uniformity(imagearray, resamplesize, results, domecorrection=False): """ Wrapper function for flood calculation according to NEMA recommendations. Input: imagearray : NxN numpy input array resamplesize : downsample size (MxM), typically (64,64) results : instance of PluginData-class (container for generated results) domecorrection : Perform dome correction? [True, False] Dome correction can be used for intrinsic uniformity measurements (e.g. with Siemens camera's) where the distance between point-source and detector is smaller than 5 times the maximum FOV dimension. """ if domecorrection == True: print 'Performing dome-correction...' imagearray = dome_correction(imagearray) IUufov = 0 IUcfov = 0 DUxufov = 0 DUyufov = 0 DUxcfov = 0 DUycfov = 0 imshape = np.shape(imagearray) try: ufov, cfov = nema_data_preprocess(imagearray, resamplesize) except: print "warning: could not preprocess ufov, cfov" ufov, cfov = np.ones((resamplesize)) ufov.fill_value = 0 cfov.fill_value = 0 #unifcalc = lambda arr: 100*(ma.max(arr) - ma.min(arr))/(ma.max(arr) + ma.min(arr)) unifxy_min = lambda arr: ndimage.minimum_position(arr) unifxy_max = lambda arr: ndimage.maximum_position(arr) IUufov = 100 * unifcalc(ufov) IUufov_min = unifxy_min(ufov) IUufov_max = unifxy_max(ufov) IUcfov = 100 * unifcalc(cfov) IUcfov_min = unifxy_min(cfov) IUcfov_max = unifxy_max(cfov) DUxufov_val, DUyufov_val, DUxufov_coord, DUyufov_coord = diff_data(ufov) DUxcfov_val, DUycfov_val, DUxcfov_coord, DUycfov_coord = diff_data(cfov) output = DUxufov_val, DUyufov_val, DUxufov_coord, DUyufov_coord, DUxcfov_val, DUycfov_val, DUxcfov_coord, DUycfov_coord, IUufov, IUcfov, ufov, cfov return output
def test_extrema01(): "extrema 1" labels = np.array([1, 0], bool) for type in types: input = np.array([[1, 2], [3, 4]], type) output1 = ndimage.extrema(input, labels=labels) output2 = ndimage.minimum(input, labels=labels) output3 = ndimage.maximum(input, labels=labels) output4 = ndimage.minimum_position(input, labels=labels) output5 = ndimage.maximum_position(input, labels=labels) assert_equal(output1, (output2, output3, output4, output5))
def test_minimum_position07(): "minimum position 7" labels = [1, 2, 3, 4] for type in types: input = np.array([[5, 4, 2, 5], [3, 7, 0, 2], [1, 5, 1, 1]], type) output = ndimage.minimum_position(input, labels, [2, 3]) assert_equal(output[0], (0, 1)) assert_equal(output[1], (1, 2))
def test_extrema01(): labels = np.array([1, 0], bool) for type in types: input = np.array([[1, 2], [3, 4]], type) output1 = ndimage.extrema(input, labels=labels) output2 = ndimage.minimum(input, labels=labels) output3 = ndimage.maximum(input, labels=labels) output4 = ndimage.minimum_position(input, labels=labels) output5 = ndimage.maximum_position(input, labels=labels) assert_equal(output1, (output2, output3, output4, output5))
def calculate_nema_uniformity (imagearray, resamplesize, results, domecorrection=False): """ Wrapper function for flood calculation according to NEMA recommendations. Input: imagearray : NxN numpy input array resamplesize : downsample size (MxM), typically (64,64) results : instance of PluginData-class (container for generated results) domecorrection : Perform dome correction? [True, False] Dome correction can be used for intrinsic uniformity measurements (e.g. with Siemens camera's) where the distance between point-source and detector is smaller than 5 times the maximum FOV dimension. """ if domecorrection == True: print 'Performing dome-correction...' imagearray = dome_correction(imagearray) IUufov = 0 IUcfov = 0 DUxufov = 0 DUyufov = 0 DUxcfov = 0 DUycfov = 0 imshape = np.shape(imagearray) try: ufov, cfov = nema_data_preprocess(imagearray,resamplesize) except: print "warning: could not preprocess ufov, cfov" ufov, cfov = np.ones((resamplesize)) ufov.fill_value=0 cfov.fill_value=0 #unifcalc = lambda arr: 100*(ma.max(arr) - ma.min(arr))/(ma.max(arr) + ma.min(arr)) unifxy_min = lambda arr: ndimage.minimum_position(arr) unifxy_max = lambda arr: ndimage.maximum_position(arr) IUufov = 100*unifcalc(ufov) IUufov_min = unifxy_min(ufov) IUufov_max = unifxy_max(ufov) IUcfov = 100*unifcalc(cfov) IUcfov_min = unifxy_min(cfov) IUcfov_max = unifxy_max(cfov) DUxufov_val,DUyufov_val, DUxufov_coord, DUyufov_coord = diff_data(ufov) DUxcfov_val,DUycfov_val, DUxcfov_coord, DUycfov_coord = diff_data(cfov) output = DUxufov_val, DUyufov_val, DUxufov_coord, DUyufov_coord, DUxcfov_val, DUycfov_val, DUxcfov_coord, DUycfov_coord, IUufov, IUcfov, ufov, cfov return output
def test_extrema04(): labels = [1, 2, 0, 4] for type in types: input = np.array([[5, 4, 2, 5], [3, 7, 8, 2], [1, 5, 1, 1]], type) output1 = ndimage.extrema(input, labels, [1, 2]) output2 = ndimage.minimum(input, labels, [1, 2]) output3 = ndimage.maximum(input, labels, [1, 2]) output4 = ndimage.minimum_position(input, labels, [1, 2]) output5 = ndimage.maximum_position(input, labels, [1, 2]) assert_array_almost_equal(output1[0], output2) assert_array_almost_equal(output1[1], output3) assert_array_almost_equal(output1[2], output4) assert_array_almost_equal(output1[3], output5)
def find_vortices(cloud, hp = 3, lp = 30, thresh = -0.5, rad = 3, \ showplots = True): bp = bandpass(cloud, hp, lp) th = minfilt_thresh(bp, thresh, rad) limg, numvort = ndi.label(th) vpts = ndi.minimum_position(bp,limg, index = range(1, numvort+1)) if showplots: plt.figure(100) plt.clf() plt.imshow(bp) for point in vpts: plt.plot(point[1], point[0], 'x', ms = 10, mec = 'white', mew = 2) plt.axis([0, bp.shape[0], 0, bp.shape[1]]) return numvort, vpts
def test_extrema02(): "extrema 2" labels = np.array([1, 2]) for type in types: input = np.array([[1, 2], [3, 4]], type) output1 = ndimage.extrema(input, labels=labels, index=2) output2 = ndimage.minimum(input, labels=labels, index=2) output3 = ndimage.maximum(input, labels=labels, index=2) output4 = ndimage.minimum_position(input, labels=labels, index=2) output5 = ndimage.maximum_position(input, labels=labels, index=2) assert_equal(output1, (output2, output3, output4, output5))
def test_extrema03(): labels = np.array([[1, 2], [2, 3]]) for type in types: input = np.array([[1, 2], [3, 4]], type) output1 = ndimage.extrema(input, labels=labels, index=[2, 3, 8]) output2 = ndimage.minimum(input, labels=labels, index=[2, 3, 8]) output3 = ndimage.maximum(input, labels=labels, index=[2, 3, 8]) output4 = ndimage.minimum_position(input, labels=labels, index=[2, 3, 8]) output5 = ndimage.maximum_position(input, labels=labels, index=[2, 3, 8]) assert_array_almost_equal(output1[0], output2) assert_array_almost_equal(output1[1], output3) assert_array_almost_equal(output1[2], output4) assert_array_almost_equal(output1[3], output5)
def get_seeds(centers, embeddings, mask) -> np.ndarray:
    '''determine the location of the closest point in embeddings (within the
    mask) to each center.

    '''
    # We use KDTree to find the closest center for each
    # foreground location, then we search for the minimum within
    # this partition.
    tree = KDTree(centers, leaf_size=1)
    dist, ind = tree.query(embeddings[mask], k=1)

    cond_dist = np.zeros(mask.shape)
    cond_dist[mask] = dist.squeeze()
    regions = np.zeros(mask.shape)
    regions[mask] = ind.squeeze() + 1

    return minimum_position(cond_dist,
                            labels=regions,
                            index=list(range(1, len(centers) + 1)))
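# Reduced, hedged example of the get_seeds pattern above: build a distance
# image, partition it with integer region labels, and let minimum_position
# return one seed coordinate per region. The arrays here are toy data.
import numpy as np
from scipy.ndimage import minimum_position

dist = np.array([[0.9, 0.2, 0.7],
                 [0.4, 0.8, 0.1],
                 [0.5, 0.6, 0.3]])
regions = np.array([[1, 1, 2],
                    [1, 2, 2],
                    [1, 1, 2]])
seeds = minimum_position(dist, labels=regions, index=[1, 2])
print(seeds)    # [(0, 1), (1, 2)] -- one (row, col) seed per region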
def find_all_nconnected(data, thres, find_segs=False, diag=False):
    """
    Find all negatively connected segments in data.

    Parameters
    ----------
    data : ndarray
        Data to perform segmentation on.
    thres : float
        Threshold, below this nodes are considered noise.
    find_segs : bool, optional
        True to return a list of slices for the segments.
    diag : bool
        True to include diagonal neighbors in connection.

    Returns
    -------
    locations : list
        List of indices of the local minimum in each segment.
    seg_slices : list, optional
        List of slices which extract a given segment from the data.
        Only returned when find_segs is True.

    """
    # build structure array for defining feature connections
    ndim = data.ndim
    if diag:
        structure = ndimage.generate_binary_structure(ndim, ndim)
    else:
        structure = ndimage.generate_binary_structure(ndim, 1)

    # determine labeled array of segments
    labels, num_features = label_nconnected(data, thres, structure)

    # determine locations of segment minima
    locations = ndimage.minimum_position(data, labels, range(1, num_features + 1))

    # find segment slices if requested and return
    if find_segs is True:
        seg_slices = ndimage.find_objects(labels)
        return locations, seg_slices
    else:
        return locations
def find_all_upward(data, thres, find_segs=False, diag=False):
    """
    Find all upward connected segments in data

    Parameters:

    * data       Array of data to perform segmentation on.
    * thres      Threshold, below this nodes are considered noise.
    * find_segs  True or False to return a list of slices for the segments.
    * diag       True or False to include diagonal neighbors in connection.

    Returns: locations, [seg_slices]

    * locations   List of indices of local maximum in each segment.
    * seg_slices  List of slices which extract a given segment from the
                  data. Only returned when find_segs is True.

    """
    # build structure array for defining feature connections
    ndim = data.ndim
    if diag:
        structure = ndimage.generate_binary_structure(ndim, ndim)
    else:
        structure = ndimage.generate_binary_structure(ndim, 1)

    # determine labeled array of segments
    labels, num_features = label_upward(data, thres, structure)

    # determine locations of segment maxima
    locations = ndimage.minimum_position(data, labels, range(1, num_features + 1))

    # find segment slices if requested and return
    if find_segs == True:
        seg_slices = ndimage.find_objects(labels)
        return locations, seg_slices
    else:
        return locations
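# Hedged, standalone version of the recipe used by the two helpers above:
# label the connected segments past a threshold, then take one
# minimum_position per label to locate the deepest point of each segment.
# The thresholding here is a plain comparison, not the custom labellers above.
import numpy as np
from scipy import ndimage

data = np.array([[ 0.0, -1.0, -3.0,  0.0,  0.0],
                 [ 0.0, -2.0,  0.0,  0.0, -5.0],
                 [ 0.0,  0.0,  0.0,  0.0, -4.0]])
thres = -0.5
labels, num_features = ndimage.label(data < thres)
locations = ndimage.minimum_position(data, labels, range(1, num_features + 1))
print(locations)    # [(0, 2), (1, 4)] -- deepest point of each segment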
def fit_labeled_srcs(fmap, labels, inds, extended_threshold=1.1):
    # Our normal fit is based on the center of mass. This is
    # probably a bit suboptimal for faint sources, but those will
    # be pretty bad anyway.
    pos_com = np.array(ndimage.center_of_mass(fmap, labels, inds))
    amp_com = fmap.at(pos_com.T, unit="pix")
    negative = amp_com < 0
    # We compare these amplitudes with the maxima. Normally these
    # will be very close. If they are significantly different, then
    # this is probably an extended object. To allow the description
    # of these objects as a sum of sources, it's most robust to use
    # the maximum positions and amplitudes here.
    pos_max = np.array(ndimage.maximum_position(fmap, labels, inds))
    amp_max = np.array(ndimage.maximum(fmap, labels, inds))
    pos_min = np.array(ndimage.minimum_position(fmap, labels, inds))
    amp_min = np.array(ndimage.minimum(fmap, labels, inds))
    pos_ext = pos_max.copy()
    pos_ext[negative] = pos_min[negative]
    amp_ext = amp_max.copy()
    amp_ext[negative] = amp_min[negative]
    pos, amp = pos_com.copy(), amp_com.copy()
    extended = np.abs(amp_ext) > np.abs(amp_com) * extended_threshold
    pos[extended] = pos_max[extended]
    amp[extended] = amp_max[extended]
    return pos, amp
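# Hedged toy comparison of the two position estimates weighed against each
# other above: for a compact symmetric blob the centre of mass and the
# maximum position agree, while a lopsided blob pulls the centre of mass
# away from the peak. Plain numpy array here, not the map object used above.
import numpy as np
from scipy import ndimage

fmap = np.array([[0.0, 0.0, 0.0, 0.0, 0.0],
                 [0.0, 1.0, 1.0, 8.0, 0.0],
                 [0.0, 1.0, 1.0, 1.0, 0.0],
                 [0.0, 0.0, 0.0, 0.0, 0.0]])
labels, _ = ndimage.label(fmap > 0)
pos_com = ndimage.center_of_mass(fmap, labels, 1)    # intensity-weighted centre
pos_max = ndimage.maximum_position(fmap, labels, 1)  # location of the peak
print(pos_com, pos_max)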
def test_minimum_position03(): input = np.array([[5, 4, 2, 5], [3, 7, 0, 2], [1, 5, 1, 1]], bool) output = ndimage.minimum_position(input) assert_equal(output, (1, 2))
def save_imgmap(inarray,vertmax,hormax,filename): """ Converts input-array to image, with superposed the 5-pixel row/column with highest DU, the minimal and maximum pixel coordinates. input: inarray = ufov or cfov vertmax = first coordinate of 5-pixel column (vertical) hormax = first coordinate of 5-pixel row (horizontal) filename = png-filename of output """ wi,he = np.shape(inarray) rgb = np.zeros((wi, he, 3), dtype=np.uint8) _max=np.max(inarray) _min=np.min(inarray)*0.95 grayvalue = np.round(255/(_max-_min)*(ma.filled(inarray,fill_value=0)-_min)) grayvalue[grayvalue<0]=0 rgb[:,:, 0] = grayvalue rgb[:,:, 1] = grayvalue rgb[:,:, 2] = grayvalue rgb[vertmax[0]:vertmax[0]+5,vertmax[1],:] = (255,150,50) # orange rgb[hormax[0],hormax[1]:hormax[1]+5,:] = (0,200,0) # green minpos=ndimage.minimum_position(inarray) maxpos=ndimage.maximum_position(inarray) rgb[minpos[0], minpos[1], :] = (0,0,255) # blue rgb[maxpos[0], maxpos[1], :] = (255,0,0) # red rgb = np.zeros((wi, he, 3), dtype=np.uint8) _max=np.max(inarray) _min=np.min(inarray)*0.95 grayvalue = np.round(255/(_max-_min)*(ma.filled(inarray,fill_value=0)-_min)) grayvalue[grayvalue<0]=0 rgb[:,:, 0] = grayvalue rgb[:,:, 1] = grayvalue rgb[:,:, 2] = grayvalue rgb = rgb.repeat(16, axis=0).repeat(16, axis=1) # d=line thickness of roi d=4 # 5 pixels in vertical direction, starting from 16*(vertmax[0],vertmax[1]) # left edge rgb[16*vertmax[0]:16*(vertmax[0]+5),16*vertmax[1]:16*vertmax[1]+d,:] = (255,150,50) # orange # right edge rgb[16*vertmax[0]:16*(vertmax[0]+5),16*(vertmax[1]+1)-d:16*(vertmax[1]+1),:] = (255,150,50) # orange # top edge rgb[16*vertmax[0]:16*vertmax[0]+d,16*vertmax[1]:16*(vertmax[1]+1)-1,:] = (255,150,50) # orange # bottom edge rgb[16*(vertmax[0]+5)-d:16*(vertmax[0]+5),16*vertmax[1]:16*(vertmax[1]+1)-1,:] = (255,150,50) # orange # 5 pixels in horizontal direction, starting from 16*(vertmax[0],vertmax[1]) # left edge rgb[16*hormax[0]:16*(hormax[0]+1),16*hormax[1]:16*hormax[1]+d,:] = (0,200,0) # green # right edge rgb[16*hormax[0]:16*(hormax[0]+1),16*(hormax[1]+5)-d:16*(hormax[1]+5),:] = (0,200,0) # green # top edge rgb[16*hormax[0]:16*hormax[0]+d,16*hormax[1]:16*(hormax[1]+5)-1,:] = (0,200,0) # green # bottom edge rgb[16*(hormax[0]+1)-d:16*(hormax[0]+1),16*hormax[1]:16*(hormax[1]+5)-1,:] = (0,200,0) # green minpos=ndimage.minimum_position(inarray) maxpos=ndimage.maximum_position(inarray) # position of lowest pixel value # left edge rgb[16*minpos[0]:16*(minpos[0]+1),16*minpos[1]:16*minpos[1]+d,:] = (0,0,255) # blue # right edge rgb[16*minpos[0]:16*(minpos[0]+1),16*(minpos[1]+1)-d:16*(minpos[1]+1),:] = (0,0,255) # blue # top edge rgb[16*minpos[0]:16*minpos[0]+d,16*minpos[1]:16*(minpos[1]+1)-1,:] = (0,0,255) # blue # bottom edge rgb[16*(minpos[0]+1)-d:16*(minpos[0]+1),16*minpos[1]:16*(minpos[1]+1)-1,:] = (0,0,255) # blue # position of highest pixel value # left edge rgb[16*maxpos[0]:16*(maxpos[0]+1),16*maxpos[1]:16*maxpos[1]+d,:] = (255,0,0) # red # right edge rgb[16*maxpos[0]:16*(maxpos[0]+1),16*(maxpos[1]+1)-d:16*(maxpos[1]+1),:] = (255,0,0) # red # top edge rgb[16*maxpos[0]:16*maxpos[0]+d,16*maxpos[1]:16*(maxpos[1]+1)-1,:] = (255,0,0) # red # bottom edge rgb[16*(maxpos[0]+1)-d:16*(maxpos[0]+1),16*maxpos[1]:16*(maxpos[1]+1)-1,:] = (255,0,0) # red #rgb[16*minpos[0]:16*(minpos[0]+1), 16*minpos[1]:16*(minpos[1]+1), :] = (0,0,255) # blue #rgb[16*maxpos[0]:16*(maxpos[0]+1), 16*maxpos[1]:16*(maxpos[1]+1), :] = (255,0,0) # red ''' rgb[vertmax[0]:vertmax[0]+5,vertmax[1],:] = (255,150,50) # orange rgb[hormax[0],hormax[1]:hormax[1]+5,:] = 
(0,200,0) # green minpos=ndimage.minimum_position(inarray) maxpos=ndimage.maximum_position(inarray) rgb[minpos[0], minpos[1], :] = (0,0,255) # blue rgb[maxpos[0], maxpos[1], :] = (255,0,0) # red ''' #imshow(rgb,interpolation='None') # truncate image # UL, LR = bounding_box(rgb[:,:,0]) # rgb = rgb[UL[0]:LR[0],UL[1]:LR[1]] pl.imsave(filename,ma.filled(rgb,fill_value=0))
import scipy.misc as misc
import scipy.ndimage as ndi

img = misc.ascent()
print(ndi.minimum(img))
print(ndi.minimum_position(img))
print(ndi.maximum(img))
print(ndi.maximum_position(img))
print(ndi.extrema(img))
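# A hedged companion to the snippet above: with labels and an index,
# ndimage.extrema bundles minimum, maximum, minimum_position and
# maximum_position into a single call, which is what the tests above verify.
import numpy as np
from scipy import ndimage as ndi

img = np.array([[1, 2], [3, 4]])
labels = np.array([[1, 1], [2, 2]])
mins, maxs, min_pos, max_pos = ndi.extrema(img, labels=labels, index=[1, 2])
print(mins, maxs, min_pos, max_pos)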
def __getitem__(self, ix): assert ix < self.__len__(), "Index OOB" # Establish a hook to the data. sampled_slice = self.data[ix] npz_ = np.load(sampled_slice) # Access the tensor using the _slice key. sample = npz_['_slice'] # assert sample.shape == (224, 224, 5), "shape mismatch" t1 = sample[:, :, 0] t2 = sample[:, :, 1] t1ce = sample[:, :, 2] flair = sample[:, :, 3] op = np.uint8(sample[:, :, -1]) wt = np.expand_dims((op > 0).astype(np.float64), axis=-1) tc = np.expand_dims(np.logical_or(op == 1, op == 4).astype(np.float64), axis=-1) et = np.expand_dims((op == 4).astype(np.float64), axis=-1) op = np.concatenate([wt, tc, et], axis=-1) # assert op.shape == (3, 224, 224) #################### Transformations #################### if "rotate" in self.transform_dict.keys( ) and self.transform_dict["rotate"]: angle = np.random.choice( np.linspace(0., self.transform_dict["rotate"])) t1 = self.__rotate(t1, angle) t2 = self.__rotate(t2, angle) t1ce = self.__rotate(t1ce, angle) flair = self.__rotate(flair, angle) op = self.__rotate(op, angle) if "hflip" in self.transform_dict.keys( ) and self.transform_dict["hflip"]: # Flip with a probability. if np.random.rand() > 0.5: t1 = np.flip(t1, axis=1) t2 = np.flip(t2, axis=1) t1ce = np.flip(t1ce, axis=1) flair = np.flip(flair, axis=1) op = np.flip(op, axis=1) if "vflip" in self.transform_dict.keys( ) and self.transform_dict["vflip"]: # Flip with a probability. if np.random.rand() > 0.5: t1 = np.flip(t1, axis=0) t2 = np.flip(t2, axis=0) t1ce = np.flip(t1ce, axis=0) flair = np.flip(flair, axis=0) op = np.flip(op, axis=0) # Mark a rough circle around the contour ### TODO: Skip the circle, make a square labels, nb = ndimage.label(op, structure=structure) unique = np.unique(labels).shape[0] # Find centroids # centroids = np.array(ndimage.measurements.center_of_mass(op, labels, [i for i in range(1, unique+1)])) min_positions = np.array( ndimage.minimum_position(op, labels, [i for i in range(1, unique + 1)])) max_positions = np.array( ndimage.maximum_position(op, labels, [i for i in range(1, unique + 1)])) ''' centers = min_positions.copy() centers[:, :2] = (max_positions[:, :2] + min_positions[:, :2]) // 2 radii = np.maximum( np.linalg.norm(centers[:, :2] - min_positions[:, :2], axis=1), np.linalg.norm(centers[:, :2] - max_positions[:, :2], axis=1) ) # Draw circles ''' min_positions[:, :2] -= self.pad max_positions[:, :2] += self.pad mask = np.zeros_like(op) idx = np.arange(op.shape[0]) mask[idx, min_positions[:, 0]:max_positions[:, 0], min_positions[:, 1]:max_positions[:, 1], min_positions[:, 2]] = 1. op = np.transpose(op, axis=(2, 0, 1)) mask = np.transpose(mask, axis=(2, 0, 1)) # (224, 224) => (1, 224, 224) t1 = torch.from_numpy(t1.copy()).unsqueeze(0) t2 = torch.from_numpy(t2.copy()).unsqueeze(0) t1ce = torch.from_numpy(t1ce.copy()).unsqueeze(0) flair = torch.from_numpy(flair.copy()).unsqueeze(0) op = torch.from_numpy(op.copy()) mask = torch.from_numpy(mask.copy()) # ((4, 224, 224), (3, 224, 224)) # Return a tuple of two elements: a tuple of inputs and the output tensor. return (torch.cat([t1, t2, t1ce, flair], dim=0), op, mask)
def ariadne_run(self): # # The heuristic for matching synapses with neurites # # 0) Dilate the synapses # 1) Remove all interior pixels from synapses. # 2) Count synapse / neurite overlaps # 3) Pick two best neurites and discard synapses with < 2 # # Removing the interior pixels favors neurites with broad and # shallow contacts with synapses and disfavors something that # intersects a corner heavily. # # Synapses are sparse - we can perform a naive dilation of them # without worrying about running two of them together. # neuron_target = DestVolumeReader(self.neuron_seg_load_plan_path) synapse_target = DestVolumeReader(self.synapse_seg_load_plan_path) if self.transmitter_probability_map_load_plan_path == EMPTY_LOCATION: transmitter_target = None receptor_target = None else: transmitter_target = DestVolumeReader( self.transmitter_probability_map_load_plan_path) receptor_target = DestVolumeReader( self.receptor_probability_map_load_plan_path) synapse = synapse_target.imread() n_synapses = np.max(synapse) + 1 # # Use a rectangular structuring element for speed. # strel = np.ones((self.z_dilation * 2 + 1, self.xy_dilation * 2 + 1, self.xy_dilation * 2 + 1), bool) grey_dilation(synapse, footprint=strel, output=synapse, mode='constant', cval=0) if self.wants_edge_contact: # # Remove the interior (connected to self on 6 sides) # strel = np.array([[[False, False, False], [False, True, False], [False, False, False]], [[False, True, False], [True, True, True], [False, True, False]], [[False, False, False], [False, True, False], [False, False, False]]]) mask = \ grey_dilation( synapse, footprint=strel, mode='constant', cval=0) !=\ grey_erosion( synapse, footprint=strel, mode='constant', cval=255) else: mask = True # # Extract only the overlapping pixels from the neurons and synapses # neuron = neuron_target.imread() volume_mask = (synapse != 0) & (neuron != 0) & mask svoxels = synapse[volume_mask] nvoxels = neuron[volume_mask] if len(nvoxels) > 0: # # Make a matrix of counts of voxels in both synapses and neurons # then extract synapse / neuron matches # matrix = coo_matrix( (np.ones(len(nvoxels), int), (svoxels, nvoxels))) matrix.sum_duplicates() maxsynapses = matrix.shape[1] + 1 synapse_labels, neuron_labels = matrix.nonzero() counts = matrix.tocsr()[synapse_labels, neuron_labels].getA1() # # Filter neurons with too little overlap # mask = counts >= self.min_contact counts, neuron_labels, synapse_labels = [ _[mask] for _ in counts, neuron_labels, synapse_labels] # # Order by synapse label and -count to get the neurons with # the highest count first # order = np.lexsort((-counts, synapse_labels)) counts, neuron_labels, synapse_labels = \ [_[order] for _ in counts, neuron_labels, synapse_labels] first = np.hstack( [[True], synapse_labels[:-1] != synapse_labels[1:], [True]]) idx = np.where(first)[0] per_synapse_counts = idx[1:] - idx[:-1] # # Get rid of counts < 2 # mask = per_synapse_counts >= 2 if not np.any(mask): # another way to get nothing. self.report_empty_result() return idx = idx[:-1][mask] # # pick out the first and second most overlapping neurons and # their synapse. # neuron_1 = neuron_labels[idx] synapses = synapse_labels[idx] neuron_2 = neuron_labels[idx+1] if transmitter_target != None: # put transmitters first and receptors second. transmitter_probs = transmitter_target.imread() receptor_probs = receptor_target.imread() # # Start by making a matrix to transform the map. 
# neuron_mapping = np.hstack(([0], neuron_1, neuron_2)) matrix = coo_matrix( (np.arange(len(idx)*2) + 1, (np.hstack((neuron_1, neuron_2)), np.hstack((synapses, synapses)))), shape=(np.max(nvoxels)+1, np.max(svoxels) + 1)).tocsr() # # Convert the neuron / synapse map to the mapping labels # mapping_labeling = matrix[nvoxels, svoxels] # # Score each synapse / label overlap on both the transmitter # and receptor probabilities # areas = np.bincount(mapping_labeling.A1) transmitter_score = np.bincount( mapping_labeling.A1, transmitter_probs[volume_mask]) receptor_score = np.bincount( mapping_labeling.A1, receptor_probs[volume_mask]) total_scores = (transmitter_score - receptor_score) / areas score_1 = total_scores[1:len(idx)+1] score_2 = total_scores[len(idx)+1:] tscore_1 = transmitter_score[1:len(idx)+1] tscore_2 = transmitter_score[len(idx)+1:] rscore_1 = receptor_score[1:len(idx)+1] rscore_2 = receptor_score[len(idx)+1:] # # Flip the scores and neuron assignments if score_2 > score_1 # flippers = score_2 > score_1 score_1[flippers], score_2[flippers] = \ score_2[flippers], score_1[flippers] neuron_1[flippers], neuron_2[flippers] = \ neuron_2[flippers], neuron_1[flippers] # # Compute the integrated transmitter score + receptor score # per synapse. # flippers_mult = flippers.astype(tscore_1.dtype) synapse_score = \ (tscore_1 + rscore_2) * (1 - flippers_mult) + \ (tscore_2 + rscore_1) * flippers_mult else: synapse_score = np.zeros(len(neuron_1)) # # Recompute the centroids of the synapses based on where they # intersect the edge of neuron_1. This is closer to what people # do when they annotate synapses. # edge_z, edge_y, edge_x = np.where( (synapse != 0) & (grey_dilation(neuron, size=3) != grey_erosion(neuron, size=3))) areas = np.bincount(synapse[edge_z, edge_y, edge_x], minlength=maxsynapses) xs, ys, zs = [ np.bincount(synapse[edge_z, edge_y, edge_x], _, minlength=maxsynapses) for _ in edge_x, edge_y, edge_z] xc = xs[synapses] / areas[synapses] yc = ys[synapses] / areas[synapses] zc = zs[synapses] / areas[synapses] # # Record the synapse coords. "synapse_centers" goes from 1 to # N so that is why we subtract 1 below. 
# synapse_center_dict = dict( x=xc.tolist(), y=yc.tolist(), z=zc.tolist()) # # Compute the point in n1 that is closest to the synapse center # n1_per_synapse = np.zeros(maxsynapses, np.uint32) n1_per_synapse[synapses] = neuron_1 idx_per_synapse = np.zeros(maxsynapses, np.uint32) idx_per_synapse[synapses] = np.arange(len(synapses)) n1z, n1y, n1x = np.where(n1_per_synapse[synapse] == neuron) n1_idxs = idx_per_synapse[synapse[n1z, n1y, n1x]] d = np.sqrt(((n1z - zc[n1_idxs]) * self.z_nm)**2 + ((n1y - yc[n1_idxs]) * self.y_nm)**2 + ((n1x - xc[n1_idxs]) * self.x_nm)**2) n1_idx = np.array( minimum_position(np.abs(d - self.distance_from_centroid), synapse[n1z, n1y, n1x], synapses)).flatten() xn1, yn1, zn1 = n1x[n1_idx], n1y[n1_idx], n1z[n1_idx] n1_center_dict = \ dict(x=xn1.tolist(), y=yn1.tolist(), z=zn1.tolist()) n2_per_synapse = np.zeros(maxsynapses, np.uint32) n2_per_synapse[synapses] = neuron_2 n2z, n2y, n2x = np.where(n2_per_synapse[synapse] == neuron) n2_idxs = idx_per_synapse[synapse[n2z, n2y, n2x]] d = np.sqrt(((n2z - zc[n2_idxs]) * self.z_nm)**2 + ((n2y - yc[n2_idxs]) * self.y_nm)**2 + ((n2x - xc[n2_idxs]) * self.x_nm)**2) n2_idx = np.array( minimum_position(np.abs(d - self.distance_from_centroid), synapse[n2z, n2y, n2x], synapses)).flatten() xn2, yn2, zn2 = n2x[n2_idx], n2y[n2_idx], n2z[n2_idx] n2_center_dict = \ dict(x=xn2.tolist(), y=yn2.tolist(), z=zn2.tolist()) else: synapse_score = np.zeros(0, np.float32) neuron_1 = neuron_2 = synapses = np.zeros(0, int) score_1 = score_2 = np.zeros(0) synapse_center_dict = n1_center_dict = n2_center_dict = \ dict(x=[], y=[], z=[]) volume = dict(x=neuron_target.volume.x, y=neuron_target.volume.y, z=neuron_target.volume.z, width=neuron_target.volume.width, height=neuron_target.volume.height, depth=neuron_target.volume.depth) result = dict(volume=volume, neuron_1=neuron_1.tolist(), neuron_2=neuron_2.tolist(), synapse=synapses.tolist(), score=synapse_score.tolist(), synapse_centers=synapse_center_dict, neuron_1_centers=n1_center_dict, neuron_2_centers=n2_center_dict) if transmitter_target != None: result["transmitter_score_1"] = score_1.tolist() result["transmitter_score_2"] = score_2.tolist() with self.output().open("w") as fd: json.dump(result, fd)
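# Hedged sketch of the trick used above to pick one representative voxel per
# synapse: feed minimum_position a flat array of |distance - target| values
# together with a parallel array of synapse labels, so that it returns, for
# each requested label, the index of the voxel whose distance is closest to
# the target. All values below are made up.
import numpy as np
from scipy.ndimage import minimum_position

d = np.array([10.0, 35.0, 28.0, 60.0, 31.0])   # distance of each voxel to its centroid
labels = np.array([1, 1, 2, 2, 2])             # synapse id of each voxel
target = 30.0
idx = np.array(minimum_position(np.abs(d - target), labels, [1, 2])).flatten()
print(idx)    # [1 4] -- per synapse, the voxel whose distance is nearest to 30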
#ddd=[]
#method2
#for i in dos:
#    f=map(lambda x:255 if x==0 else x, i)
#    ddd.append(f)
#dos=ddd
#dos=dos.reshape(width*height,1)
#method3 longest time
#dos=numpy.array(list((map(lambda x:255 if x==0 else x, dos))))
#dos=dos.reshape(width,height)
#dos=map(lambda x:255 if x==0 else x, dos.flat)
#method4
#fastest
#dos=numpy.array(dos).reshape(width,height)
dos[dos == 0] = 255  #method5 #fastest  # after this substitution the minimum can be found; otherwise the minimum is 0
a = ndi.minimum_position(dos)  # find the position of the minimum point and draw a circle around it
c = list(a)
c[0], c[1] = c[1], c[0]
a = tuple(c)
cir1 = Circle(a, radius=19, color='r', fill=False, alpha=0.5)
b = ndi.maximum_position(dist_on_skel)  # find the position of the maximum point and draw a circle around it
c = list(b)
c[0], c[1] = c[1], c[0]
b = tuple(c)
cir2 = Circle(b, radius=19, color='y', fill=False, alpha=0.5)
fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(8, 4), sharex=True,
def Get_Flow_Dirn_using_9x9_window(DEM, Flow_dirn_arr , pit_list): """ Given a DEM the function returns the flow direction matrix, it also erodes the DEM as per requirement to direct flow Args: DEM: Digital Elevation Model (2-D array of floats) Flow_dirn_arr: Empty flow direction array having all entries zero (2-D array of tuples (0,0) ) pit_list :Empty list used to hold pits ( Empty List ) Result: pit_list: pits found using 9x9 window ( List of tuple (int, int) ) Flow_dirn_arr: 2-D array containing Flow Directions (2-D array of tuples (int, int) ) DEM: Modified DEM after little erosion during flow direction assignment (2-D array of floats) """ (x_len,y_len) = DEM.shape pit_list = [] #Get the flow direction using 9x9 window for i in range(4,x_len-4): for j in range(4,y_len-4):#loop index start from 4 and ends at len-4 to handle boundary cases if Flow_dirn_arr[i][j][0] == 0 and Flow_dirn_arr[i][j][1] == 0: (x,y) = ndimage.minimum_position( DEM[i - 4:i + 5, j - 4:j + 5] ) # (x,y) is the position of minimum element in 9x9 window (min_x,min_y) = (x - 4, y - 4) # (min_x,min_y) is the position of minimum element in 9x9 window with origin # shifted to the central pixel # 9x9 window can be divided into 4 quadrants ,7 lines of code below takes care of 3 # other quadrants in 9x9 window, since we are writing a general code for first quadrant, # where q and p are non-negative integers sign_x = 1 # indicative of +ve x value sign_y = 1 # indicative of +ve y value if min_x < 0: sign_x = -1 if min_y < 0: sign_y = -1 (p, q) = (abs(min_x),abs(min_y)) Elev_diff = (DEM[i][j] - DEM[p*sign_x + i][q*sign_y + j])/max(p,q) # difference in elevation of the central pixel and the pixel with minimum elevation # in 9x9 window, required for the purpose of erosion #Different cases in the 9x9 window has been handled in various if-else statements if p == 0: if q == 1: Flow_dirn_arr[i][j] = (i + 0*sign_x,j + 1*sign_y) elif q == 2: Flow_dirn_arr[i][j] = (i + 0*sign_x,j + 1*sign_y) Flow_dirn_arr[i + 0*sign_x][j + 1*sign_y] = (0*sign_x + i ,2*sign_y + j) DEM[i + 0*sign_x][j + 1*sign_y] = DEM[0*sign_x + i][2*sign_y + j] + Elev_diff elif q == 3: Flow_dirn_arr[i][j] = (i + 0*sign_x,j + 1*sign_y) Flow_dirn_arr[i + 0*sign_x][j + 1*sign_y] = (0*sign_x + i ,2*sign_y + j) Flow_dirn_arr[i + 0*sign_x][j + 2*sign_y] = (0*sign_x + i, 3*sign_y + j) DEM[i + 0*sign_x][j + 1*sign_y] = DEM[0*sign_x + i][3*sign_y + j] + 2*Elev_diff DEM[i + 0*sign_x][j + 2*sign_y] = DEM[0*sign_x + i][3*sign_y + j] + Elev_diff elif q == 4: Flow_dirn_arr[i][j] = (i + 0*sign_x,j + 1*sign_y) Flow_dirn_arr[i + 0*sign_x][j + 1*sign_y] = (0*sign_x + i ,2*sign_y + j) Flow_dirn_arr[i + 0*sign_x][j + 2*sign_y] = (0*sign_x + i, 3*sign_y + j) Flow_dirn_arr[i + 0*sign_x][j + 3*sign_y] = (0*sign_x + i, 4*sign_y + j) DEM[i + 0*sign_x][j + 1*sign_y] = DEM[0*sign_x + i][4*sign_y + j] + 3*Elev_diff DEM[i + 0*sign_x][j + 2*sign_y] = DEM[0*sign_x + i][4*sign_y + j] + 2*Elev_diff DEM[i + 0*sign_x][j + 3*sign_y] = DEM[0*sign_x + i][4*sign_y + j] + Elev_diff if p == 1: if q == 0: Flow_dirn_arr[i][j] = (i + 1*sign_x ,j + 0*sign_y ) elif q == 1: Flow_dirn_arr[i][j] = (i + 1*sign_x ,j + 1*sign_y ) elif q == 2: Flow_dirn_arr[i][j] = (i + 1*sign_x ,j + 1*sign_y ) Flow_dirn_arr[i + 1*sign_x][j + 1*sign_y] = (i + 1*sign_x ,j + 2*sign_y ) DEM[i + 1*sign_x][j + 1*sign_y] = DEM[i + 1*sign_x][j + 2*sign_y] + Elev_diff elif q == 3: Flow_dirn_arr[i][j] = (i + 1*sign_x ,j + 1*sign_y ) Flow_dirn_arr[i + 1*sign_x][j + 1*sign_y] = (i + 1*sign_x ,j + 2*sign_y ) Flow_dirn_arr[i + 1*sign_x][j + 2*sign_y] 
= (i + 1*sign_x ,j + 3*sign_y ) DEM[i + 1*sign_x][j + 1*sign_y] = DEM[i + 1*sign_x ][j + 3*sign_y ] + 2*Elev_diff DEM[i + 1*sign_x][j + 2*sign_y] = DEM[i + 1*sign_x ][j + 3*sign_y ] + Elev_diff elif q == 4: Flow_dirn_arr[i][j] = (i + 1*sign_x ,j + 1*sign_y ) Flow_dirn_arr[i + 1*sign_x][j + 1*sign_y] = (i + 1*sign_x ,j + 2*sign_y ) Flow_dirn_arr[i + 1*sign_x][j + 2*sign_y] = (i + 1*sign_x ,j + 3*sign_y ) Flow_dirn_arr[i + 1*sign_x][j + 3*sign_y] = (i + 1*sign_x ,j + 4*sign_y ) DEM[i + 1*sign_x][j + 1*sign_y] = DEM[i + 1*sign_x ][j + 4*sign_y ] + 3*Elev_diff DEM[i + 1*sign_x][j + 2*sign_y] = DEM[i + 1*sign_x ][j + 4*sign_y ] + 2*Elev_diff DEM[i + 1*sign_x][j + 3*sign_y] = DEM[i + 1*sign_x ][j + 4*sign_y ] + Elev_diff if p == 2: if q == 0: Flow_dirn_arr[i][j] = (i + 1*sign_x ,j + 0*sign_y ) Flow_dirn_arr[i + 1*sign_x][j + 0*sign_y] = (i + 2*sign_x ,j + 0*sign_y ) DEM[i + 1*sign_x][j + 0*sign_y] = DEM[i + 2*sign_x ][j + 0*sign_y] + Elev_diff elif q == 1: Flow_dirn_arr[i][j] = (i + 1*sign_x ,j + 1*sign_y ) Flow_dirn_arr[i + 1*sign_x][j + 1*sign_y] = (i + 2*sign_x ,j + 1*sign_y ) DEM[i + 1*sign_x][j + 1*sign_y] = DEM[i + 2*sign_x ][j + 1*sign_y] + Elev_diff elif q == 2: Flow_dirn_arr[i][j] = (i + 1*sign_x ,j + 1*sign_y ) Flow_dirn_arr[i + 1*sign_x][j + 1*sign_y] = (i + 2*sign_x ,j + 2*sign_y ) DEM[i + 1*sign_x][j + 1*sign_y] = DEM[i + 2*sign_x ][j + 2*sign_y ] + Elev_diff elif q == 3: Flow_dirn_arr[i][j] = (i + 1*sign_x ,j + 1*sign_y ) Flow_dirn_arr[i + 1*sign_x][j + 1*sign_y] = (i + 2*sign_x ,j + 2*sign_y ) Flow_dirn_arr[i + 2*sign_x][j + 2*sign_y] = (i + 2*sign_x ,j + 3*sign_y ) DEM[i + 1*sign_x][j + 1*sign_y] = DEM[i + 2*sign_x ][j + 3*sign_y ] + 2*Elev_diff DEM[i + 2*sign_x][j + 2*sign_y] = DEM[i + 2*sign_x ][j + 3*sign_y ] + Elev_diff elif q == 4: Flow_dirn_arr[i][j] = (i + 1*sign_x ,j + 1*sign_y ) Flow_dirn_arr[i + 1*sign_x][j + 1*sign_y] = (i + 2*sign_x ,j + 2*sign_y ) Flow_dirn_arr[i + 2*sign_x][j + 2*sign_y] = (i + 2*sign_x ,j + 3*sign_y ) Flow_dirn_arr[i + 2*sign_x][j + 3*sign_y] = (i + 2*sign_x ,j + 4*sign_y ) DEM[i + 1*sign_x][j + 1*sign_y] = DEM[i + 2*sign_x ][j + 4*sign_y ] + 3*Elev_diff DEM[i + 2*sign_x][j + 2*sign_y] = DEM[i + 2*sign_x ][j + 4*sign_y ] + 2*Elev_diff DEM[i + 2*sign_x][j + 3*sign_y] = DEM[i + 2*sign_x ][j + 4*sign_y ] + Elev_diff if p == 3: if q == 0: Flow_dirn_arr[i][j] = (i + 1*sign_x ,j + 0*sign_y ) Flow_dirn_arr[i + 1*sign_x][j + 0*sign_y] = (i + 2*sign_x ,j + 0*sign_y ) Flow_dirn_arr[i + 2*sign_x][j + 0*sign_y] = (i + 3*sign_x ,j + 0*sign_y ) DEM[i + 1*sign_x][j + 0*sign_y] = DEM[i + 3*sign_x][j + 0*sign_y] + 2*Elev_diff DEM[i + 2*sign_x][j + 0*sign_y] = DEM[i + 3*sign_x][j + 0*sign_y] + Elev_diff elif q == 1: Flow_dirn_arr[i][j] = (i + 1*sign_x ,j + 1*sign_y ) Flow_dirn_arr[i + 1*sign_x][j + 1*sign_y] = (i + 2*sign_x ,j + 1*sign_y ) Flow_dirn_arr[i + 2*sign_x][j + 1*sign_y] = (i + 3*sign_x ,j + 1*sign_y ) DEM[i + 1*sign_x][j + 1*sign_y] = DEM[i + 3*sign_x][j + 1*sign_y] + 2*Elev_diff DEM[i + 2*sign_x][j + 1*sign_y] = DEM[i + 3*sign_x][j + 1*sign_y] + Elev_diff elif q == 2: Flow_dirn_arr[i][j] = (i + 1*sign_x ,j + 1*sign_y ) Flow_dirn_arr[i + 1*sign_x][j + 1*sign_y] = (i + 2*sign_x ,j + 2*sign_y ) Flow_dirn_arr[i + 2*sign_x][j + 2*sign_y] = (i + 3*sign_x ,j + 2*sign_y ) DEM[i + 1*sign_x][j + 1*sign_y] = DEM[i + 3*sign_x][j + 2*sign_y] + 2*Elev_diff DEM[i + 2*sign_x][j + 2*sign_y] = DEM[i + 3*sign_x][j + 2*sign_y] + Elev_diff elif q == 3: Flow_dirn_arr[i][j] = (i + 1*sign_x ,j + 1*sign_y ) Flow_dirn_arr[i + 1*sign_x][j + 1*sign_y] = (i + 2*sign_x ,j + 2*sign_y 
) Flow_dirn_arr[i + 2*sign_x][j + 2*sign_y] = (i + 3*sign_x ,j + 3*sign_y ) DEM[i + 1*sign_x][j + 1*sign_y] = DEM[i + 3*sign_x ][j + 3*sign_y ] + 2*Elev_diff DEM[i + 2*sign_x][j + 2*sign_y] = DEM[i + 3*sign_x ][j + 3*sign_y ] + Elev_diff elif q == 4: Flow_dirn_arr[i][j] = (i + 1*sign_x ,j + 1*sign_y ) Flow_dirn_arr[i + 1*sign_x][j + 1*sign_y] = (i + 2*sign_x ,j + 2*sign_y ) Flow_dirn_arr[i + 2*sign_x][j + 2*sign_y] = (i + 3*sign_x ,j + 3*sign_y ) Flow_dirn_arr[i + 3*sign_x][j + 3*sign_y] = (i + 3*sign_x ,j + 4*sign_y ) DEM[i + 1*sign_x][j + 1*sign_y] = DEM[i + 3*sign_x ][j + 4*sign_y ] + 3*Elev_diff DEM[i + 2*sign_x][j + 2*sign_y] = DEM[i + 3*sign_x ][j + 4*sign_y ] + 2*Elev_diff DEM[i + 3*sign_x][j + 3*sign_y] = DEM[i + 3*sign_x ][j + 4*sign_y ] + Elev_diff if p == 4: if q == 0: Flow_dirn_arr[i][j] = (i + 1*sign_x ,j + 0*sign_y ) Flow_dirn_arr[i + 1*sign_x][j + 0*sign_y] = (i + 2*sign_x ,j + 0*sign_y ) Flow_dirn_arr[i + 2*sign_x][j + 0*sign_y] = (i + 3*sign_x ,j + 0*sign_y ) Flow_dirn_arr[i + 3*sign_x][j + 0*sign_y] = (i + 4*sign_x ,j + 0*sign_y ) DEM[i + 1*sign_x][j + 0*sign_y] = DEM[i + 4*sign_x ][j + 0*sign_y ] + 3*Elev_diff DEM[i + 2*sign_x][j + 0*sign_y] = DEM[i + 4*sign_x ][j + 0*sign_y ] + 2*Elev_diff DEM[i + 3*sign_x][j + 0*sign_y] = DEM[i + 4*sign_x ][j + 0*sign_y ] + Elev_diff elif q == 1: Flow_dirn_arr[i][j] = (i + 1*sign_x ,j + 1*sign_y ) Flow_dirn_arr[i + 1*sign_x][j + 1*sign_y] = (i + 2*sign_x ,j + 1*sign_y ) Flow_dirn_arr[i + 2*sign_x][j + 1*sign_y] = (i + 3*sign_x ,j + 1*sign_y ) Flow_dirn_arr[i + 3*sign_x][j + 1*sign_y] = (i + 4*sign_x ,j + 1*sign_y ) DEM[i + 1*sign_x][j + 1*sign_y] = DEM[i + 4*sign_x ][j + 1*sign_y ] + 3*Elev_diff DEM[i + 2*sign_x][j + 1*sign_y] = DEM[i + 4*sign_x ][j + 1*sign_y ] + 2*Elev_diff DEM[i + 3*sign_x][j + 1*sign_y] = DEM[i + 4*sign_x ][j + 1*sign_y ] + Elev_diff elif q == 2: Flow_dirn_arr[i][j] = (i + 1*sign_x ,j + 1*sign_y ) Flow_dirn_arr[i + 1*sign_x][j + 1*sign_y] = (i + 2*sign_x ,j + 2*sign_y ) Flow_dirn_arr[i + 2*sign_x][j + 2*sign_y] = (i + 3*sign_x ,j + 2*sign_y ) Flow_dirn_arr[i + 3*sign_x][j + 2*sign_y] = (i + 4*sign_x ,j + 2*sign_y ) DEM[i + 1*sign_x][j + 1*sign_y] = DEM[i + 4*sign_x ][j + 2*sign_y ] + 3*Elev_diff DEM[i + 2*sign_x][j + 2*sign_y] = DEM[i + 4*sign_x ][j + 2*sign_y ] + 2*Elev_diff DEM[i + 3*sign_x][j + 2*sign_y] = DEM[i + 4*sign_x ][j + 2*sign_y ] + Elev_diff elif q == 3: Flow_dirn_arr[i][j] = (i + 1*sign_x ,j + 1*sign_y ) Flow_dirn_arr[i + 1*sign_x][j + 1*sign_y] = (i + 2*sign_x ,j + 2*sign_y ) Flow_dirn_arr[i + 2*sign_x][j + 2*sign_y] = (i + 3*sign_x ,j + 3*sign_y ) Flow_dirn_arr[i + 3*sign_x][j + 3*sign_y] = (i + 4*sign_x ,j + 3*sign_y ) DEM[i + 1*sign_x][j + 1*sign_y] = DEM[i + 4*sign_x ][j + 3*sign_y ] + 3*Elev_diff DEM[i + 2*sign_x][j + 2*sign_y] = DEM[i + 4*sign_x ][j + 3*sign_y ] + 2*Elev_diff DEM[i + 3*sign_x][j + 3*sign_y] = DEM[i + 4*sign_x ][j + 3*sign_y ] + Elev_diff elif q == 4: Flow_dirn_arr[i][j] = (i + 1*sign_x ,j + 1*sign_y ) Flow_dirn_arr[i + 1*sign_x][j + 1*sign_y] = (i + 2*sign_x ,j + 2*sign_y ) Flow_dirn_arr[i + 2*sign_x][j + 2*sign_y] = (i + 3*sign_x ,j + 3*sign_y ) Flow_dirn_arr[i + 3*sign_x][j + 3*sign_y] = (i + 4*sign_x ,j + 4*sign_y ) DEM[i + 1*sign_x][j + 1*sign_y] = DEM[i + 4*sign_x ][j + 4*sign_y ] + 3*Elev_diff DEM[i + 2*sign_x][j + 2*sign_y] = DEM[i + 4*sign_x ][j + 4*sign_y ] + 2*Elev_diff DEM[i + 3*sign_x][j + 3*sign_y] = DEM[i + 4*sign_x ][j + 4*sign_y ] + Elev_diff if p == 0 and q == 0: pit_list.append((i,j)) return (pit_list, Flow_dirn_arr,DEM)
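# Hedged miniature of the 9x9-window step in the flow-direction routine above:
# take the minimum_position of a local window and shift it so that it becomes
# an offset relative to the central pixel, which is what drives the direction
# assignment. The DEM here is random toy data.
import numpy as np
from scipy import ndimage

DEM = np.random.rand(12, 12)
i, j = 6, 6                                    # central pixel
window = DEM[i - 4:i + 5, j - 4:j + 5]         # 9x9 neighbourhood
x, y = ndimage.minimum_position(window)        # position inside the window
min_x, min_y = x - 4, y - 4                    # offset from the centre
print(min_x, min_y)   # in (-4..4, -4..4); (0, 0) would mean the centre is a pit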
def save_imgmap(inarray, vertmax, hormax, filename): """ Converts input-array to image, with superposed the 5-pixel row/column with highest DU, the minimal and maximum pixel coordinates. input: inarray = ufov or cfov vertmax = first coordinate of 5-pixel column (vertical) hormax = first coordinate of 5-pixel row (horizontal) filename = png-filename of output """ wi, he = np.shape(inarray) rgb = np.zeros((wi, he, 3), dtype=np.uint8) _max = np.max(inarray) _min = np.min(inarray) * 0.95 grayvalue = np.round(255 / (_max - _min) * (ma.filled(inarray, fill_value=0) - _min)) grayvalue[grayvalue < 0] = 0 rgb[:, :, 0] = grayvalue rgb[:, :, 1] = grayvalue rgb[:, :, 2] = grayvalue rgb[vertmax[0]:vertmax[0] + 5, vertmax[1], :] = (255, 150, 50) # orange rgb[hormax[0], hormax[1]:hormax[1] + 5, :] = (0, 200, 0) # green minpos = ndimage.minimum_position(inarray) maxpos = ndimage.maximum_position(inarray) rgb[minpos[0], minpos[1], :] = (0, 0, 255) # blue rgb[maxpos[0], maxpos[1], :] = (255, 0, 0) # red rgb = np.zeros((wi, he, 3), dtype=np.uint8) _max = np.max(inarray) _min = np.min(inarray) * 0.95 grayvalue = np.round(255 / (_max - _min) * (ma.filled(inarray, fill_value=0) - _min)) grayvalue[grayvalue < 0] = 0 rgb[:, :, 0] = grayvalue rgb[:, :, 1] = grayvalue rgb[:, :, 2] = grayvalue rgb = rgb.repeat(16, axis=0).repeat(16, axis=1) # d=line thickness of roi d = 4 # 5 pixels in vertical direction, starting from 16*(vertmax[0],vertmax[1]) # left edge rgb[16 * vertmax[0]:16 * (vertmax[0] + 5), 16 * vertmax[1]:16 * vertmax[1] + d, :] = (255, 150, 50) # orange # right edge rgb[16 * vertmax[0]:16 * (vertmax[0] + 5), 16 * (vertmax[1] + 1) - d:16 * (vertmax[1] + 1), :] = (255, 150, 50 ) # orange # top edge rgb[16 * vertmax[0]:16 * vertmax[0] + d, 16 * vertmax[1]:16 * (vertmax[1] + 1) - 1, :] = (255, 150, 50 ) # orange # bottom edge rgb[16 * (vertmax[0] + 5) - d:16 * (vertmax[0] + 5), 16 * vertmax[1]:16 * (vertmax[1] + 1) - 1, :] = (255, 150, 50 ) # orange # 5 pixels in horizontal direction, starting from 16*(vertmax[0],vertmax[1]) # left edge rgb[16 * hormax[0]:16 * (hormax[0] + 1), 16 * hormax[1]:16 * hormax[1] + d, :] = (0, 200, 0) # green # right edge rgb[16 * hormax[0]:16 * (hormax[0] + 1), 16 * (hormax[1] + 5) - d:16 * (hormax[1] + 5), :] = (0, 200, 0 ) # green # top edge rgb[16 * hormax[0]:16 * hormax[0] + d, 16 * hormax[1]:16 * (hormax[1] + 5) - 1, :] = (0, 200, 0) # green # bottom edge rgb[16 * (hormax[0] + 1) - d:16 * (hormax[0] + 1), 16 * hormax[1]:16 * (hormax[1] + 5) - 1, :] = (0, 200, 0) # green minpos = ndimage.minimum_position(inarray) maxpos = ndimage.maximum_position(inarray) # position of lowest pixel value # left edge rgb[16 * minpos[0]:16 * (minpos[0] + 1), 16 * minpos[1]:16 * minpos[1] + d, :] = (0, 0, 255) # blue # right edge rgb[16 * minpos[0]:16 * (minpos[0] + 1), 16 * (minpos[1] + 1) - d:16 * (minpos[1] + 1), :] = (0, 0, 255) # blue # top edge rgb[16 * minpos[0]:16 * minpos[0] + d, 16 * minpos[1]:16 * (minpos[1] + 1) - 1, :] = (0, 0, 255) # blue # bottom edge rgb[16 * (minpos[0] + 1) - d:16 * (minpos[0] + 1), 16 * minpos[1]:16 * (minpos[1] + 1) - 1, :] = (0, 0, 255) # blue # position of highest pixel value # left edge rgb[16 * maxpos[0]:16 * (maxpos[0] + 1), 16 * maxpos[1]:16 * maxpos[1] + d, :] = (255, 0, 0) # red # right edge rgb[16 * maxpos[0]:16 * (maxpos[0] + 1), 16 * (maxpos[1] + 1) - d:16 * (maxpos[1] + 1), :] = (255, 0, 0) # red # top edge rgb[16 * maxpos[0]:16 * maxpos[0] + d, 16 * maxpos[1]:16 * (maxpos[1] + 1) - 1, :] = (255, 0, 0) # red # bottom edge rgb[16 * (maxpos[0] + 1) - 
d:16 * (maxpos[0] + 1), 16 * maxpos[1]:16 * (maxpos[1] + 1) - 1, :] = (255, 0, 0) # red #rgb[16*minpos[0]:16*(minpos[0]+1), 16*minpos[1]:16*(minpos[1]+1), :] = (0,0,255) # blue #rgb[16*maxpos[0]:16*(maxpos[0]+1), 16*maxpos[1]:16*(maxpos[1]+1), :] = (255,0,0) # red ''' rgb[vertmax[0]:vertmax[0]+5,vertmax[1],:] = (255,150,50) # orange rgb[hormax[0],hormax[1]:hormax[1]+5,:] = (0,200,0) # green minpos=ndimage.minimum_position(inarray) maxpos=ndimage.maximum_position(inarray) rgb[minpos[0], minpos[1], :] = (0,0,255) # blue rgb[maxpos[0], maxpos[1], :] = (255,0,0) # red ''' #imshow(rgb,interpolation='None') # truncate image # UL, LR = bounding_box(rgb[:,:,0]) # rgb = rgb[UL[0]:LR[0],UL[1]:LR[1]] pl.imsave(filename, ma.filled(rgb, fill_value=0))
def _getSingleLength2Bound( self, segments, id_, boundaries, boundaryIds, distance, structEl, line='straight', position=False): """ Calculate length of a given segment that contacts exactly two boundaries. The length is calculated as a shortest path between a contact point with one boundary, a segment point lying on the middle layer between the boundaries and a contact point on the other boundary. If the line mode is 'straight', the length is calculated as a smallest straight (Euclidean) distance between points on the two contact regions. Otherwise, in the 'mid' or 'mid-seg' line modes, the length is calculated as a smallest sum of distances between a 'central' and two contact points. A central point has to belong to the intersection of the segment and a central layer formed exactly in the middle between the two boundaries. In other words, the sum of distances is minimized over all contact and mid points. """ # alias b_ids = boundaryIds # restrict to a subarray that contains current segment and boundaries region = (segments.data == id_) \ | ((boundaries.data == b_ids[0]) | (boundaries.data == b_ids[1])) inset = ndimage.find_objects(region)[0] local_seg = Segment(data=segments.data[inset], copy=True, ids=[id_], clean=True) local_bound = Segment(data=boundaries.data[inset], copy=True, ids=b_ids, clean=True) # make contacts if (distance == 'b2b') or (distance == 'boundary'): dilated = ndimage.binary_dilation( input=local_seg.data==id_, structure=structEl) contact_1 = dilated & (local_bound.data == b_ids[0]) contact_2 = dilated & (local_bound.data == b_ids[1]) elif (distance == 'c2c') or (distance == 'contact'): dilated_1 = ndimage.binary_dilation( input=local_bound.data==b_ids[0], structure=structEl) contact_1 = dilated_1 & (local_seg.data == id_) dilated_2 = ndimage.binary_dilation( input=local_bound.data==b_ids[1], structure=structEl) contact_2 = dilated_2 & (local_seg.data == id_) elif (distance == 'b2c'): dilated_1 = ndimage.binary_dilation( input=local_seg.data==id_, structure=structEl) contact_1 = dilated_1 & (local_bound.data == b_ids[0]) dilated_2 = ndimage.binary_dilation( input=local_bound.data==b_ids[1], structure=structEl) contact_2 = dilated_2 & (local_seg.data == id_) elif (distance == 'c2b'): dilated_1 = ndimage.binary_dilation( input=local_bound.data==b_ids[0], structure=structEl) contact_1 = dilated_1 & (local_seg.data == id_) dilated_2 = ndimage.binary_dilation( input=local_seg.data==id_, structure=structEl) contact_2 = dilated_2 & (local_bound.data == b_ids[1]) else: raise ValueError( "Argument distance: " + str(distance) + " was not understood." 
+ "Defined values are 'b2b', 'boundary', 'c2', 'contact', " + "'b2c' and 'c2c'.") # distances from contacts 1 if (~contact_1 > 0).all(): # workaround for scipy bug 1089 raise ValueError("Can't calculate distance_function ", "(no background)") else: dist_1 = ndimage.distance_transform_edt(input=~contact_1) if line == 'straight': # get straight length length = ndimage.minimum(input=dist_1, labels=contact_2) # get position if position: pos_2 = ndimage.minimum_position(input=dist_1, labels=contact_2) point_2 = numpy.zeros_like(contact_2) point_2[pos_2] = 1 if (~point_2 > 0).all(): # workaround for scipy bug 1089 raise ValueError("Can't calculate distance_function ", "(no background)") else: dist_2 = ndimage.distance_transform_edt(input=~point_2) pos_1 = ndimage.minimum_position(input=dist_2, labels=contact_1) return length, pos_1, pos_2 return length elif (line == 'mid') or (line == 'mid-seg'): if (~contact_2 > 0).all(): # workaround for scipy bug 1089 raise ValueError("Can't calculate distance_function ", "(no background)") else: dist_2 = ndimage.distance_transform_edt(input=~contact_2) # make layers if (line == 'mid'): layers, lay_dist = local_bound.makeLayersBetween( bound_1=b_ids[0], bound_2=b_ids[1], mask=0, between='min') elif (line == 'mid-seg'): layers, lay_dist = local_bound.makeLayersBetween( bound_1=b_ids[0], bound_2=b_ids[1], mask=local_seg.data, between='min') # make sure the middle layer id is at least 1 if lay_dist <= 1: half = 1 else: half = int(numpy.rint(lay_dist / 2)) # keep only the middle layer(s) layers.keep(ids=[half]) middle = (local_seg.data == id_) & (layers.data > 0) # min sum of distances to both contacts length = ndimage.minimum(input=dist_1+dist_2, labels=middle) # find positions of points used to calculate length if position: # position of the point on the middle layer having min distance mid_position = ndimage.minimum_position(input=dist_1+dist_2, labels=middle) # distances to the mid point mid_point = numpy.zeros_like(contact_1) mid_point[mid_position] = 1 if (~mid_point > 0).all(): # workaround for scipy bug 1089 raise ValueError("Can't calculate distance_function ", "(no background)") else: mid_dist = ndimage.distance_transform_edt(input=~mid_point) # positions of points having min distances to contacts pos_1 = ndimage.minimum_position(input=mid_dist, labels=contact_1) pos_2 = ndimage.minimum_position(input=mid_dist, labels=contact_2) return length, pos_1, pos_2 return length else: raise ValueError( "Line mode: " + line + " was not recognized. " + "Available line modes are 'straight' and 'mid'.")
def minimum_index(input, labels, index=None):
    if index is None:
        index = np.unique(labels)
    argmin = nd.minimum_position(input, labels, index)
    return np.asarray(argmin, dtype=int).reshape(len(argmin))
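# Hedged usage example for the wrapper above (it assumes numpy as np and
# scipy.ndimage as nd, as in the function body): with a 1-D signal and 1-D
# labels it returns one flat index per label value.
import numpy as np
import scipy.ndimage as nd

signal = np.array([4.0, 1.0, 3.0, 0.5, 2.0, 7.0])
labels = np.array([1, 1, 1, 2, 2, 2])
print(minimum_index(signal, labels, index=[1, 2]))   # [1 3]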