def get_orientation_motion(seg):
    brain_center = np.array(nd.center_of_mass((seg == 5).view(np.ndarray)), dtype='float32')
    heart_center = np.array(nd.center_of_mass((seg == 3).view(np.ndarray)), dtype='float32')
    left_lung = np.array(nd.center_of_mass((seg == 1).view(np.ndarray)), dtype='float')
    right_lung = np.array(nd.center_of_mass((seg == 2).view(np.ndarray)), dtype='float')

    u = brain_center - heart_center
    v = right_lung - left_lung
    u /= np.linalg.norm(u)
    v -= np.dot(v, u) * u
    v /= np.linalg.norm(v)
    w = np.cross(u, v)
    w /= np.linalg.norm(w)

    return (u.astype("float32"), v.astype("float32"), w.astype("float32"))
def rebin_data(self, grid, use_psf=True):
    """Calculate the center of mass of the grid and rebin so that the
    center pixel really is the center of the array.

    For this we do a 2-d interpolation on the grid.
    """
    a = psf_fitter.psffit(abs(grid), circle=False, rotate=1)
    xcen = a[2]
    ycen = a[2]
    xlen, ylen = grid.shape
    xval = arange(xlen)
    yval = arange(ylen)
    xint = interp1d(xval, self.xpos_abs)
    yint = interp1d(yval, self.ypos_abs)
    xintcen = self.xmax_pos - xint(xcen)
    yintcen = self.ymax_pos - yint(ycen)
    print(self.xmax_pos, xintcen, self.ymax_pos, yintcen)
    f_real = interp2d(self.xpos_rel, self.ypos_rel, real(grid))
    f_imag = interp2d(self.xpos_rel, self.ypos_rel, imag(grid))
    xnew = self.xpos_rel - xintcen
    ynew = self.ypos_rel - yintcen
    recen_grid = f_real(xnew, ynew) + 1j * f_imag(xnew, ynew)
    print(nd.center_of_mass(abs(recen_grid)))
    return recen_grid
def get_roi_center(roi_native_path, roi_mni_path):
    """Get ROI center of mass.

    Returns the coordinate in voxel space and in world space,
    as well as the actual center of mass.
    """
    # computations in native space
    if type(roi_native_path) is str:
        img = nib.load(roi_native_path)
    else:
        img = roi_native_path
    data = img.get_data()
    data = as_ndarray(data)
    my_map = data.copy()
    center_coords = ndimage.center_of_mass(np.abs(my_map))
    x_map, y_map, z_map = center_coords[:3]
    native_coords = np.asarray(coord_transform(x_map, y_map, z_map,
                                               img.get_affine())).tolist()
    voxel = [round(x) for x in center_coords]

    # computations in MNI space
    if type(roi_mni_path) is str:
        img = nib.load(roi_mni_path)
    else:
        img = roi_mni_path
    data = img.get_data()
    data = as_ndarray(data)
    my_map = data.copy()
    mni_center_coords = ndimage.center_of_mass(np.abs(my_map))
    x_map, y_map, z_map = mni_center_coords[:3]
    mni_coords = np.asarray(coord_transform(x_map, y_map, z_map,
                                            img.get_affine())).tolist()

    # returns voxel and true center-of-mass coords,
    # plus the rounded native- and MNI-space coords
    return (voxel[:3], center_coords[:3],
            [round(x) for x in native_coords],
            [round(x) for x in mni_coords])
def guess_center_nested(image, halfwidth=50):
    '''Guess the position of the central object in a two-step process.

    First, this function calculates the center of mass of the image. This
    works well if the central object is the only bright source, however
    even a moderately bright source that is far away can shift the center
    of mass of an image by a few pixels. To improve the first guess, the
    function selects a subimage with the half width ``halfwidth`` in a
    second step and calculates the center of mass of that subimage.

    Parameters
    ----------
    image : 2d np.array
        input image
    halfwidth : int
        half width of the subimage selected in the second step.

    Returns
    -------
    xm, ym : float
        x and y coordinates of the estimated position of the central object
    '''
    xm, ym = ndimage.center_of_mass(np.ma.masked_invalid(image))
    n = 2 * halfwidth + 1
    subimage, xmymsmall = extract_array(image, (n, n), (xm, ym),
                                        return_position=True)
    x1, y1 = ndimage.center_of_mass(np.ma.masked_invalid(subimage))
    # xmymsmall is the xm, ym position in the coordinates of subimage.
    # So, correct the initial (xm, ym) by delta(xmsmall, x1).
    return xm + (x1 - xmymsmall[0]), ym + (y1 - xmymsmall[1])
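A hedged usage sketch for the function above. The synthetic field, source positions, and brightnesses are illustrative assumptions (not part of the original code), and the call assumes guess_center_nested and its dependencies (ndimage, extract_array) are importable.

import numpy as np

# A bright central source plus a fainter off-centre source that would bias
# a plain whole-image centre of mass by a few pixels.
yy, xx = np.mgrid[0:401, 0:401]
image = 100.0 * np.exp(-((yy - 200.0)**2 + (xx - 210.0)**2) / 8.0)
image += 5.0 * np.exp(-((yy - 40.0)**2 + (xx - 60.0)**2) / 8.0)

xm, ym = guess_center_nested(image, halfwidth=50)  # expect values close to (200, 210)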
def find_albino_features(self, T, im):
    import scipy.ndimage as ndi
    binarized = zeros_like(T)
    binarized[T > self.albino_threshold] = True
    (labels, nlabels) = ndi.label(binarized)
    slices = ndi.find_objects(labels)
    intensities = []
    transform_means = []
    if len(slices) < 2:
        return (None, None)
    for s in slices:
        transform_means.append(mean(T[s]))
        intensities.append(mean(im[s]))
    sorted_transform_means = argsort(transform_means)
    candidate1 = sorted_transform_means[-1]
    candidate2 = sorted_transform_means[-2]
    c1_center = array(ndi.center_of_mass(im, labels, candidate1 + 1))
    c2_center = array(ndi.center_of_mass(im, labels, candidate2 + 1))
    if intensities[candidate1] > intensities[candidate2]:
        return (c2_center, c1_center)
    else:
        return (c1_center, c2_center)
def objectfeatures(img): """ values=objectfeatures(img) This implements the object features described in "Object Type Recognition for Automated Analysis of Protein Subcellular Location" by Ting Zhao, Meel Velliste, Michael V. Boland, and Robert F. Murphy in IEEE Transaction on Image Processing """ protimg = img.get("procprotein") dnaimg = img.channeldata.get("procdna", None) assert ( dnaimg is None or protimg.shape == dnaimg.shape ), "pymorph.objectfeatures: DNA image is not of same size as Protein image." labeled, N = ndimage.label(protimg, ones((3, 3))) if not N: return np.zeros((0, 11)) sofs = np.zeros((N, 11)) indices = np.arange(1, N + 1) if dnaimg is not None: dnacofy, dnacofx = ndimage.center_of_mass(dnaimg) bindna = dnaimg > 0 # According to the documentation, it shouldn't matter if indices is None, # but in my version of scipy.ndimage, you *have* to use indices. centers = ndimage.center_of_mass(protimg, labeled, indices) if N == 1: centers = list(centers) centers = np.asarray(centers) centers -= np.array((dnacofy, dnacofx)) centers **= 2 sofs[:, 1] = np.sqrt(centers.sum(1)) locations = ndimage.find_objects(labeled, N) sofs[:, 9] = ndimage.measurements.sum(protimg, labeled, indices) for obji in xrange(N): slice = locations[obji] binobj = (labeled[slice] == (obji + 1)).copy() protobj = protimg[slice] binskel = thin(binobj) objhull = convexhull(binobj) no_of_branch_points = fast_sum(find_branch_points(binskel)) hfeats = hullfeatures(binobj, objhull) sofs[obji, 0] = fast_sum(binobj) if dnaimg is not None: sofs[obji, 2] = fast_sum(binobj & bindna[slice]) sofs[obji, 3] = hfeats[2] sofs[obji, 4] = euler(binobj) sofs[obji, 5] = hfeats[1] sofs[obji, 6] = fast_sum(binskel) sofs[obji, 7] = hfeats[0] sofs[obji, 9] /= fast_sum(binskel * protobj) sofs[obji, 10] = no_of_branch_points sofs[:, 2] /= sofs[:, 0] sofs[:, 8] = sofs[:, 6] / sofs[:, 0] sofs[:, 10] /= sofs[:, 6] return sofs
def angles2transfo(image1, image2, angleX=0, angleY=0, angleZ=0):
    """
    Compute transformation matrix between 2 images from the angles in each direction.

    :Parameters:
        - `image1` (|SpatialImage|) -
        - `image2` (|SpatialImage|) -
        - `angleX` (int) - Rotation through angleX (degree)
        - `angleY` (int) - Rotation through angleY (degree)
        - `angleZ` (int) - Rotation through angleZ (degree)

    :Returns:
        - matrix (numpy array) - Transformation matrix
    """
    x = np.array(center_of_mass(image1))
    y = np.array(center_of_mass(image2))

    # Rx rotates the y-axis towards the z-axis
    thetaX = radians(angleX)
    Rx = np.zeros((3, 3))
    Rx[0, 0] = 1.
    Rx[1, 1] = Rx[2, 2] = cos(thetaX)
    Rx[1, 2] = -sin(thetaX)
    Rx[2, 1] = sin(thetaX)

    # Ry rotates the z-axis towards the x-axis
    thetaY = radians(angleY)
    Ry = np.zeros((3, 3))
    Ry[0, 0] = Ry[2, 2] = cos(thetaY)
    Ry[0, 2] = sin(thetaY)
    Ry[2, 0] = -sin(thetaY)
    Ry[1, 1] = 1.

    # Rz rotates the x-axis towards the y-axis
    thetaZ = radians(angleZ)
    Rz = np.zeros((3, 3))
    Rz[0, 0] = Rz[1, 1] = cos(thetaZ)
    Rz[1, 0] = sin(thetaZ)
    Rz[0, 1] = -sin(thetaZ)
    Rz[2, 2] = 1.

    # General rotation, plus the translation that maps the centre of mass of
    # image1 onto that of image2
    R = np.dot(np.dot(Rx, Ry), Rz)
    t = y - np.dot(R, x)

    matrix = np.zeros((4, 4))
    matrix[0:3, 0:3] = R
    matrix[0:3, 3] = t
    matrix[3, 3] = 1.  # homogeneous row only; do not overwrite R[2, 2]
    return matrix
def get_centers(seg):
    brain_center = np.array(nd.center_of_mass((seg == 2).view(np.ndarray)), dtype='float32')
    heart_center = np.array(nd.center_of_mass((seg == 5).view(np.ndarray)), dtype='float32')
    left_lung = np.array(nd.center_of_mass((seg == 3).view(np.ndarray)), dtype='float')
    right_lung = np.array(nd.center_of_mass((seg == 4).view(np.ndarray)), dtype='float')
    return brain_center, heart_center, left_lung, right_lung
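A minimal usage sketch for the helper above. The label convention (2 = brain, 5 = heart, 3/4 = lungs) is taken from the code; the volume shape, block positions, and values are made up for illustration.

import numpy as np
import scipy.ndimage as nd  # the function above refers to scipy.ndimage as `nd`

seg = np.zeros((32, 32, 32), dtype=np.int32)
seg[4:8, 4:8, 4:8] = 2        # "brain"
seg[20:24, 20:24, 20:24] = 5  # "heart"
seg[12:16, 4:8, 12:16] = 3    # "left lung"
seg[12:16, 24:28, 12:16] = 4  # "right lung"

brain, heart, left, right = get_centers(seg)
print(brain, heart, left, right)  # each is a 3-vector of voxel coordinates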
def align_heart(img,labels): BPD = get_BPD(30.0) CRL = get_CRL(30.0) brain_center = labels.ImageToWorld( np.array(nd.center_of_mass( (labels == 2).view(np.ndarray) ), dtype='float32')[::-1] ) heart_center = labels.ImageToWorld( np.array(nd.center_of_mass( (labels == 5).view(np.ndarray) ), dtype='float32')[::-1] ) lungs_center = labels.ImageToWorld( np.array(nd.center_of_mass(np.logical_or(labels == 3, labels == 4 ).view(np.ndarray) ), dtype='float32')[::-1] ) left_lung = labels.ImageToWorld( np.array(nd.center_of_mass( (labels == 3).view(np.ndarray) ), dtype='float')[::-1] ) right_lung = labels.ImageToWorld( np.array(nd.center_of_mass( (labels == 4).view(np.ndarray) ), dtype='float')[::-1] ) u = brain_center - heart_center #v = lungs_center - heart_center v = right_lung - left_lung u /= np.linalg.norm(u) v -= np.dot(v,u)*u v /= np.linalg.norm(v) w = np.cross(u,v) w /= np.linalg.norm(w) # v = np.cross(w,u) # v /= np.linalg.norm(v) header = img.get_header() header['orientation'][0] = u header['orientation'][1] = v header['orientation'][2] = w header['origin'][:3] = heart_center header['dim'][0] = CRL header['dim'][1] = CRL header['dim'][2] = CRL new_img = img.transform( target=header, interpolation="bspline" ) new_labels = labels.transform( target=header, interpolation="nearest" ) return new_img, new_labels
def com_dist(self):
    """
    This function calculates the euclidean distance between the centres of
    mass of the reference and segmentation.

    :return:
    """
    if self.flag_empty:
        return -1
    com_ref = ndimage.center_of_mass(self.ref)
    com_seg = ndimage.center_of_mass(self.seg)
    com_dist = np.sqrt(np.dot(np.square(np.asarray(com_ref) - np.asarray(com_seg)),
                              np.square(self.pixdim)))
    return com_dist
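The same distance written out as a standalone sketch; the masks and the assumed pixel spacing below are illustrative only.

import numpy as np
from scipy import ndimage

ref = np.zeros((10, 10)); ref[2:4, 2:4] = 1
seg = np.zeros((10, 10)); seg[6:8, 6:8] = 1
pixdim = np.array([1.5, 1.5])  # assumed in-plane spacing in mm

com_ref = np.asarray(ndimage.center_of_mass(ref))
com_seg = np.asarray(ndimage.center_of_mass(seg))
dist = np.sqrt(np.dot(np.square(com_ref - com_seg), np.square(pixdim)))
print(dist)  # Euclidean distance between the two centres of mass, in mm (~8.49 here)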
def shiftToCenter(infile, shiftfile, isEMAN=False):
    '''
    EMAN defines the rotation origin differently from other packages.
    Therefore, the map needs to be recentered according to the package
    after using EMAN proc3d rotation functions.
    '''
    # center of rotation for EMAN is not at length/2.
    if isEMAN:
        formatoffset = getEmanCenter()
        prefix = ''
    else:
        formatoffset = (0, 0, 0)
        prefix = 'non-'
    apDisplay.printMsg('Shifting map center for %sEMAN usage' % (prefix,))
    # Find center of mass of the density map
    a = mrc.read(infile)
    t = a.mean() + 2 * a.std()
    numpy.putmask(a, a >= t, t)
    numpy.putmask(a, a < t, 0)
    center = ndimage.center_of_mass(a)
    offset = (center[0] + formatoffset[0] - a.shape[0] / 2,
              center[1] + formatoffset[1] - a.shape[1] / 2,
              center[2] + formatoffset[2] - a.shape[2] / 2)
    offset = (-offset[0], -offset[1], -offset[2])
    apDisplay.printMsg('Shifting map center by (x,y,z)=(%.2f,%.2f,%.2f)'
                       % (offset[2], offset[1], offset[0]))
    # shift the map
    a = mrc.read(infile)
    a = ndimage.interpolation.shift(a, offset)
    mrc.write(a, shiftfile)
    h = mrc.readHeaderFromFile(infile)
    mrc.update_file_header(shiftfile, h)
def _hull_computations(imageproc, imagehull=None):
    # Just share code between the two functions below
    if imagehull is None:
        imagehull = convexhull(imageproc > 0)

    Ahull = _bwarea(imagehull)
    Phull = _bwarea(bwperim(imagehull))
    cofy, cofx = center_of_mass(imagehull)
    hull_mu00 = imgcentmoments(imagehull, 0, 0, cofy, cofx)
    hull_mu11 = imgcentmoments(imagehull, 1, 1, cofy, cofx)
    hull_mu02 = imgcentmoments(imagehull, 0, 2, cofy, cofx)
    hull_mu20 = imgcentmoments(imagehull, 2, 0, cofy, cofx)

    # Parameters of the 'image ellipse'
    # (the constant intensity ellipse with the same mass and
    # second order moments as the original image.)
    # From Prokop, RJ, and Reeves, AP. 1992. CVGIP: Graphical
    # Models and Image Processing 54(5):438-460
    hull_semimajor = sqrt((2 * (hull_mu20 + hull_mu02 +
                                sqrt((hull_mu20 - hull_mu02)**2 +
                                     4 * hull_mu11**2))) / hull_mu00)
    hull_semiminor = sqrt((2 * (hull_mu20 + hull_mu02 -
                                sqrt((hull_mu20 - hull_mu02)**2 +
                                     4 * hull_mu11**2))) / hull_mu00)
    return imagehull, Ahull, Phull, hull_semimajor, hull_semiminor
def calculate_life(self, cells, area, radius):
    y, x = nd.center_of_mass(cells)
    life = np.zeros(np.shape(area))
    for cell in np.transpose(area.nonzero()):
        d = np.sqrt(np.power(y - cell[0], 2) + np.power(x - cell[1], 2))
        life[cell[0], cell[1]] = (1.0 / d) * radius + random.random() * .5 + .1
    return life
def nucleicof(dnaimg, options=None):
    '''
    Returns a set of nuclear centres.
    '''
    labeled, N = labelnuclei(dnaimg)
    cofs = center_of_mass(dnaimg, labeled, range(1, N + 1))
    return cofs
def test_center_of_mass04():
    "center of mass 4"
    expected = [1, 1]
    for type in types:
        input = np.array([[0, 0], [0, 1]], type)
        output = ndimage.center_of_mass(input)
        assert_array_almost_equal(output, expected)
def test_center_of_mass08():
    "center of mass 8"
    labels = [1, 2]
    expected = [0.5, 1.0]
    input = np.array([[5, 2], [3, 1]], bool)
    output = ndimage.center_of_mass(input, labels, 2)
    assert_array_almost_equal(output, expected)
def test_center_of_mass05():
    "center of mass 5"
    expected = [0.5, 0.5]
    for type in types:
        input = np.array([[1, 1], [1, 1]], type)
        output = ndimage.center_of_mass(input)
        assert_array_almost_equal(output, expected)
def test_center_of_mass09():
    "center of mass 9"
    labels = [1, 2]
    expected = [(0.5, 0.0), (0.5, 1.0)]
    input = np.array([[1, 2], [1, 1]], bool)
    output = ndimage.center_of_mass(input, labels, [1, 2])
    assert_array_almost_equal(output, expected)
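For reference, the labels/index form exercised by the tests above can be reproduced directly; this small example is illustrative and not part of the test suite.

import numpy as np
from scipy import ndimage

img = np.ones((2, 2))
labels = np.array([[1, 2], [1, 2]])
# One (row, col) centre of mass per requested label:
print(ndimage.center_of_mass(img, labels, [1, 2]))  # [(0.5, 0.0), (0.5, 1.0)]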
def get_labels(self):
    """
    Find clusters and extract their centers and sizes.

    Returns
    -------
    self.labels : 'list'
        list of cluster labels
    self.centers : 'list'
        list of cluster centers
    self.sizes : 'list'
        list of cluster sizes
    """
    b_img = self.img_b
    label_im, nb_labels = ndimage.label(b_img)
    center = np.asarray(ndimage.center_of_mass(b_img, label_im,
                                               range(1, nb_labels + 1)))
    size = np.asarray(ndimage.sum(b_img, label_im,
                                  range(1, nb_labels + 1)))
    self.labels = label_im
    self.centers = center
    self.sizes = size
def getRealLabeledAreaCenter(image, labeled_image, indices, info):
    print("Getting real area and center")
    shape = numpy.shape(image)
    ones = numpy.ones(shape)
    area = nd.sum(ones, labels=labeled_image, index=indices)
    center = nd.center_of_mass(ones, labels=labeled_image, index=indices)
    ll = 0
    # nd.sum / nd.center_of_mass return scalars when a single index is given,
    # so wrap the results in lists before iterating.
    try:
        len(area)
    except:
        area = [area]
        center = [center]
    try:
        len(indices)
    except:
        indices = [indices]
    try:
        info.keys()
    except:
        offset = 1
    else:
        offset = 0
    for l in indices:
        info[l - offset][0] = area[ll]
        info[l - offset][4] = center[ll]
        ll += 1
    return info
def get_spec(fname, roi_start, roi_width=180, nchannels=2, force_start=False, **kwargs):
    """Return a SiPM spectrum using the centre-of-mass (cm) method as a cut.

    roi_start is the start of the region of interest, spanning roi_width channels.
    ref_cm is the reference center of mass; if None, the mean(cm) of all events
    will be calculated. dev_cm is the allowed deviation from ref_cm; if None,
    the std(cm) of all events will be calculated. nchannels is the number of
    DRS channels with data, either 1 or 2."""
    st, wd = roi_start, roi_width
    my_dtype = return_dtype(nchannels)
    if not force_start:
        st, ref_cm, dev_cm = find_start(fname, roi_start, roi_width, nchannels, **kwargs)
    else:
        cmsarr = cms_(fname, roi_start, roi_width, nchannels)
        cmhist = histogram(cmsarr, bins=512)
        ref_cm = cmhist[1][argmax(cmhist[0])]
        dev_cm = dev_cm_(cmsarr)
    with open(fname, 'r') as f:
        gen = (fromstring(event, my_dtype)[0][5]
               for event in event_generator(f, nchannels))
        specdata = [sum(event[st:st + wd]) for event in gen
                    if abs(center_of_mass(-event[st:st + wd])[0] - ref_cm) < dev_cm]
    return histogram(specdata, bins=2048)
def test_cmp_ndimage():
    R = (255 * np.random.rand(128, 256)).astype(np.uint16)
    R += np.arange(256)
    m0, m1 = mahotas.center_of_mass(R)
    n0, n1 = ndimage.center_of_mass(R)
    assert np.abs(n0 - m0) < 1.0
    assert np.abs(n1 - m1) < 1.0
def find_local_maxima(image, min_distance):
    """Find maxima in an image.

    Finds the highest-valued points in an image, such that each point is
    separated by at least min_distance. If there are flat regions that are
    all at a maximum, the center of mass of the region is reported. Large
    flat regions of more than min_distance in radius will be erroneously
    returned as maxima even if they are not. Further filtering should be
    performed to exclude these if needed.

    Returns the position of the maxima and the value at each maximum.

    Parameters:
        image: image of arbitrary dimensionality
        min_distance: maxima found will be at least this many pixels apart

    Returns:
        centroids: list of centers of each maxima
        values: image value at each maxima
    """
    image_max = ndimage.maximum_filter(image, size=2 * min_distance + 1, mode='constant')
    peak_mask = (image == image_max)
    # NB: some maxima might be marked by multiple contiguous pixels if the image
    # has "plateaus". So we need to label the mask and get the centroids
    # of each of the labeled regions.
    labeled_image, num_regions = ndimage.label(peak_mask)
    label_indices = numpy.arange(1, num_regions + 1)
    centroids = ndimage.center_of_mass(peak_mask, labeled_image, label_indices)
    values = ndimage.mean(image, labeled_image, label_indices)
    return numpy.array(centroids), values
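A hedged usage sketch for the function above: two well-separated Gaussian bumps in a synthetic image should each be reported once. The bump positions and widths are arbitrary assumptions.

import numpy as np

yy, xx = np.mgrid[0:100, 0:100]
image = (np.exp(-((yy - 30)**2 + (xx - 30)**2) / 50.0)
         + np.exp(-((yy - 70)**2 + (xx - 75)**2) / 50.0))

centroids, values = find_local_maxima(image, min_distance=10)
print(centroids)  # approximately [[30, 30], [70, 75]]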
def find_start_point(npa): print('finding start point') # print(npa.shape) len_y = npa.shape[1]-1 j = int() prev = 0 row = [] for i in range(len_y,int(0.8*float(len_y)),-1): row = npa[i,0:] if i<len_y: prev = npa[i+1,0:] if len(row[row>130]) and len(prev[prev>130]): j = i break try: st_pt = ndimage.center_of_mass(row)[0] except RuntimeWarning: print(row[row>130]) # print(j,st_pt) try: pts = ndimage.measurements.center_of_mass(npa[0:int(npa.shape[1]*0.6),0:]) except RuntimeWarning: print(npa[0:int(npa.shape[1]*0.6),0:]) # print(pts) ### Testing ### # img = im.fromarray(npa) # img.convert('RGB') # draw = imd.Draw(img) # draw.ellipse((pts[1]-20,pts[0]-20,pts[1]+20,pts[0]+20),fill="red") # # img.show() # del draw ################ cm_x = int(pts[1]) return (st_pt,j,cm_x)
def create_paths(pathimage): # shortest path through black then purple then cyan arr = np.transpose(pygame.surfarray.array3d(pathimage), [1,0,2]) maxval = arr.max() black = (((arr == 0).sum(axis=2)) == 3) print "found %d black pixels"%(black.sum()) purple = ((arr[:,:,0] == maxval) & (arr[:,:,1] == 0) & (arr[:,:,2] == maxval)) print "found %d purple pixels"%(purple.sum()) cyan = ((arr[:,:,0] == 0) & (arr[:,:,1] == maxval) & (arr[:,:,2] == maxval)) print "found %d cyan pixels"%(cyan.sum()) white = (((arr == arr.max()).sum(axis=2)) == 3) print "found %d white pixels"%(white.sum()) mask = black | purple | cyan | white black_blobs, black_count = ndi.label(black) purple_blobs, purple_count = ndi.label(purple) cyan_blobs, cyan_count = ndi.label(cyan) print black_count, purple_count, cyan_count try: black_centers, purple_distances, cyan_distances = cPickle.load(open("distances.pickle")) except: black_centers = ndi.center_of_mass(np.ones(black_blobs.shape), black_blobs, range(1, black_count + 1)) purple_distances = [masked_distance(purple_blobs == (b + 1), mask) for b in range(purple_count)] cyan_distances = [masked_distance(cyan_blobs == (b + 1), mask) for b in range(cyan_count)] cPickle.dump((black_centers, purple_distances, cyan_distances), open("distances.pickle", "w")) return black_centers, [purple_distances, cyan_distances]
def obj_params_with_offset(img, labels, aslice, label_idx):
    y_offset = aslice[0].start
    x_offset = aslice[1].start
    thumb = img[aslice]
    lb = labels[aslice]
    yc, xc = ndimage.center_of_mass(thumb, labels=lb, index=label_idx)
    br = thumb[lb == label_idx].sum()  # the intensity of the source
    return [br, xc + x_offset, yc + y_offset]
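A hedged usage sketch: label an image, then feed each slice from find_objects into obj_params_with_offset to recover flux and absolute centroid. The fake source and its values are illustrative.

import numpy as np
from scipy import ndimage

img = np.zeros((64, 64))
img[40:44, 10:14] = 3.0                      # a single fake source
labels, n = ndimage.label(img > 0)
slices = ndimage.find_objects(labels)
params = [obj_params_with_offset(img, labels, s, i + 1)
          for i, s in enumerate(slices)]
print(params)  # [[48.0, 11.5, 41.5]] -> [flux, x centroid, y centroid]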
def cms_(fname, roi_start, roi_width, nchannels=2):
    st, wd = roi_start, roi_width
    my_dtype = return_dtype(nchannels)
    with open(fname, 'r') as f:
        gen = (fromstring(event, my_dtype)[0][5]
               for event in event_generator(f, nchannels))
        cms = [center_of_mass(-event[st:st + wd])[0] for event in gen]
    return cms
def test_cmp_ndimage3():
    R = (255 * np.random.rand(32, 128, 8, 16)).astype(np.uint16)
    R += np.arange(16)
    m = mahotas.center_of_mass(R)
    n = ndimage.center_of_mass(R)
    p = slow_center_of_mass(R)
    assert np.abs(n - m).max() < 1.0
    assert np.abs(p - m).max() < 1.0
def com_ref(self):
    """
    This function calculates the centre of mass of the reference segmentation.

    :return:
    """
    return ndimage.center_of_mass(self.ref) * np.array(self.pixdim)
def metric_from_binarized(self, seg, ref):
    """
    :param seg: numpy array with binary mask from inferred segmentation
    :param ref: numpy array with binary mask from reference segmentation
    :return: dict of centers of mass in each axis
    """
    # One entry per axis, e.g. {'com_ref_X': ..., 'com_ref_Y': ..., 'com_ref_Z': ...}
    # (concatenating the raw float onto the string prefix would raise a TypeError).
    return {'com_ref_' + d: x
            for d, x in zip('XYZ', ndimage.center_of_mass(ref))}
def find_xyz_cut_coords(img, mask_img=None, activation_threshold=None): """ Find the center of the largest activation connected component. Parameters ----------- img : 3D Nifti1Image The brain map. mask_img : 3D Nifti1Image, optional An optional brain mask, provided mask_img should not be empty. activation_threshold : float, optional The lower threshold to the positive activation. If None, the activation threshold is computed using the 80% percentile of the absolute value of the map. Returns ------- x : float the x world coordinate. y : float the y world coordinate. z : float the z world coordinate. """ # if a pseudo-4D image or several images were passed (cf. #922), # we reduce to a single 3D image to find the coordinates img = check_niimg_3d(img) data = _safe_get_data(img) # when given image is empty, return (0., 0., 0.) if np.all(data == 0.): warnings.warn( "Given img is empty. Returning default cut_coords={0} instead.". format(DEFAULT_CUT_COORDS)) x_map, y_map, z_map = DEFAULT_CUT_COORDS return np.asarray(coord_transform(x_map, y_map, z_map, img.affine)).tolist() # Retrieve optional mask if mask_img is not None: mask_img = check_niimg_3d(mask_img) mask = _safe_get_data(mask_img) if not np.allclose(mask_img.affine, img.affine): raise ValueError( 'Mask affine: \n%s\n is different from img affine:' '\n%s' % (str(mask_img.affine), str(img.affine))) else: mask = None # To speed up computations, we work with partial views of the array, # and keep track of the offset offset = np.zeros(3) # Deal with masked arrays: if hasattr(data, 'mask'): not_mask = np.logical_not(data.mask) if mask is None: mask = not_mask else: mask *= not_mask data = np.asarray(data) # Get rid of potential memmapping data = as_ndarray(data) my_map = data.copy() if mask is not None: # check against empty mask if mask.sum() == 0.: warnings.warn( "Provided mask is empty. Returning center of mass instead.") cut_coords = ndimage.center_of_mass(np.abs(my_map)) + offset x_map, y_map, z_map = cut_coords return np.asarray(coord_transform(x_map, y_map, z_map, img.affine)).tolist() slice_x, slice_y, slice_z = ndimage.find_objects(mask)[0] my_map = my_map[slice_x, slice_y, slice_z] mask = mask[slice_x, slice_y, slice_z] my_map *= mask offset += [slice_x.start, slice_y.start, slice_z.start] # Testing min and max is faster than np.all(my_map == 0) if (my_map.max() == 0) and (my_map.min() == 0): return .5 * np.array(data.shape) if activation_threshold is None: activation_threshold = fast_abs_percentile(my_map[my_map != 0].ravel(), 80) try: eps = 2 * np.finfo(activation_threshold).eps except ValueError: # The above will fail for exact types, eg integers eps = 1e-15 mask = np.abs(my_map) > (activation_threshold - eps) # mask may be zero everywhere in rare cases if mask.max() == 0: return .5 * np.array(data.shape) mask = largest_connected_component(mask) slice_x, slice_y, slice_z = ndimage.find_objects(mask)[0] my_map = my_map[slice_x, slice_y, slice_z] mask = mask[slice_x, slice_y, slice_z] my_map *= mask offset += [slice_x.start, slice_y.start, slice_z.start] # For the second threshold, we use a mean, as it is much faster, # althought it is less robust second_threshold = np.abs(np.mean(my_map[mask])) second_mask = (np.abs(my_map) > second_threshold) if second_mask.sum() > 50: my_map *= largest_connected_component(second_mask) cut_coords = ndimage.center_of_mass(np.abs(my_map)) x_map, y_map, z_map = cut_coords + offset # Return as a list of scalars return np.asarray(coord_transform(x_map, y_map, z_map, img.affine)).tolist()
def extract_storm_patches(label_grid, data, x_grid, y_grid, times, dx=1, dt=1, patch_radius=16): """ After storms are labeled, this method extracts boxes of equal size centered on each storm from the grid and places them into STObjects. The STObjects contain intensity, location, and shape information about each storm at each timestep. Args: label_grid: 2D or 3D array output by label_storm_objects. data: 2D or 3D array used as input to label_storm_objects. x_grid: 2D array of x-coordinate data, preferably on a uniform spatial grid with units of length. y_grid: 2D array of y-coordinate data. times: List or array of time values, preferably as integers dx: grid spacing in same units as x_grid and y_grid. dt: period elapsed between times patch_radius: Number of grid points from center of mass to extract Returns: storm_objects: list of lists containing STObjects identified at each time. """ storm_objects = [] if len(label_grid.shape) == 3: ij_grid = np.indices(label_grid.shape[1:]) for t, time in enumerate(times): storm_objects.append([]) # object_slices = find_objects(label_grid[t], label_grid[t].max()) centers = list( center_of_mass(data[t], labels=label_grid[t], index=np.arange(1, label_grid[t].max() + 1))) if len(centers) > 0: for o, center in enumerate(centers): int_center = np.round(center).astype(int) obj_slice_buff = (slice(int_center[0] - patch_radius, int_center[0] + patch_radius), slice(int_center[1] - patch_radius, int_center[1] + patch_radius)) storm_objects[-1].append( STObject(data[t][obj_slice_buff], np.where( label_grid[t][obj_slice_buff] == o + 1, 1, 0), x_grid[obj_slice_buff], y_grid[obj_slice_buff], ij_grid[0][obj_slice_buff], ij_grid[1][obj_slice_buff], time, time, dx=dx, step=dt)) if t > 0: dims = storm_objects[-1][-1].timesteps[0].shape storm_objects[-1][-1].estimate_motion( time, data[t - 1], dims[1], dims[0]) else: ij_grid = np.indices(label_grid.shape) storm_objects.append([]) centers = list( center_of_mass(data, labels=label_grid, index=np.arange(1, label_grid.max() + 1))) if len(centers) > 0: for o, center in enumerate(centers): int_center = np.round(center).astype(int) obj_slice_buff = (slice(int_center[0] - patch_radius, int_center[0] + patch_radius), slice(int_center[1] - patch_radius, int_center[1] + patch_radius)) storm_objects[-1].append( STObject(data[obj_slice_buff], np.where(label_grid[obj_slice_buff] == o + 1, 1, 0), x_grid[obj_slice_buff], y_grid[obj_slice_buff], ij_grid[0][obj_slice_buff], ij_grid[1][obj_slice_buff], times[0], times[0], dx=dx, step=dt)) return storm_objects
# Calculate left ventricle distances
lv = np.where(labels == 1, 1, 0)
dists = ndi.distance_transform_edt(lv, sampling=vol.meta['sampling'])

# Report on distances
print('Max distance (mm):', ndi.maximum(dists))
print('Max location:', ndi.maximum_position(dists))

# Plot overlay of distances
overlay = np.where(dists[5] > 0, dists[5], np.nan)
plt.imshow(overlay, cmap='hot')
format_and_render_plot()

# Extract centers of mass for objects 1 and 2
coms = ndi.center_of_mass(vol, labels, index=[1, 2])
print('Label 1 center:', coms[0])
print('Label 2 center:', coms[1])

# Add marks to plot
for c0, c1, c2 in coms:
    plt.scatter(c2, c1, s=100, marker='o')
plt.show()

# Create an empty time series
ts = np.zeros(20)

# Calculate volume at each voxel
d0, d1, d2, d3 = vol_ts.meta['sampling']
dvoxel = d1 * d2 * d3
def Find_Char(File_path, Frame_size, Overlap, Gaussian_blur): ## load image im = cv2.imread(File_path) im_gray = cv2.cvtColor(im, cv2.COLOR_BGR2GRAY) if (Gaussian_blur == "true"): im_gray = cv2.GaussianBlur(im_gray, (5, 5), 0) thresh = 127 t, im_bw = cv2.threshold(im_gray, thresh, 255, cv2.THRESH_BINARY) y, x, c = im.shape map_buff = np.zeros((int(y / Frame_size), int(x / Frame_size))) ## creat raw figure every # allocate Frame_size*Frame_size pixels to one value for i in range(int(y / Frame_size)): for j in range(int(x / Frame_size)): count = 0 for I in range(Frame_size): for J in range(Frame_size): if (im_bw[i * Frame_size + I, j * Frame_size + J] == 0): count = count + 1 map_buff[i, j] = count ## creat feature map ## map_result = np.zeros( (int(y / Frame_size) - Overlap, int(x / Frame_size) - Overlap)) for i in range(int(y / Frame_size) - Overlap): for j in range(int(x / Frame_size) - Overlap): map_result[i, j] = np.sum(map_buff[i:i + Overlap + 1, j:j + Overlap + 1]) ## find local maxima in feature map #set parameter neighborhood_size = 2 threshold = np.mean(map_result) ## parameter needed in neighborhood_size converge process ## buff_num = 100 local_maxima_num = 0 while (local_maxima_num != buff_num): buff_num = local_maxima_num data = map_result data_max = filters.maximum_filter(data, neighborhood_size) maxima = (data == data_max) data_min = filters.minimum_filter(data, neighborhood_size) diff = ((data_max - data_min) > threshold) maxima[diff == 0] = 0 labeled, num_objects = ndimage.label(maxima) xy = np.array( ndimage.center_of_mass(data, labeled, range(1, num_objects + 1))) local_maxima_num, dimension = xy.shape neighborhood_size = neighborhood_size + 1 ## find the corresponding corrodinate based on local maxima in feature map local_maxima_num, dimension = xy.shape Local_Maxima_Index = xy Local_Maxima_Index = Local_Maxima_Index * Frame_size Out_Char = [] Out_Char_lefttop = [] for i in range(local_maxima_num): y_low = int(Local_Maxima_Index[i, 0] - int((neighborhood_size) / 2) * Frame_size) y_high = int(Local_Maxima_Index[i, 0] + (int((neighborhood_size) / 2) + 4) * Frame_size) x_low = int(Local_Maxima_Index[i, 1] - int((neighborhood_size) / 2) * Frame_size) x_high = int(Local_Maxima_Index[i, 1] + (int((neighborhood_size) / 2) + 4) * Frame_size) if (y_low < 0): y_low = 0 if (x_low < 0): x_low = 0 if (y_high > y): y_high = y if (x_high > x): x_high = x Out_Char.append(im_bw[y_low:y_high, x_low:x_high]) Out_Char_lefttop.append([y_low, x_low]) #plt.imshow(Out_Char[i]) #plt.show() img, location = sort(Out_Char, Out_Char_lefttop, local_maxima_num) for i in range(local_maxima_num): print("sort") plt.title(i) plt.imshow(img[i]) plt.show() print(location[i]) return img, location
def run(self, rinput): _logger.info('starting processing for slit detection') flow = self.init_filters(rinput) hdulist = basic_processing_with_combination(rinput, flow=flow) hdr = hdulist[0].header self.set_base_headers(hdr) _logger.debug('finding pinholes') try: filtername = hdr['FILTER'] readmode = hdr['READMODE'] rotang = hdr['ROTANG'] detpa = hdr['DETPA'] dtupa = hdr['DTUPA'] dtub, dtur = datamodel.get_dtur_from_header(hdr) except KeyError as error: _logger.error(error) raise numina.exceptions.RecipeError(error) if rinput.shift_coordinates: xdtur, ydtur, zdtur = dtur xfac = xdtur / EMIR_PIXSCALE yfac = -ydtur / EMIR_PIXSCALE vec = numpy.array([yfac, xfac]) _logger.info('shift is %s', vec) ncenters = rinput.pinhole_nominal_positions + vec else: _logger.info('using pinhole coordinates as they are') ncenters = rinput.pinhole_nominal_positions _logger.info('pinhole characterization') positions = pinhole_char( hdulist[0].data, ncenters, box=rinput.box_half_size, recenter_pinhole=rinput.recenter, maxdist=rinput.max_recenter_radius ) _logger.info('alternate pinhole characterization') positions_alt = pinhole_char2( hdulist[0].data, ncenters, recenter_pinhole=rinput.recenter, recenter_half_box=rinput.box_half_size, recenter_maxdist=rinput.max_recenter_radius ) _logger.debug('finding slits') # First, prefilter with median median_filter_size = rinput.median_filter_size canny_sigma = rinput.canny_sigma obj_min_size = rinput.obj_min_size obj_max_size = rinput.obj_max_size data1 = hdulist[0].data _logger.debug('Median filter with box %d', median_filter_size) data2 = median_filter(data1, size=median_filter_size) # Grey level image img_grey = normalize(data2) # Find edges with canny _logger.debug('Find edges with canny, sigma %d', canny_sigma) edges = canny(img_grey, sigma=canny_sigma) # Fill edges _logger.debug('Fill holes') fill_slits = ndimage.binary_fill_holes(edges) _logger.debug('Label objects') label_objects, nb_labels = ndimage.label(fill_slits) _logger.debug('%d objects found', nb_labels) # Filter on the area of the labeled region # Perhaps we could ignore this filtering and # do it later? _logger.debug('Filter objects by size') # Sizes of regions sizes = numpy.bincount(label_objects.ravel()) _logger.debug('Min size is %d', obj_min_size) _logger.debug('Max size is %d', obj_max_size) mask_sizes = (sizes > obj_min_size) & (sizes < obj_max_size) # Filter out regions nids, = numpy.where(mask_sizes) mm = numpy.in1d(label_objects, nids) mm.shape = label_objects.shape fill_slits_clean = numpy.where(mm, 1, 0) # and relabel _logger.debug('Label filtered objects') relabel_objects, nb_labels = ndimage.label(fill_slits_clean) _logger.debug('%d objects found after filtering', nb_labels) ids = list(six.moves.range(1, nb_labels + 1)) _logger.debug('Find regions and centers') regions = ndimage.find_objects(relabel_objects) centers = ndimage.center_of_mass(data2, labels=relabel_objects, index=ids ) table = char_slit(data2, regions, slit_size_ratio=rinput.slit_size_ratio ) result = self.create_result(frame=hdulist, positions=positions, positions_alt=positions_alt, slitstable=table, filter=filtername, DTU=dtub, readmode=readmode, ROTANG=rotang, DETPA=detpa, DTUPA=dtupa, param_recenter=rinput.recenter, param_max_recenter_radius=rinput.max_recenter_radius, param_box_half_size=rinput.box_half_size ) return result
x, y, w, h = cv2.boundingRect(cont)
area = w * h
if area > mx_area:
    mx = x, y, w, h
    mx_area = area
x, y, w, h = mx
roi = cells_morph[y:y + h, x:x + w]  # cropped cell
cv2.rectangle(inv, (x, y), (x + w, y + h), (200, 0, 0), 2)
contour_cell, hierarchy_cell = cv2.findContours(invert(roi), cv2.RETR_LIST,
                                                cv2.CHAIN_APPROX_SIMPLE)

# Convex hull
chull = convex_hull_image(invert(roi))

# use the image processing module of scipy to find the center of the convex hull
cy, cx = ndi.center_of_mass(chull)

# Find contours
# contours = measure.find_contours(invert(roi), .8)
# contour = max(contours, key=len)

labels2 = label(chull, background=0)
for region in regionprops(labels2):
    CHA_area = region.area            # convex hull area
    CHA_perimeter = region.perimeter  # convex hull perimeter

# Cell area and cell perimeter
label_area = label(invert(roi), background=0)
for region in regionprops(label_area):
    cell_area = region.area
    cell_perimeter = region.perimeter

# Density
def find_parcellation_cut_coords(labels_img, background_label=0, return_label_names=False, label_hemisphere='left'): """ Return coordinates of center of mass of 3D parcellation atlas Parameters ---------- labels_img: 3D Nifti1Image A brain parcellation atlas with specific mask labels for each parcellated region. background_label: int, optional (default 0) Label value used in labels_img to represent background. return_label_names: bool, optional (default False) Returns list of labels label_hemisphere: 'left' or 'right', optional (default 'left') Choice of hemisphere to compute label center coords for. Applies only in cases where atlas labels are lateralized. Eg. Yeo or Harvard Oxford atlas. Returns ------- coords: numpy.ndarray of shape (n_labels, 3) Label regions cut coordinates in image space (mm). labels_list: list, optional Label region. Returned only when return_label_names is True. See Also -------- nilearn.plotting.find_probabilistic_atlas_cut_coords : For coordinates extraction on probabilistic atlases (4D) (Eg. MSDL atlas) """ # check label_hemisphere input if label_hemisphere not in ['left', 'right']: raise ValueError( "Invalid label_hemisphere name:{0}. Should be one " "of these 'left' or 'right'.".format(label_hemisphere)) # Grab data and affine labels_img = reorder_img(check_niimg_3d(labels_img)) labels_data = get_data(labels_img) labels_affine = labels_img.affine # Grab number of unique values in 3d image unique_labels = set(np.unique(labels_data)) - set([background_label]) # Loop over parcellation labels, grab center of mass and dump into coords # list coord_list = [] label_list = [] for cur_label in unique_labels: cur_img = labels_data == cur_label # Grab hemispheres separately x, y, z = coord_transform(0, 0, 0, np.linalg.inv(labels_affine)) left_hemi = get_data(labels_img).copy() == cur_label right_hemi = get_data(labels_img).copy() == cur_label left_hemi[int(x):] = 0 right_hemi[:int(x)] = 0 # Two connected component in both hemispheres if not np.all(left_hemi == False) or np.all(right_hemi == False): if label_hemisphere == 'left': cur_img = left_hemi.astype(int) elif label_hemisphere == 'right': cur_img = right_hemi.astype(int) # Take the largest connected component labels, label_nb = ndimage.label(cur_img) label_count = np.bincount(labels.ravel().astype(int)) label_count[0] = 0 component = labels == label_count.argmax() # Get parcellation center of mass x, y, z = ndimage.center_of_mass(component) # Dump label region and coordinates into a dictionary label_list.append(cur_label) coord_list.append((x, y, z)) # Transform coordinates coords = [ coord_transform(i[0], i[1], i[2], labels_affine) for i in coord_list ] if return_label_names: return np.array(coords), label_list else: return np.array(coords)
def execute(self, namespace): from scipy.ndimage import center_of_mass from PYME.IO.MetaDataHandler import DictMDHandler from PYME.IO import tabular chan0 = namespace[self.input_chan0] mdh = DictMDHandler() mdh.copyEntriesFrom(chan0.mdh) vx, vy, vz = chan0.voxelsize chan0 = np.stack([ chan0.data[:, :, t, 0].squeeze() for t in range(chan0.data.shape[2]) ], axis=2) mask0 = namespace[self.input_mask0] mask0 = np.stack([ mask0.data[:, :, t, 0].squeeze() for t in range(mask0.data.shape[2]) ], axis=2) mask0 = mask0 > 0 chan1 = namespace[self.input_chan1] chan1 = np.stack([ chan1.data[:, :, t, 0].squeeze() for t in range(chan1.data.shape[2]) ], axis=2) mask1 = namespace[self.input_mask1] mask1 = np.stack([ mask1.data[:, :, t, 0].squeeze() for t in range(mask1.data.shape[2]) ], axis=2) mask1 = mask1 > 0 com0 = center_of_mass(chan0, mask0) # [px] com1 = center_of_mass(chan1, mask1) ox = vx * (com0[0] - com1[0]) # [nm] oy = vy * (com0[1] - com1[1]) oz = vz * (com0[2] - com1[2]) offset = np.sqrt((ox**2) + (oy**2) + (oz**2)) n0 = mask0.sum() n1 = mask1.sum() n_total = n0 + n1 mask_both = mask0 * mask1 intensity_overlap = (mask_both * (chan0 + chan1)).sum() intensity0 = (chan0 * mask0).sum() intensity1 = (chan1 * mask1).sum() intensity_total = intensity0 + intensity1 n_overlapping = np.sum(mask0 * mask1) out = np.empty((1, ), dtype=self._dtype) out[0]['offset'] = offset out[0]['com0'] = com0 out[0]['com1'] = com1 out[0]['n_overlapping'] = n_overlapping out[0]['n_0'] = n0 out[0]['n_1'] = n1 out[0]['n_total'] = n_total out[0]['fractional_volume_overlap'] = n_overlapping / n_total out[0][ 'fractional_intensity_overlap'] = intensity_overlap / intensity_total out[0]['intensity_total'] = intensity_total out[0]['intensity0'] = intensity0 out[0]['intensity1'] = intensity1 out = tabular.RecArraySource(out) out.mdh = mdh namespace[self.output_name] = out
def find_centers(image, label_matrix, count):
    # find centers
    centers = numpy.array(ndimage.center_of_mass(image, label_matrix,
                                                 range(1, count + 1)))
    for center in centers:
        print("A center is located at: " + str(center))
def peak_detection(self, image_2D, local_window_size): """Do the local peak dection to get the best coordinate of molecular center. This function does a local peak dection to the score map to get the best coordinates. Args: image_2d: numpy.array, it is a 2d array, the dim is 2, the value of it was a prediction score given by the CNN model. local_window_size: this is the distance threshold between two particles. The peak detection is done in the local window. Returns: return list_coordinate_clean list_coordinate_clean: a list, the length of this list stands for the number of picked particles. Each element in the list is also a list, the length is 3. The first one is x-axis, the second one is y-axis, the third one is the predicted score. """ col = image_2D.shape[0] row = image_2D.shape[1] # filter the array in local, the values are replaced by local max value. data_max = filters.maximum_filter(image_2D, local_window_size) # compare the filter array to the original one, the same value in the same location is the local maximum. # maxima is a bool 2D array, true stands for the local maximum maxima = (image_2D == data_max) data_min = filters.minimum_filter(image_2D, local_window_size) diff = ((data_max - data_min) > 0) maxima[diff == 0] = 0 labeled, num_objects = ndimage.label(maxima) # get the coordinate of the local maximum # the shape of the array_y_x is (number, 2) array_y_x = np.array( ndimage.center_of_mass(image_2D, labeled, range(1, num_objects + 1))) array_y_x = array_y_x.astype(int) list_y_x = array_y_x.tolist() #print("number of local maximum:%d"%len(list_y_x)) for i in range(len(list_y_x)): # add the prediction score to the list list_y_x[i].append(image_2D[array_y_x[i][0]][array_y_x[i][1]]) # add a symbol to the list, and it is used to remove crowded candidate list_y_x[i].append(0) # remove close candidate for i in range(len(list_y_x) - 1): if list_y_x[i][3] == 1: continue for j in range(i + 1, len(list_y_x)): if list_y_x[i][3] == 1: break if list_y_x[j][3] == 1: continue d_y = list_y_x[i][0] - list_y_x[j][0] d_x = list_y_x[i][1] - list_y_x[j][1] d_distance = math.sqrt(d_y**2 + d_x**2) if d_distance < local_window_size / 2: if list_y_x[i][2] >= list_y_x[j][2]: list_y_x[j][3] = 1 else: list_y_x[i][3] = 1 list_coordinate_clean = [] for i in range(len(list_y_x)): if list_y_x[i][3] == 0: # remove the symbol element list_x_y = [] list_x_y.append(list_y_x[i][1]) list_x_y.append(list_y_x[i][0]) list_x_y.append(list_y_x[i][2]) list_coordinate_clean.append(list_x_y) return list_coordinate_clean
def _find_seed_point(vesselMask=None, binarize=False): """ The HFM solver requires seed-points to track vessels, this function automates that process. Z-axis (dim=2) in the DCE image is the floor-ceiling axis (w.r.t scanner). Therefore we select a seed-point from the top-most (max(z dim)) slice which has a non-zero value in its vessel mask. :param vesselMask: (numpy ndarray) :param binarize: (bool) If true, the mask pixels are rescaled to have values 0 or 1 :return: seed_points: (numpy ndarray) Array of seed-point co-ordinates """ # Make the image 1's and 0's if binarize is True: vesselMask = np.divide(vesselMask, np.amax(vesselMask)).astype(np.uint8) _, _, slices = vesselMask.shape seed_slice_idx = np.nan for slice_idx in np.arange(slices - 1, -1, -1): # Check if mask contains non-zero locations nz_indices = np.nonzero(vesselMask[:, :, slice_idx]) if nz_indices[0].size != 0 and nz_indices[1].size != 0: mask_slice = vesselMask[:, :, slice_idx] se_cc = generate_binary_structure(rank=mask_slice.ndim, connectivity=4) labelled_array, num_labels = label(mask_slice, se_cc) if num_labels >= 1: seed_slice_idx = slice_idx seed_label_array = labelled_array seed_num_labels = num_labels seed_slice = vesselMask[:, :, seed_slice_idx] # Find largest component among different labels comp_sizes = scipy.ndimage.measurements.sum( input=seed_slice, labels=seed_label_array, index=np.arange(1, seed_num_labels + 1)) # Since we ignore label 0 (background), add one to index of largest sum to get "true" label of CC largest_component_label = list(comp_sizes).index( max(comp_sizes)) + 1 # This seed-point returns a 2D array with the X and Y co-ordinate of the center-of-mass # of the largest component in the seed_slice slice_seed_point = center_of_mass( input=seed_slice, labels=seed_label_array, index=largest_component_label) if np.isnan(slice_seed_point[0]) or np.isnan( slice_seed_point[1]): continue else: break else: continue if np.isnan(seed_slice_idx): raise RuntimeError( 'Unable to find slice with non-zero mask value.') # Create the 3D seed-point by appending the slice idx seed_point = [slice_seed_point[0], slice_seed_point[1], seed_slice_idx] return np.array(seed_point)
def com(signal):
    '''Return the center of mass of a 1D array. This is used to find the
    center of rocking curve and slit height scans.'''
    return int(center_of_mass(signal)[0])
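Illustrative call on a synthetic rocking-curve-like profile; the Gaussian centre and width are arbitrary assumptions.

import numpy as np
from scipy.ndimage import center_of_mass  # needed by com() above

x = np.arange(200)
signal = np.exp(-(x - 120.5)**2 / (2.0 * 15.0**2))
print(com(signal))  # -> 120, the centre channel of the peak truncated to int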
def detect_eddies(field, lon, lat, ssh_crits, res, Npix_min, Npix_max, amp_thresh, d_thresh, cyc='anticyclonic'): ''' Detect eddies present in field which satisfy the criteria outlined in Chelton et al., Prog. ocean., 2011, App. B.2. Field is a 2D array specified on grid defined by lat and lon. ssh_crits is an array of ssh levels over which to perform eddy detection loop res is resolutin in degrees of field Npix_min, Npix_max, amp_thresh, d_thresh specify the constants used by the eddy detection algorithm (see Chelton paper for more details) cyc = 'cyclonic' or 'anticyclonic' [default] specifies type of eddies to be detected Function outputs lon, lat coordinates of detected eddies ''' len_deg_lat = 111.325 # length of 1 degree of latitude [km] llon, llat = np.meshgrid(lon, lat) lon_eddies = np.array([]) lat_eddies = np.array([]) amp_eddies = np.array([]) area_eddies = np.array([]) scale_eddies = np.array([]) # ssh_crits increasing for 'cyclonic', decreasing for 'anticyclonic' ssh_crits.sort() if cyc == 'cyclonic': ssh_crits = np.flipud(ssh_crits) # loop over ssh_crits and remove interior pixels of detected eddies from subsequent loop steps for ssh_crit in ssh_crits: # 1. Find all regions with eta greater (less than) than ssh_crit for anticyclonic (cyclonic) eddies (Chelton et al. 2011, App. B.2, criterion 1) if cyc == 'anticyclonic': regions, nregions = ndimage.label((field > ssh_crit).astype(int)) elif cyc == 'cyclonic': regions, nregions = ndimage.label((field < ssh_crit).astype(int)) for iregion in range(nregions): # 2. Calculate number of pixels comprising detected region, reject if not within [Npix_min, Npix_max] region = (regions == iregion + 1).astype(int) region_Npix = region.sum() eddy_area_within_limits = (region_Npix < Npix_max) * (region_Npix > Npix_min) # 3. Detect presence of local maximum (minimum) for anticylonic (cyclonic) eddies, reject if non-existent interior = ndimage.binary_erosion(region) exterior = region.astype(bool) - interior if interior.sum() == 0: continue if cyc == 'anticyclonic': has_internal_ext = field[interior].max() > field[exterior].max( ) elif cyc == 'cyclonic': has_internal_ext = field[interior].min() < field[exterior].min( ) # 4. Find amplitude of region, reject if < amp_thresh if cyc == 'anticyclonic': amp = field[interior].max() - field[exterior].mean() elif cyc == 'cyclonic': amp = field[exterior].mean() - field[interior].min() is_tall_eddy = amp >= amp_thresh # 5. 
Find maximum linear dimension of region, reject if < d_thresh if np.logical_not(eddy_area_within_limits * has_internal_ext * is_tall_eddy): continue lon_ext = llon[exterior] lat_ext = llat[exterior] d = distance_matrix(lon_ext, lat_ext) is_small_eddy = d.max() < d_thresh # Detected eddies: if eddy_area_within_limits * has_internal_ext * is_tall_eddy * is_small_eddy: # find centre of mass of eddy eddy_object_with_mass = field * region eddy_object_with_mass[np.isnan(eddy_object_with_mass)] = 0 j_cen, i_cen = ndimage.center_of_mass(eddy_object_with_mass) lon_cen = np.interp(i_cen, range(0, len(lon)), lon) lat_cen = np.interp(j_cen, range(0, len(lat)), lat) lon_eddies = np.append(lon_eddies, lon_cen) lat_eddies = np.append(lat_eddies, lat_cen) # assign (and calculated) amplitude, area, and scale of eddies amp_eddies = np.append(amp_eddies, amp) area = region_Npix * res**2 * len_deg_lat * len_deg_lon( lat_cen) # [km**2] area_eddies = np.append(area_eddies, area) scale = np.sqrt(area / np.pi) # [km] scale_eddies = np.append(scale_eddies, scale) # remove its interior pixels from further eddy detection eddy_mask = np.ones(field.shape) eddy_mask[interior.astype(int) == 1] = np.nan field = field * eddy_mask return lon_eddies, lat_eddies, amp_eddies, area_eddies, scale_eddies
def find_xyz_cut_coords(img, mask=None, activation_threshold=None): """ Find the center of the largest activation connected component. Parameters ----------- img : 3D Nifti1Image The brain map. mask : 3D ndarray, boolean, optional An optional brain mask. activation_threshold : float, optional The lower threshold to the positive activation. If None, the activation threshold is computed using the 80% percentile of the absolute value of the map. Returns ------- x : float the x world coordinate. y : float the y world coordinate. z : float the z world coordinate. """ # if a pseudo-4D image or several images were passed (cf. #922), # we reduce to a single 3D image to find the coordinates img = check_niimg_3d(img) data = _safe_get_data(img) # To speed up computations, we work with partial views of the array, # and keep track of the offset offset = np.zeros(3) # Deal with masked arrays: if hasattr(data, 'mask'): not_mask = np.logical_not(data.mask) if mask is None: mask = not_mask else: mask *= not_mask data = np.asarray(data) # Get rid of potential memmapping data = as_ndarray(data) my_map = data.copy() if mask is not None: # check against empty mask if mask.sum() == 0.: warnings.warn( "Provided mask is empty. Returning center of mass instead.") cut_coords = ndimage.center_of_mass(np.abs(my_map)) + offset x_map, y_map, z_map = cut_coords return np.asarray( coord_transform(x_map, y_map, z_map, img.get_affine())).tolist() slice_x, slice_y, slice_z = ndimage.find_objects(mask)[0] my_map = my_map[slice_x, slice_y, slice_z] mask = mask[slice_x, slice_y, slice_z] my_map *= mask offset += [slice_x.start, slice_y.start, slice_z.start] # Testing min and max is faster than np.all(my_map == 0) if (my_map.max() == 0) and (my_map.min() == 0): return .5 * np.array(data.shape) if activation_threshold is None: activation_threshold = fast_abs_percentile(my_map[my_map != 0].ravel(), 80) mask = np.abs(my_map) > activation_threshold - 1.e-15 # mask may be zero everywhere in rare cases if mask.max() == 0: return .5 * np.array(data.shape) mask = largest_connected_component(mask) slice_x, slice_y, slice_z = ndimage.find_objects(mask)[0] my_map = my_map[slice_x, slice_y, slice_z] mask = mask[slice_x, slice_y, slice_z] my_map *= mask offset += [slice_x.start, slice_y.start, slice_z.start] # For the second threshold, we use a mean, as it is much faster, # althought it is less robust second_threshold = np.abs(np.mean(my_map[mask])) second_mask = (np.abs(my_map) > second_threshold) if second_mask.sum() > 50: my_map *= largest_connected_component(second_mask) cut_coords = ndimage.center_of_mass(np.abs(my_map)) x_map, y_map, z_map = cut_coords + offset # Return as a list of scalars return np.asarray(coord_transform(x_map, y_map, z_map, img.get_affine())).tolist()
import numpy as np
import matplotlib.image as mpimg
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
from skimage import measure
import scipy.ndimage as ndi

# matplotlib setup
# matplotlib inline
from pylab import rcParams

img = mpimg.imread("images/53.jpg")
cy, cx = ndi.center_of_mass(img)

plt.imshow(img, cmap="Set2")
plt.scatter(cx, cy)
plt.show()

contours = measure.find_contours(img, .8)
contour = max(contours, key=len)

plt.plot(contour[::, 1], contour[::, 0], linewidth=0.5)
plt.imshow(img, cmap="Set3")
plt.show()


def cart2pol(x, y):
    rho = np.sqrt(x**2 + y**2)
def run(self, rinput): self.logger.info('starting slit processing') self.logger.info('basic image reduction') flow = self.init_filters(rinput) hdulist = basic_processing_with_combination(rinput, flow=flow) hdr = hdulist[0].header self.set_base_headers(hdr) try: rotang = hdr['ROTANG'] detpa = hdr['DETPA'] dtupa = hdr['DTUPA'] dtub, dtur = datamodel.get_dtur_from_header(hdr) except KeyError as error: self.logger.error(error) raise RecipeError(error) self.logger.debug('finding slits') # Filter values below 0.0 self.logger.debug('Filter values below 0') data1 = hdulist[0].data[:] data1[data1 < 0.0] = 0.0 # First, prefilter with median median_filter_size = rinput.median_filter_size canny_sigma = rinput.canny_sigma self.logger.debug('Median filter with box %d', median_filter_size) data2 = median_filter(data1, size=median_filter_size) # Grey level image img_grey = normalize_raw(data2) # Find edges with Canny self.logger.debug('Find edges, Canny sigma %f', canny_sigma) # These thresholds corespond roughly with # value x (2**16 - 1) high_threshold = rinput.canny_high_threshold low_threshold = rinput.canny_low_threshold self.logger.debug('Find edges, Canny high threshold %f', high_threshold) self.logger.debug('Find edges, Canny low threshold %f', low_threshold) edges = canny(img_grey, sigma=canny_sigma, high_threshold=high_threshold, low_threshold=low_threshold) # Fill edges self.logger.debug('Fill holes') # I do a dilation and erosion to fill # possible holes in 'edges' fill = ndimage.binary_dilation(edges) fill2 = ndimage.binary_fill_holes(fill) fill_slits = ndimage.binary_erosion(fill2) self.logger.debug('Label objects') label_objects, nb_labels = ndimage.label(fill_slits) self.logger.debug('%d objects found', nb_labels) ids = list(six.moves.range(1, nb_labels + 1)) self.logger.debug('Find regions and centers') regions = ndimage.find_objects(label_objects) centers = ndimage.center_of_mass(data2, labels=label_objects, index=ids) table = char_slit(data2, regions, slit_size_ratio=-1.0) result = self.create_result(frame=hdulist, slitstable=table, DTU=dtub, ROTANG=rotang, DETPA=detpa, DTUPA=dtupa) return result
def test_center_of_mass05():
    expected = [0.5, 0.5]
    for type in types:
        input = np.array([[1, 1], [1, 1]], type)
        output = ndimage.center_of_mass(input)
        assert_array_almost_equal(output, expected)
data = exposure.rescale_intensity(data)
# data = exposure.adjust_gamma(data, 5)
plt.imshow(data, cmap="gray")
plt.show()

fdata = filters.gaussian_filter(data, sigma=10)
thresh = threshold_otsu(data) / 3
plt.imshow(fdata > thresh)
plt.show()

labeled, n = ndimage.label(fdata > thresh)
xy = np.array(ndimage.center_of_mass(fdata, labeled, range(1, n + 1)))
xy = xy[:, [1, 0]]

plt.imshow(labeled)
plt.plot(xy[:, 0], xy[:, 1], "rx")
plt.show()

# plt.imshow(genmask((80, 80), 5, sub))
# plt.show()
#
# n = 4
# r = 5.5
# pos = np.zeros((n*2), dtype=np.int32)
# for i in range(n):
fig = plt.figure()
row = 3
column = 2

mat = [[[1, 0, 0], [0, 1, 0], [0, 0, 1]],
       [[.8, 0, -20], [0, .8, -10], [0, 0, 1]],
       [[1, 0, 0], [0, 1, 0], [0, 0, 1]],
       [[1, 0, 0], [0, 1, 0], [0, 0, 1]]]

img_path = './images/Screenshot from 2020-03-11 14-34-34.png'
img = imageio.imread(img_path)
img = img.transpose(2, 0, 1)[0]

ax = fig.add_subplot(row, column, 1)
ax.imshow(img, cmap='gray')

com = ndi.center_of_mass(img)
d0 = com[0] - 90
d1 = com[1] - 90
xfm_shift = ndi.shift(img, shift=[d0, d1])

im = Image.fromarray(xfm_shift)
im.save('./images/image2.png')

ax1 = fig.add_subplot(row, column, 2)
ax1.imshow(xfm_shift, cmap='gray')

xfm_rotate = ndi.rotate(img, angle=-30, axes=(0, 1), reshape=False)
ax2 = fig.add_subplot(row, column, 3)
ax2.imshow(xfm_rotate, cmap='gray')

xfm_aff_transform = ndi.affine_transform(img, mat[1])
def run(self, ips, imgs, para=None): inten = ImageManager.get(para['inten']) if not para['slice']: imgs = [inten.img] msks = [ips.img] else: msks = ips.imgs imgs = inten.imgs if len(msks) == 1: msks *= len(imgs) buf = imgs[0].astype(np.uint16) strc = ndimage.generate_binary_structure( 2, 1 if para['con'] == '4-connect' else 2) idct = ['Max', 'Min', 'Mean', 'Variance', 'Standard', 'Sum'] key = { 'Max': 'max', 'Min': 'min', 'Mean': 'mean', 'Variance': 'var', 'Standard': 'std', 'Sum': 'sum' } idct = [i for i in idct if para[key[i]]] titles = ['Slice', 'ID'][0 if para['slice'] else 1:] if para['center']: titles.extend(['Center-X', 'Center-Y']) if para['extent']: titles.extend(['Min-Y', 'Min-X', 'Max-Y', 'Max-X']) titles.extend(idct) k = ips.unit[0] data, mark = [], {'type': 'layers', 'body': {}} # data,mark=[],[] for i in range(len(imgs)): n = ndimage.label(msks[i], strc, output=buf) index = range(1, n + 1) dt = [] if para['slice']: dt.append([i] * n) dt.append(range(n)) xy = ndimage.center_of_mass(imgs[i], buf, index) xy = np.array(xy).round(2).T if para['center']: dt.extend([xy[1] * k, xy[0] * k]) boxs = [None] * n if para['extent']: boxs = ndimage.find_objects(buf) boxs = [(i[1].start + (i[1].stop - i[1].start) / 2, i[0].start + (i[0].stop - i[0].start) / 2, i[1].stop - i[1].start, i[0].stop - i[0].start) for i in boxs] for j in (0, 1, 2, 3): dt.append([i[j] * k for i in boxs]) if para['max']: dt.append(ndimage.maximum(imgs[i], buf, index).round(2)) if para['min']: dt.append(ndimage.minimum(imgs[i], buf, index).round(2)) if para['mean']: dt.append(ndimage.mean(imgs[i], buf, index).round(2)) if para['var']: dt.append(ndimage.variance(imgs[i], buf, index).round(2)) if para['std']: dt.append( ndimage.standard_deviation(imgs[i], buf, index).round(2)) if para['sum']: dt.append(ndimage.sum(imgs[i], buf, index).round(2)) layer = {'type': 'layer', 'body': []} xy = np.int0(xy).T texts = [(i[1], i[0]) + ('id=%d' % n, ) for i, n in zip(xy, range(len(xy)))] layer['body'].append({'type': 'texts', 'body': texts}) if para['extent']: layer['body'].append({'type': 'rectangles', 'body': boxs}) mark['body'][i] = layer data.extend(list(zip(*dt))) IPy.show_table(pd.DataFrame(data, columns=titles), inten.title + '-region statistic') inten.mark = GeometryMark(mark) inten.update()
def extract_particles(self, segmentation):
    """
    Saves particle centers into output .star file, after dismissing regions
    that are too big to contain a particle.

    Args:
        segmentation: Segmentation of the micrograph into noise and particle projections.
    """
    segmentation = segmentation[self.query_size // 2 - 1:-self.query_size // 2,
                                self.query_size // 2 - 1:-self.query_size // 2]
    labeled_segments, _ = ndimage.label(segmentation, np.ones((3, 3)))
    values, repeats = np.unique(labeled_segments, return_counts=True)

    values_to_remove = np.where(repeats > self.max_size ** 2)
    values = np.take(values, values_to_remove)
    values = np.reshape(values, (1, 1, np.prod(values.shape)), "F")

    labeled_segments = np.reshape(
        labeled_segments,
        (labeled_segments.shape[0], labeled_segments.shape[1], 1),
        "F",
    )
    matrix1 = np.repeat(labeled_segments, values.shape[2], 2)
    matrix2 = np.repeat(values, matrix1.shape[0], 0)
    matrix2 = np.repeat(matrix2, matrix1.shape[1], 1)

    matrix3 = np.equal(matrix1, matrix2)
    matrix4 = np.sum(matrix3, 2)

    segmentation[np.where(matrix4 == 1)] = 0
    labeled_segments, _ = ndimage.label(segmentation, np.ones((3, 3)))

    max_val = np.amax(
        np.reshape(labeled_segments, (np.prod(labeled_segments.shape))))
    center = center_of_mass(segmentation, labeled_segments,
                            np.arange(1, max_val))
    center = np.rint(center)

    img = np.zeros((segmentation.shape[0], segmentation.shape[1]))
    img[center[:, 0].astype(int), center[:, 1].astype(int)] = 1
    y, x = np.ogrid[-self.moa:self.moa + 1, -self.moa:self.moa + 1]
    element = x * x + y * y <= self.moa * self.moa
    img = binary_dilation(img, structure=element)
    labeled_img, _ = ndimage.label(img, np.ones((3, 3)))
    values, repeats = np.unique(labeled_img, return_counts=True)
    y = np.where(repeats == np.count_nonzero(element))
    y = np.array(y)
    y = y.astype(int)
    y = np.reshape(y, (np.prod(y.shape)), "F")
    y -= 1
    center = center[y, :]

    center = center + (self.query_size // 2 - 1) * np.ones(center.shape)
    center = center + (self.query_size // 2 - 1) * np.ones(center.shape)
    center = center + np.ones(center.shape)
    center = config.apple.mrc_shrink_factor * center

    # swap columns to align with Relion
    center = center[:, [1, 0]]

    # first column is x; second column is y - offset by margins that were
    # discarded from the image
    center[:, 0] += config.apple.mrc_margin_left
    center[:, 1] += config.apple.mrc_margin_top

    if self.output_directory is not None:
        basename = os.path.basename(self.filename)
        name_str, ext = os.path.splitext(basename)

        applepick_path = os.path.join(
            self.output_directory, "{}_applepick.star".format(name_str))
        with open(applepick_path, "w") as f:
            np.savetxt(
                f,
                ["data_root\n\nloop_\n_rlnCoordinateX #1\n_rlnCoordinateY #2"],
                fmt="%s",
            )
            np.savetxt(f, center, fmt="%d %d")

    return center
def execute(self, userdata):
    # State execution
    grid = np.array(self.gmap.data)
    grid = np.resize(grid, (self.gmap.info.height, self.gmap.info.width))

    edges = np.logical_and(
        np.abs(ndimage.laplace(grid)) > 0,
        np.abs(ndimage.laplace(grid)) < 10)
    labels, nlabels = ndimage.label(edges)
    cy, cx = np.vstack(
        ndimage.center_of_mass(edges, labels, np.arange(nlabels) + 1)).T
    sizes = ndimage.sum(grid, labels, np.arange(nlabels) + 1)

    points = Marker()
    points.header.frame_id = "map"
    points.header.stamp = rospy.Time.now()
    points.ns = 'frontier_centroids'
    points.action = Marker.ADD
    points.id = 0
    points.type = Marker.POINTS
    points.scale.x = 0.2
    points.scale.y = 0.2
    points.color.g = 1.0
    points.color.a = 1.0

    # greedily pick closest frontier
    reward = 0.0
    goalCoords = []
    for i in range(0, nlabels):
        pt = Point()
        pt_coord = self.state_from_index(cx[i], cy[i])
        pt.x = pt_coord[0]
        pt.y = pt_coord[1]
        pt.z = 0.0
        points.points.append(pt)
        this_reward = -sizes[i] / self.distance(pt_coord)
        if -sizes[i] > 5 and this_reward > reward:
            print(this_reward, pt_coord)
            reward = this_reward
            goalCoords = pt_coord

    if not goalCoords:
        print('No more goals of relevant size (>5) identified!')
        return 'finish'

    goalHeading = np.arctan2(goalCoords[1] - self.y_rob,
                             goalCoords[0] - self.x_rob)

    self.pub.publish(points)

    # Make a goal. This has a coordinate frame, a time stamp, and a target pose.
    # The target pose is a Point (where the z component should be zero) and a
    # Quaternion (for the orientation).
    goal = MoveBaseGoal()
    goal.target_pose.header.frame_id = 'map'
    goal.target_pose.header.stamp = rospy.Time.now()
    goal.target_pose.pose = Pose(Point(*goalCoords), heading(goalHeading))

    # Send the goal to move base, and wait for a result. We can put a timeout
    # on the wait, so that we don't get stuck here for unreachable goals.
    # Once we're done, get the state from move base.
    self.move_base.send_goal(goal)
    success = self.move_base.wait_for_result(rospy.Duration(120))
    state = self.move_base.get_state()

    # Did everything work as expected?
    if rospy.is_shutdown() or time.time() - self.start_time > 300:
        print('Timeout or ROS shutdown - finishing program')
        return 'timeout'

    if success and state == GoalStatus.SUCCEEDED:
        print('Made it!')
        return 'continue'
    else:
        print('Goal not reachable')
        self.move_base.cancel_goal()
        return 'goal_not_reached'
def test_center_of_mass06():
    expected = [0.5, 0.5]
    input = np.array([[1, 2], [3, 1]], bool)
    output = ndimage.center_of_mass(input)
    assert_array_almost_equal(output, expected)
def find_centroids(SFP):
    return [center_of_mass(SFP[:, :, ii]) for ii in range(SFP.shape[2])]
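# Usage sketch for the helper above, assuming SFP is a (rows, cols, n_frames)
# stack; ndimage.center_of_mass is applied frame by frame.
import numpy as np
from scipy.ndimage import center_of_mass

SFP = np.zeros((5, 5, 2))
SFP[1, 1, 0] = 1.0
SFP[3, 4, 1] = 1.0
print(find_centroids(SFP))  # [(1.0, 1.0), (3.0, 4.0)]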
def test_center_of_mass08():
    labels = [1, 2]
    expected = [0.5, 1.0]
    input = np.array([[5, 2], [3, 1]], bool)
    output = ndimage.center_of_mass(input, labels, 2)
    assert_array_almost_equal(output, expected)
def test_center_of_mass09():
    labels = [1, 2]
    expected = [(0.5, 0.0), (0.5, 1.0)]
    input = np.array([[1, 2], [1, 1]], bool)
    output = ndimage.center_of_mass(input, labels, [1, 2])
    assert_array_almost_equal(output, expected)
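# Brief sketch of the labels/index API the tests above exercise, but with a
# weighted (non-boolean) input so the intensity weighting is visible.
import numpy as np
from scipy import ndimage

arr = np.array([[1.0, 2.0],
                [3.0, 2.0]])
labels = np.array([[1, 2],
                   [1, 2]])
# Label 1 covers the first column: the centroid is pulled toward the heavier pixel.
print(ndimage.center_of_mass(arr, labels, 1))       # (0.75, 0.0)
print(ndimage.center_of_mass(arr, labels, [1, 2]))  # [(0.75, 0.0), (0.5, 1.0)]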
def find_cut_coords(map, mask=None, activation_threshold=None):
    """ Find the center of the largest activation connected component.

        Parameters
        -----------
        map : 3D ndarray
            The activation map, as a 3D image.
        mask : 3D ndarray, boolean, optional
            An optional brain mask.
        activation_threshold : float, optional
            The lower threshold to the positive activation. If None, the
            activation threshold is computed using find_activation.

        Returns
        -------
        x : float
            the x coordinate in voxels.
        y : float
            the y coordinate in voxels.
        z : float
            the z coordinate in voxels.
    """
    # To speed up computations, we work with partial views of the array,
    # and keep track of the offset
    offset = np.zeros(3)
    # Deal with masked arrays:
    if hasattr(map, 'mask'):
        not_mask = np.logical_not(map.mask)
        if mask is None:
            mask = not_mask
        else:
            mask *= not_mask
        map = np.asarray(map)
    my_map = map.copy()
    if mask is not None:
        slice_x, slice_y, slice_z = ndimage.find_objects(mask)[0]
        my_map = my_map[slice_x, slice_y, slice_z]
        mask = mask[slice_x, slice_y, slice_z]
        my_map *= mask
        offset += [slice_x.start, slice_y.start, slice_z.start]
    # Testing min and max is faster than np.all(my_map == 0)
    if (my_map.max() == 0) and (my_map.min() == 0):
        return .5 * np.array(map.shape)
    if activation_threshold is None:
        activation_threshold = stats.scoreatpercentile(
            np.abs(my_map[my_map != 0]).ravel(), 80)
    mask = np.abs(my_map) > activation_threshold - 1.e-15
    mask = largest_cc(mask)
    slice_x, slice_y, slice_z = ndimage.find_objects(mask)[0]
    my_map = my_map[slice_x, slice_y, slice_z]
    mask = mask[slice_x, slice_y, slice_z]
    my_map *= mask
    offset += [slice_x.start, slice_y.start, slice_z.start]
    # For the second threshold, we use a mean, as it is much faster,
    # although it is less robust
    second_threshold = np.abs(np.mean(my_map[mask]))
    second_mask = (np.abs(my_map) > second_threshold)
    if second_mask.sum() > 50:
        my_map *= largest_cc(second_mask)
    cut_coords = ndimage.center_of_mass(np.abs(my_map))
    return cut_coords + offset
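# Self-contained sketch (synthetic volume) of the core idea above: threshold the
# map, keep the largest connected component, and take its centre of mass.
# largest_cc is external to this snippet, so the component is picked by size here.
import numpy as np
from scipy import ndimage

vol = np.zeros((20, 20, 20))
vol[2:5, 2:5, 2:5] = 1.0         # small activation
vol[10:18, 10:18, 10:18] = 2.0   # large activation

mask = np.abs(vol) > 0.5
labels, n = ndimage.label(mask)
sizes = np.bincount(labels.ravel())
sizes[0] = 0                     # ignore background
largest = sizes.argmax()
cut_coords = ndimage.center_of_mass(np.abs(vol), labels, largest)
print(cut_coords)                # centre of the big blob, roughly (13.5, 13.5, 13.5)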
def diff(
    ref,
    new,
    align=False,
    inf_loss=0.25,
    smooth_psf=False,
    beta=True,
    shift=True,
    iterative=False,
    fitted_psf=True,
):
    """
    Function that takes two SingleImage instances, a reference and a new
    image, and performs their subtraction with the properimage R estimator,
    returning the difference image D, its PSF P, the corrected score image
    S_corr and the combined mask.
    """
    logger = logging.getLogger()
    if fitted_psf:
        from .single_image import SingleImageGaussPSF as SI

        logger.info("Using single psf, gaussian modeled")
    else:
        from .single_image import SingleImage as SI

    if not isinstance(ref, SI):
        try:
            ref = SI(ref, smooth_psf=smooth_psf)
        except:  # noqa
            try:
                ref = SI(ref.data, smooth_psf=smooth_psf)
            except:  # noqa
                raise

    if not isinstance(new, SI):
        try:
            new = SI(new, smooth_psf=smooth_psf)
        except:  # noqa
            try:
                new = SI(new.data, smooth_psf=smooth_psf)
            except:  # noqa
                raise

    if align:
        registered = aa.register(new.data, ref.data)
        new._clean()
        registered = registered[: ref.data.shape[0], : ref.data.shape[1]]
        new = SI(
            registered.data,
            mask=registered.mask,
            borders=False,
            smooth_psf=smooth_psf,
        )
        # new.data = registered
        # new.data.mask = registered.mask

    # make sure that the alignment has delivered arrays of the same shape
    if new.data.data.shape != ref.data.data.shape:
        import ipdb

        ipdb.set_trace()

    t0 = time.time()
    mix_mask = np.ma.mask_or(new.data.mask, ref.data.mask)

    zps, meanmags = u.transparency([ref, new])
    ref.zp = zps[0]
    new.zp = zps[1]
    n_zp = new.zp
    r_zp = ref.zp

    a_ref, psf_ref = ref.get_variable_psf(inf_loss)
    a_new, psf_new = new.get_variable_psf(inf_loss)

    if fitted_psf:
        # I already know that a_ref and a_new are None, both of them
        # And each psf is a list, first element a render,
        # second element a model
        p_r = psf_ref[1]
        p_n = psf_new[1]

        p_r.x_mean = ref.data.data.shape[0] / 2.0
        p_r.y_mean = ref.data.data.shape[1] / 2.0
        p_n.x_mean = new.data.data.shape[0] / 2.0
        p_n.y_mean = new.data.data.shape[1] / 2.0
        p_r.bounding_box = None
        p_n.bounding_box = None

        p_n = p_n.render(np.zeros(new.data.data.shape))
        p_r = p_r.render(np.zeros(ref.data.data.shape))

        dx_ref, dy_ref = center_of_mass(p_r)  # [0])
        dx_new, dy_new = center_of_mass(p_n)  # [0])
    else:
        p_r = psf_ref[0]
        p_n = psf_new[0]

        dx_ref, dy_ref = center_of_mass(p_r)  # [0])
        dx_new, dy_new = center_of_mass(p_n)  # [0])

    if dx_new < 0.0 or dy_new < 0.0:
        import ipdb

        ipdb.set_trace()

    # rad_ref_sq = dx_ref*dx_ref + dy_ref*dy_ref
    # rad_new_sq = dx_new*dx_new + dy_new*dy_new

    psf_ref_hat = _fftwn(p_r, s=ref.data.shape, norm="ortho")
    psf_new_hat = _fftwn(p_n, s=new.data.shape, norm="ortho")

    psf_ref_hat[np.where(psf_ref_hat.real == 0)] = eps
    psf_new_hat[np.where(psf_new_hat.real == 0)] = eps

    psf_ref_hat_conj = psf_ref_hat.conj()
    psf_new_hat_conj = psf_new_hat.conj()

    D_hat_r = fourier_shift(psf_new_hat * ref.interped_hat, (-dx_new, -dy_new))
    D_hat_n = fourier_shift(psf_ref_hat * new.interped_hat, (-dx_ref, -dy_ref))
    # D_hat_r = psf_new_hat * ref.interped_hat
    # D_hat_n = psf_ref_hat * new.interped_hat

    norm_b = ref.var ** 2 * psf_new_hat * psf_new_hat_conj
    norm_a = new.var ** 2 * psf_ref_hat * psf_ref_hat_conj

    new_back = sep.Background(new.interped).back()
    ref_back = sep.Background(ref.interped).back()
    gamma = new_back - ref_back
    b = n_zp / r_zp

    norm = np.sqrt(norm_a + norm_b * b ** 2)

    if beta:
        # start with beta=1
        if shift:

            def cost(vec):
                b, dx, dy = vec
                gammap = gamma / np.sqrt(new.var ** 2 + b ** 2 * ref.var ** 2)
                norm = np.sqrt(norm_a + norm_b * b ** 2)
                dhn = D_hat_n / norm
                dhr = D_hat_r / norm
                b_n = (
                    _ifftwn(dhn, norm="ortho")
                    - _ifftwn(fourier_shift(dhr, (dx, dy)), norm="ortho") * b
                    - np.roll(gammap, (int(round(dx)), int(round(dy))))
                )
                cost = b_n.real[100:-100, 100:-100]
                cost = np.sum(np.abs(cost / (cost.shape[0] * cost.shape[1])))
                return cost

            ti = time.time()
            vec0 = [b, 0.0, 0.0]
            bounds = ([0.1, -0.9, -0.9], [10.0, 0.9, 0.9])
            solv_beta = optimize.least_squares(
                cost,
                vec0,
                xtol=1e-5,
                jac="3-point",
                method="trf",
                bounds=bounds,
            )
            tf = time.time()

            if solv_beta.success:
                logger.info(("Found that beta = {}".format(solv_beta.x)))
                logger.info(("Took only {} awesome seconds".format(tf - ti)))
                logger.info(
                    ("The solution was with cost {}".format(solv_beta.cost))
                )
                b, dx, dy = solv_beta.x
            else:
                logger.info("Least squares could not find our beta :(")
                logger.info("Beta is overridden to be the zp ratio again")
                b = n_zp / r_zp
                dx = 0.0
                dy = 0.0

        elif iterative:
            bi = b

            def F(b):
                gammap = gamma / np.sqrt(new.var ** 2 + b ** 2 * ref.var ** 2)
                norm = np.sqrt(norm_a + norm_b * b ** 2)
                b_n = (
                    _ifftwn(D_hat_n / norm, norm="ortho")
                    - gammap
                    - b * _ifftwn(D_hat_r / norm, norm="ortho")
                )
                # robust_stats = lambda b: sigma_clipped_stats(
                #     b_n(b).real[100:-100, 100:-100])
                return np.sum(np.abs(b_n.real))

            ti = time.time()
            solv_beta = optimize.minimize_scalar(
                F,
                method="bounded",
                bounds=[0.1, 10.0],
                options={"maxiter": 1000},
            )
            tf = time.time()

            if solv_beta.success:
                logger.info(("Found that beta = {}".format(solv_beta.x)))
                logger.info(("Took only {} awesome seconds".format(tf - ti)))
                b = solv_beta.x
            else:
                logger.info("Least squares could not find our beta :(")
                logger.info("Beta is overridden to be the zp ratio again")
                b = n_zp / r_zp
            dx = dy = 0.0
        else:
            bi = b

            def F(b):
                gammap = gamma / np.sqrt(new.var ** 2 + b ** 2 * ref.var ** 2)
                norm = np.sqrt(norm_a + norm_b * b ** 2)
                b_n = (
                    _ifftwn(D_hat_n / norm, norm="ortho")
                    - gammap
                    - b * _ifftwn(D_hat_r / norm, norm="ortho")
                )
                return np.sum(np.abs(b_n.real))

            ti = time.time()
            solv_beta = optimize.least_squares(
                F, bi, ftol=1e-8, bounds=[0.1, 10.0], jac="2-point"
            )
            tf = time.time()

            if solv_beta.success:
                logger.info(("Found that beta = {}".format(solv_beta.x)))
                logger.info(("Took only {} awesome seconds".format(tf - ti)))
                logger.info(
                    ("The solution was with cost {}".format(solv_beta.cost))
                )
                b = solv_beta.x
            else:
                logger.info("Least squares could not find our beta :(")
                logger.info("Beta is overridden to be the zp ratio again")
                b = n_zp / r_zp
            dx = dy = 0.0
    else:
        if shift:
            bi = n_zp / r_zp
            gammap = gamma / np.sqrt(new.var ** 2 + b ** 2 * ref.var ** 2)
            norm = np.sqrt(norm_a + norm_b * b ** 2)
            dhn = D_hat_n / norm
            dhr = D_hat_r / norm

            def cost(vec):
                dx, dy = vec
                b_n = (
                    _ifftwn(dhn, norm="ortho")
                    - _ifftwn(fourier_shift(dhr, (dx, dy)), norm="ortho") * b
                    - np.roll(gammap, (int(round(dx)), int(round(dy))))
                )
                cost = b_n.real[100:-100, 100:-100]
                cost = np.sum(np.abs(cost / (cost.shape[0] * cost.shape[1])))
                return cost

            ti = time.time()
            vec0 = [0.0, 0.0]
            bounds = ([-0.9, -0.9], [0.9, 0.9])
            solv_beta = optimize.least_squares(
                cost,
                vec0,
                xtol=1e-5,
                jac="3-point",
                method="trf",
                bounds=bounds,
            )
            tf = time.time()

            if solv_beta.success:
                logger.info(("Found that shift = {}".format(solv_beta.x)))
                logger.info(("Took only {} awesome seconds".format(tf - ti)))
                logger.info(
                    ("The solution was with cost {}".format(solv_beta.cost))
                )
                dx, dy = solv_beta.x
            else:
                logger.info("Least squares could not find our shift :(")
                dx = 0.0
                dy = 0.0
        else:
            b = new.zp / ref.zp
            dx = 0.0
            dy = 0.0

    norm = norm_a + norm_b * b ** 2

    if dx == 0.0 and dy == 0.0:
        D_hat = (D_hat_n - b * D_hat_r) / np.sqrt(norm)
    else:
        D_hat = (D_hat_n - fourier_shift(b * D_hat_r, (dx, dy))) / np.sqrt(norm)

    D = _ifftwn(D_hat, norm="ortho")
    if np.any(np.isnan(D.real)):
        pass

    d_zp = b / np.sqrt(ref.var ** 2 * b ** 2 + new.var ** 2)
    P_hat = (psf_ref_hat * psf_new_hat * b) / (np.sqrt(norm) * d_zp)

    P = _ifftwn(P_hat, norm="ortho").real
    dx_p, dy_p = center_of_mass(P)

    S_hat = fourier_shift(d_zp * D_hat * P_hat.conj(), (dx_p, dy_p))

    kr = _ifftwn(
        new.zp * psf_ref_hat_conj * b * psf_new_hat * psf_new_hat_conj / norm,
        norm="ortho",
    )
    kn = _ifftwn(
        new.zp * psf_new_hat_conj * psf_ref_hat * psf_ref_hat_conj / norm,
        norm="ortho",
    )

    V_en = _ifftwn(
        _fftwn(new.data.filled(0) + 1.0, norm="ortho")
        * _fftwn(kn ** 2, s=new.data.shape),
        norm="ortho",
    )
    V_er = _ifftwn(
        _fftwn(ref.data.filled(0) + 1.0, norm="ortho")
        * _fftwn(kr ** 2, s=ref.data.shape),
        norm="ortho",
    )

    S_corr = _ifftwn(S_hat, norm="ortho") / np.sqrt(V_en + V_er)
    logger.info("S_corr sigma_clipped_stats ")
    logger.info(
        (
            "mean = {}, median = {}, std = {}\n".format(
                *sigma_clipped_stats(S_corr.real.flatten(), sigma=4.0)
            )
        )
    )
    logger.info(
        ("Subtraction performed in {} seconds\n\n".format(time.time() - t0))
    )

    # import ipdb; ipdb.set_trace()
    return D, P, S_corr.real, mix_mask
def peak_finder(img_raw, xy, profile=None, pxmask=None,
                noise=np.array([[False, True, False],
                                [True, True, True],
                                [False, True, False]]),
                kernel_size=9, threshold=9, db_eps=1.9, db_samples=9,
                bkg_remove=False, img_clean=True, radial_min=5):
    """
    1) (OPTIONAL) Construct background from xy and profile. Subtract from img_raw.
    2) Remove impulse noise using morphological opening and closing with the noise structure.
    3) Convolve the image with a Gaussian kernel to find the local background.
       Features are all pixels larger than the local background by a certain threshold.
    4) Pick peaks out of the image features using DBSCAN (clustering).
    5) Label the peaks using ndimage.
    6) Apply a radial cut, discarding peaks closer than radial_min to xy.
    7) (OPTIONAL) Clean the image.

    Simple peak finder built upon scipy.ndimage. Uses a morphological opening
    to find features larger than size and higher than threshold. The features
    are then labelled, and the center of mass and sum of each peak are
    returned in a numpy array.
    """
    ### 1) Strip Background
    if bkg_remove:
        ylen, xlen = img_raw.shape
        y, x = np.ogrid[0:ylen, 0:xlen]
        radius = (np.rint(((x - xy[0]) ** 2 + (y - xy[1]) ** 2) ** 0.5)).astype(np.int32)
        prof = np.zeros(1 + np.max(radius))
        np.copyto(prof[0:len(profile)], profile)
        bkg = prof[radius]
        img = img_raw - bkg
        img = correct_dead_pixels(img, pxmask, 'replace', replace_val=-1, mask_gaps=True)
    else:
        img = img_raw
        bkg = None

    ### 2) Remove Impulse Noise
    img = ndimage.morphology.grey_opening(img, structure=noise)
    img = ndimage.morphology.grey_closing(img, structure=noise)

    ### 3) Feature detection NB: astropy.convolve is slow
    img_fil = np.where(img == -1, 0, img)
    img_fil = ndimage.gaussian_filter(img_fil.astype(float), kernel_size, mode='constant', cval=0)
    img_norm = np.where(img == -1, 0, 1)
    img_norm = ndimage.gaussian_filter(img_norm.astype(float), kernel_size, mode='constant', cval=0)
    img_norm = np.where(img_norm == 0.0, 1, img_norm)
    img_fil = img_fil / img_norm
    img_feat = img - img_fil

    ### 4) Peak Picking
    loc = np.where(img_feat > threshold)
    X = np.transpose(np.stack([loc[1], loc[0], np.log(img_feat[loc])]))
    db = DBSCAN(eps=db_eps, min_samples=db_samples, n_jobs=-1).fit(X)
    img_label = np.zeros(img.shape)
    img_label[loc] = 1 + db.labels_
    num_feat = len(set(db.labels_))

    ### 5) Peak Labelling
    com = ndimage.center_of_mass(img_feat, img_label, np.arange(1, num_feat))
    vol = ndimage.sum(img_feat, img_label, np.arange(1, num_feat))

    ### 6) Apply radial cut to peaks
    rad = np.sqrt(np.sum(np.square(np.array(com) - xy[::-1]), axis=1))
    rad_cut = rad > radial_min
    com = np.array(com)[rad_cut]
    vol = vol[rad_cut]
    img_label[img_label - 1 == np.where(rad < radial_min)] = 0

    print("Found {} peaks".format(len(com)))

    ### 7) Clean Image
    if img_clean:
        img[img_label == 0] = 0

    return np.column_stack((com, vol)), img
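# Hedged sketch (toy data only) of the peak-labelling step above: once peaks are
# labelled, the centre of mass and integrated intensity of each one come from a
# single scipy.ndimage call apiece.
import numpy as np
from scipy import ndimage

img_feat = np.zeros((32, 32))
img_feat[5:8, 5:8] = 3.0
img_feat[20:24, 10:13] = 7.0

img_label, num_feat = ndimage.label(img_feat > 0)
idx = np.arange(1, num_feat + 1)
com = ndimage.center_of_mass(img_feat, img_label, idx)
vol = ndimage.sum(img_feat, img_label, idx)
print(np.column_stack((com, vol)))  # one row per peak: (row, col, integrated intensity)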
def run(self, rinput):
    self.logger.info('starting slit processing')

    self.logger.info('basic image reduction')

    flow = self.init_filters(rinput)
    hdulist = basic_processing_with_combination(rinput, flow=flow)
    hdr = hdulist[0].header
    self.set_base_headers(hdr)

    try:
        rotang = hdr['ROTANG']
        detpa = hdr['DETPA']
        dtupa = hdr['DTUPA']
        dtub, dtur = datamodel.get_dtur_from_header(hdr)
    except KeyError as error:
        self.logger.error(error)
        raise RecipeError(error)

    self.logger.debug('finding slits')

    # First, prefilter with median
    median_filter_size = rinput.median_filter_size
    canny_sigma = rinput.canny_sigma
    obj_min_size = rinput.obj_min_size
    obj_max_size = rinput.obj_max_size

    data1 = hdulist[0].data
    self.logger.debug('Median filter with box %d', median_filter_size)
    data2 = median_filter(data1, size=median_filter_size)

    # Grey level image
    img_grey = normalize_raw(data2)

    # Find edges with Canny
    self.logger.debug('Find edges with Canny, sigma %f', canny_sigma)
    # These thresholds correspond roughly with value x (2**16 - 1)
    high_threshold = rinput.canny_high_threshold
    low_threshold = rinput.canny_low_threshold
    self.logger.debug('Find edges, Canny high threshold %f', high_threshold)
    self.logger.debug('Find edges, Canny low threshold %f', low_threshold)
    edges = canny(img_grey, sigma=canny_sigma,
                  high_threshold=high_threshold,
                  low_threshold=low_threshold)

    # Fill edges
    self.logger.debug('Fill holes')
    fill_slits = ndimage.binary_fill_holes(edges)

    self.logger.debug('Label objects')
    label_objects, nb_labels = ndimage.label(fill_slits)
    self.logger.debug('%d objects found', nb_labels)

    # Filter on the area of the labeled region
    # Perhaps we could ignore this filtering and do it later?
    self.logger.debug('Filter objects by size')
    # Sizes of regions
    sizes = numpy.bincount(label_objects.ravel())

    self.logger.debug('Min size is %d', obj_min_size)
    self.logger.debug('Max size is %d', obj_max_size)

    mask_sizes = (sizes > obj_min_size) & (sizes < obj_max_size)

    # Filter out regions
    nids, = numpy.where(mask_sizes)

    mm = numpy.in1d(label_objects, nids)
    mm.shape = label_objects.shape

    fill_slits_clean = numpy.where(mm, 1, 0)
    # plt.imshow(fill_slits_clean)

    # and relabel
    self.logger.debug('Label filtered objects')
    relabel_objects, nb_labels = ndimage.label(fill_slits_clean)
    self.logger.debug('%d objects found after filtering', nb_labels)

    ids = list(six.moves.range(1, nb_labels + 1))

    self.logger.debug('Find regions and centers')
    regions = ndimage.find_objects(relabel_objects)
    centers = ndimage.center_of_mass(data2, labels=relabel_objects, index=ids)

    table = char_slit(data2, regions, slit_size_ratio=rinput.slit_size_ratio)

    result = self.create_result(frame=hdulist, slitstable=table,
                                DTU=dtub,
                                ROTANG=rotang,
                                DETPA=detpa,
                                DTUPA=dtupa)

    return result
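# Compact sketch (synthetic mask, assumed size limits) of the size filter used
# above: np.bincount gives per-label pixel counts, np.in1d keeps wanted labels,
# and the surviving regions are relabelled before measuring centres.
import numpy as np
from scipy import ndimage

mask = np.zeros((30, 30), dtype=bool)
mask[2:4, 2:4] = True        # 4 px   -> too small
mask[10:20, 10:20] = True    # 100 px -> kept
mask[25:30, 0:30] = True     # 150 px -> too big

label_objects, nb_labels = ndimage.label(mask)
sizes = np.bincount(label_objects.ravel())
obj_min_size, obj_max_size = 10, 120
mask_sizes = (sizes > obj_min_size) & (sizes < obj_max_size)
nids, = np.where(mask_sizes)
keep = np.in1d(label_objects, nids).reshape(label_objects.shape)

relabel, nb = ndimage.label(keep)
centers = ndimage.center_of_mass(mask.astype(float), relabel, range(1, nb + 1))
print(nb, centers)   # 1 region kept, centred near (14.5, 14.5)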