def closing():
    image_list = get_one_imagefrom_mnist()
    image_array = np.asarray(image_list)
    image = image_array.reshape(28, 28)
    # Assign the result; binary_closing does not operate in place.
    image = ndimage.binary_closing(image, structure=np.ones((2, 2))).astype(int)
    plt.imshow(image, cmap=cm.binary)
    plt.show()
def binaryClosing(binarydata, structure=None, iterations=1):
    result = np.zeros_like(binarydata)
    if structure is None:
        ndimage.binary_closing(binarydata, iterations=iterations, output=result)
    else:
        ndimage.binary_closing(binarydata, structure=structure,
                               iterations=iterations, output=result)
    return result
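# A minimal usage sketch for the wrapper above; the toy input is hypothetical.
# Closing fills holes/gaps smaller than the structuring element.
import numpy as np
from scipy import ndimage

square = np.zeros((7, 7), dtype=int)
square[1:6, 1:6] = 1
square[3, 3] = 0  # punch a one-pixel hole
closed = binaryClosing(square, structure=np.ones((3, 3)))
assert closed[3, 3] == 1  # the hole is filled by the closing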
def refine_worm(image, initial_area, candidate_edges):
    # find strong worm edges (roughly equivalent to the edges found by find_initial_worm,
    # which are in candidate_edges): smooth the image, do canny edge-finding, and
    # then keep only those edges near candidate_edges
    smooth_image = restoration.denoise_tv_bregman(image, 140).astype(numpy.float32)
    smoothed, gradient, sobel = canny.prepare_canny(smooth_image, 8, initial_area)
    local_maxima = canny.canny_local_maxima(gradient, sobel)
    candidate_edge_region = ndimage.binary_dilation(candidate_edges, iterations=4)
    strong_edges = local_maxima & candidate_edge_region

    # Now threshold the image to find dark blobs as our initial worm region
    # First, find areas in the initial region unlikely to be worm pixels
    mean, std = mcd.robust_mean_std(smooth_image[initial_area][::4], 0.85)
    non_worm = (smooth_image > mean - std) & initial_area
    # now fit a smoothly varying polynomial to the non-worm pixels in the initial
    # region of interest, and subtract that from the actual image to generate
    # an image with a flat illumination field
    background = polyfit.fit_polynomial(smooth_image, mask=non_worm, degree=2)
    minus_bg = smooth_image - background
    # now recalculate a threshold from the background-subtracted pixels
    mean, std = mcd.robust_mean_std(minus_bg[initial_area][::4], 0.85)
    initial_worm = (minus_bg < mean - std) & initial_area
    # Add any pixels near the strong edges to our candidate worm position
    initial_worm |= ndimage.binary_dilation(strong_edges, iterations=3)
    initial_worm = mask.fill_small_radius_holes(initial_worm, 5)

    # Now grow/shrink the initial_worm region so that as many of the strong
    # edges from the canny filter are in contact with the region edges as possible.
    ac = active_contour.EdgeClaimingAdvection(initial_worm, strong_edges,
                                              max_region_mask=initial_area)
    stopper = active_contour.StoppingCondition(ac, max_iterations=100)
    while stopper.should_continue():
        ac.advect(iters=1)
        ac.smooth(iters=1, depth=2)
    worm_mask = mask.fill_small_radius_holes(ac.mask, 7)

    # Now, get edges from the image at a finer scale
    smoothed, gradient, sobel = canny.prepare_canny(smooth_image, 0.3, initial_area)
    local_maxima = canny.canny_local_maxima(gradient, sobel)
    strong_sum = strong_edges.sum()
    highp = 100 * (1 - 1.5 * strong_sum / local_maxima.sum())
    lowp = max(100 * (1 - 3 * strong_sum / local_maxima.sum()), 0)
    low_worm, high_worm = numpy.percentile(gradient[local_maxima], [lowp, highp])
    fine_edges = canny.canny_hysteresis(local_maxima, gradient, low_worm, high_worm)

    # Expand out the identified worm area to include any of these finer edges
    # (S is a structuring element defined at module level alongside this function)
    closed_edges = ndimage.binary_closing(fine_edges, structure=S)
    worm = ndimage.binary_propagation(worm_mask, mask=worm_mask | closed_edges, structure=S)
    worm = ndimage.binary_closing(worm, structure=S, iterations=2)
    worm = mask.fill_small_radius_holes(worm, 5)
    worm = ndimage.binary_opening(worm)
    worm = mask.get_largest_object(worm)

    # Last, smooth the shape a bit to reduce sharp corners, but not too much to
    # sand off the tail
    ac = active_contour.CurvatureMorphology(worm, max_region_mask=initial_area)
    ac.smooth(depth=2, iters=2)
    return strong_edges, ac.mask
def get_uv_mask(vertices_vis, triangles, uv_coords, h, w, resolution):
    triangles = triangles.T
    vertices_vis = vertices_vis.astype(np.float32)
    uv_mask = render_texture(uv_coords.T, vertices_vis[np.newaxis, :], triangles,
                             resolution, resolution, 1)
    uv_mask = np.squeeze(uv_mask > 0)
    uv_mask = ndimage.binary_closing(uv_mask)
    uv_mask = ndimage.binary_erosion(uv_mask, structure=np.ones((4, 4)))
    uv_mask = ndimage.binary_closing(uv_mask)
    uv_mask = ndimage.binary_erosion(uv_mask, structure=np.ones((4, 4)))
    uv_mask = ndimage.binary_erosion(uv_mask, structure=np.ones((4, 4)))
    uv_mask = ndimage.binary_erosion(uv_mask, structure=np.ones((4, 4)))
    uv_mask = uv_mask.astype(np.float32)
    return np.squeeze(uv_mask)
def compute_mask(aparc, labels=[0, 5000]):
    import nibabel as nb
    import numpy as np
    import os.path as op
    import scipy.ndimage as nd

    segnii = nb.load(aparc)
    seg = segnii.get_data()
    mask = np.ones_like(seg, dtype=np.uint8)
    for l in labels:
        mask[seg == l] = 0

    struct = nd.iterate_structure(nd.generate_binary_structure(3, 1), 4)
    mask = nd.binary_dilation(mask, structure=struct).astype(np.uint8)
    mask = nd.binary_closing(mask, structure=struct)
    mask = nd.binary_fill_holes(mask, structure=struct).astype(np.uint8)
    mask[mask > 0] = 1
    mask[mask <= 0] = 0

    hdr = segnii.get_header().copy()
    hdr.set_data_dtype(np.uint8)
    hdr.set_xyzt_units("mm", "sec")
    out_file = op.abspath("nobstem_mask.nii.gz")
    nb.Nifti1Image(mask, segnii.get_affine(), hdr).to_filename(out_file)
    return out_file
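# For reference, the structuring element built in compute_mask above is a
# 3-D cross (connectivity 1) dilated with itself 4 times, i.e. a diamond
# spanning a 9x9x9 cube:
from scipy import ndimage as nd
struct = nd.iterate_structure(nd.generate_binary_structure(3, 1), 4)
print(struct.shape)  # (9, 9, 9)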
def main():
    usage = "Look for holes in a 3d density map.\n\tfindholesinmap.py map.mrc\n**********scipy is required!*********"
    parser = EMArgumentParser(usage=usage, version=EMANVERSION)
    parser.add_argument("--thr", type=float, help="Threshold for the isosurface", default=1)
    parser.add_argument("--closeiter", type=int, help="Number of iterations for the closing operation", default=10)
    parser.add_argument("--filter_res", type=float, help="Resolution for the final filter", default=10)
    parser.add_argument("--output", type=str, help="output file name", default=None)
    (options, args) = parser.parse_args()
    logid = E2init(sys.argv)

    e = EMData(args[0])
    img = e.numpy()
    if options.output is None:
        options.output = args[0][:-4] + "_holes.hdf"
    apix = e["apix_x"]
    img_open = ndimage.binary_closing(img > options.thr, iterations=options.closeiter)
    m = img.copy()
    m[m < 0] = 0
    m /= np.max(m)
    hole = img_open - m
    a = from_numpy(hole)
    a["apix_x"] = apix
    a["apix_y"] = apix
    a["apix_z"] = apix
    a.process_inplace("filter.lowpass.gauss", {"cutoff_freq": 1. / options.filter_res})
    a.write_image(options.output)
    E2end(logid)
def findNeuron(bgImage, threshold, xNeuron, yNeuron):
    """Find bright object in small ROI image."""
    mask = np.where(bgImage > threshold[0], 1, 0)
    mask = ndimage.binary_opening(mask, structure=np.ones((2, 2)))
    mask = ndimage.binary_closing(mask)
    # --- Individually label all connected regions and get their center of mass
    label_im, nb_labels = ndimage.label(mask)
    centroids = ndimage.measurements.center_of_mass(bgImage, label_im,
                                                    xrange(1, nb_labels + 1))
    # --- select brightest object by default (mean brightness)
    meanBrightness = ndimage.measurements.mean(bgImage, label_im,
                                               xrange(1, nb_labels + 1))
    # # --- Calculate the distance of each new cms to the old neuron position
    # # --- and select the new neuron position to be the object closest to
    # # --- the old location
    # dist = []
    # for coords in centroids:
    #     dist.append((coords[0]-yNeuron)**2 + (coords[1]-xNeuron)**2)
    # if len(dist) == 0:
    #     yNewNeuron, xNewNeuron = yNeuron, xNeuron
    # else:
    #     loc = np.argmin(dist)
    #     yNewNeuron, xNewNeuron = centroids[loc]
    if nb_labels > 1:
        loc = np.argmax(meanBrightness)
        yNewNeuron, xNewNeuron = centroids[loc]
    else:
        yNewNeuron, xNewNeuron = yNeuron, xNeuron
        loc = -1
    neuronObject = np.where(label_im == loc + 1, 0, 1)
    neuronArea = np.sum(neuronObject)
    # --- Get average of the neuron fluorescence ---
    tmp_neuron = np.ma.masked_array(bgImage, neuronObject)
    newNeuronAverage = np.ma.average(tmp_neuron[tmp_neuron > threshold[1]])
    return yNewNeuron, xNewNeuron, newNeuronAverage, neuronArea, neuronObject
def findCom(self, data):
    data = data.astype(int)
    if self.update_counter >= 5:
        self.update_counter = 0
        ##########################################################################
        ## Update the background image, adding a new image and removing the oldest.
        ##########################################################################
        self.background_list.insert(0, data)
        self.background_list.pop()
        background = np.zeros((480, 640, 3), dtype=int)
        for b in self.background_list:
            background += b
        self.background = background / len(self.background_list)
    ############################################################################
    ## Detect foreground by looking at difference from mean.
    ############################################################################
    foreground = np.sum(np.abs(np.subtract(self.background, data)), axis=2)
    falseImage = foreground
    ## clean foreground image
    falseImage[falseImage > 100] = 255
    falseImage[falseImage < 101] = 0
    falseImage = ndimage.binary_opening(falseImage)
    falseImage = ndimage.binary_closing(falseImage)
    com = ndimage.measurements.center_of_mass(falseImage)
    self.update_counter += 1
    return com
def xy_map_to_np_image(xy_map, m_per_pixel, dilation_count=0, padding=50):
    ''' returns binary numpy image. (255 for occupied pixels, 0 for unoccupied)
        2d array
    '''
    min_x = np.min(xy_map[0, :])
    max_x = np.max(xy_map[0, :])
    min_y = np.min(xy_map[1, :])
    max_y = np.max(xy_map[1, :])
    br = np.matrix([min_x, min_y]).T

    n_x = int(round((max_x - min_x) / m_per_pixel)) + padding
    n_y = int(round((max_y - min_y) / m_per_pixel)) + padding
    img = np.zeros((n_x + padding, n_y + padding), dtype='int')
    occupied_pixels = np.matrix([n_x, n_y]).T - np.round((xy_map - br) / m_per_pixel).astype('int')

    if dilation_count == 0:
        img[(occupied_pixels[0, :], occupied_pixels[1, :])] = 255
    else:
        img[(occupied_pixels[0, :], occupied_pixels[1, :])] = 1
        connect_structure = np.empty((3, 3), dtype='int')
        connect_structure[:, :] = 1
        img = ni.binary_closing(img, connect_structure, iterations=dilation_count)
        img = ni.binary_dilation(img, connect_structure, iterations=1)
        img = img * 255

    return img, n_x, n_y, br
def get_difference_spots(pix):
    bpix = pix > 20
    bpix = ndimage.binary_opening(bpix)
    bpix = ndimage.binary_closing(bpix)
    labels, n = ndimage.measurements.label(bpix)
    clicks = ndimage.measurements.center_of_mass(pix, labels, range(1, n + 1))
    return clicks
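# Hypothetical use of get_difference_spots: locate where two frames differ.
# frame_a and frame_b stand in for two same-shape grayscale captures.
import numpy as np

frame_a = np.zeros((50, 50))
frame_b = frame_a.copy()
frame_b[20:25, 30:35] = 100.0  # a synthetic change between frames
spots = get_difference_spots(np.abs(frame_b - frame_a))
print(spots)  # one centroid near (22.0, 32.0)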
def detect_vortices(cloud, radius=70, showplots=False):
    """
    Detects whether there are vortex-like features within a given radius
    of the peak density in the TOF image of an expanded BEC
    """
    OD = cloud.get_OD()
    peak_coord = cloud.results['peak coordinates']
    center_region = ROI(center=peak_coord,
                        size=(1.5 * radius, 1.5 * radius)).slices
    smooth_cloud = ndi.median_filter(OD[center_region], size=4)
    minOD = smooth_cloud.min()
    maxOD = smooth_cloud.max()
    cloud_median = ndi.median_filter(smooth_cloud, size=10)
    belowthresh = where(smooth_cloud < cloud_median * 0.75, 1, 0)
    opened = ndi.binary_opening(belowthresh, iterations=1)
    closed = ndi.binary_closing(opened, iterations=1)
    vort_found = ndi.label(closed)[1]
    cloud.results['vort_found'] = vort_found
    if showplots == True:
        fig = plt.figure(1999)
        fig.add_subplot(221, xticks=[], yticks=[])
        plt.imshow(smooth_cloud, interpolation='nearest',
                   vmin=minOD, vmax=maxOD)
        fig.add_subplot(222, xticks=[], yticks=[])
        plt.imshow(cloud_median, interpolation='nearest',
                   vmin=minOD, vmax=maxOD)
        fig.add_subplot(223, xticks=[], yticks=[])
        plt.imshow(closed, interpolation='nearest',
                   cmap=plt.cm.get_cmap('binary'))
        fig.add_subplot(224, xticks=[], yticks=[])
        plt.imshow(belowthresh, interpolation='nearest',
                   cmap=plt.cm.get_cmap('binary'))
    return vort_found
def detect_current(cloud, showplots=False):
    """
    Detects whether there is a vortex-like signature of persistent
    current in the center of a TOF image of an expanded ring BEC
    """
    OD = cloud.get_OD()
    peak_coord = cloud.results['peak coordinates']
    center_region = ROI(center=peak_coord, size=(40, 40)).slices
    cloud_center = ndi.median_filter(OD[center_region], size=2)
    minOD = cloud_center.min()
    maxOD = cloud_center.max()
    cloud_median = ndi.median_filter(cloud_center, size=10)
    belowthresh = where(cloud_center < cloud_median * 0.75, 1, 0)
    opened = ndi.binary_opening(belowthresh, iterations=1)
    closed = ndi.binary_closing(opened, iterations=3)
    current_found = ndi.label(closed)[1]
    cloud.results['current_found'] = current_found
    if showplots == True:
        fig = plt.figure(1999)
        fig.add_subplot(221, xticks=[], yticks=[])
        plt.imshow(cloud_center, interpolation='nearest',
                   vmin=minOD, vmax=maxOD)
        fig.add_subplot(222, xticks=[], yticks=[])
        plt.imshow(cloud_median, interpolation='nearest',
                   vmin=minOD, vmax=maxOD)
        fig.add_subplot(223, xticks=[], yticks=[])
        plt.imshow(closed, interpolation='nearest',
                   cmap=plt.cm.get_cmap('binary'))
        fig.add_subplot(224, xticks=[], yticks=[])
        plt.imshow(belowthresh, interpolation='nearest',
                   cmap=plt.cm.get_cmap('binary'))
    return current_found, asum(closed)
def plot_mask(mask, plot_axis=None, color='#ff0000', closing_iteration=None, **kwargs):
    '''
    plot mask (ROI) borders by using pyplot.contour function. all the 0s and
    NaNs in the input mask will be considered as background, and non-zero,
    non-nan pixels will be considered in ROI.
    '''
    if not check_binary_2d_array(mask):
        raise ValueError('input mask should be a 2d binary numpy.ndarray with '
                         'dtype as integer and contain only 0s and 1s.')

    if not plot_axis:
        f = plt.figure()
        plot_axis = f.add_subplot(111)

    if closing_iteration is not None:
        ploting_mask = ni.binary_closing(mask, iterations=closing_iteration).astype(np.uint8)
    else:
        ploting_mask = mask

    currfig = plot_axis.contourf(ploting_mask, levels=[0.5, 1], colors=color, **kwargs)

    # put y axis in decreasing order
    y_lim = list(plot_axis.get_ylim())
    y_lim.sort()
    plot_axis.set_ylim(y_lim[::-1])
    plot_axis.set_aspect('equal')
    return currfig
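# A quick, hypothetical exercise of plot_mask on a synthetic disk mask
# (check_binary_2d_array, plt and ni are assumed importable from the same module):
import numpy as np

yy, xx = np.mgrid[:64, :64]
disk_mask = (((yy - 32) ** 2 + (xx - 32) ** 2) < 15 ** 2).astype(np.uint8)
plot_mask(disk_mask, color='#00ff00', closing_iteration=2)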
def fetch_icbm152_brain_gm_mask(data_dir=None, threshold=0.2, resume=True, verbose=1):
    """Downloads ICBM152 template first, then loads 'gm' mask image.

    .. versionadded:: 0.2.5

    Parameters
    ----------
    data_dir: str, optional
        Path of the data directory. Used to force storage in a specified
        location. Defaults to None.

    threshold: float, optional
        Values of the grey-matter template above this threshold will be
        included in the mask. Defaults to 0.2 (one fifth of values).

    resume: bool, optional
        If True, try resuming partially downloaded data. Defaults to True.

    verbose: int, optional
        Verbosity level (0 means no message).

    Returns
    -------
    gm_mask_img: Nifti image
        Corresponding to brain grey matter from ICBM152 template.

    Notes
    -----
    This function relies on ICBM152 templates, where we particularly pick
    the grey matter template and threshold it at .2 to take one fifth of
    the values. Then, a bit of post-processing such as a binary closing
    operation is done to obtain a more compact mask image.

    Note: It is advised to check the mask image with your own data processing.

    See Also
    --------
    nilearn.datasets.fetch_icbm152_2009: for details regarding the ICBM152
        template.

    nilearn.datasets.load_mni152_template: for details about version of the
        MNI152 template and related.
    """
    # Fetching ICBM152 grey matter mask image
    icbm = fetch_icbm152_2009(data_dir=data_dir, resume=resume, verbose=verbose)
    gm = icbm['gm']
    gm_img = check_niimg(gm)
    gm_data = niimg._safe_get_data(gm_img)

    # getting one fifth of the values
    gm_mask = (gm_data > threshold)
    gm_mask = ndimage.binary_closing(gm_mask, iterations=2)
    gm_mask_img = new_img_like(gm_img, gm_mask)
    return gm_mask_img
def adaptive_segment(args):
    """
    Applies an adaptive threshold to reconstructed data.

    Also known as local or dynamic thresholding, where the threshold value is
    the weighted mean for the local neighborhood of a pixel subtracted by a
    constant. Alternatively the threshold can be determined dynamically by a
    given function using the 'generic' method.

    Parameters
    ----------
    data : ndarray, float32
        3-D reconstructed data with dimensions:
        [slices, pixels, pixels]

    block_size : scalar, int
        Uneven size of pixel neighborhood which is used to calculate the
        threshold value (e.g. 3, 5, 7, ..., 21, ...).

    offset : scalar, float
        Constant subtracted from weighted mean of neighborhood to calculate
        the local threshold value. Default offset is 0.

    Returns
    -------
    output : ndarray
        Thresholded data.

    References
    ----------
    - `http://scikit-image.org/docs/dev/auto_examples/plot_threshold_adaptive.html \
    <http://scikit-image.org/docs/dev/auto_examples/plot_threshold_adaptive.html>`_
    """
    # Arguments passed by multi-processing wrapper
    ind, dshape, inputs = args

    # Function inputs
    data = mp.tonumpyarray(mp.shared_arr, dshape)  # shared-array
    block_size, offset = inputs

    for m in ind:
        img = data[m, :, :]

        # Perform scikit adaptive thresholding.
        img = threshold_adaptive(img, block_size=block_size, offset=offset)

        # Remove small white regions
        img = ndimage.binary_opening(img)

        # Remove small black holes
        img = ndimage.binary_closing(img)

        data[m, :, :] = img
def morph_sequence(pix, *param):
    for oc, wd, ht in param:
        logi(" Performing Morph : ", oc, wd, ht)
        structure = np.ones((ht, wd))
        if oc == "c":
            pix = binary_closing(pix, structure)
        elif oc == "o":
            pix = binary_opening(pix, structure)
    return pix
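# Hypothetical call of morph_sequence (logi and the bare binary_closing /
# binary_opening imports are assumed to come from the surrounding module).
# Each triple is (op, width, height) of the rectangular structuring element:
# close horizontal gaps with a 1x3 element, then open with the same element.
import numpy as np

toy = np.zeros((8, 8), dtype=bool)
toy[4, 1:4] = toy[4, 5:8] = True  # a dashed horizontal line
joined = morph_sequence(toy, ("c", 3, 1), ("o", 3, 1))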
def breakup_region(component):
    distance = ndi.distance_transform_edt(component)
    skel = skeletonize(component)
    skeldist = distance * skel
    local_maxi = peak_local_max(skeldist, indices=False, footprint=disk(10))
    local_maxi = ndi.binary_closing(local_maxi, structure=disk(4), iterations=2)
    markers = ndi.label(local_maxi)[0]
    labels = watershed(-distance, markers, mask=component)
    return labels
def masked_slic(img, mask, n_segments, compactness):
    labels = slic(img, n_segments=n_segments, compactness=compactness)
    labels += 1
    n_labels = len(np.unique(labels))
    try:
        mask = ndi.binary_closing(mask, structure=np.ones((3, 3)), iterations=1)
    except IndexError, e:
        rospy.logerr(e)
        return
def main():
    i, h = load(sys.argv[1])
    i = i.copy()
    i = binary_closing(i, iterations=1)
    i = morphology2d(binary_closing, i, iterations=4)
    i = fill2d(i)
    save(i, sys.argv[1], h)
def masked_slic(img, mask, n_segments, compactness):
    labels = slic(img, n_segments=n_segments, compactness=compactness)
    labels += 1
    n_labels = len(np.unique(labels))
    mask = ndi.binary_closing(mask, structure=np.ones((3, 3)), iterations=1)
    labels[mask == 0] = 0  # set bg_label
    if len(np.unique(labels)) < n_labels - 2:
        sys.stderr.write('WARNING: number of label differs after masking.'
                         ' Maybe this is not good for RAG construction.\n')
    return labels
def removeGrid(self, cs, removeGrid):
    """
    Detect the grid of the phantom and remove it from the image
    """
    shift = int(1. / self.pixDim(cs) + .5)

    # try to find a threshold on pixelvalue to define a value representing the grid
    maskval = 0.75 * cs.pixeldataIn.mean()
    # make a mask of grid-like values
    mask = (cs.pixeldataIn < maskval)
    # hole closing of the mask
    mask = scind.binary_closing(mask, structure=np.ones((5, 5)))
    mask = scind.binary_opening(mask, structure=np.ones((5, 5)))
    mask = scind.binary_dilation(mask)
    # new since 20150211
    mask = scind.binary_dilation(mask)

    # fill the gridlines with the median values of the pixels around it
    medimage = np.roll(cs.pixeldataIn, shift, axis=0).astype(float)
    dest = cs.pixeldataIn + mask * (medimage - cs.pixeldataIn)
    # repeat to remove propagated mask # new since 20150211
    medimage = np.roll(dest, shift, axis=0).astype(float)
    dest = cs.pixeldataIn + mask * (medimage - cs.pixeldataIn)
    medimage = None

    cs.gridimage = mask.astype(float)
    mask = None

    # find gridobject
    gridobject = scind.binary_fill_holes(cs.gridimage)
    label_im, nb_labels = scind.label(gridobject)
    sizes = scind.sum(gridobject, label_im, range(nb_labels + 1))
    gridobject = None

    # Clean up small connect components:
    mask_size = sizes < max(sizes)  # (100/self.pixDim())**2
    remove_pixel = mask_size[label_im]
    label_im[remove_pixel] = 0

    # Now reassign labels with np.searchsorted:
    labels = np.unique(label_im)
    label_im = np.searchsorted(labels, label_im)

    medimage = np.roll(dest, shift, axis=0).astype(float)
    dest += cs.gridimage * (medimage - dest)
    medimage = None

    cs.gridimage *= label_im

    if -1 > 0:  # remove everything outside grid (disabled)
        wid = dest.shape[0]
        hei = dest.shape[1]
        mv = np.mean(dest[wid / 4:3 * wid / 4, hei / 4:3 * hei / 4])
        dest = label_im * (dest - mv) + mv

    if removeGrid:
        cs.pixeldataIn = dest
def medianClip(self, thr=3.0, medfiltersize=5, minaxislength=5, minSegment=50):
    """ Median clipping for segmentation
    Based on Lasseck's method
    This version only clips in time, ignoring frequency
    And it opens up the segments to be maximal (so assumes no overlap).
    The multitaper spectrogram helps a lot
    """
    sg = self.sg / np.max(self.sg)

    # This next line gives an exact match to Lasseck, but screws up bitterns!
    # sg = sg[4:232, :]

    rowmedians = np.median(sg, axis=1)
    colmedians = np.median(sg, axis=0)

    clipped = np.zeros(np.shape(sg), dtype=int)
    for i in range(np.shape(sg)[0]):
        for j in range(np.shape(sg)[1]):
            if (sg[i, j] > thr * rowmedians[i]) and (sg[i, j] > thr * colmedians[j]):
                clipped[i, j] = 1

    # This is the stencil for the closing and dilation. It's a 5x5 diamond.
    # Can also use a 3x3 diamond
    diamond = np.zeros((5, 5), dtype=int)
    diamond[2, :] = 1
    diamond[:, 2] = 1
    diamond[1, 1] = diamond[1, 3] = diamond[3, 1] = diamond[3, 3] = 1
    # diamond[2, 1:4] = 1
    # diamond[1:4, 2] = 1

    import scipy.ndimage as spi
    clipped = spi.binary_closing(clipped, structure=diamond).astype(int)
    clipped = spi.binary_dilation(clipped, structure=diamond).astype(int)
    clipped = spi.median_filter(clipped, size=medfiltersize)
    clipped = spi.binary_fill_holes(clipped)

    import skimage.measure as skm
    blobs = skm.regionprops(skm.label(clipped.astype(int)))

    # Delete blobs that are too small
    todelete = []
    for i in blobs:
        if i.filled_area < minSegment or i.minor_axis_length < minaxislength:
            todelete.append(i)
    for i in todelete:
        blobs.remove(i)

    # renamed from 'list' to avoid shadowing the builtin
    segments = []
    # convert bounding box pixels to milliseconds:
    for l in blobs:
        segments.append([float(l.bbox[0] * self.incr / self.fs),
                         float(l.bbox[2] * self.incr / self.fs)])
    return segments
def get_peaks(self, bin_image, r1, r2, threshold):
    """
    Inputs
    ------
    bin_image: Binary image
    r1, r2: Locations in the dispersion direction to collapse over.
    threshold: Lower limit. Any value in the collapsed section greater
        than this is a potential edge.

    Output
    ------
    peaks_location: A list with peaks' pixel locations.

    Get edge locations from a FLAT spectrum that is nearly horizontal or
    nearly vertical, as those of GMOS, F2 and GNIRS are. Sum (collapse) all
    the pixels in the dispersion direction between locations r1 and r2.
    Get the indices of values in the sum ndarray that are higher than
    threshold.
    """
    # Collapsing in the dispersion direction r2-r1 rows/cols might
    # result in spreading when the edges are slanted as in the case
    # of GNIRS, resulting in short sections of a few pixels
    # (the spreading) with value greater than one.

    # From r1, r2 form image slices to get the sections to be collapsed.
    slice_y, slice_x = self.get_slices(r1, r2)
    line = np.sum(bin_image[slice_y, slice_x], axis=(not self.axis))

    # This line is one-pixel thick with values greater than one
    # for those sections containing potential peaks. Pick those
    # and change the values to one.
    binary_line = np.where(line > threshold, 1, 0)

    # Make sure there are no holes in these short sections.
    binary_line = nd.binary_closing(binary_line)

    # Put labels on each of these continuous short sections.
    # 'label' puts a different integer value on each different
    # section.
    labels, nlabels = nd.label(binary_line)

    # Get the first element of each section as the position
    # of the edge at this r1 location in the dispersion
    # direction.
    if nlabels <= 1:
        return []
    peaks = [np.where(labels == k)[0][0] for k in range(1, nlabels + 1)]
    return np.asarray(peaks)
def find_labels(img, threshold=90):
    """Returns a 2-tuple containing the labeled image, and the number of labels.

    Assumes img has already been cropped to the neuron.
    """
    height, width = img.shape
    # Isolate brightest peaks
    mask = np.where(img > threshold, 1, 0)
    mask = ndimage.binary_opening(mask, structure=np.ones((2, 2)))
    mask = ndimage.binary_closing(mask)
    return ndimage.label(mask)
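# A hypothetical call of find_labels on a synthetic image with two bright blobs:
import numpy as np

img = np.zeros((40, 40))
img[5:10, 5:10] = 200.0
img[25:32, 20:27] = 150.0
labeled, n = find_labels(img, threshold=90)
print(n)  # 2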
def adjust_spot_positions(image, label_image, hp, debug=None):
    """Re-evaluate the spot positions based on the segmentation.

    Parameters:
    image: The original image (can be masked) that was sent to findspot3d
    label_image: the label image containing two labels
    hp: the original hotpoints
    debug: set to true to write out an image debugimg.nii.gz with the stuff
    """
    struct2 = generate_binary_structure(3, 2)
    struct1 = generate_binary_structure(3, 1)
    peak_points = []

    if debug is None:
        temp_path = os.getenv("PYSBR_TEMP")
        if temp_path is not None:
            debug = os.path.join(temp_path, "debug-labels.nii.gz")
    if debug is not None:
        debimg = image.copy()

    nlabels = label_image.max()
    if nlabels != len(hp):
        raise RuntimeError('number of labels and hotspots should be the same')

    tins = []
    for n in range(nlabels):
        label = n + 1
        area = binary_closing(label_image == label, struct2)
        thiniter = np.sum(area.reshape(-1)) / 1500 + 1
        csbr.thinning3d(area, thiniter)
        tins.append(area)

    for n in range(nlabels):
        label = n + 1
        # avoid that a single pixel breaks the evaluation by running a closing
        area = label_image == label
        # evaluate the boundary
        dmask = binary_dilation(area, struct1)
        border = np.bitwise_xor(dmask, area)
        p = adjust_spot_position(image, border, image[tuple(hp[n])],
                                 tins[n], tins[(n + 1) % 2])
        peak_points.append(p)
        if debug is not None:
            debimg[border > 0] = 196
            debimg[p] = 0

    if debug is not None:
        nib.save(nib.Nifti1Image(debimg, global_affine), debug)

    peak_points = np.array(peak_points)
    return peak_points
def smooth_edges(mask, filter_size, min_pixels):
    no_small = mo.remove_small_holes(mask, min_size=min_pixels, connectivity=2)
    open_close = \
        nd.binary_closing(nd.binary_opening(no_small, eight_conn), eight_conn)
    medianed = nd.median_filter(open_close, filter_size)
    return mo.remove_small_holes(medianed, min_size=min_pixels, connectivity=2)
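# eight_conn above is assumed to be the 8-connected structuring element
# (a 3x3 block of ones); a minimal definition would be:
import numpy as np
eight_conn = np.ones((3, 3), dtype=bool)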
def sg_filter(s1, winsize1=15, winsize2=11):
    s1m = ni.median_filter(s1, 11)
    # s1m = s1
    # winsize1 = 15
    # winsize2 = 11

    f1 = savgol_filter(s1m, winsize1, 3)
    f1_std = np.nanstd(s1 - f1)

    if 0:  # calculate weight
        f1_mask = np.abs(s1 - f1) > 2. * f1_std
        f1_mask2 = ni.binary_opening(f1_mask, iterations=int(winsize2 * 0.2))
        f1_mask3 = ni.binary_closing(f1_mask2, iterations=int(winsize2 * 0.2))
        f1_mask4 = ni.binary_dilation(f1_mask3, iterations=int(winsize2))
        weight = ni.gaussian_filter(f1_mask4.astype("d"), winsize2)
    else:
        fd2 = savgol_filter(s1m, winsize1, 3, deriv=2)
        fd2_std = np.std(fd2)
        f1_mask = np.abs(fd2) > 2. * fd2_std
        f1_mask = f1_mask | (s1m < s1m.max() * 0.4)
        f1_mask4 = ni.binary_dilation(f1_mask, iterations=int(winsize2))
        # f1_mask4[:300] = True
        # f1_mask4[-300:] = True
        weight = ni.gaussian_filter(f1_mask4.astype("d"), winsize2 * .5)

    # find a region where deviation is significant
    if np.any(weight):
        weight /= weight.max()
        f2 = savgol_filter(s1m, winsize2, 5)
        f12 = f1 * (1. - weight) + f2 * weight
    else:
        f12 = f1
        weight = np.zeros(f12.shape)

    if 0:
        ax1.cla()
        ax2.cla()
        ax1.plot(f12)
        ax2.plot(s1 - f1, color="0.5")
        ax2.plot(s1 - f12)
        ax2.plot(weight * f1_std * 2)
        ax2.set_ylim(-0.02, 0.02)

    return f12, f1_std
def find_words(self):
    logi("Finding words.")
    brick_ht = self.median_ht // 3 + 1
    brick_wd = self.median_wd // 2 + 1
    horz_buffer = np.zeros((self.ht, brick_wd))
    logi("Dilating vertically by {}. Closing Horz by {}".format(brick_ht, brick_wd))

    # Dilate Vertically
    self.word_closed_arr = nd.binary_dilation(self.arr, np.ones((brick_ht, 1)))

    # Close Horizontal Gaps (Slightly involved process)
    self.word_closed_arr = np.hstack((horz_buffer, self.word_closed_arr, horz_buffer))
    nd.binary_closing(self.word_closed_arr, np.ones((1, brick_wd)),
                      output=self.word_closed_arr)
    self.word_closed_arr = self.word_closed_arr[:, brick_wd:-brick_wd]

    self.word_comps, self.word_labelled_img = get_conn_comp(self.word_closed_arr)
    if False:
        self.word_comps = [c for c in self.word_comps
                           if (c.ht > self.xht / 8 and c.wd > self.xht / 8)]
    self.num_words = len(self.word_comps)
def preprocess_algae_fill(img):
    # Crop the pictures as for raw images.
    # Apply thresholds
    img = filters.threshold_adaptive(img, 25)
    threshold = 0.3
    idx = img > img.max() * threshold
    idx2 = img < img.max() * threshold
    img[idx] = 255
    img[idx2] = 0
    img = ndimage.binary_erosion(img)
    img = ndimage.binary_closing(img)
    return util.img_as_int(img)
def __fast_despeckle(self):
    self.original = ndimage.binary_closing(self.original)
    self.original = np.multiply(self.original, 255)
    self.original = self.original[1:]
    self.original = self.original[:-1]
    for y in range(0, self.original.shape[0]):
        self.original[y][0] = 255
        self.original[y][self.original.shape[1] - 1] = 255
    misc.imsave("test.png", self.original)
def generate_masks(options, img):
    '''
    random cutoff values or int multiplication factors for thresholding
    ('3' below) work only on few datasets. it's best to estimate these things
    based on the size of the feature to mask, and decide on a per-dataset
    basis how harshly to threshold
    '''
    if options.coords != "":
        nx = img["nx"]
        ny = img["ny"]
        sharp_msk = np.zeros((nx, ny)).astype(bool)
        r = old_div(options.goldsize, 2.)
        coords = np.loadtxt(options.coords)
        for c in coords:
            xc = c[0] + 2 * r
            yc = c[1] + 2 * r
            x, y = np.ogrid[-xc:nx - xc, -yc:ny - yc]
            circle = x * x + y * y <= r * r
            sharp_msk = np.logical_or(sharp_msk, circle)
        sharp_msk = sharp_msk.astype(int)
        sharp_msk = from_numpy(sharp_msk.T)
    else:
        img.process_inplace("normalize")
        fourierpixels = old_div(img['nx'], 2)
        cutoffpixels = fourierpixels - old_div(options.goldsize, 2)
        msk = img.process("filter.highpass.gauss", {"cutoff_pixels": cutoffpixels})

        apix = img['apix_x']
        goldsizeinangstroms = apix * options.goldsize
        freq = old_div(1.0, goldsizeinangstroms)
        # c: lowpass shouldn't be arbitrary; rather, use gold size to derive it.
        msk.process_inplace("filter.lowpass.tanh", {"cutoff_freq": freq})

        msk.process_inplace("threshold.clampminmax", {
            "maxval": msk["maximum"],
            "minval": msk["mean"] + options.nsigmas * msk["sigma"],
            "tozero": True
        })  # must be tozero

        # remove dust
        if options.keepdust:
            sharp_msk = msk.copy() * -1
        else:
            nproc = msk.numpy().copy()
            s = np.sqrt(options.goldsize * 2).astype(int)
            se2 = np.ones((s, s))
            nproc = ndimage.binary_closing(nproc, structure=se2).astype(int)
            nproc = ndimage.binary_opening(nproc, structure=se2).astype(int)
            sharp_msk = from_numpy(nproc)

    # grow slightly and create soft mask
    sharp_msk = sharp_msk.process("mask.addshells.gauss", {"val1": 8, "val2": 0})
    soft_msk = sharp_msk.process("mask.addshells.gauss", {"val1": 0, "val2": 8})
    return sharp_msk, soft_msk
def run(self, ips, imgs, para=None):
    strc = np.ones((para['r'], para['r'], para['r']), dtype=np.uint8)
    imgs[:] = ndimg.binary_closing(imgs, strc)
    imgs *= 255
        if sym == '.' or sym.islower():
            break
    else:
        idx = j
        break  # if no break exit
msa = [(header, seq[:idx]) for header, seq in msa]

# Count gaps
gaps = []
for j in range(len(msa[0][1])):
    gap = sum([1 if msa[i][1][j] in ['-', '.'] else 0 for i in range(len(msa))])
    gaps.append(gap)

# Threshold, merge, and size filter to get regions
binary = ndimage.binary_closing(np.array(gaps) < 1, structure=[1, 1, 1])
regions = [region for region, in ndimage.find_objects(ndimage.label(binary)[0])
           if (region.stop - region.start) >= 30]

# Calculate total scores for each sequence over all regions
scores = {header: 0 for header, _ in msa}
for region in regions:
    for i in range(region.start, region.stop):
        # Build model
        model = {aa: 2 * count for aa, count in prior.items()}  # Start with weighted prior "counts"
        for _, seq in msa:
            sym = '-' if seq[i] == '.' else seq[
    # store the raw image data
    PlaneDicom[:, :, lstFilesDCM.index(filenameDCM)] = ds.pixel_array

print('Processing slices...')
for k in range(ConstPixelDims[2]):
    for i in range(ConstPixelDims[0]):
        for j in range(ConstPixelDims[1]):
            if PlaneDicom[i, j, k] > 200 and PlaneDicom[i, j, k] < 15000:
                PlaneDicom[i, j, k] += 15000

fiducials = []
# Use k for the slice index so the corner-pairing loops below
# do not shadow it (the original reused i for both).
for k in range(ConstPixelDims[2]):
    thirdcoord = k * ConstPixelSpacing[2]
    PlaneDicom[:, :, k] = feature.canny(PlaneDicom[:, :, k], sigma=2)
    PlaneDicom[:, :, k] = ndimage.binary_closing(PlaneDicom[:, :, k])
    PlaneDicom[:, :, k] = morphology.skeletonize(PlaneDicom[:, :, k])
    coords = feature.corner_peaks(feature.corner_harris(PlaneDicom[:, :, k]),
                                  min_distance=7)
    new_coords = []
    for i in range(len(coords)):
        for j in range(len(coords)):
            if j < i and numpy.sqrt(
                    ((coords[i][0] - coords[j][0]) * ConstPixelSpacing[0]) ** 2 +
                    ((coords[i][1] - coords[j][1]) * ConstPixelSpacing[1]) ** 2) < 7:
                new_coords.append(coords[i])
                new_coords.append(coords[j])
    new_coords = numpy.array(new_coords)
    if len(new_coords) <= 1:
        continue
    new_coords = numpy.unique(new_coords, axis=0)
for i in range(3):
    posMat[:, :, i] *= (imLabels[objSlices[objectNum]] == objInds[objectNum])
# posMat = removeNoise(posMat, thresh=500)
xyz = posMat[(posMat[:, :, 2] > 0) * (posMat[:, :, 0] != 0), :]

# Edge test
im1 = imgs[0]
im2 = imgs[1]
imgs[0] = np.array(imgs[0], dtype=int16)
imgs[1] = np.array(imgs[1], dtype=int16)
imD = np.array(imgs[0] - imgs[1], dtype=np.int16)
diff = np.diff(np.abs(imD) > 10)
diff = nd.binary_closing(diff, iterations=5)
# diff = nd.binary_dilation(diff, iterations=3)
imshow(im1[:, 0:-1] * (1 - diff))
im = im1[:, 0:-1] * (1 - diff)

'''#################### Test HOG #########################'''
''' Load example (uncompressed) img '''
# saved = np.load('tmpPerson_close.npz')['arr_0'].tolist()
# saved = np.load('tmpPerson1.npz')['arr_0'].tolist()
# objects1 = saved['objects']; labelInds1 = saved['labels']; out1 = saved['out']
# d1 = saved['d']; com1 = saved['com']; featureExt1 = saved['features']
# posMat = saved['posMat']; xyz = saved['xyz']
plt.text(0.57, 0.8, 'histogram', fontsize=20, transform=plt.gca().transAxes)
plt.yticks([])
plt.subplot(133)
plt.imshow(binary_img, cmap=plt.cm.gray, interpolation='nearest')
plt.axis('off')
plt.subplots_adjust(wspace=0.02, hspace=0.3, top=1, bottom=0.1, left=0, right=1)
plt.show()

# <codecell>

open_img = ndimage.binary_opening(binary_img, struct)
close_img = ndimage.binary_closing(open_img, struct)

# <codecell>

imshow(binary_img)

# <codecell>

imshow(open_img)

# <codecell>

imshow(close_img)

# <codecell>
# You can set thresholds to cut the background noise.
# Once you are sure you have all stars included, use a binary threshold.
# (Tip: a threshold of 0.1 seemed to be good, but pick your own)
threshold = 0.15
img_bin = img > threshold

plt.figure(2)
plt.title('img_bin')
plt.imshow(img_bin, cmap='gray', interpolation='none')

# Now with the binary image use opening and closing to bring the stars
# to a compacter format. Take care that no star connects to another
s1 = np.array([[1, 1, 1],
               [1, 1, 1],
               [1, 1, 1]])
img_bin1 = nd.binary_closing(img_bin, structure=s1)

plt.figure(3)
plt.title('img_bin1')
plt.imshow(img_bin1, cmap='gray', interpolation='none')

# Remove isolated pixels around the moon with opening by a 2 pixel structure
s2 = np.array([[0, 0, 0],
               [0, 1, 1],
               [0, 0, 0]])
img_bin2 = nd.binary_opening(img_bin1, structure=s2)

plt.figure(4)
plt.title('img_bin2')
plt.imshow(img_bin2, cmap='gray', interpolation='none')

# play with all the morphological options in ndimage package to increase the
# Run random walker algorithm
# https://scikit-image.org/docs/dev/api/skimage.segmentation.html#skimage.segmentation.random_walker
labels = random_walker(eq_img, markers, beta=10, mode='bf')
plt.imsave("images/markers.jpg", markers)

segm1 = (labels == 1)
segm2 = (labels == 2)
# blank RGB canvas the same size as the denoised image
all_segments = np.zeros((eq_img.shape[0], eq_img.shape[1], 3))
all_segments[segm1] = (1, 0, 0)
all_segments[segm2] = (0, 1, 0)
# plt.imshow(all_segments)

from scipy import ndimage as nd
segm1_closed = nd.binary_closing(segm1, np.ones((3, 3)))
segm2_closed = nd.binary_closing(segm2, np.ones((3, 3)))
all_segments_cleaned = np.zeros((eq_img.shape[0], eq_img.shape[1], 3))
all_segments_cleaned[segm1_closed] = (1, 0, 0)
all_segments_cleaned[segm2_closed] = (0, 1, 0)
plt.imshow(all_segments_cleaned)
plt.imsave("images/random_walker.jpg", all_segments_cleaned)
def brain_masker(in_file, out_file=None, padding=5):
    """Use grayscale morphological operations to obtain a quick mask of EPI data."""
    from pathlib import Path
    import re
    import nibabel as nb
    import numpy as np
    from scipy import ndimage
    from skimage.morphology import ball
    from skimage.filters import threshold_otsu
    from skimage.segmentation import random_walker

    # Load data
    img = nb.load(in_file)
    data = np.pad(img.get_fdata(dtype="float32"), padding)
    hdr = img.header.copy()

    # Cleanup background and invert intensity
    data[data < np.percentile(data[data > 0], 15)] = 0
    data[data > 0] -= data[data > 0].min()
    datainv = -data.copy()
    datainv -= datainv.min()
    datainv /= datainv.max()

    # Grayscale closing to enhance CSF layer surrounding the brain
    closed = ndimage.grey_closing(datainv, structure=ball(1))
    denoised = ndimage.median_filter(closed, footprint=ball(3))
    th = threshold_otsu(denoised)

    # Rough binary mask
    closedbin = np.zeros_like(closed)
    closedbin[closed < th] = 1
    closedbin = ndimage.binary_opening(closedbin, ball(3)).astype("uint8")

    label_im, nb_labels = ndimage.label(closedbin)
    sizes = ndimage.sum(closedbin, label_im, range(nb_labels + 1))
    mask = sizes == sizes.max()
    closedbin = mask[label_im]
    closedbin = ndimage.binary_closing(closedbin, ball(5)).astype("uint8")

    # Prepare markers
    markers = np.ones_like(closed, dtype="int8") * 2
    markers[1:-1, 1:-1, 1:-1] = 0
    closedbin_dil = ndimage.binary_dilation(closedbin, ball(5))
    markers[closedbin_dil] = 0
    closed_eroded = ndimage.binary_erosion(closedbin, structure=ball(5))
    markers[closed_eroded] = 1

    # Run random walker
    closed[closed > 0.0] -= closed[closed > 0.0].min()
    segtarget = (2 * closed / closed.max()) - 1.0
    labels = random_walker(segtarget, markers,
                           spacing=img.header.get_zooms()[:3],
                           return_full_prob=True)[..., padding:-padding,
                                                  padding:-padding,
                                                  padding:-padding]

    out_mask = Path(out_file or "brain_mask.nii.gz").absolute()

    hdr.set_data_dtype("uint8")
    img.__class__((labels[0, ...] >= 0.5).astype("uint8"),
                  img.affine, hdr).to_filename(out_mask)

    out_probseg = re.sub(r"\.nii(\.gz)$", r"_probseg.nii\1",
                         str(out_mask).replace("_mask.", "."))
    hdr.set_data_dtype("float32")
    img.__class__(labels[0, ...], img.affine, hdr).to_filename(out_probseg)

    out_brain = re.sub(r"\.nii(\.gz)$", r"_brainmasked.nii\1",
                       str(out_mask).replace("_mask.", "."))
    data = np.asanyarray(img.dataobj)
    data[labels[0, ...] < 0.5] = 0
    img.__class__(data, img.affine, img.header).to_filename(out_brain)

    return str(out_brain), str(out_probseg), str(out_mask)
def getBinaryImage(self):
    self.ploting = False
    HEDAB = rgb2hed(self.image)
    R = self.image[:, :, 0]
    G = self.image[:, :, 1]
    B = self.image[:, :, 2]
    H = HEDAB[:, :, 0]
    E = HEDAB[:, :, 1]
    DAB = HEDAB[:, :, 2]
    BR = B * 2 / ((1 + R + G) * (1 + B + R + G))  # Blue-ratio image
    V = self.getV()        # From HSV
    (L, L2) = self.getL()  # From CIELAB and CIELUV

    BRSmoothed = ndimage.gaussian_filter(BR, 1)
    LSmoothed = ndimage.gaussian_filter(L, 1)
    VSmoothed = ndimage.gaussian_filter(V, 1)
    HSmoothed = ndimage.gaussian_filter(H, 1)
    ESmoothed = ndimage.gaussian_filter(E, 1)
    RSmoothed = ndimage.gaussian_filter(R, 1)
    DABSmoothed = ndimage.gaussian_filter(DAB, 1)

    imLLog = self.filterImage(gaussian_laplace(LSmoothed, 9), 85) == False
    imVLog = self.filterImage(gaussian_laplace(VSmoothed, 9), 85) == False
    imELog = self.filterImage(gaussian_laplace(ESmoothed, 9), 84) == False
    imRLog = self.filterImage(gaussian_laplace(RSmoothed, 9), 84) == False
    imDABLog = self.filterImage(gaussian_laplace(DABSmoothed, 9), 50)
    imHLog = self.filterImage(gaussian_laplace(HSmoothed, 9), 8)
    imLog = self.filterImage(gaussian_laplace(BRSmoothed, 9), 9)
    imR = self.filterImage(R, 2.5)
    imB = self.filterImage(B, 10.5)
    imV = self.filterImage(V, 6.5)
    imL = self.filterImage(L, 2.5)
    imL2 = self.filterImage(L2, 2.5)
    imE = self.filterImage(E, 18)
    imH = self.filterImage(H, 95) == False
    imDAB = self.filterImage(DAB, 55) == False
    imBR = self.filterImage(BR, 63) == False

    binaryImg = (imR & imV & imB & imL & imL2 & imE & imH & imDAB & imLog &
                 imBR & imLLog & imVLog & imELog & imHLog & imRLog & imDABLog)
    openImg = ndimage.binary_opening(binaryImg, iterations=2)
    closedImg = ndimage.binary_closing(openImg, iterations=8)

    if self.ploting:
        plt.imshow(self.image)
        plt.show()
        plt.imshow(imR)
        plt.show()
        plt.imshow(imV)
        plt.show()
        plt.imshow(imB)
        plt.show()
        plt.imshow(imL)
        plt.show()
        plt.imshow(closedImg)
        plt.show()

    BRVL = np.zeros(self.image.shape)
    BRVL[:, :, 0] = BR
    BRVL[:, :, 1] = V
    BRVL[:, :, 2] = L / rangeL
    # Rescale HEDAB, from 0 to 1.
    HEDAB[:, :, 0] = (H - minH) / rangeH
    HEDAB[:, :, 1] = (E - minE) / rangeE
    HEDAB[:, :, 2] = (DAB - minDAB) / rangeDAB
    return (BinaryImageWorker(closedImg, self.rows, self.columns),
            RGBImageWorker(HEDAB, self.rows, self.columns),
            RGBImageWorker(BRVL, self.rows, self.columns),
            BinaryImageWorker(binaryImg, self.rows, self.columns))
def get_qualifying_clusters(rImage, strat_dbz, conv_dbz, int_dbz,
                            min_length, conv_buffer, min_size=10,
                            strat_buffer=0):
    """Combines the logic of get_intense_cells, connect_intense_cells,
    and connect_stratiform_to_lines to return pixels associated with
    qualifying slices.

    Stratiform >= 4 (20 dBZ)
    Convection >= 8 (40 dBZ)
    Intense >= 10 (50 dBZ)

    Parameters
    ----------
    rImage: (N, M) ndarray
        Radar image from which to extract qualifying lines.
    strat_dbz: int
        Threshold used to identify stratiform pixels
        (Multiply value by 5 to get dBZ)
    conv_dbz: int
        Threshold used to identify convective pixels
        (Multiply value by 5 to get dBZ)
    int_dbz: int
        Threshold used to identify intense pixels
        (Multiply value by 5 to get dBZ)
    min_length: int
        Minimum length for a qualifying merged line
        (Multiply value by 2 to get km)
    conv_buffer: int
        Distance within which intense cells are merged
        (Multiply value by 2 (pixel distance to km) and then multiply by
        minimum search disk radius (3) to get buffer size in km)
    min_size: int
        Minimum size for an intense cell to be considered in the
        line-building process.
    strat_buffer: int
        Distance within which stratiform pixels are merged with qualifying
        merged lines. (Multiply value by 2 to account for pixel distance
        and then multiply by minimum search disk radius of 3 to get buffer
        size in km)

    Returns
    -------
    regions: list
        A list of regionprops for each qualifying slice.
        See scikit-image.measure.regionprops for more information.
    """
    convection = 1 * (rImage >= conv_dbz)
    stratiform = 1 * (rImage >= strat_dbz)

    labeled_image, _ = label(convection, np.ones((3, 3), dtype=int))
    remove_small_objects(labeled_image, min_size=min_size, connectivity=2,
                         in_place=True)

    regions = regionprops(labeled_image, intensity_image=rImage)
    for region in regions:
        if np.max(region.intensity_image) < int_dbz:
            ymin, xmin = np.min(region.coords[:, 0]), np.min(region.coords[:, 1])
            y, x = np.where(region.intensity_image > 0)
            labeled_image[ymin + y, xmin + x] = 0

    thresholded_image = 1 * binary_closing(labeled_image > 0,
                                           structure=disk(3),
                                           iterations=int(conv_buffer))
    labeled_image, _ = label(thresholded_image, np.ones((3, 3)))

    regions = regionprops(labeled_image, intensity_image=rImage)
    for region in regions:
        if region.major_axis_length < min_length:
            ymin, xmin = np.min(region.coords[:, 0]), np.min(region.coords[:, 1])
            y, x = np.where(region.intensity_image > 0)
            labeled_image[ymin + y, xmin + x] = 0

    strat_mask = 1 * stratiform * (binary_dilation(1 * (labeled_image > 0),
                                                   structure=disk(3),
                                                   iterations=strat_buffer))
    thresholded_image = 1 * (labeled_image > 0) + strat_mask
    # thresholded_image = watershed(strat_mask, labeled_image, mask=strat_mask)

    labeled_image, _ = label(1 * (thresholded_image > 0), np.ones((3, 3)))
    labeled_image *= stratiform

    regions = regionprops(labeled_image, intensity_image=thresholded_image)
    for region in regions:
        if np.max(region.intensity_image) < 2:
            ymin, xmin = np.min(region.coords[:, 0]), np.min(region.coords[:, 1])
            y, x = np.where(region.intensity_image > 0)
            labeled_image[ymin + y, xmin + x] = 0

    return regionprops(labeled_image, intensity_image=rImage)
im[(points[0]).astype(np.int), (points[1]).astype(np.int)] = 1
im = ndimage.gaussian_filter(im, sigma=l / (4. * n))
mask = (im > im.mean()).astype(np.float)
mask += 0.1 * im
img = mask + 0.2 * np.random.randn(*mask.shape)

hist, bin_edges = np.histogram(img, bins=60)
bin_centers = 0.5 * (bin_edges[:-1] + bin_edges[1:])

binary_img = img > 0.5

# Remove small white regions
open_img = ndimage.binary_opening(binary_img)
# Remove small black holes
close_img = ndimage.binary_closing(open_img)
plt.imshow(close_img)

"""###Edge Detection using Canny Edge Detector"""

import cv2
import numpy as np
from matplotlib import pyplot as plt

plt.figure(figsize=(16, 16))
img_gs = cv2.imread('face.png', cv2.IMREAD_GRAYSCALE)
cv2.imwrite('gs.jpg', img_gs)

edges = cv2.Canny(img_gs, 100, 200)
plt.subplot(121), plt.imshow(img_gs)
plt.title('Original Gray Scale Image')
plt.subplot(122), plt.imshow(edges)
plt.title('Edge Image')
plt.show()
def binary_denoise(img):
    return ndimage.binary_closing(np.pad(img, 9, mode='reflect'), ball_5)[9:-9, 9:-9]
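# ball_5 above is assumed to be a precomputed radius-5 structuring element;
# for 2-D images it could be built like this (hypothetical reconstruction):
import numpy as np

yy, xx = np.mgrid[-5:6, -5:6]
ball_5 = (yy ** 2 + xx ** 2) <= 5 ** 2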
def export_image(img, outfile=None, img_format='fits', pad_image=False,
                 img_type='gaus_resid', mask_dilation=0, clobber=False):
    """Write an image to a file. Returns True if successful, False if not.

    outfile - name of resulting file; if None, file is named automatically.
    img_type - type of image to export; see below
    img_format - format of resulting file: 'fits' or 'casa'
    incl_wavelet - include wavelet Gaussians in model and residual images?
    clobber - overwrite existing file?

    The following images may be exported:
        'ch0' - image used for source detection
        'rms' - rms map image
        'mean' - mean map image
        'pi' - polarized intensity image
        'gaus_resid' - Gaussian model residual image
        'gaus_model' - Gaussian model image
        'shap_resid' - Shapelet model residual image
        'shap_model' - Shapelet model image
        'psf_major' - PSF major axis FWHM image (FWHM in arcsec)
        'psf_minor' - PSF minor axis FWHM image (FWHM in arcsec)
        'psf_pa' - PSF position angle image (degrees east of north)
        'psf_ratio' - PSF peak-to-total flux ratio (in units of 1/beam)
        'psf_ratio_aper' - PSF peak-to-aperture flux ratio (in units of 1/beam)
        'island_mask' - Island mask image (0 = outside island, 1 = inside island)
    """
    import os
    import functions as func
    from const import fwsig
    import mylogger

    mylog = mylogger.logging.getLogger("PyBDSF." + img.log + "ExportImage")

    # First some checking:
    if not 'gausfit' in img.completed_Ops and 'gaus' in img_type:
        print '\033[91mERROR\033[0m: Gaussians have not been fit. Please run process_image first.'
        return False
    elif not 'shapelets' in img.completed_Ops and 'shap' in img_type:
        print '\033[91mERROR\033[0m: Shapelets have not been fit. Please run process_image first.'
        return False
    elif not 'polarisation' in img.completed_Ops and 'pi' in img_type:
        print '\033[91mERROR\033[0m: Polarization properties have not been calculated. Please run process_image first.'
        return False
    elif not 'psf_vary' in img.completed_Ops and 'psf' in img_type:
        print '\033[91mERROR\033[0m: PSF variations have not been calculated. Please run process_image first.'
        return False
    elif not 'collapse' in img.completed_Ops and 'ch0' in img_type:
        print '\033[91mERROR\033[0m: ch0 image has not been calculated. Please run process_image first.'
        return False
    elif not 'rmsimage' in img.completed_Ops and ('rms' in img_type or 'mean' in img_type):
        print '\033[91mERROR\033[0m: Mean and rms maps have not been calculated. Please run process_image first.'
        return False
    elif not 'make_residimage' in img.completed_Ops and ('resid' in img_type or 'model' in img_type):
        print '\033[91mERROR\033[0m: Residual and model maps have not been calculated. Please run process_image first.'
        return False

    format = img_format.lower()
    if (format in ['fits', 'casa']) == False:
        print '\033[91mERROR\033[0m: img_format must be "fits" or "casa"'
        return False
    filename = outfile
    if filename is None or filename == '':
        filename = img.imagename + '_' + img_type + '.' + format
    if os.path.exists(filename) and clobber == False:
        print '\033[91mERROR\033[0m: File exists and clobber = False.'
        return False
    if format == 'fits':
        use_io = 'fits'
    if format == 'casa':
        use_io = 'rap'
    bdir = ''
    try:
        if img_type == 'ch0':
            func.write_image_to_file(use_io, filename, img.ch0_arr, img, bdir,
                                     pad_image, clobber=clobber)
        elif img_type == 'rms':
            func.write_image_to_file(use_io, filename, img.rms_arr, img, bdir,
                                     pad_image, clobber=clobber)
        elif img_type == 'mean':
            func.write_image_to_file(use_io, filename, img.mean_arr, img, bdir,
                                     pad_image, clobber=clobber)
        elif img_type == 'pi':
            func.write_image_to_file(use_io, filename, img.ch0_pi_arr, img, bdir,
                                     pad_image, clobber=clobber)
        elif img_type == 'psf_major':
            func.write_image_to_file(use_io, filename, img.psf_vary_maj_arr * fwsig,
                                     img, bdir, pad_image, clobber=clobber)
        elif img_type == 'psf_minor':
            func.write_image_to_file(use_io, filename, img.psf_vary_min_arr * fwsig,
                                     img, bdir, pad_image, clobber=clobber)
        elif img_type == 'psf_pa':
            func.write_image_to_file(use_io, filename, img.psf_vary_pa_arr,
                                     img, bdir, pad_image, clobber=clobber)
        elif img_type == 'psf_ratio':
            func.write_image_to_file(use_io, filename, img.psf_vary_ratio_arr,
                                     img, bdir, pad_image, clobber=clobber)
        elif img_type == 'psf_ratio_aper':
            func.write_image_to_file(use_io, filename, img.psf_vary_ratio_aper_arr,
                                     img, bdir, pad_image, clobber=clobber)
        elif img_type == 'gaus_resid':
            im = img.resid_gaus_arr
            func.write_image_to_file(use_io, filename, im, img, bdir,
                                     pad_image, clobber=clobber)
        elif img_type == 'gaus_model':
            im = img.model_gaus_arr
            func.write_image_to_file(use_io, filename, im, img, bdir,
                                     pad_image, clobber=clobber)
        elif img_type == 'shap_resid':
            func.write_image_to_file(use_io, filename, img.resid_shap_arr,
                                     img, bdir, pad_image, clobber=clobber)
        elif img_type == 'shap_model':
            func.write_image_to_file(use_io, filename, img.model_shap_arr,
                                     img, bdir, pad_image, clobber=clobber)
        elif img_type == 'island_mask':
            import numpy as N
            import scipy.ndimage as nd
            island_mask_bool = img.pyrank + 1 > 0
            if mask_dilation > 0:
                # Dilate the mask by specified number of iterations
                island_mask_bool = nd.binary_dilation(island_mask_bool,
                                                      iterations=mask_dilation)
            # Perform a binary closing to remove small holes/gaps. The
            # structure array is chosen to be about the size of the
            # beam (assuming a normally sampled psf), so that holes/gaps
            # smaller than the beam are removed.
            pbeam = int(round(img.beam2pix(img.beam)[0] * 1.5))
            island_mask_bool = nd.binary_closing(island_mask_bool,
                                                 structure=N.ones((pbeam, pbeam)))

            # Check for telescope, needed for CASA clean masks
            if img._telescope is None:
                print '\033[91mWARNING\033[0m: Telescope is unknown. Mask may not work correctly in CASA.'
            island_mask = N.array(island_mask_bool, dtype=N.float32)
            func.write_image_to_file(use_io, filename, island_mask, img, bdir,
                                     pad_image, clobber=clobber, is_mask=True)
        else:
            print "\n\033[91mERROR\033[0m: img_type not recognized."
            return False
        if filename == 'SAMP':
            print '--> Image sent to SAMP hub'
        else:
            print '--> Wrote file ' + repr(filename)
            if use_io == 'rap':
                # remove the temporary fits file used as a pyrap template
                import os
                os.remove(filename + '.fits')
        return True
    except RuntimeError, err:
        # Catch and log error
        mylog.error(str(err))

        # Re-throw error if the user is not in the interactive shell
        if img._is_interactive_shell:
            return False
        else:
            raise
def test_morphology_fft_closing_3D(self):
    im = ps.generators.blobs(shape=[100, 100, 100])
    truth = spim.binary_closing(im, structure=ball(3))
    test = ps.tools.fftmorphology(im, strel=ball(3), mode='closing')
    assert sp.all(truth == test)
def singleFluorescence2Neurons(Image, bgSize, neuronSize, threshold, xC, yC,
                               shift, prevLocs):
    """Calculate fluorescence for the two brightest objects in
    non-ratiometric images."""
    imSize = Image.shape
    # deal with RGB images
    if len(imSize) == 3:
        Image = rgb2gray(Image)
    # -- Check if box needs to be cropped as it's ranging beyond the image
    bgImage, xMin, yMin = cropImage(Image, xC, yC, bgSize, imSize)
    # --- Determine position of neuron; might not be centered due to cropping
    height, width = bgImage.shape
    xNeuron = xC - xMin
    yNeuron = yC - yMin
    # --- Get number of total pixels in the BG box and determine an intensity
    # --- threshold at which N % of the pixels have less intensity
    threshold = np.percentile(bgImage, [threshold, (100 + threshold) / 2.])
    # ------ find two objects in the search area
    mask = np.where(bgImage > threshold[0], 1, 0)
    mask = ndimage.binary_opening(mask, structure=np.ones((4, 4)))
    mask = ndimage.binary_closing(mask)
    # --- Individually label all connected regions and get their center of mass
    label_im, nb_labels = ndimage.label(mask)
    centroids = ndimage.measurements.center_of_mass(bgImage, label_im,
                                                    xrange(1, nb_labels + 1))
    # --- select brightest object by default (mean brightness)
    meanBrightness = ndimage.measurements.mean(bgImage, label_im,
                                               xrange(1, nb_labels + 1))
    print 'Number of objects found', nb_labels
    # plt.imshow(label_im)
    # plt.show()
    if nb_labels > 1:
        # if at least two are found, use the brightest ones
        ind = np.argpartition(meanBrightness, -2)[-2:]
        ind = ind[np.argsort(meanBrightness[ind])]
        yNewNeuron1, xNewNeuron1 = centroids[ind[0]]
        yNewNeuron2, xNewNeuron2 = centroids[ind[1]]
        neuronObject1 = np.where(label_im == ind[0] + 1, 0, 1)
        neuronArea1 = np.sum(neuronObject1)
        neuronObject2 = np.where(label_im == ind[1] + 1, 0, 1)
        neuronArea2 = np.sum(neuronObject2)

        vec1 = np.array([prevLocs[0] - prevLocs[2], prevLocs[1] - prevLocs[3]])
        # P2-P1 = direction from P1 to P2; vec2 points from neuron1 to neuron2
        vec2 = np.array([xNewNeuron2 - xNewNeuron1, yNewNeuron2 - yNewNeuron1])
        # detect neuron identity via angle
        angle1 = np.arccos(np.clip(np.dot(vec1 / np.linalg.norm(vec1),
                                          vec2 / np.linalg.norm(vec2)), -1, 1))
        angle2 = np.arccos(np.clip(np.dot(vec1 / np.linalg.norm(vec1),
                                          -vec2 / np.linalg.norm(vec2)), -1, 1))
        # switch identity
        if angle2 > angle1:
            tmp = yNewNeuron2, xNewNeuron2
            yNewNeuron2, xNewNeuron2 = yNewNeuron1, xNewNeuron1
            yNewNeuron1, xNewNeuron1 = tmp
            tmp = neuronObject2
            neuronObject2 = neuronObject1
            neuronObject1 = tmp
    if nb_labels == 1:
        # if only one object found, assign same values to both
        loc = np.argmax(meanBrightness)
        yNewNeuron1, xNewNeuron1 = centroids[loc]
        xNewNeuron2, yNewNeuron2 = prevLocs[-2] - xMin, prevLocs[-1] - yMin
        neuronObject1 = np.where(label_im == loc + 1, 0, 1)
        neuronArea1 = np.sum(neuronObject1)
        neuronObject2 = neuronObject1
        neuronArea2 = neuronArea1
    elif nb_labels == 0:
        # if nothing is found, use bg
        yNewNeuron1, xNewNeuron1 = yNeuron, xNeuron
        yNewNeuron2, xNewNeuron2 = yNewNeuron1, xNewNeuron1
        loc = -1
        neuronObject1 = np.where(label_im == loc + 1, 0, 1)
        neuronArea1 = np.sum(neuronObject1)
        neuronObject2 = np.where(label_im == loc + 1, 0, 1)
        neuronArea2 = np.sum(neuronObject2)
    # --- Get average of the 2 neurons' fluorescence ---
    tmp_neuron = np.ma.masked_array(bgImage, neuronObject1)
    newNeuronAverage1 = np.ma.average(tmp_neuron[tmp_neuron > threshold[1]])
    tmp_neuron = np.ma.masked_array(bgImage, neuronObject2)
    newNeuronAverage2 = np.ma.average(tmp_neuron[tmp_neuron > threshold[1]])
    # --- remove both neuron objects from the field of view; we assume the
    # --- bg is the same
    bgLevel = calculateWith2Masks(bgImage, xNewNeuron1, yNewNeuron1,
                                  xNewNeuron2, yNewNeuron2, neuronSize, imSize)
    # for each of the 2 neurons there are green components
    return 1, 1, 1, 1, 1, \
        bgLevel, newNeuronAverage1, xNewNeuron1 + xMin, yNewNeuron1 + yMin, neuronArea1, \
        1, 1, 1, 1, 1, \
        bgLevel, newNeuronAverage2, xNewNeuron2 + xMin, yNewNeuron2 + yMin, neuronArea2
macro = 0
micro = 0
itc = 0
negative = 0
for file in files:
    # for file in ["mask_patient_039_node_1.jpg"]:
    if os.path.isdir(path + file):
        continue
    print file
    node_list = []
    node_list.append(file.split(".")[0] + ".tif")
    img = cv2.imread(path + file)
    img[img < 180] = 0
    image_open = ndimage.binary_opening(img[:, :, 0], structure=np.ones((5, 5)))
    image_close = ndimage.binary_closing(image_open, structure=np.ones((5, 5)))
    image_ = np.where(image_close == True, 255, 0)
    boxes = find_boxes(image_close)  # (x1, y1, x2, y2)
    # length = [max(box[1]-box[0], box[3]-box[2]) for box in boxes]
    length = [(box[1] - box[0]) * (box[3] - box[2]) for box in boxes]
    length.sort()
    print length
    gt = file_dict[file.split(".")[0] + ".tif"]
    if len(length) == 0:
        label = "negative"
        negative += 1
    elif length[-1] >= 200 or (len(length) > 50 and length[-5] >= 30):
        label = "macro"
        macro += 1
    elif length[-1] >= 50 or (len(length) > 1 and length[-2] >= 30):
        label = "micro"
# Now set the spatial connectivity requirements.
# The spatial pixel scales in the sim headers are SUPER small;
# choosing this major axis gives an appropriately sized 5x5 kernel.
beam = Beam(major=1e-3 * u.arcmin)
kernel = beam.as_tophat_kernel(pixscale)
kernel_pix = (kernel.array > 0).sum()

# Avoid edge effects in closing by padding by 1 in each axis
mask = np.pad(mask, ((0, 0), (1, 1), (1, 1)), 'constant',
              constant_values=False)

for i in ProgressBar(mask.shape[0]):
    mask[i] = nd.binary_opening(mask[i], kernel)
    mask[i] = nd.binary_closing(mask[i], kernel)
    mask[i] = mo.remove_small_objects(mask[i], min_size=kernel_pix,
                                      connectivity=2)
    mask[i] = mo.remove_small_holes(mask[i], min_size=kernel_pix,
                                    connectivity=2)

# Remove padding
mask = mask[:, 1:-1, 1:-1]

# Each region must contain a point above the peak_snr
labels, num = nd.label(mask, np.ones((3, 3, 3)))
for n in range(1, num + 1):
    pts = np.where(labels == n)
    if np.nanmax(snr[pts]) < peak_snr:
        mask[pts] = False
def fetch_icbm152_brain_gm_mask(data_dir=None, threshold=0.2, resume=True,
                                n_iter=2, verbose=1):
    """Downloads ICBM152 template first, then loads the 'gm' mask.

    .. versionadded:: 0.2.5

    Parameters
    ----------
    %(data_dir)s
    threshold : float, optional
        Values of the ICBM152 grey-matter template above this threshold
        will be included. Default=0.2
    %(resume)s
    n_iter : int, optional, Default=2
        Number of repetitions of the dilation and erosion steps performed
        by the scipy.ndimage.binary_closing function.

        .. versionadded:: 0.8.1
    %(verbose)s

    Returns
    -------
    gm_mask_img : Nifti1Image
        Image corresponding to the brain grey matter from the ICBM152
        template.

    Notes
    -----
    This function relies on the ICBM152 templates, from which we pick the
    grey-matter template and threshold it at 0.2 to keep the top fifth of
    the values. We then apply a bit of post-processing, namely a binary
    closing operation, to obtain a more compact mask image.

    .. note::
        It is advised to check the mask image against your own data
        processing.

    See Also
    --------
    nilearn.datasets.fetch_icbm152_2009: for details regarding the ICBM152
        template.
    nilearn.datasets.load_mni152_template: for details about versions of
        the MNI152 template and related.

    """
    # Fetching ICBM152 grey matter mask image
    icbm = fetch_icbm152_2009(data_dir=data_dir, resume=resume, verbose=verbose)
    gm = icbm['gm']
    gm_img = check_niimg(gm)
    gm_data = get_data(gm_img)

    # getting one fifth of the values
    gm_mask = (gm_data > threshold).astype("int8")

    gm_mask = ndimage.binary_closing(gm_mask, iterations=n_iter)
    gm_mask_img = new_img_like(gm_img, gm_mask)
    return gm_mask_img
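# A possible usage sketch, assuming a standard nilearn install (plot_roi and
# show are regular nilearn.plotting functions; the overlay is just a visual
# sanity check of the fetched mask):
from nilearn import plotting

gm_mask = fetch_icbm152_brain_gm_mask(threshold=0.2, n_iter=2)
plotting.plot_roi(gm_mask, title="ICBM152 grey-matter mask")
plotting.show()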
while True:
    # Capture frame-by-frame
    try:
        frame = cam.read()
    except IOError:
        # print('No frame')
        continue

    Profiler.ENABLED = False
    with Profiler('all') as profiler:
        with profiler('detect') as p:
            res = vision.ColorDetectResult(frame)

        with profiler('bluecut') as p:
            is_blue = (res.im == Colors.BLUE)
            is_blue = ndimage.binary_opening(is_blue, structure=np.ones((5, 5)))
            is_blue = ndimage.binary_closing(is_blue, structure=np.ones((5, 5)))
            x, y = np.meshgrid(np.arange(cam.shape[1]), np.arange(cam.shape[0]))
            blue_below = np.cumsum(is_blue[::-1], axis=0)[::-1]
            res.mask_out((blue_below > 0) & ~is_blue)

        with Profiler('fill'):
            red_blobs = vision.BlobDetector(res, Colors.RED, 1000)
            green_blobs = vision.BlobDetector(res, Colors.GREEN, 1000)
            blue_blobs = vision.BlobDetector(res, Colors.BLUE, 2000)

    frame = np.copy(frame)
    for blob in red_blobs.blobs + blue_blobs.blobs + green_blobs.blobs:
        y, x = blob.pos
        color = tuple(map(int, Colors.to_rgb(blob.color)))
def treshold(image3D, tresholdValue):
    tres = image3D > tresholdValue
    # note: diamond(3) is a 2-D structuring element, so despite the parameter
    # name this expects a 2-D image (scipy requires the structure to match the
    # input's dimensionality)
    kernel = skimage.morphology.diamond(3).astype(np.uint8)
    closing = ndimage.binary_closing(tres, structure=kernel)
    return ndimage.binary_opening(closing, structure=kernel)
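# A quick usage sketch on synthetic data (assumes numpy, skimage, and
# scipy.ndimage are in scope as in the snippet above): the closing fills
# pepper holes inside the thresholded square, then the opening removes
# isolated salt specks.
import numpy as np

rng = np.random.default_rng(1)
noisy = np.zeros((40, 40))
noisy[10:30, 10:30] = 1.0
noisy += rng.normal(0, 0.2, noisy.shape)
clean_mask = treshold(noisy, 0.5)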
def tiraBuraco(img_mask, num):
    # "tiraBuraco" is Portuguese for "remove hole": repeated binary closing
    # fills small holes in the mask
    return ndi.binary_closing(img_mask, iterations=num)
def closed_mask_roi(mask):
    closed_mask = ndi.binary_closing(
        mask, structure=np.ones((3, 3)), iterations=8)
    roi = ndi.find_objects(closed_mask, max_label=1)[0]
    return roi
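# Usage sketch: ndi.find_objects returns a tuple of slices, so the ROI can
# index the original array directly to crop a bounding box around the mask.
import numpy as np

mask = np.zeros((100, 100), dtype=bool)
mask[40:60, 30:70] = True
roi = closed_mask_roi(mask)
cropped = mask[roi]  # the (slice, slice) tuple selects the bounding box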
def split_segmentation(infile, lbl=1, close=True, close_cube_size=5,
                       close_iter=1, min_region_size=100):
    """
    Splits the segmentation into connected regions with at least the given
    size (number of voxels).

    Args:
        infile (str): the segmentation input file in one of the formats:
            '.mrc', '.em' or '.vti'
        lbl (int, optional): the label to be considered, 0 will be ignored,
            default 1
        close (boolean, optional): if True (default), closes small holes in
            the segmentation first
        close_cube_size (int, optional): if close is True, gives the size of
            the cube structuring element used for closing, default 5
        close_iter (int, optional): if close is True, gives the number of
            iterations the closing should be repeated, default 1
        min_region_size (int, optional): gives the minimal number of voxels a
            region has to have in order to be considered, default 100

    Returns:
        a list of regions, where each item is a binary ndarray with the same
        shape as the segmentation but contains only one region, and the name
        of the file holding the (possibly closed) binary segmentation
    """
    # Load the segmentation numpy array from a file and get only the requested
    # labels as 1 and the background as 0:
    seg = io.load_tomo(infile)
    assert isinstance(seg, np.ndarray)
    data_type = seg.dtype
    binary_seg = (seg == lbl).astype(data_type)

    # If requested, close small holes in the segmentation:
    outfile = infile
    if close:
        outfile = ("%s%s_closed_size%s_iter%s.mrc"
                   % (infile[0:-4], lbl, close_cube_size, close_iter))
        if not isfile(outfile):
            from scipy import ndimage
            cube = np.ones((close_cube_size, close_cube_size, close_cube_size))
            binary_seg = ndimage.binary_closing(
                binary_seg, structure=cube, iterations=close_iter
            ).astype(data_type)
            # Write the closed binary segmentation into a file:
            io.save_numpy(binary_seg, outfile)
            print("Closed the binary segmentation and saved it into the file "
                  "%s" % outfile)
        else:  # the '.mrc' file already exists
            binary_seg = io.load_tomo(outfile)
            print("The closed binary segmentation was loaded from the file "
                  "%s" % outfile)

    # Label each connected region of the binary segmentation:
    label_seg = label(binary_seg)

    # Get only regions with at least the given size:
    regions = []
    for i, region in enumerate(regionprops(label_seg)):
        region_area = region.area
        if region_area >= min_region_size:
            print("%s. region has %s voxels and passes" % (i + 1, region_area))
            # Get the region coordinates and make an ndarray with the same
            # shape as the segmentation and 1 at those coordinates:
            region_ndarray = np.zeros(shape=tuple(seg.shape), dtype=data_type)
            # 2D array with 3 columns (x, y, z) and one row per voxel in the
            # region
            region_coords = region.coords
            for j in range(region_coords.shape[0]):  # iterate over the rows
                region_ndarray[region_coords[j, 0],
                               region_coords[j, 1],
                               region_coords[j, 2]] = 1
            regions.append(region_ndarray)
        else:
            print("%s. region has %s voxels and does NOT pass"
                  % (i + 1, region_area))
    print("%s regions passed." % len(regions))
    return regions, outfile
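# The core pattern of split_segmentation, reduced to a hedged self-contained
# sketch (io.load_tomo and the .mrc caching are specific to the original
# project and are omitted here):
import numpy as np
from scipy import ndimage
from skimage.measure import label, regionprops

seg = np.random.rand(30, 30, 30) > 0.8
closed = ndimage.binary_closing(seg, structure=np.ones((5, 5, 5)), iterations=1)
labeled = label(closed)
# one binary volume per sufficiently large connected region
regions = [labeled == r.label for r in regionprops(labeled) if r.area >= 100]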
def run(self, ips, snap, img, para=None):
    strc = np.ones((para['h'], para['w']), dtype=np.uint8)
    ndimg.binary_closing(snap, strc, output=img)
    img *= 255
def dark_area_mask(mf, phigh=99.5, th_scale=0.1):
    mask = mf > np.percentile(mf, phigh) * th_scale
    return remove_small_regions(binary_opening(binary_closing(mask)))
def find_lines(rImage, conv_buffer, min_length=50):
    """Combines the logic of get_intense_cells and connect_intense_cells to
    return pixels associated with qualifying merged lines.

    Stratiform >= 4 (20 dBZ)
    Convection >= 8 (40 dBZ)
    Intense    >= 10 (50 dBZ)

    Parameters
    ----------
    rImage : (N, M) ndarray
        Radar image from which to extract qualifying lines.
    conv_buffer : integer
        Distance to search for nearby intense cells.
    min_length : integer
        Minimum size requirement to be considered an MCS. Default is 50
        (100 km with 2 km pixels).

    Returns
    -------
    labeled_image : (N, M) ndarray
        Binary image of pixels in qualifying merged lines. Same dimensions
        as rImage.
    """
    convection = 1 * (rImage >= 8)
    stratiform = 1 * (rImage >= 4)

    labeled_image, _ = label(convection, np.ones((3, 3), dtype=int))
    remove_small_objects(labeled_image, min_size=10, connectivity=2, in_place=True)

    # Drop convective regions that never reach the intense (50 dBZ) threshold
    regions = regionprops(labeled_image, intensity_image=rImage)
    for region in regions:
        if np.max(region.intensity_image) < 10:
            ymin, xmin = np.min(region.coords[:, 0]), np.min(region.coords[:, 1])
            y, x = np.where(region.intensity_image > 0)
            labeled_image[ymin + y, xmin + x] = 0

    # Close gaps between nearby intense cells to merge them into lines
    thresholded_image = 1 * binary_closing(
        labeled_image > 0, structure=disk(3), iterations=int(conv_buffer))
    labeled_image, _ = label(thresholded_image, np.ones((3, 3)))

    # Keep only lines long enough to qualify as an MCS
    regions = regionprops(labeled_image, intensity_image=rImage)
    for region in regions:
        if region.major_axis_length < min_length:
            ymin, xmin = np.min(region.coords[:, 0]), np.min(region.coords[:, 1])
            y, x = np.where(region.intensity_image > 0)
            labeled_image[ymin + y, xmin + x] = 0

    return labeled_image
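# Sketch of how the disk-shaped closing bridges nearby convective cells
# (disk from skimage.morphology and binary_closing from scipy.ndimage,
# matching the names used above):
import numpy as np
from scipy.ndimage import binary_closing
from skimage.morphology import disk

cells = np.zeros((20, 40), dtype=bool)
cells[8:12, 5:15] = True
cells[8:12, 18:28] = True  # a second cell 3 pixels away
merged = binary_closing(cells, structure=disk(3), iterations=1)
print(merged[10, 15:18])  # the gap between the cells is now filled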
def detect(self, frame, **kwargs):
    if not self.isAlone:
        self._stop_other_pupil_detectors()
        self.isAlone = True

    # Start from an empty result so we can bail out early if needed
    result = {}
    ellipse = {}
    eye_id = self.g_pool.eye_id
    result["id"] = eye_id
    result["topic"] = f"pupil.{eye_id}.{self.identifier}"
    ellipse["center"] = (0.0, 0.0)
    ellipse["axes"] = (0.0, 0.0)
    ellipse["angle"] = 0.0
    result["ellipse"] = ellipse
    result["diameter"] = 0.0
    result["location"] = ellipse["center"]
    result["confidence"] = 0.0
    result["timestamp"] = frame.timestamp
    result["method"] = self.method
    result["norm_pos"] = [0.0, 0.0]  # [np.nan, np.nan]

    img = frame.gray
    debugOutputWindowName = None

    if self.g_pool.ellseg_reverse:
        img = np.flip(img, axis=0)

    if self.g_pool.ellseg_debug:
        cv2.imshow('EYE' + str(eye_id) + ' INPUT', img)
        debugOutputWindowName = 'EYE' + str(eye_id) + ' OUTPUT'
    else:
        cv2.destroyWindow('EYE' + str(eye_id) + ' INPUT')

    customEllipse = self.g_pool.ellseg_customellipse

    values = self.detector_ritnet_2d.detect(img)
    if not values:
        return result

    # EllSeg results are obtained - begin obtaining or returning final ellipse
    seg_map = values[0]
    origSeg_map = np.copy(seg_map)
    ellseg_pupil_ellipse = values[1]
    # iris_ellipse = values[2]
    seg_entropy = values[3]

    if self.g_pool.ellseg_reverse:
        seg_map = np.flip(seg_map, axis=0)
        seg_entropy = np.flip(seg_entropy, axis=0)

    # Change format of ellseg ellipse to meet PL conventions
    height, width = seg_map.shape
    ellseg_pupil_ellipse[1] = -ellseg_pupil_ellipse[1] + (2 * height / 2)
    ellseg_pupil_ellipse[4] = ellseg_pupil_ellipse[4] * -1

    # initialize entropy mask (normalized per-pixel entropy, pupil class only)
    seg_entropy_mask = np.divide(seg_entropy, np.log2(CHANNELS))
    pupil_entropy_mask = seg_entropy_mask
    pupil_entropy_mask[seg_map != 2] = 0
    origSeg_map = np.copy(seg_map)

    # OPTION 1: custom ellipse setting is NOT toggled on
    if not customEllipse:
        # Prepare pupil mask for the pupil labs ellipse fit
        # (background, iris, pupil)
        seg_map[np.where(seg_map == 0)] = 255
        seg_map[np.where(seg_map == 1)] = 128
        seg_map[np.where(seg_map == 2)] = 0
        seg_map = np.array(seg_map, dtype=np.uint8)

        framedup = lambda: None
        setattr(framedup, 'gray', seg_map)
        setattr(framedup, 'bgr', frame.bgr)
        setattr(framedup, 'width', frame.width)
        setattr(framedup, 'height', frame.height)
        setattr(framedup, 'timestamp', frame.timestamp)

        # Apply the pupil labs ellipse fit to the mask
        final_result = super().detect(framedup)

        if self.g_pool.ellseg_debug:
            final_result_ellipse = final_result["ellipse"]
            elcenter = final_result_ellipse["center"]
            elaxes = final_result_ellipse["axes"]  # axis diameters
            seg_map_debug = np.stack((np.copy(seg_map),) * 3, axis=-1)
            cv2.ellipse(
                seg_map_debug,
                (round(elcenter[0]), round(elcenter[1])),
                (round(elaxes[0] / 2), round(elaxes[1] / 2)),  # diameters to radii
                final_result_ellipse["angle"], 0, 360, (255, 0, 0), 1)
            cv2.imshow(debugOutputWindowName, seg_map_debug)

        pl_pupil_ellipse = [final_result["ellipse"]["center"][0],
                            final_result["ellipse"]["center"][1],
                            final_result["ellipse"]["axes"][0] / 2.0,
                            final_result["ellipse"]["axes"][1] / 2.0,
                            final_result["ellipse"]["angle"]]

        if self.g_pool.calcCustomConfidence:
            # origSeg_map[np.where(origSeg_map == 0)] = 0
            # origSeg_map[np.where(origSeg_map == 1)] = 0
            # origSeg_map[np.where(origSeg_map == 2)] = 255
            # origSeg_map = np.array(origSeg_map, dtype=np.uint8)
            seg_map[np.where(seg_map == 0)] = 254
            seg_map[np.where(seg_map == 255)] = 0
            seg_map[np.where(seg_map == 128)] = 0
            seg_map = np.array(seg_map, dtype=np.uint8)

            if self.g_pool.save_masks:
                final_result['confidence'] = self.calcConfidence(
                    pl_pupil_ellipse, seg_map,
                    debug_confidence_timestamp=frame.timestamp)
            else:
                final_result['confidence'] = self.calcConfidence(
                    pl_pupil_ellipse, seg_map,
                    debug_confidence_timestamp=None)
            if np.isnan(final_result['confidence']):
                final_result['confidence'] = 0.0
        elif self.g_pool.entropy_confidence:
            # be a LOT more strict since we're working with tight edges
            self.ellipse_true_support_min_dist = 5
            # Modify confidence based on entropy
            SIMPLE_CONF = False
            if SIMPLE_CONF:
                test_conf = (-np.tan(np.mean(pupil_entropy_mask)) / np.tan(1)) + 1
                final_result['confidence'] = test_conf
            else:
                # Normalize the entropy mask, then binarize it at its mean
                # positive value to extract tight "entropy edges"
                thresh = np.max(pupil_entropy_mask)
                pupil_entropy_mask = (
                    (pupil_entropy_mask - np.min(pupil_entropy_mask)) /
                    (np.max(pupil_entropy_mask) - np.min(pupil_entropy_mask)))
                # hist, bins = np.histogram(
                #     pupil_entropy_mask[pupil_entropy_mask > 0].flatten(),
                #     np.linspace(0, 1, 20))
                thresh = np.mean(pupil_entropy_mask[pupil_entropy_mask > 0])
                # print("THRESH: ", thresh)
                # print("ZEROS: ", len(pupil_entropy_mask[pupil_entropy_mask == 0]))
                entropy_edges = pupil_entropy_mask
                entropy_edges[pupil_entropy_mask >= thresh] = 1
                entropy_edges[pupil_entropy_mask < thresh] = 0
                entropy_edges = np.uint8(entropy_edges)
                entropy_edges_temp = entropy_edges
                entropy_edges = binary_closing(entropy_edges,
                                               structure=np.ones((10, 10)))
                entropy_edges_temp = np.uint8(
                    np.logical_xor(entropy_edges, entropy_edges_temp))
                entropy_edges_temp[entropy_edges_temp != 0] = 255
                cv2.imshow('EYE' + str(eye_id) + ' ENTROPY DIFF',
                           entropy_edges_temp)
                entropy_edges = np.uint8(entropy_edges)
                entropy_edges[entropy_edges != 0] = 255
                # entropy_edges = np.uint8(np.round(np.power(pupil_entropy_mask, 1/3.5))*255)
                font = cv2.FONT_HERSHEY_SIMPLEX
                orgPP = (10, 15)
                orgPPDiff = (10, 35)
                orgIouDiff = (10, 55)
                fontScale = 0.5
                color = 255
                thickness = 2

                final_edges = np.flip(
                    np.transpose(np.nonzero(entropy_edges)), axis=1)
                if self.g_pool.save_masks:
                    final_result['confidence'] = self.calcConfidence(
                        pl_pupil_ellipse, seg_map,
                        debug_confidence_timestamp=frame.timestamp,
                        final_edges=final_edges) if len(final_edges) else 0.0
                else:
                    final_result['confidence'] = self.calcConfidence(
                        pl_pupil_ellipse, seg_map,
                        debug_confidence_timestamp=None,
                        final_edges=final_edges) if len(final_edges) else 0.0
                entropy_edges = cv2.putText(
                    entropy_edges,
                    "CONF: " + "{:.4f}".format(final_result['confidence']),
                    orgPP, font, fontScale, color, thickness, cv2.LINE_AA)
                # This "edge detector" is elliptical in all good frames and
                # not elliptical in all bad frames
                cv2.imshow('EYE' + str(eye_id) + ' ENTROPY', entropy_edges)

                conf_rounded = int(
                    math.ceil(final_result['confidence'] * 100 / 10.0)) * 10 / 100
                print(conf_rounded)
                fname = "{}.png".format(frame.timestamp)
                imOutDir = os.path.join(
                    self.g_pool.capture.source_path[
                        0:self.g_pool.capture.source_path.rindex("\\") + 1],
                    "eye" + str(self.g_pool.eye_id) +
                    "_entropy/{:0.2f}".format(conf_rounded))
                os.makedirs(imOutDir, exist_ok=True)
                final_result_ellipse = final_result["ellipse"]
                elcenter = ((final_result_ellipse["center"][0],
                             frame.height - final_result_ellipse["center"][1])
                            if self.g_pool.ellseg_reverse
                            else final_result_ellipse["center"])
                elaxes = final_result_ellipse["axes"]  # axis diameters
                elangle = (180 - final_result_ellipse["angle"]
                           if self.g_pool.ellseg_reverse
                           else final_result_ellipse["angle"])
                img_with_ellipse = np.stack((np.copy(img),) * 3, axis=-1)
                cv2.ellipse(
                    img_with_ellipse,
                    (round(elcenter[0]), round(elcenter[1])),
                    (round(elaxes[0] / 2), round(elaxes[1] / 2)),  # diameters to radii
                    final_result_ellipse["angle"], 0, 360, (255, 0, 0), 1)
                final_entropy_out = cv2.hconcat([
                    img_with_ellipse,
                    np.stack((np.copy(entropy_edges),) * 3, axis=-1)])
                cv2.imwrite('{}/{}'.format(imOutDir, fname), final_entropy_out)

        if self.g_pool.save_masks:
            fname = "eye-{}_{:0.3f}_{}.png".format(
                eye_id, final_result['confidence'], frame.timestamp)
            self.saveMaskAsImage(img, seg_map, pl_pupil_ellipse,
                                 fileName=fname,
                                 flipImage=self.g_pool.ellseg_reverse)

        if final_result['diameter'] < self.g_pool.ellseg_pupil_size_min:
            # write out the eliminated frame for inspection
            imOutDir = os.path.join(
                self.g_pool.capture.source_path[
                    0:self.g_pool.capture.source_path.rindex("\\") + 1],
                "eye" + str(self.g_pool.eye_id) + "_eliminated_frame")
            os.makedirs(imOutDir, exist_ok=True)
            im = np.zeros((frame.height, frame.width, 3))
            im[:, :, 0] = img
            im[:, :, 1] = img
            im[:, :, 2] = img
            final_result_ellipse = final_result["ellipse"]
            elcenter = final_result_ellipse["center"]
            elaxes = final_result_ellipse["axes"]  # axis diameters
            cv2.ellipse(
                im,
                (round(elcenter[0]), round(elcenter[1])),
                (round(elaxes[0] / 2), round(elaxes[1] / 2)),  # diameters to radii
                final_result_ellipse["angle"], 0, 360, (255, 0, 0), 1)
            fileName = "eye-{}_{:0.3f}_{}.png".format(
                self.g_pool.eye_id, final_result['confidence'], frame.timestamp)
            cv2.imwrite("{}/{}".format(imOutDir, fileName), im)
            # end write out image
            final_result["ellipse"] = {"center": (0.0, 0.0),
                                       "axes": (0.0, 0.0),
                                       "angle": 0.0}
            final_result["diameter"] = 0.0
            final_result["location"] = (0.0, 0.0)
            final_result['confidence'] = 0.0

        return final_result

    elif customEllipse:
        # OPTION 2: custom ellipse setting is toggled on

        # Ellipse data transformations (background, iris, pupil)
        seg_map[np.where(seg_map == 0)] = 0
        seg_map[np.where(seg_map == 1)] = 0
        seg_map[np.where(seg_map == 2)] = 255
        seg_map = np.array(seg_map, dtype=np.uint8)
        openCVformatPupil = np.copy(ellseg_pupil_ellipse)
        if ellseg_pupil_ellipse[4] > np.pi / 2.0:
            ellseg_pupil_ellipse[4] = ellseg_pupil_ellipse[4] - np.pi / 2.0
        if ellseg_pupil_ellipse[4] < -np.pi / 2.0:
            ellseg_pupil_ellipse[4] = ellseg_pupil_ellipse[4] + np.pi / 2.0
        ellseg_pupil_ellipse[4] = np.rad2deg(ellseg_pupil_ellipse[4])

        if self.g_pool.ellseg_debug:
            seg_map_debug = np.stack((np.copy(seg_map),) * 3, axis=-1)
            cv2.ellipse(
                seg_map_debug,
                (round(ellseg_pupil_ellipse[0]), round(ellseg_pupil_ellipse[1])),
                (round(ellseg_pupil_ellipse[2]), round(ellseg_pupil_ellipse[3])),
                ellseg_pupil_ellipse[4], 0, 360, (255, 0, 0), 1)
            cv2.imshow(debugOutputWindowName, seg_map_debug)

        confidence = self.calcConfidence(ellseg_pupil_ellipse, seg_map)

        if self.g_pool.save_masks:
            fname = "eye-{}_{:0.3f}.png".format(eye_id, confidence)
            self.saveMaskAsImage(img, seg_map, openCVformatPupil, fname, eye_id)
        eye_id = self.g_pool.eye_id
        result["id"] = eye_id
        result["topic"] = f"pupil.{eye_id}.{self.identifier}"
        ellipse["center"] = (ellseg_pupil_ellipse[0], ellseg_pupil_ellipse[1])
        ellipse["axes"] = (ellseg_pupil_ellipse[2] * 2,
                           ellseg_pupil_ellipse[3] * 2)
        ellipse["angle"] = ellseg_pupil_ellipse[4]
        result["ellipse"] = ellipse
        result["diameter"] = ellseg_pupil_ellipse[2] * 2
        result["location"] = ellipse["center"]
        result["confidence"] = confidence
        result["timestamp"] = frame.timestamp
        # logger.debug(result)

        location = result["location"]
        norm_pos = normalize(location, (frame.width, frame.height), flip_y=True)
        result["norm_pos"] = norm_pos

        try:
            self.g_pool.ellSegDetector[str(self.g_pool.eye_id)] = result
        except AttributeError:  # first frame: the dict does not exist yet
            self.g_pool.ellSegDetector = {str(self.g_pool.eye_id): result}

        if result['diameter'] < self.g_pool.ellseg_pupil_size_min:
            # write out the eliminated frame for inspection
            imOutDir = os.path.join(
                self.g_pool.capture.source_path[
                    0:self.g_pool.capture.source_path.rindex("\\") + 1],
                "eye" + str(self.g_pool.eye_id) + "_eliminated_frame")
            os.makedirs(imOutDir, exist_ok=True)
            im = np.zeros((frame.height, frame.width, 3))
            im[:, :, 0] = img
            im[:, :, 1] = img
            im[:, :, 2] = img
            final_result_ellipse = result["ellipse"]
            elcenter = final_result_ellipse["center"]
            elaxes = final_result_ellipse["axes"]  # axis diameters
            cv2.ellipse(
                im,
                (round(elcenter[0]), round(elcenter[1])),
                (round(elaxes[0] / 2), round(elaxes[1] / 2)),  # diameters to radii
                final_result_ellipse["angle"], 0, 360, (255, 0, 0), 1)
            fileName = "eye-{}_{:0.3f}_{}.png".format(
                self.g_pool.eye_id, result['confidence'], frame.timestamp)
            cv2.imwrite("{}/{}".format(imOutDir, fileName), im)
            # end write out image
            result["ellipse"] = {"center": (0.0, 0.0),
                                 "axes": (0.0, 0.0),
                                 "angle": 0.0}
            result["diameter"] = 0.0
            result["location"] = (0.0, 0.0)
            result['confidence'] = 0.0

        return result
def preprocess_data(data):
    data = data.astype(int)  # np.int was removed in NumPy 1.24
    data = ndi.binary_closing(data, iterations=1).astype(int)
    data = np.asarray(ndi.binary_fill_holes(data), dtype='uint8')
    return data
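# Usage sketch (assumes numpy and scipy.ndimage as ndi are in scope, as in
# the snippet above): closing smooths the binary shape and binary_fill_holes
# removes fully enclosed background, leaving a clean uint8 mask.
import numpy as np

ring = np.zeros((9, 9), dtype=int)
ring[2:7, 2:7] = 1
ring[3:6, 3:6] = 0  # a hole in the middle
out = preprocess_data(ring)
print(out[4, 4])  # 1: the hole has been filled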