def segment3DLungs(image_path):
    # Lung segmentation parameters
    params = {}
    # Parameters for intensity (fixed)
    params['lungMinValue'] = 0
    params['lungMaxValue'] = 800
    params['lungThreshold'] = -900
    # Parameters for lung segmentation (fixed)
    params['xRangeRatio1'] = 0.4
    params['xRangeRatio2'] = 0.75
    params['zRangeRatio1'] = 0.5
    params['zRangeRatio2'] = 0.75
    # Load image
    Img = nib.load(image_path)
    I = Img.get_fdata()  # get_data() is deprecated in nibabel >= 3
    # Intensity thresholding & morphological operations
    M = np.zeros(I.shape)
    M[I > params['lungMinValue']] = 1
    M[I > params['lungMaxValue']] = 0
    struct_s = ndimage.generate_binary_structure(3, 1)
    struct_m = ndimage.iterate_structure(struct_s, 2)
    struct_l = ndimage.iterate_structure(struct_s, 3)  # kept from the original; unused below
    M = ndimage.binary_closing(M, structure=struct_s, iterations=1)
    M = ndimage.binary_opening(M, structure=struct_m, iterations=1)
    # Estimate lung field of view
    [m, n, p] = I.shape
    medx = int(m / 2)
    medy = int(n / 2)
    xrange1 = int(m / 2 * params['xRangeRatio1'])
    xrange2 = int(m / 2 * params['xRangeRatio2'])
    zrange1 = int(p * params['zRangeRatio1'])
    zrange2 = int(p * params['zRangeRatio2'])
    # Select largest connected components
    M = measure.label(M)
    label1 = M[medx - xrange2:medx - xrange1, medy, zrange1:zrange2]
    label2 = M[medx + xrange1:medx + xrange2, medy, zrange1:zrange2]
    # note: scipy >= 1.9 changed stats.mode; pass keepdims=True there
    label1 = stats.mode(label1[label1 > 0])[0][0]
    label2 = stats.mode(label2[label2 > 0])[0][0]
    M[M == label1] = -1
    M[M == label2] = -1
    M[M > 0] = 0
    M = M * -1
    SegImage = nib.Nifti1Image(M, Img.affine, Img.header)
    return SegImage
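# --- Hedged usage sketch for segment3DLungs (not part of the original
# source). Assumes the module-level imports the function relies on
# (nibabel as nib, numpy as np, scipy.ndimage as ndimage, scipy.stats as
# stats, skimage.measure as measure); 'chest_ct.nii.gz' is a hypothetical
# input path.
#
#   seg = segment3DLungs('chest_ct.nii.gz')   # returns a nib.Nifti1Image mask
#   nib.save(seg, 'chest_ct_lungmask.nii.gz')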
def compute_mask(aparc, labels=[0, 5000]):
    import nibabel as nb
    import numpy as np
    import os.path as op
    import scipy.ndimage as nd

    segnii = nb.load(aparc)
    seg = segnii.get_fdata()  # get_data() is deprecated in nibabel >= 3
    mask = np.ones_like(seg, dtype=np.uint8)
    for l in labels:
        mask[seg == l] = 0

    struct = nd.iterate_structure(
        nd.generate_binary_structure(3, 1), 4)
    mask = nd.binary_dilation(mask, structure=struct).astype(np.uint8)
    mask = nd.binary_closing(mask, structure=struct)
    mask = nd.binary_fill_holes(mask, structure=struct).astype(np.uint8)
    mask[mask > 0] = 1
    mask[mask <= 0] = 0

    hdr = segnii.header.copy()  # get_header() is deprecated
    hdr.set_data_dtype(np.uint8)
    hdr.set_xyzt_units('mm', 'sec')
    out_file = op.abspath('nobstem_mask.nii.gz')
    nb.Nifti1Image(mask, segnii.affine, hdr).to_filename(out_file)
    return out_file
def watershed(self, mask, sigma=0.5, watershed_line=True):
    """
    Run watershed segmentation to generate segment label mask.

    Args:
        mask (np.ndarray[bool]) - binary foreground mask
        sigma (float) - parameter for smoothing distance mask
        watershed_line (bool) - if True, include 1px line between contours
    """
    # define distances
    distances = distance_transform_edt(mask)
    distances = gaussian_filter(distances, sigma=sigma)

    # run segmentation (the bare `watershed` below resolves to the
    # module-level function, e.g. skimage's, not this method)
    connectivity = iterate_structure(generate_binary_structure(2, 1), 1)
    markers = self.get_segment_mask(distances, self.seeds)
    self.labels = watershed(-distances, markers=markers, mask=mask,
                            connectivity=connectivity,
                            watershed_line=watershed_line)
def get_peaks(img):
    """
    Create a binary structure to specify a neighborhood shape and use it to
    filter peaks from the spectrogram image passed as `img`. Erode the
    background and XOR it with the local-maxima mask to obtain a 2D boolean
    array containing only peaks. Look up the amplitude at each found peak,
    extract the time and frequency locations from the original spectrogram,
    and keep only the peaks whose amplitude is at least the average
    amplitude. Zip these into a list and return it for hashing.

    :param img: spectrogram image (2D array)
    :return: list of tuples containing time and frequency location in the
        structure [(time, freq), ...]
    """
    struct = generate_binary_structure(2, 2)
    neighbor = iterate_structure(structure=struct, iterations=20)
    avg_peak = np.mean(img)
    maximas = maximum_filter(input=img, footprint=neighbor) == img
    background = img == 0
    eroded_back = binary_erosion(input=background, structure=neighbor,
                                 border_value=1)
    # boolean array that is True exactly at the peaks
    peaks = maximas ^ eroded_back
    amps = img[peaks]
    t, f = np.where(peaks)
    zipped_peaks = list(zip(t, f, amps))
    # filtering through our peaks
    time_loc = [zips[0] for zips in zipped_peaks if zips[2] >= avg_peak]
    freq_loc = [zips[1] for zips in zipped_peaks if zips[2] >= avg_peak]
    return list(zip(time_loc, freq_loc))
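# --- Hedged usage sketch for get_peaks (not part of the original source).
# Builds a spectrogram for a synthetic two-tone signal with scipy.signal and
# feeds its magnitude to get_peaks. Assumes get_peaks' own dependencies
# (numpy as np, plus generate_binary_structure / iterate_structure /
# maximum_filter / binary_erosion from scipy.ndimage) are already imported.
def _demo_get_peaks():
    from scipy import signal
    fs = 8000.0
    t = np.arange(0, 2.0, 1.0 / fs)
    x = np.sin(2 * np.pi * 440 * t) + 0.5 * np.sin(2 * np.pi * 1200 * t)
    f, tt, sxx = signal.spectrogram(x, fs=fs, nperseg=512)
    peaks = get_peaks(sxx)  # list of (time, freq) tuples per the docstring
    print(len(peaks), 'peaks found')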
def seperate_skull(image):
    marker_internal, marker_external, marker_watershed = generate_markers(
        image)
    # Sobel gradient magnitude as the watershed landscape
    sobel_filtered_dx = ndimage.sobel(image, 1)
    sobel_filtered_dy = ndimage.sobel(image, 0)
    sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
    sobel_gradient *= 255.0 / np.max(sobel_gradient)
    # skimage.morphology.watershed moved to skimage.segmentation.watershed
    watershed = morphology.watershed(sobel_gradient, marker_watershed)
    outline = ndimage.morphological_gradient(watershed, size=(3, 3))
    outline = outline.astype(bool)
    # Disk-shaped kernel for the black top-hat
    blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
                       [0, 1, 1, 1, 1, 1, 0],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [0, 1, 1, 1, 1, 1, 0],
                       [0, 0, 1, 1, 1, 0, 0]]
    blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
    outline += ndimage.black_tophat(outline, structure=blackhat_struct)
    lungfilter = np.bitwise_or(marker_internal, outline)
    lungfilter = ndimage.morphology.binary_closing(
        lungfilter, structure=np.ones((5, 5)), iterations=3)
    segmented = np.where(lungfilter == 1, image, -2000 * np.ones((512, 512)))
    return segmented
def granulometry(BW, T=35, filename="simu"):
    # total original area (ndimage.measurements is deprecated; use ndimage.sum)
    A = ndimage.sum(BW)
    # number of objects
    label, N = ndimage.label(BW)

    area = np.zeros((T,), dtype=float)    # np.float was removed in numpy >= 1.24
    number = np.zeros((T,), dtype=float)

    # Warning: the structuring elements must verify B(n) = B(n-1) o B(1).
    se = ndimage.generate_binary_structure(2, 1)
    for i in np.arange(T):
        # iterate_structure returns the base structure unchanged for
        # iterations < 2, so the first few passes use the 3x3 element
        SE = ndimage.iterate_structure(se, i - 1)
        m = ndimage.binary_erosion(BW, structure=SE)
        G = ndimage.binary_propagation(m, mask=BW)
        area[i] = 100 * ndimage.sum(G) / A
        label, n = ndimage.label(G)
        number[i] = 100 * n / N  # true division in Python 3

    plt.figure()
    plt.plot(area, label='Area')
    plt.plot(number, label='Number')
    plt.legend()
    plt.savefig("granulo_" + filename + "1.pdf", bbox_inches='tight')
    plt.show()

    plt.figure()
    plt.plot(-np.diff(area), label='Area derivative')
    plt.plot(-np.diff(number), label='Number derivative')
    plt.legend()
    plt.savefig("granulo_" + filename + "2.pdf", bbox_inches='tight')
    plt.show()
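# --- Hedged usage sketch for granulometry (not part of the original source).
# Creates a binary image of random disks and runs the granulometry above.
# Assumes numpy as np, scipy.ndimage as ndimage, and matplotlib.pyplot as
# plt, matching the imports granulometry itself relies on.
def _demo_granulometry(seed=0):
    rng = np.random.default_rng(seed)
    yy, xx = np.mgrid[0:256, 0:256]
    BW = np.zeros((256, 256), dtype=bool)
    for cx, cy, r in zip(rng.integers(20, 236, 30),
                         rng.integers(20, 236, 30),
                         rng.integers(3, 12, 30)):
        BW |= (xx - cx) ** 2 + (yy - cy) ** 2 <= r ** 2
    granulometry(BW, T=15, filename="demo")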
def build_mask_sphere(self, sphere_radius):
    """build_mask_sphere"""
    sphere_vertex_number_1d = np.ceil(
        2.0 * sphere_radius / self.mesh_voxel_size).astype('int')
    sphere_element = ndimage.generate_binary_structure(3, 1)
    # iterating the 6-connected element grows a diamond-shaped (L1-ball)
    # footprint, not a Euclidean ball
    sphere = ndimage.iterate_structure(
        sphere_element,
        np.ceil(sphere_vertex_number_1d / 3).astype('int'))
    return sphere
def find_local_max(img, d_rad, threshold=1e-15):
    """
    This is effectively a replacement for pkfnd in the matlab/IDL code.

    The output of this function is meant to be fed into
    :py:func:`~subpixel_centroid`.

    The magic of numpy means this should work for any dimension data.

    :param img: an ndarray representing the data to find the local maxes
    :param d_rad: the radius of the dilation, the smallest possible
        spacing between local maxima
    :param threshold: optional, voxels < threshold are ignored.

    :rtype: (d,N) array of the local maximums.
    """
    d_rad = int(d_rad)
    img = np.array(np.squeeze(img))  # knock out singleton dimensions
    img[img < threshold] = -np.inf   # mask out pixels below threshold
    dim = img.ndim                   # get the dimension of data

    # make structuring element
    s = ndimage.generate_binary_structure(dim, 1)
    # scale it up to the desired size
    d_struct = ndimage.iterate_structure(s, int(d_rad))
    dilated_img = ndimage.grey_dilation(img, footprint=d_struct,
                                        cval=0, mode='constant')

    # find the locations that are the local maximum
    # TODO clean this up
    local_max = np.where(np.exp(img - dilated_img) > (1 - 1e-15))
    # the extra [::-1] is because matplotlib and ndimage disagree on xy vs yx
    return np.vstack(local_max[::-1])
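# --- Hedged usage sketch for find_local_max (not part of the original
# source). Builds a float image with two Gaussian spots and locates their
# maxima. Assumes numpy as np and scipy.ndimage as ndimage, as used above.
def _demo_find_local_max():
    yy, xx = np.mgrid[0:64, 0:64]
    img = (np.exp(-((xx - 20) ** 2 + (yy - 20) ** 2) / 20.0) +
           np.exp(-((xx - 45) ** 2 + (yy - 40) ** 2) / 20.0))
    maxes = find_local_max(img, d_rad=5, threshold=0.1)
    print(maxes)  # rows are x then y: [[20, 45], [20, 40]]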
def subpixel_centroid(img, local_maxes, mask_rad, struct_shape='circle'):
    '''
    This is effectively a replacement for cntrd in the matlab/IDL code.
    Works for 2D data only. Accelerated by numba.

    :param img: the data
    :param local_maxes: a (d,N) array with the location of the local
        maximums (as generated by :py:func:`~find_local_max`)
    :param mask_rad: the radius of the mask used for the averaging.
    :param struct_shape: ['circle' | 'diamond'] Shape of mask over each particle.

    :rtype: (d,N) array of positions, (d,) array of masses, (d,) array of r2,
    '''
    # First, check that all local maxes are within 'mask_rad' of the image
    # edges. Otherwise we will be going outside the bounds of the array in
    # _refine_centroids_loop()
    if not all(_local_max_within_bounds(img.shape, local_maxes, mask_rad)):
        raise IndexError(
            'One or more local maxes are too close to the image edge. Use local_max_crop().'
        )
    # Make coordinate order compatible with upcoming code
    local_maxes = local_maxes[::-1]
    # do some data checking/munging
    img = np.squeeze(img)  # knock out singleton dimensions
    dim = img.ndim
    if dim > 2:
        raise ValueError('Use subpixel_centroid_nd() for dimension > 2')
    so = [slice(-mask_rad, mask_rad + 1)] * dim
    # Make circular structuring element
    if struct_shape == 'circle':
        d_struct = (np.sum(np.mgrid[so]**2, 0) <= mask_rad**2).astype(np.int8)
    elif struct_shape == 'diamond':
        s = ndimage.generate_binary_structure(dim, 1)
        # scale it up to the desired size
        d_struct = ndimage.iterate_structure(s, int(mask_rad))
    else:
        raise ValueError('Shape must be diamond or circle')
    offset_masks = np.array([d_struct * os
                             for os in np.mgrid[so]]).astype(np.int8)
    r2_mask = np.zeros(d_struct.shape)
    for o in offset_masks:
        r2_mask += o**2
    r2_mask = np.sqrt(r2_mask).astype(float)
    results = _refine_centroids_loop(img, local_maxes, mask_rad, offset_masks,
                                     d_struct, r2_mask)
    pos = (results[0:2, :] + local_maxes)[::-1, :]
    #m = results[2,:]
    #r2 = results[3,:]
    #return pos, m, r2
    # Pack (x, y, intensity) per peak; the original `np.array(3, pos.shape[1])`
    # was broken (np.array does not take a shape that way), and the intensity
    # lookup must be rounded and indexed [y, x]
    peaks = np.zeros((3, pos.shape[1]))
    peaks[0:2, :] = pos
    ix = np.round(pos).astype(int)
    peaks[2, :] = img[ix[1, :], ix[0, :]]
    return peaks
def draw_spheres(self, mid_mask, thres):
    all_mask = mid_mask * (self.img > thres)
    struct = ndimage.generate_binary_structure(3, 2)
    struct_iter = ndimage.iterate_structure(struct, self.s_iter).astype(int)
    spheres = ndimage.binary_dilation(
        all_mask, structure=struct_iter,
        iterations=self.d_iter).astype(all_mask.dtype)
    struct_size = np.sum(struct_iter)
    return spheres, all_mask, struct_size
def seperate_lungs(self, image):
    marker_internal, marker_external, marker_watershed = self.generate_marker(
        image)
    # value of gradient (slope) in X and Y direction
    sobel_filtered_dx = ndimage.sobel(image, 1)  # vertical derivative (detects horizontal edges)
    sobel_filtered_dy = ndimage.sobel(image, 0)  # horizontal derivative (detects vertical edges)
    # magnitude of gradient; drops the negative sign
    sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
    # normalize (this is our landscape image and we will fill it with water)
    sobel_gradient *= 255.0 / np.max(sobel_gradient)
    watershed = morphology.watershed(sobel_gradient, marker_watershed)
    outline = ndimage.morphological_gradient(watershed, size=(3, 3))
    outline = outline.astype(bool)
    # Creation of the disk-kernel
    blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
                       [0, 1, 1, 1, 1, 1, 0],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [0, 1, 1, 1, 1, 1, 0],
                       [0, 0, 1, 1, 1, 0, 0]]
    blackhat_struct = ndimage.iterate_structure(blackhat_struct, 7)
    outline += ndimage.black_tophat(outline, structure=blackhat_struct)
    lungfilter = np.bitwise_or(marker_internal, outline)
    lungfilter = ndimage.morphology.binary_closing(
        lungfilter, structure=np.ones((5, 5)), iterations=3)
    segmented = np.where(lungfilter == 1, image,
                         -2000 * np.ones((512, 512)))
    return segmented
def _findPeaks(spectrogram, min_amplitude=DEFAULT_MIN_AMPLITUDE):
    # Find local maxima using binary_erosion
    neighbourhood_structure = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(neighbourhood_structure,
                                     Extractor.PEAK_NEIGHBORHOOD_SIZE)
    local_max = maximum_filter(spectrogram,
                               footprint=neighborhood) == spectrogram
    background = (spectrogram == 0)
    eroded_background = binary_erosion(background, structure=neighborhood,
                                       border_value=1)
    # boolean '-' raises a TypeError on modern numpy; use & ~ for the
    # intended set difference
    detected_peaks = local_max & ~eroded_background

    # Build the array of peaks
    peak_amplitudes = spectrogram[detected_peaks].flatten()
    peak_frequencies, peak_times = np.where(detected_peaks)
    raw_peaks = []
    for i in range(
            0,
            min(peak_amplitudes.size, peak_frequencies.size,
                peak_times.size)):
        raw_peaks.append(
            SpectogramPeak(peak_frequencies[i], peak_times[i],
                           peak_amplitudes[i]))

    # Keep only the peaks with an acceptable amplitude
    peaks = [peak for peak in raw_peaks if peak.amplitude > min_amplitude]
    return peaks
def detect_peaks(self, spectrogram):
    """
    Takes an image of the spectrogram and detects the peaks using the
    local maximum filter.

    Input:
        spectrogram: a matrix of time-frequency strengths from
            matplotlib's specgram method.
        peak_sensitivity: how large a neighborhood structure to consider
            when looking for peaks (taken from self).

    Returns a boolean mask of the peaks (i.e. 1 when the pixel's value is
    the neighborhood maximum, 0 otherwise)
    """
    # define a connected neighborhood and find max values in neighborhood
    neighborhood_structure = generate_binary_structure(2, 1)
    neighborhood = ndi.iterate_structure(neighborhood_structure,
                                         self.peak_sensitivity)
    local_max = ndi.filters.maximum_filter(
        spectrogram, footprint=neighborhood) == spectrogram
    background = (spectrogram == 0)
    eroded_background = binary_erosion(background, structure=neighborhood,
                                       border_value=1)
    # '!=' on booleans is XOR: removes the eroded background from the maxima
    detected_peaks = local_max != eroded_background
    if self.min_peak_amplitude:
        filtered_peak_locations = self.filter_peaks_by_size(
            detected_peaks, spectrogram)
    else:
        filtered_peak_locations = detected_peaks
    return filtered_peak_locations
def _process_image(filename, out_format, resize=None, dilate=None,
                   require_binary_output=False):
    """Process a single image file.

    Args:
        filename: string, path to an image file e.g., '/path/to/example.JPG'.
        out_format: string, output format type e.g., 'PNG', 'JPEG'
    Returns:
        image_buffer: string, encoding of image in out_format
        height: integer, image height in pixels.
        width: integer, image width in pixels.
    """
    # Read the image file (tf.gfile is the TF1-era API).
    with tf.gfile.FastGFile(filename, 'rb') as f:
        raw_image_data = f.read()

    # Convert any format to PNG for consistency.
    #pil_img = Image.open(StringIO(raw_image_data))
    pil_img = Image.open(BytesIO(raw_image_data))

    # Dilate the image if requested; create a structuring element of the
    # appropriate size.
    if dilate is not None:
        dilation_se = iterate_structure(generate_binary_structure(2, 1),
                                        int((dilate - 1) / 2))
        im = binary_dilation(np.array(pil_img), structure=dilation_se)
        pil_img = Image.fromarray(np.uint8(im) * 255)

    if resize is not None:
        # NOTE: use reversed order of resize to make input consistent with
        # tensorflow
        pil_img = pil_img.resize(resize[::-1])

    # If the output should be binary, re-binarize to remove interpolation
    # values introduced by the resize.
    if require_binary_output:
        im = (np.array(pil_img) > 0)
        pil_img = Image.fromarray(np.uint8(im) * 255)

    try:
        #image_data = StringIO()
        image_data = BytesIO()
        pil_img.save(image_data, out_format)
    except Exception as e:
        print("exception inside _process_image:", e)

    height = pil_img.size[1]
    width = pil_img.size[0]
    if pil_img.mode in ['RGBA', 'CMYK']:
        num_chanels = 4
    elif pil_img.mode in ['RGB', 'LAB', 'HSV', 'YCbCr']:
        num_chanels = 3
    else:
        num_chanels = 1
    return image_data.getvalue(), height, width, num_chanels
def find_local_max(img, d_rad, threshold=1e-15, inplace=False):
    """
    This is effectively a replacement for pkfnd in the matlab/IDL code.

    The output of this function is meant to be fed into
    :py:func:`~subpixel_centroid`.

    The magic of numpy means this should work for any dimension data.

    :param img: an ndarray representing the data to find the local maxes
    :param d_rad: the radius of the dilation, the smallest possible
        spacing between local maxima
    :param threshold: optional, voxels < threshold are ignored.
    :param inplace: If True, `img` is modified.

    :rtype: (d,N) array of the local maximums.
    """
    d_rad = int(d_rad)
    # knock out singleton dimensions,
    # and prepare to change values in thresholding step.
    img = np.array(np.squeeze(img))
    if not inplace:
        img = img.copy()  # otherwise we could mess up use of 'img' by subsequent code
    img[img < threshold] = -np.inf  # mask out pixels below threshold
    dim = img.ndim  # get the dimension of data

    # make structuring element
    s = ndimage.generate_binary_structure(dim, 1)
    # scale it up to the desired size
    d_struct = ndimage.iterate_structure(s, int(d_rad))
    dilated_img = ndimage.grey_dilation(img, footprint=d_struct,
                                        cval=0, mode='constant')

    # find the locations that are the local maximum
    # TODO clean this up
    maxima = np.vstack(np.where(np.exp(img - dilated_img) > (1 - 1e-15))).T
    count = 0
    while True:
        duplicates = KDTree(maxima, 30).query_pairs(d_rad)
        if len(duplicates) == 0:
            break
        count += len(duplicates)
        to_drop = []
        for pair in duplicates:
            # Take the average position.
            # This is just a starting point, so we won't go into subpx
            # precision here.
            merged = maxima[[pair[0], pair[1]]].mean(0).astype(int)
            maxima[pair[0]] = merged  # overwrite one
            to_drop.append(pair[1])   # queue other to be dropped
        maxima = np.delete(maxima, to_drop, 0)

    # the extra [::-1] is because matplotlib and ndimage disagree on xy vs yx.
    # Finally, there should be nothing within 'd_rad' of the edges of the
    # image.
    print('%i peaks were removed' % count)  # was a Python 2 print statement
    return np.vstack(maxima).T[::-1]
def get_filtered_lung(image):
    # Creation of the internal marker
    marker_internal = image < -500
    marker_internal = segmentation.clear_border(marker_internal)
    marker_internal_labels = measure.label(marker_internal)
    areas = [r.area for r in measure.regionprops(marker_internal_labels)]
    areas.sort()
    if len(areas) > 2:
        for region in measure.regionprops(marker_internal_labels):
            if region.area < areas[-2]:
                for coordinates in region.coords:
                    marker_internal_labels[coordinates[0], coordinates[1]] = 0
    marker_internal = marker_internal_labels > 0
    # Creation of the external marker
    external_a = ndimage.binary_dilation(marker_internal, iterations=10)
    external_b = ndimage.binary_dilation(marker_internal, iterations=55)
    marker_external = external_b ^ external_a
    # Creation of the watershed marker matrix
    marker_watershed = np.zeros((512, 512), dtype=int)  # np.int was removed in numpy >= 1.24
    marker_watershed += marker_internal * 255
    marker_watershed += marker_external * 128
    # Creation of the Sobel-Gradient
    sobel_filtered_dx = ndimage.sobel(image, 1)
    sobel_filtered_dy = ndimage.sobel(image, 0)
    sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
    sobel_gradient *= 255.0 / np.max(sobel_gradient)
    # Watershed algorithm
    watershed = morphology.watershed(sobel_gradient, marker_watershed)
    # Reducing the image created by the Watershed algorithm to its outline
    outline = ndimage.morphological_gradient(watershed, size=(3, 3))
    outline = outline.astype(bool)
    # Performing Black-Tophat Morphology for reinclusion
    # Creation of the disk-kernel and increasing its size a bit
    blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
                       [0, 1, 1, 1, 1, 1, 0],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [0, 1, 1, 1, 1, 1, 0],
                       [0, 0, 1, 1, 1, 0, 0]]
    blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
    # Perform the Black-Hat
    outline += ndimage.black_tophat(outline, structure=blackhat_struct)
    # Use the internal marker and the Outline that was just created to
    # generate the lungfilter
    lungfilter = np.bitwise_or(marker_internal, outline)
    # Close holes in the lungfilter
    # fill_holes is not used here, since in some slices the heart would be
    # reincluded by accident
    lungfilter = ndimage.morphology.binary_closing(
        lungfilter, structure=np.ones((5, 5)), iterations=3)
    return lungfilter
def local_maxima(image, radius, separation, threshold):
    ndim = image.ndim
    threshold -= 1
    # The intersection of the image with its dilation gives local maxima.
    if not np.issubdtype(image.dtype, np.integer):
        raise TypeError("Perform dilation on exact (i.e., integer) data.")
    #footprint = self.binary_mask(radius, ndim)
    s = ndimage.generate_binary_structure(ndim, 2)
    # scale it up to the desired size
    footprint = ndimage.iterate_structure(s, int(radius))
    dilation = ndimage.grey_dilation(image, footprint=footprint,
                                     mode='constant')
    maxima = np.vstack(np.where((image == dilation) &
                                (image > threshold))).T[:, ::-1]
    if not np.size(maxima) > 0:
        #warnings.warn("Image contains no local maxima.", UserWarning)
        return np.empty((0, ndim))

    # Flat peaks return multiple nearby maxima. Eliminate duplicates.
    if len(maxima) > 0:
        while True:
            duplicates = cKDTree(maxima, 30).query_pairs(separation)
            if len(duplicates) == 0:
                break
            to_drop = []
            for pair in duplicates:
                # Take the average position.
                # This is just a starting point, so we won't go into subpx
                # precision here.
                merged = maxima[[pair[0], pair[1]]].mean(0).astype(int)
                maxima[pair[0]] = merged  # overwrite one
                to_drop.append(pair[1])   # queue other to be dropped
            maxima = np.delete(maxima, to_drop, 0)

    # Do not accept peaks near the edges.
    shape = np.array(image.shape)
    margin = int(separation) // 2
    near_edge = np.any((maxima < margin) | (maxima > (shape - margin)), 1)
    maxima = maxima[~near_edge]
    #if not np.size(maxima) > 0:
    #    warnings.warn("All local maxima were in the margins.", UserWarning)
    #x, y = maxima[:,0], maxima[:,1]
    #max_val = image[x,y].reshape(1,len(maxima))
    #peaks = np.concatenate((maxima,max_val), axis = 1)
    return (maxima[:, 0], maxima[:, 1],
            image[maxima[:, 0], maxima[:, 1]].reshape(1, len(maxima)))
def wiggle_room_precision_recall(pred, boundary, margin=2, connectivity=1):
    struct = nd.generate_binary_structure(boundary.ndim, connectivity)
    gtd = nd.binary_dilation(boundary, struct, margin)
    struct_m = nd.iterate_structure(struct, margin)
    pred_dil = nd.grey_dilation(pred, footprint=struct_m)
    missing = np.setdiff1d(np.unique(pred), np.unique(pred_dil))
    for m in missing:
        pred_dil.ravel()[np.flatnonzero(pred == m)[0]] = m
    prec, _, ts = precision_recall_curve(gtd.ravel(), pred.ravel())
    _, rec, _ = precision_recall_curve(boundary.ravel(), pred_dil.ravel())
    return zip(ts, prec, rec)
def local_maxima(image, radius, separation, percentile=64):
    """Find local maxima whose brightness is above a given percentile."""
    ndim = image.ndim
    # Compute a threshold based on percentile.
    not_black = image[np.nonzero(image)]
    if len(not_black) == 0:
        warnings.warn("Image is completely black.", UserWarning)
        return np.empty((0, ndim))
    threshold = stats.scoreatpercentile(not_black, percentile)

    # The intersection of the image with its dilation gives local maxima.
    if not np.issubdtype(image.dtype, np.integer):
        raise TypeError("Perform dilation on exact (i.e., integer) data.")
    #footprint = binary_mask(radius, ndim, separation)
    s = ndimage.generate_binary_structure(ndim, 2)
    # scale it up to the desired size (the original referenced an undefined
    # `d_rad` here; `radius` is the corresponding argument)
    footprint = ndimage.iterate_structure(s, int(radius))
    dilation = ndimage.grey_dilation(image, footprint=footprint,
                                     mode='constant')
    maxima = np.vstack(np.where((image == dilation) &
                                (image > threshold))).T
    if not np.size(maxima) > 0:
        warnings.warn("Image contains no local maxima.", UserWarning)
        return np.empty((0, ndim))

    # Flat peaks return multiple nearby maxima. Eliminate duplicates.
    while True:
        duplicates = cKDTree(maxima, 30).query_pairs(separation)
        if len(duplicates) == 0:
            break
        to_drop = []
        for pair in duplicates:
            # Take the average position.
            # This is just a starting point, so we won't go into subpx
            # precision here.
            merged = maxima[[pair[0], pair[1]]].mean(0).astype(int)
            maxima[pair[0]] = merged  # overwrite one
            to_drop.append(pair[1])   # queue other to be dropped
        maxima = np.delete(maxima, to_drop, 0)

    # Do not accept peaks near the edges.
    shape = np.array(image.shape)
    margin = int(separation) // 2
    near_edge = np.any((maxima < margin) | (maxima > (shape - margin)), 1)
    maxima = maxima[~near_edge]
    if not np.size(maxima) > 0:
        warnings.warn("All local maxima were in the margins.", UserWarning)

    # Return coords as a numpy array shaped so it can be passed directly
    # to the DataFrame constructor.
    return maxima
def separate_lungs(image, return_list=None, iteration=-1):
    """
    This only takes in a 2D slice to do the lung segmentation and takes
    really long to run, but supposedly handles all corner cases. Not sure
    if the mask from this is very good; it might be too dilated.

    :param image:
    :param return_list:
    :param iteration:
    :return:
    """
    #Creation of the markers as shown above:
    marker_internal, marker_external, marker_watershed = generate_markers(
        image)
    #Creation of the Sobel-Gradient
    sobel_filtered_dx = ndimage.sobel(image, 1)
    sobel_filtered_dy = ndimage.sobel(image, 0)
    sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
    sobel_gradient *= 255.0 / np.max(sobel_gradient)
    #Watershed algorithm
    watershed = morphology.watershed(sobel_gradient, marker_watershed)
    #Reducing the image created by the Watershed algorithm to its outline
    outline = ndimage.morphological_gradient(watershed, size=(3, 3))
    outline = outline.astype(bool)
    #Performing Black-Tophat Morphology for reinclusion
    #Creation of the disk-kernel and increasing its size a bit
    blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
                       [0, 1, 1, 1, 1, 1, 0],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [0, 1, 1, 1, 1, 1, 0],
                       [0, 0, 1, 1, 1, 0, 0]]
    blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
    #Perform the Black-Hat
    outline += ndimage.black_tophat(outline, structure=blackhat_struct)
    #Use the internal marker and the Outline that was just created to generate the lungfilter
    lungfilter = np.bitwise_or(marker_internal, outline)
    #Close holes in the lungfilter
    #fill_holes is not used here, since in some slices the heart would be reincluded by accident
    lungfilter = ndimage.morphology.binary_closing(
        lungfilter, structure=np.ones((5, 5)), iterations=3)
    # #Apply the lungfilter (note the filtered areas being assigned -2000 HU)
    # segmented = np.where(lungfilter == 1, image, -2000*np.ones((512, 512)))
    if iteration >= 0 and return_list:
        return_list[iteration] = lungfilter
    else:
        return lungfilter
def subpixel_centroid(img, local_maxes, mask_rad, struct_shape='circle'):
    '''
    This is effectively a replacement for cntrd in the matlab/IDL code.
    Works for 2D data only. Accelerated by numba.

    :param img: the data
    :param local_maxes: a (d,N) array with the location of the local
        maximums (as generated by :py:func:`~find_local_max`)
    :param mask_rad: the radius of the mask used for the averaging.
    :param struct_shape: ['circle' | 'diamond'] Shape of mask over each particle.

    :rtype: (d,N) array of positions, (d,) array of masses, (d,) array of r2,
    '''
    # First, check that all local maxes are within 'mask_rad' of the image
    # edges. Otherwise we will be going outside the bounds of the array in
    # _refine_centroids_loop()
    if not all(_local_max_within_bounds(img.shape, local_maxes, mask_rad)):
        raise IndexError(
            'One or more local maxes are too close to the image edge. Use local_max_crop().'
        )
    # Make coordinate order compatible with upcoming code
    local_maxes = local_maxes[::-1]
    # do some data checking/munging
    img = np.squeeze(img)  # knock out singleton dimensions
    dim = img.ndim
    if dim > 2:
        raise ValueError('Use subpixel_centroid_nd() for dimension > 2')
    so = [slice(-mask_rad, mask_rad + 1)] * dim
    # Make circular structuring element
    if struct_shape == 'circle':
        d_struct = (np.sum(np.mgrid[so]**2, 0) <= mask_rad**2).astype(np.int8)
    elif struct_shape == 'diamond':
        s = ndimage.generate_binary_structure(dim, 1)
        # scale it up to the desired size
        d_struct = ndimage.iterate_structure(s, int(mask_rad))
    else:
        raise ValueError('Shape must be diamond or circle')
    offset_masks = np.array([d_struct * os
                             for os in np.mgrid[so]]).astype(np.int8)
    r2_mask = np.zeros(d_struct.shape)
    for o in offset_masks:
        r2_mask += o ** 2
    r2_mask = np.sqrt(r2_mask).astype(float)
    results = _refine_centroids_loop(img, local_maxes, mask_rad, offset_masks,
                                     d_struct, r2_mask)
    pos = (results[0:2, :] + local_maxes)[::-1, :]
    #m = results[2,:]
    #r2 = results[3,:]
    #return pos, m, r2
    peaks = []
    for i in range(0, pos.shape[1]):
        x = pos[0][i]
        y = pos[1][i]
        # round the subpixel position for the intensity lookup (numpy does
        # not allow float indices)
        peaks.append([x, y, img[int(round(y)), int(round(x))]])
    return peaks
def make_dilation_kernel(dil_param):
    kernel = ndimage.generate_binary_structure(2, 1)
    kernel = ndimage.iterate_structure(kernel, dil_param)
    z_component = np.zeros(kernel.shape, dtype=kernel.dtype)
    width = kernel.shape[-1]
    mid = width // 2
    z_component[mid, mid] = 1
    # kernel = np.stack((z_component, kernel, z_component), axis=0)
    kernel = np.stack((kernel, kernel, kernel), axis=0)
    return kernel.reshape((1, 1, 3, width, width))
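# --- Hedged sketch showing the shape produced by make_dilation_kernel
# (not part of the original source). Assumes numpy as np and
# scipy.ndimage as ndimage, as the function itself does.
def _demo_make_dilation_kernel():
    kernel = make_dilation_kernel(2)
    # iterate_structure(..., 2) grows the 3x3 cross into a 5x5 diamond,
    # which is stacked 3 deep and reshaped to a conv3d-style weight layout
    print(kernel.shape)  # (1, 1, 3, 5, 5)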
def get_upd_flag(self, i_t, distance, entropy):
    """
    :param i_t:
    :param distance: sum of M nearest neighbours' distances, D
    :param entropy: entropy of the physics-guided state evolution estimate
        distribution, E
    :return: pf_upd_flag: dimension (n_lat, n_lon)
        pf_upd_flag[i,j] = 1 denotes skipping estimate correction for
        subarea (i,j), i.e. the inequality E - alpha * D > 0 holds.
    """
    pf_upd_flag = np.zeros((self.n_lat, self.n_lon))
    if self.flag_empty(i_t):  # needs to be modified
        return pf_upd_flag
    # all areas update
    if self.alg_upd == 1:
        pf_upd_flag = np.ones((self.n_lat, self.n_lon))
    # update the collected data areas
    elif self.alg_upd == 2:
        idx = self.data.smp_cnt_upd[i_t] > 0
        pf_upd_flag[idx] = 1
    # update collected data areas + good compensated data areas + dilation
    # --- needs modification
    elif self.alg_upd == 3:
        # didn't make sense
        # idx = self.data.ver_re_err_adp[i_t-1] >= self.ver_re_err_th
        # pf_upd_flag[idx] = 1
        idx = np.nonzero(self.data.smp_cnt_upd[i_t] > 0)
        pf_upd_flag[idx] = 1
        # didn't make sense
        # idx = np.nonzero(self.data.ver_var_adp[i_t-1] >= self.ver_var_th)
        # pf_upd_flag[idx] = 1
        # idx = np.nonzero(pf_upd_flag > 0)
        struct = generate_binary_structure(2, 1)
        # se = strel('square',3);
        se = iterate_structure(struct, 3).astype(int)  # note: unused below
        pf_upd_flag = binary_dilation(pf_upd_flag, structure=struct)
    # use ver_re_err and ver_var at i_t - 1 to get the update flag matrix
    # for the adaptive scheme
    elif self.alg_upd == 4:
        # TODO distance - entropy????
        feature_tmp = np.reshape(distance - entropy,
                                 (self.n_lat, self.n_lon))
        idx = feature_tmp <= 0
        pf_upd_flag[idx] = 1
        idx1 = self.data.smp_cnt_upd[i_t] > 0
        pf_upd_flag[idx1] = 1
    return pf_upd_flag
def seperate_lungs(image):
    """
    Conducts the lung segmentation process using the watershed algorithm.

    :param image: a pixel array
    :return: segmented, lungfilter: np ndarrays of the segmented lungs and
        the binary lung filter used to produce them
    """
    # Creation of the markers as shown above:
    marker_internal, marker_external, marker_watershed = SegmentationA.generate_markers(
        image)
    # Creation of the Sobel-Gradient
    sobel_filtered_dx = ndimage.sobel(image, 1)
    sobel_filtered_dy = ndimage.sobel(image, 0)
    sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
    sobel_gradient *= 255.0 / np.max(sobel_gradient)
    # Watershed algorithm
    watershed = morphology.watershed(sobel_gradient, marker_watershed)
    # Reducing the image created by the Watershed algorithm to its outline
    outline = ndimage.morphological_gradient(watershed, size=(3, 3))
    outline = outline.astype(bool)
    # Performing Black-Tophat Morphology for reinclusion
    # Creation of the disk-kernel and increasing its size a bit
    blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
                       [0, 1, 1, 1, 1, 1, 0],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [0, 1, 1, 1, 1, 1, 0],
                       [0, 0, 1, 1, 1, 0, 0]]
    blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
    # Perform the Black-Hat
    outline += ndimage.black_tophat(outline, structure=blackhat_struct)
    # Use the internal marker and the Outline that was just created to
    # generate the lungfilter
    lungfilter = np.bitwise_or(marker_internal, outline)
    # Close holes in the lungfilter
    # fill_holes is not used here, since in some slices the heart would be
    # reincluded by accident
    lungfilter = ndimage.morphology.binary_closing(
        lungfilter, structure=np.ones((5, 5)), iterations=3)
    # Apply the lungfilter (note the filtered areas being assigned -2000 HU)
    segmented = np.where(lungfilter == 1, image,
                         -2000 * np.ones((len(image), len(image[0]))))
    return segmented, lungfilter
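# --- Hedged usage sketch for the watershed-based lung segmentation
# functions above (not part of the original source). 'ct_slices' is a
# hypothetical (n, 512, 512) HU volume; SegmentationA.generate_markers must
# come from the surrounding project, and morphology.watershed is
# skimage.morphology.watershed (moved to skimage.segmentation.watershed in
# newer releases).
def _demo_seperate_lungs(ct_slices):
    masks = []
    for ct_slice in ct_slices:
        segmented, lungfilter = seperate_lungs(ct_slice)
        masks.append(lungfilter)
    return np.stack(masks)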
def fill_border_holes_with_black_hat(self, outline):
    blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
                       [0, 1, 1, 1, 1, 1, 0],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [0, 1, 1, 1, 1, 1, 0],
                       [0, 0, 1, 1, 1, 0, 0]]
    blackhat_struct = ndimage.iterate_structure(blackhat_struct, 2)
    outline += ndimage.black_tophat(outline, structure=blackhat_struct)
    return outline
def local_maxima(self, image, radius, separation, threshold):
    ndim = image.ndim
    threshold -= 1
    # The intersection of the image with its dilation gives local maxima.
    if not np.issubdtype(image.dtype, np.integer):
        raise TypeError("Perform dilation on exact (i.e., integer) data.")
    #footprint = self.binary_mask(radius, ndim)
    s = ndimage.generate_binary_structure(ndim, 2)
    # scale it up to the desired size
    footprint = ndimage.iterate_structure(s, int(radius))
    dilation = ndimage.grey_dilation(image, footprint=footprint,
                                     mode='constant')
    maxima = np.vstack(np.where((image == dilation) &
                                (image > threshold))).T[:, ::-1]
    if not np.size(maxima) > 0:
        #warnings.warn("Image contains no local maxima.", UserWarning)
        return np.empty((0, ndim))

    # Flat peaks return multiple nearby maxima. Eliminate duplicates.
    if len(maxima) > 0:
        while True:
            duplicates = cKDTree(maxima, 30).query_pairs(separation)
            if len(duplicates) == 0:
                break
            to_drop = []
            for pair in duplicates:
                # Take the average position.
                # This is just a starting point, so we won't go into subpx
                # precision here.
                merged = maxima[[pair[0], pair[1]]].mean(0).astype(int)
                maxima[pair[0]] = merged  # overwrite one
                to_drop.append(pair[1])   # queue other to be dropped
            maxima = np.delete(maxima, to_drop, 0)

    # Do not accept peaks near the edges.
    shape = np.array(image.shape)
    margin = int(separation) // 2
    near_edge = np.any((maxima < margin) | (maxima > (shape - margin)), 1)
    maxima = maxima[~near_edge]
    #if not np.size(maxima) > 0:
    #    warnings.warn("All local maxima were in the margins.", UserWarning)
    x, y = maxima[:, 0], maxima[:, 1]
    max_val = image[x, y].reshape(len(maxima), 1)
    peaks = np.concatenate((maxima, max_val), axis=1)
    return peaks
def gradient_threshold(in_file, in_segm, thresh=1.0, out_file=None):
    """Compute a threshold from the histogram of the magnitude gradient
    image."""
    import os.path as op
    import numpy as np
    import nibabel as nb
    from scipy import ndimage as sim

    struc = sim.iterate_structure(sim.generate_binary_structure(3, 2), 2)

    if out_file is None:
        fname, ext = op.splitext(op.basename(in_file))
        if ext == '.gz':
            fname, ext2 = op.splitext(fname)
            ext = ext2 + ext
        out_file = op.abspath('{}_gradmask{}'.format(fname, ext))

    imnii = nb.load(in_file)
    hdr = imnii.header.copy()  # get_header() is deprecated
    hdr.set_data_dtype(np.uint8)  # pylint: disable=no-member
    data = imnii.get_fdata().astype(np.float32)  # get_data() is deprecated

    mask = np.zeros_like(data, dtype=np.uint8)  # pylint: disable=no-member
    mask[data > 15.] = 1

    segdata = nb.load(in_segm).get_fdata().astype(np.uint8)
    segdata[segdata > 0] = 1
    segdata = sim.binary_dilation(
        segdata, struc, iterations=2, border_value=1).astype(np.uint8)  # pylint: disable=no-member
    mask[segdata > 0] = 1
    mask = sim.binary_closing(mask, struc, iterations=2).astype(np.uint8)  # pylint: disable=no-member

    # Remove small objects
    label_im, nb_labels = sim.label(mask)
    artmsk = np.zeros_like(mask)
    if nb_labels > 2:
        sizes = sim.sum(mask, label_im, list(range(nb_labels + 1)))
        ordered = list(reversed(sorted(zip(sizes,
                                           list(range(nb_labels + 1))))))
        for _, label in ordered[2:]:
            mask[label_im == label] = 0
            artmsk[label_im == label] = 1

    mask = sim.binary_fill_holes(mask, struc).astype(np.uint8)  # pylint: disable=no-member
    nb.Nifti1Image(mask, imnii.affine, hdr).to_filename(out_file)
    return out_file
def segment_lung(params, I, I_affine):
    #####################################################
    # Intensity thresholding & Morphological operations
    #####################################################
    M = np.zeros(I.shape)
    M[I > params['lungMinValue']] = 1
    M[I > params['lungMaxValue']] = 0
    struct_s = ndimage.generate_binary_structure(3, 1)
    struct_m = ndimage.iterate_structure(struct_s, 2)
    M = ndimage.binary_closing(M, structure=struct_s, iterations=1)
    M = ndimage.binary_opening(M, structure=struct_m, iterations=1)
    #####################################################
    # Estimate lung field of view
    #####################################################
    [m, n, p] = I.shape
    medx = int(m / 2)
    medy = int(n / 2)
    xrange1 = int(m / 2 * params['xRangeRatio1'])
    xrange2 = int(m / 2 * params['xRangeRatio2'])
    zrange1 = int(p * params['zRangeRatio1'])
    zrange2 = int(p * params['zRangeRatio2'])
    #####################################################
    # Select largest connected components & save nii
    #####################################################
    M = measure.label(M)
    label1 = M[medx - xrange2:medx - xrange1, medy, zrange1:zrange2]
    label2 = M[medx + xrange1:medx + xrange2, medy, zrange1:zrange2]
    # note: scipy >= 1.9 changed stats.mode; pass keepdims=True there
    label1 = stats.mode(label1[label1 > 0])[0][0]
    label2 = stats.mode(label2[label2 > 0])[0][0]
    M[M == label1] = -1
    M[M == label2] = -1
    M[M > 0] = 0
    M = M * -1
    M = ndimage.binary_closing(M, structure=struct_m, iterations=1)
    M = ndimage.binary_fill_holes(M)
    Mlung = np.int8(M)
    nib.Nifti1Image(Mlung, I_affine).to_filename(
        './result/sample_lungaw.nii.gz')
    return Mlung
def gradient_threshold(in_file, thresh=1.0, out_file=None):
    """Compute a threshold from the histogram of the magnitude gradient
    image."""
    import os.path as op
    import numpy as np
    import nibabel as nb
    from scipy import ndimage as sim

    thresh *= 1e-2

    if out_file is None:
        fname, ext = op.splitext(op.basename(in_file))
        if ext == '.gz':
            fname, ext2 = op.splitext(fname)
            ext = ext2 + ext
        out_file = op.abspath('%s_gradmask%s' % (fname, ext))

    imnii = nb.load(in_file)
    data = imnii.get_fdata()  # get_data() is deprecated in nibabel >= 3
    hist, bin_edges = np.histogram(data[data > 0], bins=128, density=True)  # pylint: disable=no-member

    # Find threshold at 1% frequency
    for i, freq in reversed(list(enumerate(hist))):
        binw = bin_edges[i + 1] - bin_edges[i]
        if (freq * binw) >= thresh:
            out_thresh = 0.5 * binw
            break

    mask = np.zeros_like(data, dtype=np.uint8)  # pylint: disable=no-member
    mask[data > out_thresh] = 1
    struc = sim.iterate_structure(sim.generate_binary_structure(3, 2), 2)
    mask = sim.binary_opening(mask, struc).astype(np.uint8)  # pylint: disable=no-member

    # Remove small objects
    label_im, nb_labels = sim.label(mask)
    if nb_labels > 2:
        sizes = sim.sum(mask, label_im, range(nb_labels + 1))
        ordered = list(reversed(sorted(zip(sizes, range(nb_labels + 1)))))
        for _, label in ordered[2:]:
            mask[label_im == label] = 0

    mask = sim.binary_closing(mask, struc).astype(np.uint8)  # pylint: disable=no-member
    mask = sim.binary_fill_holes(mask, struc).astype(np.uint8)  # pylint: disable=no-member

    hdr = imnii.header.copy()  # get_header() is deprecated
    hdr.set_data_dtype(np.uint8)  # pylint: disable=no-member
    nb.Nifti1Image(mask, imnii.affine, hdr).to_filename(out_file)
    return out_file
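# --- Hedged usage sketch for gradient_threshold (not part of the original
# source). Writes a small synthetic magnitude-gradient NIfTI to disk and
# runs the function on it; the output mask lands in the working directory.
def _demo_gradient_threshold(tmpdir='.'):
    import os.path as op
    import numpy as np
    import nibabel as nb
    data = np.abs(np.random.randn(32, 32, 32)).astype(np.float32) * 30
    in_file = op.join(tmpdir, 'grad_demo.nii.gz')
    nb.Nifti1Image(data, np.eye(4)).to_filename(in_file)
    print(gradient_threshold(in_file, thresh=1.0))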
def applyImageThresholds(self, image, highThreshold=None, lowThreshold=None,
                         regularizationWidth=2):
    """Restrict image values to be between upper and lower limits.

    This method flags all pixels in an image that are outside of the given
    threshold values. The threshold values are taken from a reference
    image, so noisy pixels are likely to get flagged. In order to exclude
    those noisy pixels, the array of flags is eroded and dilated, which
    removes isolated pixels outside of the thresholds from the list of
    pixels to be modified. Pixels that remain flagged after this operation
    have their values set to the appropriate upper or lower threshold
    value.

    Parameters
    ----------
    image : `numpy.ndarray`
        The image to apply the thresholds to.
        The values will be modified in place.
    highThreshold : `numpy.ndarray`, optional
        Array of upper limit values for each pixel of ``image``.
    lowThreshold : `numpy.ndarray`, optional
        Array of lower limit values for each pixel of ``image``.
    regularizationWidth : `int`, optional
        Minimum radius of a region to include in regularization, in pixels.
    """
    # Generate the structure for binary erosion and dilation, which is used
    # to remove noise-like pixels. Groups of pixels with a radius smaller
    # than ``regularizationWidth`` will be excluded from regularization.
    filterStructure = ndimage.iterate_structure(
        ndimage.generate_binary_structure(2, 1), regularizationWidth)
    if highThreshold is not None:
        highPixels = image > highThreshold
        if regularizationWidth > 0:
            # Erode and dilate ``highPixels`` to exclude noisy pixels.
            highPixels = ndimage.morphology.binary_opening(
                highPixels, structure=filterStructure)
        image[highPixels] = highThreshold[highPixels]
    if lowThreshold is not None:
        lowPixels = image < lowThreshold
        if regularizationWidth > 0:
            # Erode and dilate ``lowPixels`` to exclude noisy pixels.
            lowPixels = ndimage.morphology.binary_opening(
                lowPixels, structure=filterStructure)
        image[lowPixels] = lowThreshold[lowPixels]
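# --- Hedged usage sketch for applyImageThresholds (not part of the original
# source). Clips a noisy image between per-pixel template bounds; `task` is
# a hypothetical host instance providing the method. Assumes numpy as np
# and scipy.ndimage as ndimage, as the method itself does.
def _demo_applyImageThresholds(task, rng=None):
    rng = rng or np.random.default_rng(0)
    image = rng.normal(size=(64, 64))
    template = np.zeros_like(image)
    task.applyImageThresholds(image,
                              highThreshold=template + 2.0,
                              lowThreshold=template - 2.0,
                              regularizationWidth=2)
    return image  # modified in place: clustered outliers pinned to +/-2.0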
def testGetCenterAndRfromImgBinary(self):
    structOri = generate_binary_structure(2, 1).astype(int)
    iterations = 7
    donut = iterate_structure(structOri, iterations)
    dY, dX = donut.shape
    cornerX = 10
    cornerY = 20
    imgBinary = np.zeros((120, 120), dtype=int)
    imgBinary[cornerY:cornerY + dY, cornerX:cornerX + dX] = donut
    x, y, r = self.centroid.getCenterAndRfromImgBinary(imgBinary)
    self.assertEqual(x, cornerX + iterations)
    self.assertEqual(y, cornerY + iterations)
    self.assertAlmostEqual(r, 5.9974, places=3)
def get_segmented_lungs(image):
    #Creation of the markers as shown above:
    marker_internal, marker_external, marker_watershed = generate_markers(
        image)
    #Creation of the Sobel-Gradient
    sobel_filtered_dx = ndimage.sobel(image, 1)
    sobel_filtered_dy = ndimage.sobel(image, 0)
    sobel_gradient = np.hypot(sobel_filtered_dx, sobel_filtered_dy)
    sobel_gradient *= 255.0 / np.max(sobel_gradient)
    #Watershed algorithm
    watershed = morphology.watershed(sobel_gradient, marker_watershed)
    #Reducing the image created by the Watershed algorithm to its outline
    outline = ndimage.morphological_gradient(watershed, size=(3, 3))
    outline = outline.astype(bool)
    #Performing Black-Tophat Morphology for reinclusion
    #Creation of the disk-kernel and increasing its size a bit
    blackhat_struct = [[0, 0, 1, 1, 1, 0, 0],
                       [0, 1, 1, 1, 1, 1, 0],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [1, 1, 1, 1, 1, 1, 1],
                       [0, 1, 1, 1, 1, 1, 0],
                       [0, 0, 1, 1, 1, 0, 0]]
    #blackhat_struct = ndimage.iterate_structure(blackhat_struct, 8)
    # <- a larger element retains more of the area; 12 works well, but 12
    # still excluded some parts, so changed to 14.
    blackhat_struct = ndimage.iterate_structure(blackhat_struct, 14)
    #Perform the Black-Hat
    outline += ndimage.black_tophat(outline, structure=blackhat_struct)
    #Use the internal marker and the Outline that was just created to generate the lungfilter
    lungfilter = np.bitwise_or(marker_internal, outline)
    #Close holes in the lungfilter
    #fill_holes is not used here, since in some slices the heart would be reincluded by accident
    lungfilter = ndimage.morphology.binary_closing(
        lungfilter, structure=np.ones((5, 5)), iterations=3)
    #Apply the lungfilter (note the filtered areas being assigned threshold_min HU)
    segmented = np.where(lungfilter == 1, image,
                         threshold_min * np.ones(image.shape))
    #return segmented, lungfilter, outline, watershed, sobel_gradient, marker_internal, marker_external, marker_watershed
    return segmented
def subpixel_centroid(img, local_maxes, mask_rad):
    '''
    This is effectively a replacement for cntrd in the matlab/IDL code.

    Should work for any dimension data.

    :param img: the data
    :param local_maxes: a (d,N) array with the location of the local
        maximums (as generated by :py:func:`~find_local_max`)
    :param mask_rad: the radius of the mask used for the averaging.

    :rtype: (d,N) array of positions, (d,) array of masses, (d,) array of r2,
    '''
    local_maxes = local_maxes[::-1]
    # do some data checking/munging
    mask_rad = int(mask_rad)
    img = np.squeeze(img)  # knock out singleton dimensions
    # make sure local_maxes.shape makes sense
    dim = img.ndim
    s = ndimage.generate_binary_structure(dim, 1)
    # scale it up to the desired size
    d_struct = ndimage.iterate_structure(s, int(mask_rad))
    so = [slice(-mask_rad, mask_rad + 1)] * dim
    offset_masks = [d_struct * os for os in np.mgrid[so]]
    r2_mask = np.zeros(d_struct.shape)
    for o in offset_masks:
        r2_mask += o ** 2
    r2_mask = np.sqrt(r2_mask)
    shifts_lst = []
    mass_lst = []
    r2_lst = []
    for loc in zip(*local_maxes):  # itertools.izip is Python 2 only
        window = [slice(p - mask_rad, p + mask_rad + 1) for p in loc]
        img_win = img[tuple(window)]  # tuple indexing required by modern numpy
        mass = np.sum(img_win * d_struct)
        mass_lst.append(mass)
        shifts_lst.append([np.sum(img_win * o) / mass for o in offset_masks])
        r2_lst.append(np.sum(r2_mask * img_win))
    sub_pixel = np.array(shifts_lst).T + local_maxes
    return sub_pixel[::-1], mass_lst, r2_lst
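# --- Hedged sketch chaining find_local_max and subpixel_centroid (not part
# of the original source); this pairing is what the docstrings above
# describe. Assumes the same module imports (numpy as np, scipy.ndimage as
# ndimage); maxima must lie at least mask_rad pixels from every edge.
def _demo_refine_centroids():
    yy, xx = np.mgrid[0:64, 0:64]
    img = (np.exp(-((xx - 20.3) ** 2 + (yy - 20.7) ** 2) / 8.0) +
           np.exp(-((xx - 44.6) ** 2 + (yy - 40.2) ** 2) / 8.0))
    maxes = find_local_max(img, d_rad=5, threshold=0.05)
    sub_px, masses, r2 = subpixel_centroid(img, maxes, mask_rad=4)
    return sub_px  # subpixel x/y near (20.3, 20.7) and (44.6, 40.2)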
def segment_lung(params, I, I_affine):
    #####################################################
    # Intensity thresholding & Morphological operations
    #####################################################
    M = np.zeros(I.shape)
    M[I > params["lungMinValue"]] = 1
    M[I > params["lungMaxValue"]] = 0
    struct_s = ndimage.generate_binary_structure(3, 1)
    struct_m = ndimage.iterate_structure(struct_s, 2)
    M = ndimage.binary_closing(M, structure=struct_s, iterations=1)
    M = ndimage.binary_opening(M, structure=struct_m, iterations=1)
    #####################################################
    # Estimate lung field of view
    #####################################################
    [m, n, p] = I.shape
    medx = int(m / 2)
    medy = int(n / 2)
    xrange1 = int(m / 2 * params["xRangeRatio1"])
    xrange2 = int(m / 2 * params["xRangeRatio2"])
    zrange1 = int(p * params["zRangeRatio1"])
    zrange2 = int(p * params["zRangeRatio2"])
    #####################################################
    # Select largest connected components & save nii
    #####################################################
    M = measure.label(M)
    label1 = M[medx - xrange2:medx - xrange1, medy, zrange1:zrange2]
    label2 = M[medx + xrange1:medx + xrange2, medy, zrange1:zrange2]
    # note: scipy >= 1.9 changed stats.mode; pass keepdims=True there
    label1 = stats.mode(label1[label1 > 0])[0][0]
    label2 = stats.mode(label2[label2 > 0])[0][0]
    M[M == label1] = -1
    M[M == label2] = -1
    M[M > 0] = 0
    M = M * -1
    M = ndimage.binary_closing(M, structure=struct_m, iterations=1)
    M = ndimage.binary_fill_holes(M)
    Mlung = np.int8(M)
    # Note: skip writing "lungaw.nii.gz" to disk, as we don't use it
    return Mlung
def wiggle_room_precision_recall(pred, boundary, margin=2, connectivity=1):
    """Voxel-wise, continuous value precision recall curve allowing drift.

    Voxel-wise precision recall evaluates predictions against a ground
    truth. Wiggle-room precision recall (WRPR, "warper") allows calls from
    nearby voxels to be counted as correct. Specifically, if a voxel is
    predicted to be a boundary within a dilation distance of `margin`
    (distance defined according to `connectivity`) of a true boundary
    voxel, it will be counted as a True Positive in the Precision, and
    vice-versa for the Recall.

    Parameters
    ----------
    pred : np.ndarray of float, arbitrary shape
        The prediction values, expressed as probability of observing a
        boundary (i.e. a voxel with label 1).
    boundary : np.ndarray of int, same shape as pred
        The true boundary map. 1 indicates boundary, 0 indicates
        non-boundary.
    margin : int, optional
        The number of dilations that define the margin. default: 2.
    connectivity : {1, ..., pred.ndim}, optional
        The morphological voxel connectivity (defined as in SciPy) for the
        dilation step.

    Returns
    -------
    ts, pred, rec : np.ndarray of float, shape `(len(np.unique(pred)+1),)`
        The prediction value thresholds corresponding to each precision and
        recall value, the precision values, and the recall values.
    """
    struct = nd.generate_binary_structure(boundary.ndim, connectivity)
    gtd = nd.binary_dilation(boundary, struct, margin)
    struct_m = nd.iterate_structure(struct, margin)
    pred_dil = nd.grey_dilation(pred, footprint=struct_m)
    missing = np.setdiff1d(np.unique(pred), np.unique(pred_dil))
    for m in missing:
        pred_dil.ravel()[np.flatnonzero(pred == m)[0]] = m
    prec, _, ts = precision_recall_curve(gtd.ravel(), pred.ravel())
    _, rec, _ = precision_recall_curve(boundary.ravel(), pred_dil.ravel())
    return list(zip(ts, prec, rec))
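# --- Hedged usage sketch for wiggle_room_precision_recall (not part of the
# original source). Assumes numpy as np, scipy.ndimage as nd, and a
# precision_recall_curve with the sklearn.metrics signature, as used above.
def _demo_wrpr(seed=0):
    rng = np.random.default_rng(seed)
    boundary = np.zeros((64, 64), dtype=int)
    boundary[32, :] = 1                  # a straight ground-truth boundary
    pred = 0.2 * rng.random((64, 64))
    pred[31, :] = 0.9                    # prediction drifted by one voxel
    curve = wiggle_room_precision_recall(pred, boundary, margin=2)
    for ts, prec, rec in curve[:3]:
        print(ts, prec, rec)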
def solvePoissonEq(self, inst, I1, I2, iOutItr=0):
    if self.PoissonSolver == 'fft':
        '''Poisson Solver using an FFT'''
        # this is the only place iOutItr is used.
        cliplevel = self.sumclipSequence[iOutItr]

        aperturePixelSize = \
            (inst.apertureDiameter * inst.sensorFactor / inst.sensorSamples)
        v, u = np.mgrid[
            -0.5 / aperturePixelSize:0.5 / aperturePixelSize:
            1 / self.padDim / aperturePixelSize,
            -0.5 / aperturePixelSize:0.5 / aperturePixelSize:
            1 / self.padDim / aperturePixelSize]
        if self.debugLevel >= 3:
            print('iOuter=%d, cliplevel=%4.2f' % (iOutItr, cliplevel))
            print(v.shape)

        u2v2 = -4 * (np.pi**2) * (u * u + v * v)

        # Set origin to Inf and 0 to result in 0 at origin after filtering
        ctrIdx = int(np.floor(self.padDim / 2))
        u2v2[ctrIdx, ctrIdx] = np.inf

        self.createSignal(inst, I1, I2, cliplevel)

        # find the indices for a ring of pixels just outside and just
        # inside the aperture for use in setting dWdn = 0
        struct = ndimage.generate_binary_structure(2, 1)
        struct = ndimage.iterate_structure(struct, self.boundaryT)

        ApringOut = np.logical_xor(ndimage.morphology.binary_dilation(
            self.pMask, structure=struct), self.pMask).astype(int)
        ApringIn = np.logical_xor(ndimage.morphology.binary_erosion(
            self.pMask, structure=struct), self.pMask).astype(int)

        bordery, borderx = np.nonzero(ApringOut)

        if (self.compMode == 'zer'):
            zc = np.zeros((self.numTerms, self.innerItr))

        # ***************************************************************
        # initial BOX 3 - put signal in boundary (since there's no
        # existing Sestimate, S just equals self.S)
        S = self.S.copy()

        for jj in range(int(self.innerItr)):
            # ***********************************************************
            # BOX 4 - forward filter: forward FFT, divide by u2v2,
            # inverse FFT
            SFFT = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(S)))
            W = np.fft.fftshift(np.fft.irfft2(np.fft.fftshift(SFFT / u2v2),
                                              s=S.shape))

            # ***********************************************************
            # BOX 5 - Wavefront estimate
            # (includes zeroing offset & masking to the aperture size)
            West = tools.extractArray(W, inst.sensorSamples)

            offset = West[self.pMask == 1].mean()
            West = West - offset
            West[self.pMask == 0] = 0

            if (self.compMode == 'zer'):
                zc[:, jj] = tools.ZernikeMaskedFit(
                    West, inst.xSensor, inst.ySensor, self.numTerms,
                    self.pMask, self.zobsR)

            # ***********************************************************
            # BOX 6 - set dWestimate/dn = 0 around boundary
            WestdWdn0 = West.copy()

            # do a 3x3 average around each border pixel, including only
            # those pixels inside the aperture
            for ii in range(len(borderx)):
                reg = West[borderx[ii] - self.boundaryT:
                           borderx[ii] + self.boundaryT + 1,
                           bordery[ii] - self.boundaryT:
                           bordery[ii] + self.boundaryT + 1]
                intersectIdx = ApringIn[borderx[ii] - self.boundaryT:
                                        borderx[ii] + self.boundaryT + 1,
                                        bordery[ii] - self.boundaryT:
                                        bordery[ii] + self.boundaryT + 1]
                WestdWdn0[borderx[ii], bordery[ii]] = \
                    reg[np.nonzero(intersectIdx)].mean()

            # ***********************************************************
            # BOX 7 - Take Laplacian to find sensor signal estimate
            Wxx = np.zeros((inst.sensorSamples, inst.sensorSamples))
            Wyy = np.zeros((inst.sensorSamples, inst.sensorSamples))
            Wt = WestdWdn0.copy()
            Wxx[:, 1:-1] = (Wt[:, 0:-2] - 2 * Wt[:, 1:-1] + Wt[:, 2:]) / \
                aperturePixelSize**2
            Wyy[1:-1, :] = (Wt[0:-2, :] - 2 * Wt[1:-1, :] + Wt[2:, :]) / \
                aperturePixelSize**2
            del2W = Wxx + Wyy

            Sest = tools.padArray(del2W, self.padDim)

            # ***********************************************************
            # BOX 3 - Put signal back inside boundary, leaving the rest
            # of Sestimate
            Sest[self.pMaskPad == 1] = self.S[self.pMaskPad == 1]
            S = Sest

        self.West = West.copy()
        if (self.compMode == 'zer'):
            self.zc = zc

    elif self.PoissonSolver == 'exp':
        self.getdIandI(I1, I2)

        xSensor = inst.xSensor * self.cMask
        ySensor = inst.ySensor * self.cMask

        F = np.zeros(self.numTerms)
        dZidx = np.zeros((self.numTerms, inst.sensorSamples,
                          inst.sensorSamples))
        dZidy = dZidx.copy()

        aperturePixelSize = \
            (inst.apertureDiameter * inst.sensorFactor / inst.sensorSamples)
        zcCol = np.zeros(self.numTerms)
        for i in range(int(self.numTerms)):
            zcCol[i] = 1
            # we integrate instead of decompose; integration is faster.
            # Also, decomposition is ill-defined on m.cMask.
            # Using m.pMask, the two should give the same results.
            if (self.zobsR > 0):
                F[i] = np.sum(np.sum(
                    self.dI * tools.ZernikeAnnularEval(
                        zcCol, xSensor, ySensor,
                        self.zobsR))) * aperturePixelSize**2
                dZidx[i, :, :] = tools.ZernikeAnnularGrad(
                    zcCol, xSensor, ySensor, self.zobsR, 'dx')
                dZidy[i, :, :] = tools.ZernikeAnnularGrad(
                    zcCol, xSensor, ySensor, self.zobsR, 'dy')
            else:
                F[i] = np.sum(np.sum(
                    self.dI * tools.ZernikeEval(
                        zcCol, xSensor, ySensor))) * aperturePixelSize**2
                dZidx[i, :, :] = tools.ZernikeGrad(
                    zcCol, xSensor, ySensor, 'dx')
                dZidy[i, :, :] = tools.ZernikeGrad(
                    zcCol, xSensor, ySensor, 'dy')
            zcCol[i] = 0

        self.Mij = np.zeros((self.numTerms, self.numTerms))
        for i in range(self.numTerms):
            for j in range(self.numTerms):
                self.Mij[i, j] = aperturePixelSize**2 / \
                    (inst.apertureDiameter / 2)**2 * \
                    np.sum(np.sum(
                        self.image *
                        (dZidx[i, :, :].squeeze() *
                         dZidx[j, :, :].squeeze() +
                         dZidy[i, :, :].squeeze() *
                         dZidy[j, :, :].squeeze())))

        dz = 2 * inst.focalLength * \
            (inst.focalLength - inst.offset) / inst.offset
        self.zc = np.zeros(self.numTerms)
        idx = [x - 1 for x in self.ZTerms]
        # phi in the GN paper is phase; phi/(2*pi)*lambda = W
        zc_tmp = np.dot(np.linalg.pinv(self.Mij[:, idx][idx]), F[idx]) / dz
        self.zc[idx] = zc_tmp

        if (self.zobsR > 0):
            self.West = tools.ZernikeAnnularEval(
                np.concatenate(([0, 0, 0], self.zc[3:])),
                xSensor, ySensor, self.zobsR)
        else:
            self.West = tools.ZernikeEval(
                np.concatenate(([0, 0, 0], self.zc[3:])),
                xSensor, ySensor)
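# A standalone sketch of the boundary-ring trick used above (the BOX 6
# setup): XOR-ing a mask with its dilation/erosion by an iterated
# structuring element yields rings of pixels just outside/just inside the
# aperture edge. Assumes only numpy and scipy.ndimage; the disk mask is
# illustrative, not the snippet's actual pupil.
import numpy as np
from scipy import ndimage

yy, xx = np.mgrid[-32:32, -32:32]
pMask = (xx**2 + yy**2) < 24**2          # a toy circular aperture

boundaryT = 3                            # ring thickness in pixels
struct = ndimage.generate_binary_structure(2, 1)
struct = ndimage.iterate_structure(struct, boundaryT)

ringOut = np.logical_xor(ndimage.binary_dilation(pMask, structure=struct),
                         pMask)          # pixels just outside the aperture
ringIn = np.logical_xor(ndimage.binary_erosion(pMask, structure=struct),
                        pMask)           # pixels just inside the aperture
print(ringOut.sum(), ringIn.sum())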
def compensate(self, inst, algo, zcCol, model):
    """Calculate the image compensated from the affection of wavefront.

    Parameters
    ----------
    inst : Instrument
        Instrument to use.
    algo : Algorithm
        Algorithm to solve the Poisson's equation. It can be done by the
        fast Fourier transform or serial expansion.
    zcCol : numpy.ndarray
        Coefficients of wavefront.
    model : str
        Optical model. It can be "paraxial", "onAxis", or "offAxis".

    Raises
    ------
    RuntimeError
        zcCol in compensate needs to be a numTerms row column vector.
    """
    # Check the condition of inputs
    numTerms = algo.getNumOfZernikes()
    if ((zcCol.ndim == 1) and (len(zcCol) != numTerms)):
        raise RuntimeError(
            "input:size",
            "zcCol in compensate needs to be a %d row column vector.\n"
            % numTerms)

    # Dimension of image
    sm, sn = self._image.getImg().shape

    # Dimension of projected image on focal plane
    projSamples = sm

    # Let us create a look-up table for x -> xp first.
    luty, lutx = np.mgrid[-(projSamples/2 - 0.5):(projSamples/2 + 0.5),
                          -(projSamples/2 - 0.5):(projSamples/2 + 0.5)]

    sensorFactor = inst.getSensorFactor()
    lutx = lutx/(projSamples/2/sensorFactor)
    luty = luty/(projSamples/2/sensorFactor)

    # Set up the mapping
    lutxp, lutyp, J = self._aperture2image(inst, algo, zcCol, lutx, luty,
                                           projSamples, model)

    show_lutxyp = self._showProjection(lutxp, lutyp, sensorFactor,
                                       projSamples, raytrace=False)
    if (np.all(show_lutxyp <= 0)):
        self.caustic = True
        return

    # Calculate the weighting center (x, y) and radius
    realcx, realcy = self._image.getCenterAndR_ef()[0:2]

    # Extend the dimension of image by 20 pixels in x and y direction
    show_lutxyp = padArray(show_lutxyp, projSamples+20)

    # Get the binary matrix of image on pupil plane if raytrace=False
    struct0 = generate_binary_structure(2, 1)
    struct = iterate_structure(struct0, 4)
    struct = binary_dilation(struct, structure=struct0,
                             iterations=2).astype(int)
    show_lutxyp = binary_dilation(show_lutxyp, structure=struct)
    show_lutxyp = binary_erosion(show_lutxyp, structure=struct)

    # Extract the region from the center of image and get the original one
    show_lutxyp = extractArray(show_lutxyp, projSamples)

    # Calculate the weighting center (x, y) and radius
    projcx, projcy = self._image.getCenterAndR_ef(
        image=show_lutxyp.astype(float))[0:2]

    # Shift the image to center of projection on pupil
    # +(-) means we need to move image to the right (left)
    shiftx = projcx - realcx
    # +(-) means we need to move image upward (downward)
    shifty = projcy - realcy

    self._image.updateImage(np.roll(self._image.getImg(),
                                    int(np.round(shifty)), axis=0))
    self._image.updateImage(np.roll(self._image.getImg(),
                                    int(np.round(shiftx)), axis=1))

    # Construct the interpolant to get the intensity on the (x', y') plane
    # that corresponds to the grid points on (x, y)
    yp, xp = np.mgrid[-(sm/2 - 0.5):(sm/2 + 0.5),
                      -(sm/2 - 0.5):(sm/2 + 0.5)]
    xp = xp/(sm/2/sensorFactor)
    yp = yp/(sm/2/sensorFactor)

    # Set the NaN values to 0 for the interpolation to use
    lutxp[np.isnan(lutxp)] = 0
    lutyp[np.isnan(lutyp)] = 0

    # Construct the function for interpolation
    ip = RectBivariateSpline(yp[:, 0], xp[0, :], self._image.getImg(),
                             kx=1, ky=1)

    # Construct the projected image by the interpolation
    lutIp = np.zeros(lutxp.shape[0]*lutxp.shape[1])
    for ii, (xx, yy) in enumerate(zip(lutxp.ravel(), lutyp.ravel())):
        lutIp[ii] = ip(yy, xx)
    lutIp = lutIp.reshape(lutxp.shape)

    # Calculate the image on focal plane with compensation based on flux
    # conservation:
    # I(x, y)/I'(x', y') = J = (dx'/dx)*(dy'/dy) - (dx'/dy)*(dy'/dx)
    self._image.updateImage(lutIp * J)

    if (self.defocalType == DefocalType.Extra):
        self._image.updateImage(np.rot90(self._image.getImg(), k=2))

    # Set the NaN values to 0
    holdedImg = self._image.getImg()
    holdedImg[np.isnan(holdedImg)] = 0
    self._image.updateImage(holdedImg)

    # Check whether the compensated image has a problem. A negative value
    # means over-compensation from the wavefront error.
    if (np.any(self._image.getImg() < 0) and np.all(self.image0 >= 0)):
        print("WARNING: negative scale parameter, "
              "image is within caustic, zcCol (in um)=\n")
        self.caustic = True

    # Set the over-compensated part to 0
    holdedImg = self._image.getImg()
    holdedImg[holdedImg < 0] = 0
    self._image.updateImage(holdedImg)
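# A small sketch of the gap-filling step above, under the assumption that
# the projected pupil (show_lutxyp) is a sparsely hit binary raster: a
# binary dilation followed by an erosion with the same enlarged structuring
# element (i.e. a morphological closing) bridges the unsampled gaps. Only
# numpy and scipy.ndimage are assumed; the random hit pattern is synthetic.
import numpy as np
from scipy.ndimage import (generate_binary_structure, iterate_structure,
                           binary_dilation, binary_erosion)

rng = np.random.default_rng(0)
hits = np.zeros((64, 64), dtype=bool)
yy, xx = rng.integers(8, 56, size=(2, 400))
hits[yy, xx] = True                       # sparse "ray hits" on the pupil

struct0 = generate_binary_structure(2, 1)
struct = iterate_structure(struct0, 4)    # 9x9 diamond footprint
struct = binary_dilation(struct, structure=struct0,
                         iterations=2).astype(int)   # fatten it in place

filled = binary_erosion(binary_dilation(hits, structure=struct),
                        structure=struct)
print(hits.sum(), '->', filled.sum())     # gaps between hits are closed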
def _run_interface(self, runtime):
    from scipy import ndimage as sim

    fmap_nii = nb.load(self.inputs.in_file)
    data = np.squeeze(fmap_nii.get_data().astype(np.float32))

    # Despike / denoise (no-mask)
    if self.inputs.despike:
        data = _despike2d(data, self.inputs.despike_threshold)

    mask = None
    if isdefined(self.inputs.in_mask):
        masknii = nb.load(self.inputs.in_mask)
        mask = masknii.get_data().astype(np.uint8)

        # Erode mask
        if self.inputs.mask_erode > 0:
            struc = sim.iterate_structure(
                sim.generate_binary_structure(3, 2), 1)
            mask = sim.binary_erosion(
                mask, struc,
                iterations=self.inputs.mask_erode
            ).astype(np.uint8)  # pylint: disable=no-member

    self._results['out_file'] = genfname(self.inputs.in_file, suffix='enh')
    datanii = nb.Nifti1Image(data, fmap_nii.affine, fmap_nii.header)

    if self.inputs.unwrap:
        data = _unwrap(data, self.inputs.in_magnitude, mask)
        self._results['out_unwrapped'] = genfname(self.inputs.in_file,
                                                  suffix='unwrap')
        nb.Nifti1Image(data, fmap_nii.affine, fmap_nii.header).to_filename(
            self._results['out_unwrapped'])

    if not self.inputs.bspline_smooth:
        datanii.to_filename(self._results['out_file'])
        return runtime
    else:
        from fmriprep.utils import bspline as fbsp
        from statsmodels.robust.scale import mad

        # Fit B-Splines (coarse)
        bspobj = fbsp.BSplineFieldmap(datanii, weights=mask,
                                      njobs=self.inputs.njobs)
        bspobj.fit()
        smoothed1 = bspobj.get_smoothed()

        # Manipulate the difference map
        diffmap = data - smoothed1.get_data()
        sderror = mad(diffmap[mask > 0])
        LOGGER.info('SD of error after B-Spline fitting is %f', sderror)
        errormask = np.zeros_like(diffmap)
        errormask[np.abs(diffmap) > (10 * sderror)] = 1
        errormask *= mask

        nslices = 0
        try:
            errorslice = np.squeeze(
                np.argwhere(errormask.sum(0).sum(0) > 0))
            nslices = errorslice[-1] - errorslice[0]
        except IndexError:
            # mask is empty, do not refine
            pass

        if nslices > 1:
            diffmapmsk = mask[..., errorslice[0]:errorslice[-1]]
            diffmapnii = nb.Nifti1Image(
                diffmap[..., errorslice[0]:errorslice[-1]] * diffmapmsk,
                datanii.affine, datanii.header)

            bspobj2 = fbsp.BSplineFieldmap(diffmapnii,
                                           knots_zooms=[24., 24., 4.],
                                           njobs=self.inputs.njobs)
            bspobj2.fit()
            smoothed2 = bspobj2.get_smoothed().get_data()

            final = smoothed1.get_data().copy()
            final[..., errorslice[0]:errorslice[-1]] += smoothed2
        else:
            final = smoothed1.get_data()

        nb.Nifti1Image(final, datanii.affine, datanii.header).to_filename(
            self._results['out_file'])

    return runtime
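# A minimal sketch of the robust residual-masking idea above: flag voxels
# whose fit residual exceeds 10x the median absolute deviation (MAD), a
# scale estimate insensitive to the very outliers being flagged. Assumes
# only numpy and statsmodels' mad (the one the snippet imports); the
# residual volume here is synthetic.
import numpy as np
from statsmodels.robust.scale import mad

rng = np.random.default_rng(1)
diffmap = rng.normal(0, 2.0, size=(32, 32, 16))
diffmap[10, 10, 8] = 100.0                 # an artificial spike

sderror = mad(diffmap.ravel())
errormask = (np.abs(diffmap) > 10 * sderror).astype(np.uint8)
print('flagged voxels:', int(errormask.sum()))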
def _solvePoissonEq(self, I1, I2, iOutItr=0):
    """Solve the Poisson's equation by Fourier transform (differential) or
    serial expansion (integration).

    The fft algorithm does not actually converge yet. A difference
    comparison and the X-alpha method still need to be added and discussed
    further.

    Parameters
    ----------
    I1 : Image
        Intra- or extra-focal image.
    I2 : Image
        Intra- or extra-focal image.
    iOutItr : int, optional
        ith number of outer loop iteration which is important in "fft"
        algorithm. (the default is 0.)

    Returns
    -------
    numpy.ndarray
        Coefficients of normal/ annular Zernike polynomials.
    numpy.ndarray
        Estimated wavefront.
    """
    # Calculate the aperture pixel size
    apertureDiameter = self._inst.getApertureDiameter()
    sensorFactor = self._inst.getSensorFactor()
    dimOfDonut = self._inst.getDimOfDonutOnSensor()
    aperturePixelSize = apertureDiameter*sensorFactor/dimOfDonut

    # Calculate the differential Omega
    dOmega = aperturePixelSize**2

    # Solve the Poisson's equation based on the type of algorithm
    numTerms = self.getNumOfZernikes()
    zobsR = self.getObsOfZernikes()
    PoissonSolver = self.getPoissonSolverName()
    if (PoissonSolver == "fft"):
        # Use the differential method by fft to solve the Poisson's
        # equation

        # Parameter to determine the threshold of calculating I0.
        sumclipSequence = self.getSignalClipSequence()
        cliplevel = sumclipSequence[iOutItr]

        # Generate the v, u-coordinates on pupil plane
        padDim = self.getFftDimension()
        v, u = np.mgrid[
            -0.5/aperturePixelSize: 0.5/aperturePixelSize:
            1./padDim/aperturePixelSize,
            -0.5/aperturePixelSize: 0.5/aperturePixelSize:
            1./padDim/aperturePixelSize]

        # Show the threshold and pupil coordinate information
        if (self.debugLevel >= 3):
            print("iOuter=%d, cliplevel=%4.2f" % (iOutItr, cliplevel))
            print(v.shape)

        # Calculate the const of fft:
        # FT{Delta W} = -4*pi^2*(u^2+v^2) * FT{W}
        u2v2 = -4 * (np.pi**2) * (u*u + v*v)

        # Set origin to Inf to result in 0 at origin after filtering
        ctrIdx = int(np.floor(padDim/2.0))
        u2v2[ctrIdx, ctrIdx] = np.inf

        # Calculate the wavefront signal
        Sini = self._createSignal(I1, I2, cliplevel)

        # Find the just-outside and just-inside indices of a ring in
        # pixels. This is for the use in setting dWdn = 0.
        boundaryT = self.getBoundaryThickness()

        struct = generate_binary_structure(2, 1)
        struct = iterate_structure(struct, boundaryT)

        ApringOut = np.logical_xor(binary_dilation(self.pMask,
                                                   structure=struct),
                                   self.pMask).astype(int)
        ApringIn = np.logical_xor(binary_erosion(self.pMask,
                                                 structure=struct),
                                  self.pMask).astype(int)

        bordery, borderx = np.nonzero(ApringOut)

        # Put the signal in boundary (since there's no existing Sestimate,
        # S just equals Sini as the initial condition of SCF)
        S = Sini.copy()
        for jj in range(self.getNumOfInnerItr()):

            # Calculate FT{S}
            SFFT = np.fft.fftshift(np.fft.fft2(np.fft.fftshift(S)))

            # Calculate W by W = IFT{ FT{S}/(-4*pi^2*(u^2+v^2)) }
            W = np.fft.fftshift(np.fft.irfft2(np.fft.fftshift(SFFT/u2v2),
                                              s=S.shape))

            # Estimate the wavefront (includes zeroing offset & masking to
            # the aperture size)

            # Take the estimated wavefront
            West = extractArray(W, dimOfDonut)

            # Calculate the offset
            offset = West[self.pMask == 1].mean()
            West = West - offset
            West[self.pMask == 0] = 0

            # Set dWestimate/dn = 0 around boundary
            WestdWdn0 = West.copy()

            # Do a 3x3 average around each border pixel, including only
            # those pixels inside the aperture
            for ii in range(len(borderx)):
                reg = West[borderx[ii] - boundaryT:
                           borderx[ii] + boundaryT + 1,
                           bordery[ii] - boundaryT:
                           bordery[ii] + boundaryT + 1]

                intersectIdx = ApringIn[borderx[ii] - boundaryT:
                                        borderx[ii] + boundaryT + 1,
                                        bordery[ii] - boundaryT:
                                        bordery[ii] + boundaryT + 1]

                WestdWdn0[borderx[ii], bordery[ii]] = \
                    reg[np.nonzero(intersectIdx)].mean()

            # Take Laplacian to find sensor signal estimate (Delta W = S)
            del2W = laplace(WestdWdn0)/dOmega

            # Extend the dimension of signal to the order of 2 for "fft"
            # to use
            Sest = padArray(del2W, padDim)

            # Put signal back inside boundary, leaving the rest of
            # Sestimate
            Sest[self.pMaskPad == 1] = Sini[self.pMaskPad == 1]

            # Need to recheck this condition
            S = Sest

        # Define the estimated wavefront
        # self.West = West.copy()

        # Calculate the coefficients of normal/ annular Zernike polynomials
        if (self.getCompensatorMode() == "zer"):
            xSensor, ySensor = self._inst.getSensorCoor()
            zc = ZernikeMaskedFit(West, xSensor, ySensor, numTerms,
                                  self.pMask, zobsR)
        else:
            zc = np.zeros(numTerms)

    elif (PoissonSolver == "exp"):
        # Use the integration method by serial expansion to solve the
        # Poisson's equation

        # Calculate I0 and dI
        I0, dI = self._getdIandI(I1, I2)

        # Get the x, y coordinates in the mask. Elements outside the mask
        # are 0.
        xSensor, ySensor = self._inst.getSensorCoor()
        xSensor = xSensor * self.cMask
        ySensor = ySensor * self.cMask

        # Create the F matrix and Zernike-related matrices
        F = np.zeros(numTerms)
        dZidx = np.zeros((numTerms, dimOfDonut, dimOfDonut))
        dZidy = dZidx.copy()

        zcCol = np.zeros(numTerms)
        for ii in range(int(numTerms)):

            # Calculate the matrix for each Zk-related component

            # Set the specific Zk coefficient to 1 for the calculation
            zcCol[ii] = 1

            F[ii] = np.sum(dI*ZernikeAnnularEval(zcCol, xSensor, ySensor,
                                                 zobsR))*dOmega
            dZidx[ii, :, :] = ZernikeAnnularGrad(zcCol, xSensor, ySensor,
                                                 zobsR, "dx")
            dZidy[ii, :, :] = ZernikeAnnularGrad(zcCol, xSensor, ySensor,
                                                 zobsR, "dy")

            # Set the specific Zk coefficient back to 0 to avoid
            # interfering with the other Zk calculations
            zcCol[ii] = 0

        # Calculate the Mij matrix; need to check the stability of the
        # integration and symmetry later
        Mij = np.zeros([numTerms, numTerms])
        for ii in range(numTerms):
            for jj in range(numTerms):
                Mij[ii, jj] = np.sum(
                    I0*(dZidx[ii, :, :].squeeze()*dZidx[jj, :, :].squeeze() +
                        dZidy[ii, :, :].squeeze()*dZidy[jj, :, :].squeeze()))
        Mij = dOmega/(apertureDiameter/2.)**2 * Mij

        # Calculate dz
        focalLength = self._inst.getFocalLength()
        offset = self._inst.getDefocalDisOffset()
        dz = 2*focalLength*(focalLength-offset)/offset

        # Define zc
        zc = np.zeros(numTerms)

        # Consider specific Zk terms only
        idx = (self.getZernikeTerms() - 1).tolist()

        # Solve the equation: M*W = F => W = M^(-1)*F
        zc_tmp = np.linalg.lstsq(Mij[:, idx][idx], F[idx],
                                 rcond=None)[0]/dz
        zc[idx] = zc_tmp

        # Estimate the wavefront surface based on z4 - z22
        # z0 - z3 are set to be 0 instead
        West = ZernikeAnnularEval(np.concatenate(([0, 0, 0], zc[3:])),
                                  xSensor, ySensor, zobsR)

    return zc, West
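# A toy sketch of the linear solve at the end of the "exp" branch above:
# given a symmetric matrix M restricted to a subset of Zernike indices and
# a right-hand side F, recover the selected coefficients with a
# least-squares solve and scatter them back into the full vector. Only
# numpy is assumed; the matrix and coefficients are synthetic.
import numpy as np

numTerms = 6
rng = np.random.default_rng(2)
A = rng.normal(size=(numTerms, numTerms))
Mij = A @ A.T                        # symmetric positive-definite stand-in
zc_true = np.array([0., 0., 0., 0.5, -0.2, 0.1])
F = Mij @ zc_true

idx = [3, 4, 5]                      # e.g. fit terms z4-z6 only (0-based)
zc = np.zeros(numTerms)
zc[idx] = np.linalg.lstsq(Mij[:, idx][idx], F[idx], rcond=None)[0]
print(np.round(zc, 3))               # ~ [0, 0, 0, 0.5, -0.2, 0.1]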
# <codecell>

a = imread('cam1.10000').astype(np.ubyte)

# <codecell>

imshow(a, cmap=cm.gray)

# <codecell>

b = where(a > 50, 1, 0).astype(np.ubyte)

# <codecell>

struct = array([[1, 1, 1],
                [1, 1, 1],
                [1, 1, 1]])
struct = ndimage.iterate_structure(struct, 2)
c = ndimage.binary_dilation(b, structure=struct, iterations=5)
c = ndimage.binary_fill_holes(c, structure=struct)

# <codecell>

imshow(np.c_[b, c], cmap=cm.gray)

# <codecell>

n = 10
l = 256
img = ndimage.gaussian_filter(a, sigma=l/(4.*n))
# mask = (im > im.mean()).astype(np.float)
# mask += 0.1 * im
# img = mask + 0.2*np.random.randn(*mask.shape)
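# A quick sketch of what iterate_structure does to the 3x3 kernel above,
# assuming only numpy and scipy.ndimage: iterate_structure(struct, n)
# dilates the structure with itself n-1 times, so a 3x3 block iterated
# twice becomes a 5x5 block.
import numpy as np
from scipy import ndimage

base = np.ones((3, 3), dtype=int)
grown = ndimage.iterate_structure(base, 2)
print(grown.shape)          # (5, 5)
print(grown.astype(int))    # the 3x3 block dilated by itself once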
def compensate(self, inst, algo, zcCol, oversample, model):

    if ((zcCol.ndim == 1) and (len(zcCol) != algo.numTerms)):
        raise Exception(
            'input:size',
            'zcCol in compensate needs to be a %d row column vector\n'
            % algo.numTerms)

    sm, sn = self.image.shape
    projSamples = sm * oversample

    # Let us create a look-up table for x -> xp first.
    luty, lutx = np.mgrid[
        -(projSamples / 2 - 0.5):(projSamples / 2 + 0.5),
        -(projSamples / 2 - 0.5):(projSamples / 2 + 0.5)]
    lutx = lutx / (projSamples / 2 / inst.sensorFactor)
    luty = luty / (projSamples / 2 / inst.sensorFactor)

    # set up the mapping
    lutxp, lutyp, J = aperture2image(
        self, inst, algo, zcCol, lutx, luty, projSamples, model)

    show_lutxyp = showProjection(
        lutxp, lutyp, inst.sensorFactor, projSamples, 0)
    if (np.all(show_lutxyp <= 0)):
        self.caustic = 1
        return

    realcx, realcy, tmp = getCenterAndR_ef(self.image)
    show_lutxyp = padArray(show_lutxyp, projSamples + 20)

    struct0 = ndimage.generate_binary_structure(2, 1)
    struct = ndimage.iterate_structure(struct0, 4)
    struct = ndimage.binary_dilation(struct, structure=struct0)
    struct = ndimage.binary_dilation(struct, structure=struct0).astype(int)
    show_lutxyp = ndimage.binary_dilation(show_lutxyp, structure=struct)
    show_lutxyp = ndimage.binary_erosion(show_lutxyp, structure=struct)

    show_lutxyp = extractArray(show_lutxyp, projSamples)

    projcx, projcy, tmp = getCenterAndR_ef(show_lutxyp.astype(float))
    projcx = projcx / oversample
    projcy = projcy / oversample

    # +(-) means we need to move image to the right (left)
    shiftx = projcx - realcx
    # +(-) means we need to move image upward (downward)
    shifty = projcy - realcy

    self.image = np.roll(self.image, int(np.round(shifty)), axis=0)
    self.image = np.roll(self.image, int(np.round(shiftx)), axis=1)

    # let's construct the interpolant to get the intensity on the
    # (x', y') plane that corresponds to the grid points on (x, y)
    yp, xp = np.mgrid[-(sm / 2 - 0.5):(sm / 2 + 0.5),
                      -(sm / 2 - 0.5):(sm / 2 + 0.5)]
    xp = xp / (sm / 2 / inst.sensorFactor)
    yp = yp / (sm / 2 / inst.sensorFactor)

    lutxp[np.isnan(lutxp)] = 0
    lutyp[np.isnan(lutyp)] = 0

    ip = interpolate.RectBivariateSpline(
        yp[:, 0], xp[0, :], self.image, kx=1, ky=1)

    lutIp = np.zeros(lutxp.shape[0] * lutxp.shape[1])
    for i, (xx, yy) in enumerate(zip(lutxp.ravel(), lutyp.ravel())):
        lutIp[i] = ip(yy, xx)
    lutIp = lutIp.reshape(lutxp.shape)

    self.image = lutIp * J

    if (self.type == 'extra'):
        self.image = np.rot90(self.image, k=2)

    # if we want the compensator to drive down tip-tilt
    # self.image = offsetImg(-shiftx, -shifty, self.image)

    self.image[np.isnan(self.image)] = 0

    # self.image < 0 is not physical; remove that region
    self.caustic = 0
    if (np.any(self.image < 0) and np.all(self.image0 >= 0)):
        print('WARNING: negative scale parameter, '
              'image is within caustic, zcCol (in um)=\n')
        self.caustic = 1

    self.image[self.image < 0] = 0
    if (oversample > 1):
        self.downResolution(oversample, sm, sn)
def _filter_struc(size):
    """Return a 2D binary structuring element grown to the given size."""
    base = sn.generate_binary_structure(2, 1)
    struc = sn.iterate_structure(base, size)
    return struc
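# A brief usage sketch for the helper above, assuming scipy.ndimage is
# imported as sn (as the snippet's calls suggest): size controls how often
# the 3x3 cross is dilated with itself, giving a (2*size+1)-wide diamond.
import scipy.ndimage as sn

print(_filter_struc(1).shape)        # (3, 3)
print(_filter_struc(2).shape)        # (5, 5)
print(_filter_struc(3).astype(int))  # a 7x7 diamond-shaped footprint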
# pare down to the set of slices that are of interest (optional)
if len(args.evalSliceExpr):
    evalSlices = eval(args.evalSliceExpr)
    X = X[evalSlices]

# preprocessing. This includes volume normalization (optional) and
# thresholding (optional)
selectedPixels = numpy.logical_and(args.xLowerBound <= X,
                                   X <= args.xUpperBound)

# Note: I observed strange behavior when running the erosion operator on
# the entire tensor in a single call. So for now, I'll do this a slice at
# a time until I can figure out what the situation is with the tensor.
if args.threshDilationKernel > 0:
    kernel = ndimage.generate_binary_structure(2, 1)
    kernel = ndimage.iterate_structure(
        kernel, args.threshDilationKernel).astype(int)
    for ii in range(selectedPixels.shape[0]):
        selectedPixels[ii, :, :] = ndimage.binary_dilation(
            selectedPixels[ii, :, :], structure=kernel, iterations=1)
else:
    print('[em_evaluate]: no threshold dilation will be applied')

if args.threshErosionKernel > 0:
    kernel = ndimage.generate_binary_structure(2, 1)
    kernel = ndimage.iterate_structure(
        kernel, args.threshErosionKernel).astype(int)
    for ii in range(selectedPixels.shape[0]):
        selectedPixels[ii, :, :] = ndimage.binary_erosion(
            selectedPixels[ii, :, :], structure=kernel, iterations=1)
else:
    print('[em_evaluate]: no threshold erosion will be applied')

lowerPixels = numpy.logical_and(numpy.logical_not(selectedPixels),
                                X < args.xLowerBound)
upperPixels = numpy.logical_and(numpy.logical_not(selectedPixels),
                                X > args.xUpperBound)
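# A self-contained sketch of the slice-at-a-time workaround above: apply a
# 2D structuring element to each z-slice of a 3D stack instead of making
# one 3D call, which also guarantees the dilation cannot leak across
# slices. Assumes only numpy and scipy.ndimage; the stack and thresholds
# are synthetic stand-ins for X and the xLowerBound/xUpperBound arguments.
import numpy
from scipy import ndimage

X = numpy.random.rand(4, 64, 64)           # toy (slices, rows, cols) stack
selectedPixels = numpy.logical_and(0.2 <= X, X <= 0.8)

kernel = ndimage.iterate_structure(
    ndimage.generate_binary_structure(2, 1), 2).astype(int)
for ii in range(selectedPixels.shape[0]):
    selectedPixels[ii, :, :] = ndimage.binary_dilation(
        selectedPixels[ii, :, :], structure=kernel, iterations=1)
print(selectedPixels.sum())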