def load_image(path, df, mask=False):
    if mask:
        image_name = path.split('/')[-1].split('.')[0]
        df_copy = df[df['patientId'] == image_name]
        output = df_copy['Target'].tolist()[0]
        if output == 1:
            output = [1]
        else:
            output = [0]
        return torch.from_numpy(np.array(output))
    else:
        try:
            np_image = pydicom.dcmread(path).pixel_array
            np_image = np_image.astype(np.float64)
            np_image /= 255
            g1 = gaussian_gradient_magnitude(np_image, sigma=[.1, .9])
            g2 = gaussian_gradient_magnitude(np_image, sigma=[.9, .1])
            np_image = np.dstack((np.expand_dims(np_image, axis=2),
                                  np.expand_dims(g1, axis=2),
                                  np.expand_dims(g2, axis=2)))
        except:
            import traceback
            traceback.print_exc()
            np_image = np.zeros((1024, 1024, 3))
        return torch.from_numpy(np_image).float().permute([2, 0, 1])
def test_multiple_modes():
    # Test that the filters with multiple mode capabilities for different
    # dimensions give the same result as applying a single mode.
    arr = np.array([[1., 0., 0.],
                    [1., 1., 0.],
                    [0., 0., 0.]])

    mode1 = 'reflect'
    mode2 = ['reflect', 'reflect']

    assert_equal(sndi.gaussian_filter(arr, 1, mode=mode1),
                 sndi.gaussian_filter(arr, 1, mode=mode2))
    assert_equal(sndi.prewitt(arr, mode=mode1),
                 sndi.prewitt(arr, mode=mode2))
    assert_equal(sndi.sobel(arr, mode=mode1),
                 sndi.sobel(arr, mode=mode2))
    assert_equal(sndi.laplace(arr, mode=mode1),
                 sndi.laplace(arr, mode=mode2))
    assert_equal(sndi.gaussian_laplace(arr, 1, mode=mode1),
                 sndi.gaussian_laplace(arr, 1, mode=mode2))
    assert_equal(sndi.maximum_filter(arr, size=5, mode=mode1),
                 sndi.maximum_filter(arr, size=5, mode=mode2))
    assert_equal(sndi.minimum_filter(arr, size=5, mode=mode1),
                 sndi.minimum_filter(arr, size=5, mode=mode2))
    assert_equal(sndi.gaussian_gradient_magnitude(arr, 1, mode=mode1),
                 sndi.gaussian_gradient_magnitude(arr, 1, mode=mode2))
    assert_equal(sndi.uniform_filter(arr, 5, mode=mode1),
                 sndi.uniform_filter(arr, 5, mode=mode2))
def calculate_features(data): assert len(data.shape) == 3 and data.shape[0] > 1 # N-channel, 2D images features = [] for channel in range(data.shape[0] - 1): # Ignore the labels channel im = data[channel, :, :].astype('float32') # NB our features are chosen in a subjective and unprincipled way: features.extend(( # These could be calculated more efficiently... im, ndi.gaussian_filter(im, sigma=1), ndi.gaussian_filter(im, sigma=2), ndi.gaussian_filter(im, sigma=4), ndi.gaussian_gradient_magnitude(im, sigma=1), ndi.gaussian_gradient_magnitude(im, sigma=2), ndi.gaussian_gradient_magnitude(im, sigma=4), ndi.sobel(ndi.gaussian_filter(im, sigma=1), axis=-1), ndi.sobel(ndi.gaussian_filter(im, sigma=2), axis=-1), ndi.sobel(ndi.gaussian_filter(im, sigma=4), axis=-1), ndi.sobel(ndi.gaussian_filter(im, sigma=1), axis=-2), ndi.sobel(ndi.gaussian_filter(im, sigma=2), axis=-2), ndi.sobel(ndi.gaussian_filter(im, sigma=4), axis=-2), ndi.gaussian_laplace(im, sigma=1), ndi.gaussian_laplace(im, sigma=2), ndi.gaussian_laplace(im, sigma=4), ndi.convolve(ndi.gaussian_filter(im, sigma=1), weights=((-1, 0, 0), (0, 2, 0), (0, 0, -1))), ndi.convolve(ndi.gaussian_filter(im, sigma=2), weights=((-1, 0, 0), (0, 2, 0), (0, 0, -1))), ndi.convolve(ndi.gaussian_filter(im, sigma=1), weights=((0, 0, -1), (0, 2, 0), (-1, 0, 0))), ndi.convolve(ndi.gaussian_filter(im, sigma=2), weights=((0, 0, -1), (0, 2, 0), (-1, 0, 0))), )) return np.stack(features, axis=2) # Not my usual byteorder
def gaussian_kernel(self,xvalues,yvalues,r200,normalization=100,scale=10,xres=200,yres=220,xmax=6.0,ymax=5000.0,adj=20): """ Uses a 2D gaussian kernel to estimate the density of the phase space. As of now, the maximum radius extends to 6Mpc and the maximum velocity allowed is 5000km/s The "q" parameter is termed "scale" here which we have set to 10 as default, but can go as high as 50. "normalization" is simply H0 "x/yres" can be any value, but are recommended to be above 150 "adj" is a custom value and changes the size of uniform filters when used (not normally needed) """ self.x_scale = xvalues/xmax*xres self.y_scale = ((yvalues+ymax)/(normalization*scale))/((ymax*2.0)/(normalization*scale))*yres img = np.zeros((xres+1,yres+1)) self.x_range = np.linspace(0,xmax,xres+1) self.y_range = np.linspace(-ymax,ymax,yres+1) for j in range(xvalues.size): img[self.x_scale[j],self.y_scale[j]] += 1 #Estimate kernel sizes #Uniform #self.ksize = 3.12/(xvalues.size)**(1/6.0)*((np.var(self.x_scale[xvalues<r200])+np.var(self.y_scale[xvalues<r200]))/2.0)**0.5/adj #if self.ksize < 3.5: # self.ksize = 3.5 #Gaussian self.ksize_x = (4.0/(3.0*xvalues.size))**(1/5.0)*np.std(self.x_scale[xvalues<r200]) self.ksize_y = (4.0/(3.0*yvalues.size))**(1/5.0)*np.std(self.y_scale[xvalues<r200]) #smooth with estimated kernel sizes #img = ndi.uniform_filter(img, (self.ksize,self.ksize))#,mode='reflect') self.img = ndi.gaussian_filter(img, (self.ksize_y,self.ksize_x),mode='reflect') self.img_grad = ndi.gaussian_gradient_magnitude(img, (self.ksize_y,self.ksize_x)) self.img_inf = ndi.gaussian_gradient_magnitude(ndi.gaussian_gradient_magnitude(img, (self.ksize_y,self.ksize_x)), (self.ksize_y,self.ksize_x))
def gborders(img, alpha=1.0, sigma=1.0):
    """Stopping criterion for image borders."""
    # The norm of the gradient.
    gausgradm = np.zeros(img.shape, np.double)
    gaussian_gradient_magnitude(img, sigma, output=gausgradm, mode='constant')
    thr = 2500
    mask = gausgradm > thr
    gausgradm[mask] = thr
    return 1.0 / np.sqrt(1.0 + alpha * gausgradm)
def predict_image(loc_model, bin_edge_model_with_contact, edge_model_with_contact,
                  edge_model_without_contact, np_image, image_id):
    image_gradient = gaussian_gradient_magnitude(np_image, sigma=.4)

    results = []
    results.extend(predict_subimages(np_image, image_gradient, False, 0, loc_model))
    results.extend(predict_subimages(np_image, image_gradient, False, 1, loc_model))
    results.extend(predict_subimages(np_image, image_gradient, False, 2, loc_model))
    results.extend(predict_subimages(np_image, image_gradient, False, 3, loc_model))
    result_array = np.dstack(results)
    result_mean = np.nanmean(result_array, 2)
    print(result_mean.shape)
    prediction_f = np.vectorize(lambda t: 1 if t > confidence_threshold else 0)
    nuclei_predictions = prediction_f(result_mean)

    edges_with_contact = []
    edges_with_contact.extend(predict_subimages(np_image, image_gradient, False, 0, edge_model_with_contact))
    edges_with_contact.extend(predict_subimages(np_image, image_gradient, False, 1, edge_model_with_contact))
    edges_with_contact.extend(predict_subimages(np_image, image_gradient, False, 2, edge_model_with_contact))
    edges_with_contact.extend(predict_subimages(np_image, image_gradient, False, 3, edge_model_with_contact))
    edge_array_with_contact = np.dstack(edges_with_contact)

    edges_without_contact = []
    edges_without_contact.extend(predict_subimages(np_image, image_gradient, False, 0, edge_model_without_contact))
    edges_without_contact.extend(predict_subimages(np_image, image_gradient, False, 1, edge_model_without_contact))
    edges_without_contact.extend(predict_subimages(np_image, image_gradient, False, 2, edge_model_without_contact))
    edges_without_contact.extend(predict_subimages(np_image, image_gradient, False, 3, edge_model_without_contact))
    edge_array_without_contact = np.dstack(edges_without_contact)

    edge_mean_with_contact = np.nanmean(edge_array_with_contact, 2)
    edge_predictions_with_contact = prediction_f(edge_mean_with_contact)
    edge_mean_without_contact = np.nanmean(edge_array_without_contact, 2)
    edge_predictions_without_contact = prediction_f(edge_mean_without_contact)

    bin_model_input = np.subtract(nuclei_predictions,
                                  np.add(edge_predictions_with_contact, edge_predictions_without_contact))
    bin_model_input = (bin_model_input > 0).astype(int)
    bin_model_input = binary_opening(bin_model_input, iterations=1)
    bin_gradient = gaussian_gradient_magnitude(bin_model_input, sigma=.4)

    bin_edge_with_contact = []
    bin_edge_with_contact.extend(predict_subimages(bin_model_input, bin_gradient, False, 0, bin_edge_model_with_contact))
    bin_edge_with_contact.extend(predict_subimages(bin_model_input, bin_gradient, False, 1, bin_edge_model_with_contact))
    bin_edge_with_contact.extend(predict_subimages(bin_model_input, bin_gradient, False, 2, bin_edge_model_with_contact))
    bin_edge_with_contact.extend(predict_subimages(bin_model_input, bin_gradient, False, 3, bin_edge_model_with_contact))
    bin_edge_with_contact = np.dstack(bin_edge_with_contact)
    bin_edge_with_contact = np.nanmean(bin_edge_with_contact, 2)
    bin_edge_with_contact = prediction_f(bin_edge_with_contact)

    input_dict = {'output_n': nuclei_predictions,
                  'edges_with_contact': edge_predictions_with_contact,
                  'bin_edge_with_contact': bin_edge_with_contact,
                  'edges_without_contact': edge_predictions_without_contact,
                  'image_id': image_id,
                  'np_image': np_image,
                  'bin_edge_model_with_contact': bin_edge_model_with_contact}
    output_dicts, clusters = get_outputs(input_dict)
    #output_dicts.extend()
    return output_dicts, clusters, nuclei_predictions, edge_predictions_with_contact, edge_predictions_without_contact
def pruning(self, skeleton_img, sigma):
    skeleton_img_mat = image_conversion.cv2array(skeleton_img)
    # Output array for the result of the gradient computation
    gradient_output = numpy.empty_like(skeleton_img_mat)
    # Gradient computation
    ndimage.gaussian_gradient_magnitude(skeleton_img_mat, sigma, gradient_output)
    # Normalization
    gradient_output /= gradient_output.max()
    # Convert the array back into an image
    grad_img = image_conversion.array2cv(gradient_output)
    # Threshold-based segmentation of the gradient image
    dist_gradient_thresh = cv.CreateImage(cv.GetSize(grad_img), 8, 1)
    cv.InRangeS(grad_img, 0.6, 1, dist_gradient_thresh)
    return dist_gradient_thresh
def CE_e(img, radius=None, stop_condition=40):
    '''
    Contrast Enhancement of Medical X-Ray Image Using Morphological Operators with
    Optimal Structuring Element, https://arxiv.org/pdf/1905.08545.pdf

    :param img: 2D np array, image
    :param radius: int [1-N], radius of the structuring element used for morphology operations
    :param stop_condition: int, value to which the Edge Content (EC) difference is compared;
                           if the EC difference is smaller than 'stop_condition', the current
                           radius is considered optimal (recommended: 10-100 depending on the problem)
    :return: 2D np array, contrast-enhanced image normalized to the range [0-1]
    '''
    A = minmax(img)
    ECA = np.sum(gaussian_gradient_magnitude(A, 1))
    prevEC = 0

    # radius adapted to the image
    if radius is None:
        convMtx = [np.Inf]
        for r in range(1, 15):
            # define SE as B
            B = selem.disk(r)
            # opening and closing operations as defined in the paper
            Atop = A - opening(A, selem=B)
            Abot = closing(A, selem=B) - A
            Aenhanced = A + Atop - Abot
            Aenhanced = np.clip(Aenhanced, a_min=0, a_max=None)
            # Edge Content calculation
            EC = np.sum(gaussian_gradient_magnitude(Aenhanced, 1))
            # min-max scaling of the processed image
            Aenhanced_normed = (Aenhanced - np.min(Aenhanced)) / (np.max(Aenhanced) - np.min(Aenhanced))
            # stopping condition
            conv = EC - prevEC
            convMtx.append(conv)
            if convMtx[-2] - convMtx[-1] < stop_condition:
                break
            prevEC = EC
    # pre-defined radius
    else:
        print("Radius =", radius)
        B = selem.disk(radius)
        Atop = A - opening(A, selem=B)
        Abot = closing(A, selem=B) - A
        Aenhanced = A + Atop - Abot
        Aenhanced = np.clip(Aenhanced, a_min=0, a_max=None)
        EC = np.sum(gaussian_gradient_magnitude(Aenhanced, 1))
        Aenhanced_normed = (Aenhanced - np.min(Aenhanced)) / (np.max(Aenhanced) - np.min(Aenhanced))
        print("EC =", EC)
    return Aenhanced_normed
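A minimal call sketch for CE_e above; the helpers it relies on (minmax, selem, opening, closing) are not defined in this snippet, so this assumes they are importable in the same module.

import numpy as np

# Hypothetical usage, assuming CE_e and its helpers (minmax, selem, opening, closing)
# are available in the current module:
xray = np.random.default_rng(0).random((128, 128))
enhanced = CE_e(xray, radius=3)        # fixed structuring-element radius, skips the sweep
print(enhanced.min(), enhanced.max())  # normalized to the range [0, 1]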
def ggf1(infile, outfile, sigma): ''' Creates a log-scaled, smoothed, gaussian gradient filtered image (in that order) from a fits file :param infile: fits image file to read in :param outfile: fits file to create :param sigma: sigma value for gaussian used in filtering :return: both fits file and png image ''' #First we will just read in the data and get the information we need file_temp = fits.open(infile) data = file_temp[0].data data[data <= 0] = np.mean(data) data_log = np.arcsinh(data) #np.log(data) #log data #data_log = np.ma.array(data_log,mask=np.isnan(data_log)) data_filtered = scim.gaussian_gradient_magnitude(data_log, sigma) #filter data #Update fits file to save new filtered data file_temp[0].data = data_filtered if os.path.isfile(outfile + '.img'): os.remove(outfile + '.img') os.remove(outfile + '.png') file_temp.writeto(outfile + '.img') plt.imshow(data_filtered) plt.colorbar() #plt.savefig(outfile+'.png') plt.clf() return data_filtered
def defocus_sweep(z1_min, z1_max, N, z, data, mask, whitefield, basis, x_pixel_size, y_pixel_size, translations, ls): """ Sweep over possible defocus values """ z1s = np.linspace(z1_min, z1_max, N) Os = [] it = tqdm.trange(z1s.shape[0], desc='sweeping defocus') for i in it: z1 = z1s[i] # generate pixel mapping pixel_map, pixel_translations, res = st.generate_pixel_map( mask.shape, translations, basis, x_pixel_size, y_pixel_size, z, z1, None, None, None, verbose=False) # generate reference image I0 = st.make_object_map(data.astype(whitefield.dtype), mask, whitefield, pixel_translations, pixel_map, ls)[0] Os.append(np.mean(gaussian_gradient_magnitude(I0, sigma=ls)**2)) z1 = z1s[np.argmax(Os)] return np.array(Os), z1
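The figure of merit above is just the mean squared Gaussian gradient magnitude of the reference image. A small self-contained check (scipy only, synthetic step edge) showing that this sharpness measure drops when the image is blurred:

import numpy as np
from scipy.ndimage import gaussian_filter, gaussian_gradient_magnitude

def sharpness(img, ls=5.0):
    # mean squared gradient magnitude, as used in the defocus sweep above
    return np.mean(gaussian_gradient_magnitude(img, sigma=ls) ** 2)

step = np.zeros((64, 64))
step[:, 32:] = 1.0                    # sharp vertical edge
blurred = gaussian_filter(step, 4.0)  # "defocused" version

assert sharpness(step) > sharpness(blurred)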
def generate_input_image_and_masks(): folders = glob.glob(files_loc + 'stage1_train/*/') random.shuffle(folders) for folder in folders: try: image_location = glob.glob(folder + 'images/*')[0] mask_locations = glob.glob(folder + 'masks/*') start_image = Image.open(image_location).convert('LA') np_image = np.array(start_image.getdata())[:, 0] np_image = np_image.reshape(start_image.size[1], start_image.size[0]) resized_np_image = resize(np_image, full_image_read_size) # resized_np_image = np.array(start_image.getdata())[:,0] # resized_np_image = resized_np_image.reshape(resized_image.size[0], resized_image.size[1]) np_gradient_image = gaussian_gradient_magnitude(np_image, sigma=.4) # np_image = imageio.imread(image_location) masks = [] resized_masks = [] for i in mask_locations: mask_image = Image.open(i) np_mask = np.array(mask_image.getdata()) np_mask = np_mask.reshape(start_image.size[1], start_image.size[0]) masks.append(np_mask) resized_np_mask = resize(np_mask, full_image_read_size) resized_masks.append(resized_np_mask) except OSError: continue yield np_image, np_gradient_image, masks, resized_np_image, resized_masks
def get_image_arrays_for_edge_detection_training(input_image, size, masks): inputs = [] result_dicts = [] adj_image = np.pad(input_image, size // 2, mode='constant') np_gradient_image = gaussian_gradient_magnitude(adj_image, sigma=.4) for i in range(input_image.shape[0]): for j in range(input_image.shape[1]): inputs.append((i, j)) if len(inputs) > sample_per_image_edge_model: inputs = random.sample(inputs, sample_per_image_edge_model) for i in inputs: result_dicts.append( get_image_array_for_edge_detection(adj_image, np_gradient_image, size, i[0], i[1], masks)) result_dicts = [i for i in result_dicts if i] df = pd.DataFrame.from_dict(result_dicts) try: positive_matches = df[df['output'] > 0] negative_matches = df[df['output'] == 0] negative_matches = negative_matches.sample(n=positive_matches.shape[0]) df = pd.concat([positive_matches, negative_matches], ignore_index=True) except: traceback.print_exc() #TODO: handle case with more positives than negatives pass df = df.sample(frac=1) return df
def remove_dust(self): self.seq = [] for i in xrange(0,self.n_frames): # greyscale conversion img = np.average(self.reader.get_data(i),axis=2) self.seq.append(img) self.seq = np.array(self.seq) #var = np.var(self.seq, axis=0) #min = np.min(self.seq, axis=0) #max = np.max(self.seq, axis=0) #delta = max - min #var = stats.variation(self.seq, axis=0) #gmean = stats.gmean(self.seq, axis=0) a = np.average(self.seq, axis=0) #grad = ndimage.gaussian_gradient_magnitude(a , 0.25) #map = ndimage.prewitt(a) map = ndimage.gaussian_laplace(a,2.5) * ndimage.gaussian_gradient_magnitude(a , 0.25) cutoff = np.percentile(map,99.9) map[map<cutoff]=0 map[map>0]=1 #map = grad #map[map>300]=300 fig = plt.figure(figsize=(20,8), frameon=False) fig.subplots_adjust(hspace=0) fig.subplots_adjust(wspace=0) ax1 = fig.add_subplot(1, 2, 1) ax1.imshow(map,interpolation='nearest') ax1.set_title('variance') ax2 = fig.add_subplot(1, 2, 2) ax2.imshow(self.seq[0], cmap='Greys_r',interpolation='nearest') ax2.set_title('img') fig.set_tight_layout(True) plt.show()
def VesicleEdge_phc(img, x0, y0, r0, N=100, phi1=0, phi2=2 * np.pi, sigma=1):
    Xedge = np.empty(N)
    Yedge = np.empty(N)
    for i, phi in enumerate(np.linspace(phi1, phi2, N)):
        x = x0 + r0 * np.cos(phi)
        y = y0 + r0 * np.sin(phi)
        if x < 0:
            x = 0
            y = y0 + (x - x0) * np.tan(phi)
        elif x > img.shape[1] - 1:
            x = img.shape[1] - 1
            y = y0 + (x - x0) * np.tan(phi)
        if y < 0:
            y = 0
            x = x0 + (y - y0) / np.tan(phi)
        elif y > img.shape[0] - 1:
            y = img.shape[0] - 1
            x = x0 + (y - y0) / np.tan(phi)
        point1 = np.asarray(((y0, x0), (PIX_ERR, PIX_ERR)))
        point2 = np.asarray(((y, x), (PIX_ERR, PIX_ERR)))
        metric, metric_err, line = section_profile(img, point1, point2)
        grad = gaussian_gradient_magnitude(line, sigma)
        pos = np.argmax(grad)
        Xedge[i] = x0 + pos * np.cos(phi) * metric
        Yedge[i] = y0 + pos * np.sin(phi) * metric
    return Xedge, Yedge
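The per-ray edge localisation above reduces to taking the argmax of the Gaussian gradient magnitude along a 1-D intensity profile; a tiny scipy-only illustration:

import numpy as np
from scipy.ndimage import gaussian_gradient_magnitude

line = np.zeros(100)
line[60:] = 1.0                                     # step edge at index 60
grad = gaussian_gradient_magnitude(line, sigma=1)
print(np.argmax(grad))                              # peaks at the step, i.e. close to 60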
def inverse_gaussian_gradient(image, alpha=100.0, sigma=5.0):
    """Inverse of gradient magnitude.

    Compute the magnitude of the gradients in the image and then invert the
    result in the range [0, 1]. Flat areas are assigned values close to 1,
    while areas close to borders are assigned values close to 0.

    This function or a similar one defined by the user should be applied over
    the image as a preprocessing step before calling
    `morphological_geodesic_active_contour`.

    Parameters
    ----------
    image : (M, N) or (L, M, N) array
        Grayscale image or volume.
    alpha : float, optional
        Controls the steepness of the inversion. A larger value will make the
        transition between the flat areas and border areas steeper in the
        resulting array.
    sigma : float, optional
        Standard deviation of the Gaussian filter applied over the image.

    Returns
    -------
    gimage : (M, N) or (L, M, N) array
        Preprocessed image (or volume) suitable for
        `morphological_geodesic_active_contour`.
    """
    gradnorm = ndi.gaussian_gradient_magnitude(image, sigma, mode='nearest')
    return 1.0 / np.sqrt(1.0 + alpha * gradnorm)
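A quick sanity check of the preprocessing above (assuming the inverse_gaussian_gradient defined here and its numpy/scipy imports are in scope): flat regions map to values near 1, the object boundary to much smaller values.

import numpy as np

img = np.zeros((64, 64))
img[16:48, 16:48] = 1.0                       # bright square on a dark background
g = inverse_gaussian_gradient(img, alpha=100.0, sigma=2.0)
print(g[0, 0], g[32, 32])                     # flat exterior/interior: close to 1
print(g[16, 32])                              # on the boundary: much smaller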
def get_image_arrays_for_full_location_training(input_image, masks): input_image = normalize_image(input_image) gradient = gaussian_gradient_magnitude(input_image, sigma=.4) mask_sum = functools.reduce(operator.add, masks) vectorized = np.vectorize(lambda t: 1 if t > 0 else 0) mask_sum = vectorized(mask_sum) output = [] output.extend( get_subimages(input_image, gradient, mask_sum, transpose=False, rotation=0)) output.extend( get_subimages(input_image, gradient, mask_sum, transpose=False, rotation=1)) output.extend( get_subimages(input_image, gradient, mask_sum, transpose=False, rotation=2)) output.extend( get_subimages(input_image, gradient, mask_sum, transpose=False, rotation=3)) output.extend( get_subimages(input_image, gradient, mask_sum, transpose=True, rotation=0)) output.extend( get_subimages(input_image, gradient, mask_sum, transpose=True, rotation=1)) output.extend( get_subimages(input_image, gradient, mask_sum, transpose=True, rotation=2)) output.extend( get_subimages(input_image, gradient, mask_sum, transpose=True, rotation=3)) return pd.DataFrame(output)
def getFilterResponses(im, filterSize=7, DogScales=[3, 5], GaussianScales=[1]): """ im: Nx3 channel image , N: number of samples """ print("Computing Lab images...") im = color.rgb2lab(im) responses = [] num_channels = im.shape[3] for k in range(num_channels): for i in GaussianScales: a = ndi.gaussian_filter(im[:, :, :, k], sigma=i) responses.append( np.reshape(a, (a.shape[0], a.shape[1] * a.shape[2]))) # print("responses size: ", np.shape(responses)) b = ndi.laplace(a) responses.append( np.reshape(b, (b.shape[0], b.shape[1] * b.shape[2]))) for i in DogScales: a = ndi.gaussian_gradient_magnitude(im[:, :, :, k], sigma=i) responses.append( np.reshape(a, (a.shape[0], a.shape[1] * a.shape[2]))) for j in GaussianScales: t = ndi.gaussian_filter(im[:, :, :, k], sigma=i) a = ndi.sobel(t, axis=0) responses.append( np.reshape(a, (a.shape[0], a.shape[1] * a.shape[2]))) b = ndi.sobel(t, axis=1) responses.append( np.reshape(b, (b.shape[0], b.shape[1] * b.shape[2]))) return np.array(responses)
def execute(self, eopatch):
    elevation = eopatch[self.feature[0]][self.feature[1]].squeeze()
    gradient = ndimage.gaussian_gradient_magnitude(elevation, 1)
    eopatch.add_feature(self.result_feature[0], self.result_feature[1], gradient[..., np.newaxis])
    return eopatch
def generate_word_cloud(): # read the mask image d = path.dirname(__file__) if "__file__" in locals() else os.getcwd() twitter_image = np.array( Image.open(path.join(d, "assets", "twitter_mask.png"))) # create mask white is "masked out" twitter_mask = twitter_image.copy() twitter_mask[twitter_mask.sum(axis=2) == 0] = 255 # some finesse: we enforce boundaries between colors so they get less washed out. # For that we do some edge detection in the image edges = np.mean([ gaussian_gradient_magnitude(twitter_mask[:, :, i] / 255., 2) for i in range(3) ], axis=0) twitter_mask[edges > .08] = 255 # the build-in STOPWORDS list will be used, we could more STOPWORDS here. stopwords = set(STOPWORDS) wc = WordCloud(background_color="white", max_words=2000, mask=twitter_mask, stopwords=stopwords, contour_width=3, contour_color='steelblue') # generate word cloud text = fetch_all_as_text(allow_cached=False) twitter_wc = wc.generate(text) fig = px.imshow(twitter_wc) fig.update_yaxes(visible=False) fig.update_xaxes(visible=False) return fig
def watershed_segmentation(img, mask, sigma=4):
    """
    Watershed segmentation of image using annotations as label markers.

    The image used for the watershed is the negative of the gradient of the
    original image, convolved with a Gaussian for more robustness.

    Parameters
    ----------
    img : ndarray
        image to be segmented
    mask : ndarray of ints
        binary array, each connected component corresponds to a different
        object to be segmented
    sigma : float
        standard deviation of the Gaussian convolving the gradient. Increase
        for smoother boundaries.
    """
    if img.ndim > 2:
        img = color.rgb2gray(img)
    labels = measure.label(mask)
    gradient_img = -ndimage.gaussian_gradient_magnitude(img, sigma)
    output = segmentation.watershed(gradient_img, labels)
    return output
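A toy run of watershed_segmentation above on two synthetic blobs with one seed marker each (assumes the function and its scipy/skimage imports are in scope):

import numpy as np
from scipy import ndimage

img = np.zeros((80, 80))
img[10:38, 10:38] = 1.0
img[45:73, 45:73] = 1.0
img = ndimage.gaussian_filter(img, 2)   # soften the edges a little
mask = np.zeros_like(img, dtype=bool)
mask[20, 20] = True                     # one seed inside each object
mask[60, 60] = True
labels = watershed_segmentation(img, mask, sigma=2)
print(np.unique(labels))                # [1 2]: every pixel assigned to one of the seeds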
def do_edge_detection_on_image_list(img_list, sigma=2, verbose=False):
    '''This function takes a list of images, performs edge detection on them
    using the gradient magnitude with Gaussian derivatives, and returns the
    gradient images in a new list.

    Inputs:
        img_list -> list of images to perform edge detection on
        sigma -> standard deviation of the Gaussian used to perform edge detection
        verbose -> Boolean flag to display the edge-detected images
    Outputs:
        edges -> list of images that edge detection was run on
    '''
    # Do edge detection on all images
    # NOTE: sigma=2 might be too washed out but sigma=1 might be too noisy.
    # Something worth playing around with.
    edges = [ndimage.gaussian_gradient_magnitude(img, sigma=sigma) for img in img_list]

    if verbose:
        for i, img in enumerate(edges):
            fig = plt.figure()
            ax1 = fig.add_subplot(121)
            ax2 = fig.add_subplot(122)
            ax1.imshow(img_list[i], cmap='gray')
            ax1.set_title("Original Image")
            ax2.imshow(img, cmap='gray')
            ax2.set_title("Edge Detection of Image")
            plt.show()
    return edges
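Minimal usage of do_edge_detection_on_image_list above (assumes the function and its scipy/matplotlib imports are in scope):

import numpy as np

imgs = [np.pad(np.ones((20, 20)), 10), np.zeros((40, 40))]
edges = do_edge_detection_on_image_list(imgs, sigma=2, verbose=False)
print(len(edges), edges[0].shape)   # one gradient image per input, same shape: 2 (40, 40)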
def workImage(self): targetImage = self.parameters["TargetImage"] if not "DetailImage" in self.parameters: self.setWindowTitle("NailedIt - Detail Image") gradmag = ndimage.gaussian_gradient_magnitude( self.np_targetArray, 3) gradmag = gradmag / gradmag.max() self.parameters["DetailImage"] = gradmag self.showImage(targetImage) self.showImage(gradmag, slot=1) self.timer.start(1000) elif not "EdgesImage" in self.parameters: if "edgesImagePath" in self.parameters: img = Image.open(self.parameters["edgesImagePath"]) img = img.resize((self.parameters["proc_width"], self.parameters["proc_height"])) self.parameters["EdgesImage"] = numpy.array( img.getchannel("R"), dtype='float32') / 255 else: self.setWindowTitle("NailedIt - Edges Image") gradmag = ndimage.gaussian_gradient_magnitude( self.np_targetArray, 1.5) gradmag = gradmag / gradmag.max() self.parameters["EdgesImage"] = gradmag self.showImage(self.parameters["EdgesImage"], slot=1) self.timer.start(1000) else: npt = ndimage.filters.gaussian_filter( self.np_targetArray, self.parameters["blurAmount"]) self.blurredTarget = npt #numpy.clip(npt, 0, self.currentDensity)/self.currentDensity self.showImage(self.blurredTarget, slot=1) self.disconnect(self.timer, QtCore.SIGNAL("timeout()"), self.workImage) self.connect(self.timer, QtCore.SIGNAL("timeout()"), self.workPoints) self.timer.start(10) self.mode = "ProcessPoints" self.setWindowTitle("NailedIt - " + self.mode)
def gborders(img, alpha=1.0, sigma=1.0):
    """
    Stopping criterion for image borders. Lower at the border, higher in the center.
    """
    # The norm of the gradient.
    gradnorm = gaussian_gradient_magnitude(img, sigma, mode='constant')
    return 1.0 / np.sqrt(1.0 + alpha * gradnorm)
def __init__(self, *args, **kwargs):
    im = kwargs.pop('image')
    super(Variance_no_D_Cmap, self).__init__(*args, **kwargs)
    fim = ndimage.gaussian_gradient_magnitude(im, 2)
    fim = fim / np.std(fim)
    alpha = 1e0
    self.cmap = np.exp(-fim.flat[self.anchor['imask']] * alpha) + 1e-10
def gborders(img, alpha=1.0, sigma=1.0):
    """Stopping criterion for image borders."""
    # AM: gaussian_gradient_magnitude is a scipy.ndimage.filters function for the
    # multidimensional calculation of the gradient magnitude using Gaussian derivatives.
    # The norm of the gradient.
    gradnorm = gaussian_gradient_magnitude(img, sigma, mode='constant', cval=0.0)
    # AM: Definition of the function g(I) of the geodesic active contour
    return 1.0 / np.sqrt(1.0 + alpha * gradnorm)
def _makeComplexityArray(self, sigma1, sigma2, multiplier=.8): h = self.array.shape[0] w = self.array.shape[1] d = (h**2 + w**2)**0.5 gradient = ndimage.gaussian_gradient_magnitude(self.array.sum(axis=2), sigma=sigma1) gradient_max = ndimage.maximum_filter(gradient, size=d * sigma2) self.array_complexity = 1 - gradient_max / gradient_max.max() * multiplier - (1 - multiplier)
def gaussian_kernel(self,xvalues,yvalues,r200,normalization=100,scale=10,res=200,adj=20,see=False): yres = 220 #x_scale = (xvalues-np.min(xvalues))/np.max(xvalues-np.min(xvalues))*res #y_scale = ((yvalues-np.min(yvalues))/(normalization*scale))/np.max(xvalues-np.min(xvalues))*res self.x_scale = xvalues/6.0*res self.y_scale = ((yvalues+5000)/(normalization*scale))/(10000.0/(normalization*scale))*yres #img = np.zeros((int(np.max(x_scale))+1,int(np.max(y_scale))+1)) img = np.zeros((res+1,yres+1)) #x_range = np.linspace(np.min(xvalues),np.max(xvalues),int(np.max(x_scale))+1) #y_range = np.linspace(np.min(yvalues),np.max(yvalues),int(np.max(y_scale))+1) x_range = np.linspace(0,6,res+1) y_range = np.linspace(-5000,5000,yres+1) for j in range(xvalues.size): img[self.x_scale[j],self.y_scale[j]] += 1 #pcolormesh(img.T) #find ksize #xval = xvalues[np.where((xvalues<3) & (yvalues<2000) & (yvalues > -2000))] #yval = yvalues[np.where((xvalues<3) & (yvalues<2000) & (yvalues > -2000))] #x_scale2 = (xval-np.min(xval))/np.max(xval-np.min(xval))*res #y_scale2 = ((yval-np.min(yval))/(normalization*scale))/np.max(xval-np.min(xval))*res #xksize = 3.12/(xvalues.size)**(1.0/6.0)*((np.var(x_scale))/2.0)**0.5/adj #yksize = 3.12/(xvalues.size)**(1.0/6.0)*((np.var(y_scale))/2.0)**0.5/adj self.ksize = 3.12/(xvalues.size)**(1/6.0)*((np.var(self.x_scale[xvalues<r200])+np.var(self.y_scale[xvalues<r200]))/2.0)**0.5/adj self.ksize_x = (4.0/(3.0*xvalues.size))**(1/5.0)*np.std(self.x_scale[xvalues<r200]) self.ksize_y = (4.0/(3.0*yvalues.size))**(1/5.0)*np.std(self.y_scale[xvalues<r200]) if self.ksize < 3.5: self.ksize = 3.5 #ksize = 6.77588630223 #print 'kernel size',ksize #img = ndi.uniform_filter(img, (self.ksize,self.ksize))#,mode='reflect') img = ndi.gaussian_filter(img, (self.ksize_y,self.ksize_x))#,mode='reflect') img_grad = ndi.gaussian_gradient_magnitude(img, (self.ksize_y,self.ksize_x)) img_inf = ndi.gaussian_gradient_magnitude(ndi.gaussian_gradient_magnitude(img, (self.ksize_y,self.ksize_x)), (self.ksize_y,self.ksize_x)) # if see == True: # s = figure() # ax = s.add_subplot(111) # ax.pcolormesh(x_range,y_range,img.T) # show() return (x_range,y_range,img,np.abs(img_grad),np.abs(img_inf))
def defocus_sweep(self, defoci_fs, defoci_ss=None, ls_ri=30, return_sweep=True): """Calculate a set of `reference_image` for each defocus in `defoci` and return a gradient magnitude for each `reference_image` as a figure of merit of it's sharpness (the higher the value the sharper `reference_image` is). `ls_ri` should be large enough in order to supress high frequency noise. Return a sweep image if `return_sweep` is True. Parameters ---------- defoci_fs : numpy.ndarray Array of defocus distances along the fast detector axis [m]. defoci_ss : numpy.ndarray, optional Array of defocus distances along the slow detector axis [m]. ls_ri : float, optional `reference_image` length scale in pixels. return_sweep : bool, optional Return a sweep image if it's True. Returns ------- grad_mag : numpy.ndarray Array of the average values of `reference_image` gradients squared. sweep_img : numpy.ndarray Defocus sweep image. Only if `return_sweep` is True. See Also -------- SpeckleTracking.update_reference : `reference_image` update algorithm. """ if defoci_ss is None: defoci_ss = defoci_fs.copy() grad_mag, sweep_scan = [], [] for defocus_fs, defocus_ss in zip(defoci_fs.ravel(), defoci_ss.ravel()): st_data = self.update_defocus(defocus_fs, defocus_ss) st_obj = st_data.get_st().update_reference(ls_ri=ls_ri, sw_fs=0, sw_ss=0) ri_gm = gaussian_gradient_magnitude(st_obj.reference_image, sigma=ls_ri) sweep_scan.append(st_obj.reference_image) grad_mag.append(np.mean(ri_gm**2)) grad_mag = np.array(grad_mag).reshape(defoci_fs.shape) if return_sweep: shape = tuple( np.max([ref_img.shape for ref_img in sweep_scan], axis=0)) sweep_img = np.zeros((defoci_fs.shape + shape)) for idx, ref_img in zip(np.ndindex(defoci_fs.shape), sweep_scan): sweep_img[idx][:ref_img.shape[0], :ref_img.shape[1]] = ref_img return grad_mag, sweep_img else: return grad_mag
def get_slope(dem, mode='percent'):
    slope = gaussian_gradient_magnitude(dem, 5, mode='nearest')
    if mode == 'percent':
        pass
    if mode == 'fraction':
        slope = slope / 100
    if mode == 'degrees':
        slope = rad2deg(arctan(slope / 100))
    return slope
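A small check of get_slope above on a synthetic tilted-plane DEM (assumes the function and the numpy names it uses, e.g. rad2deg/arctan, are in scope):

import numpy as np

y, x = np.mgrid[0:100, 0:100]
dem = 0.1 * x                           # plane rising 0.1 per cell in x
slope = get_slope(dem, mode='percent')
print(round(float(slope[50, 50]), 3))   # ~0.1, the gradient magnitude of the plane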
def preprocess_images_for_kmeans(input_image, masks): ''' creates large number of identical but differently preprocessed images to have more inputs ''' np_image_t = np.transpose(input_image) g_image = gaussian_gradient_magnitude(input_image, 3) g_image_t = gaussian_gradient_magnitude(np_image_t, 3) input_image = scipy.misc.imresize(input_image, k_means_image_size) np_image_t = scipy.misc.imresize(np_image_t, k_means_image_size) g_image = scipy.misc.imresize(g_image, k_means_image_size) g_image_t = scipy.misc.imresize(g_image_t, k_means_image_size) #pic_part_list = [np.expand_dims(i, axis = 2) for i in gradients] + [np.expand_dims(resized_image, axis = 2)] + [np.expand_dims(g_image, axis=2)] pic_part_list = [np.expand_dims(input_image, axis=2) ] + [np.expand_dims(g_image, axis=2)] pic_part_list_t = [np.expand_dims(np_image_t, axis=2) ] + [np.expand_dims(g_image_t, axis=2)] first_image = np.dstack(pic_part_list) second_image = np.dstack(pic_part_list_t) resized_image2 = np.rot90(first_image, 1) resized_image3 = np.rot90(first_image, 2) resized_image4 = np.rot90(first_image, 3) resized_image2_t = np.rot90(second_image, 1) resized_image3_t = np.rot90(second_image, 2) resized_image4_t = np.rot90(second_image, 3) num_of_results = len(masks) results = [] results.append({'input': first_image, 'output': num_of_results}) results.append({'input': resized_image2, 'output': num_of_results}) results.append({'input': resized_image3, 'output': num_of_results}) results.append({'input': resized_image4, 'output': num_of_results}) results.append({'input': second_image, 'output': num_of_results}) results.append({'input': resized_image2_t, 'output': num_of_results}) results.append({'input': resized_image3_t, 'output': num_of_results}) results.append({'input': resized_image4_t, 'output': num_of_results}) return pd.DataFrame.from_dict(results)
def generate_mask(image_path): colors = np.array(Image.open(image_path)) mask = colors.copy() edges = np.mean([ gaussian_gradient_magnitude(colors[:, :, i] / 255.0, 2) for i in range(3) ], axis=0) mask[edges > 0.8] = 255 return colors, mask
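The per-channel edge map used above, demonstrated on a synthetic RGB array instead of a file on disk (numpy/scipy only):

import numpy as np
from scipy.ndimage import gaussian_gradient_magnitude

colors = np.zeros((64, 64, 3), dtype=np.uint8)
colors[:, 32:, :] = 255                 # vertical colour edge
edges = np.mean([gaussian_gradient_magnitude(colors[:, :, i] / 255.0, 2)
                 for i in range(3)], axis=0)
print(edges.max())                      # largest response along the edge column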
def onEdgeFinding(self,evt): if not self.panel.selectiontool.isTargeting('Auto Create Contours'): return point = self.panel.view2image((evt.m_x,evt.m_y)) ndarray = mrc.read(os.path.join(self.appionloop.params['rundir'], self.appionloop.imgtree[self.index]['filename']+'.dwn.mrc')) mrc.write(ndarray, os.path.join(self.appionloop.params['rundir'], 'beforefilter'+'.dwn.mrc')) negative = False if self.filters: ndarray = ndimage.gaussian_filter(ndarray,1) ndarray = ndimage.gaussian_gradient_magnitude(ndarray,2) markers = [] for i in range(3): for j in range(3): if i!=0 or j!=0: markers.append((point[0]-1+i,point[1]-1+j)) markers = (1,2,3,4,5,6,7,8) #ndarray = ndimage.watershed_ift(ndarray,markers) ndarray = ndimage.laplace(ndarray) ndarray = ndimage.gaussian_filter(ndarray,1) #ndarray = apImage.preProcessImage(ndarray,params=self.appionloop.params) negative = True mrc.write(ndarray, os.path.join(self.appionloop.params['rundir'], 'afterfilter'+'.dwn.mrc')) delta = .1 targets = [] radius = 20 size = 50 rangeSize = 50 maker = PixelCurveMaker() maker._init_(size,rangeSize); for theta in range(size): theta +=0 theta*=math.pi*2/rangeSize for rad in range(size): try: if negative: maker.addData(theta,rad,127-ndarray[int(point[1]+rad*math.sin(theta))][int(point[0]+rad*math.cos(theta))]) else: maker.addData(theta,rad,ndarray[int(point[1]+rad*math.sin(theta))][int(point[0]+rad*math.cos(theta))]) except IndexError: maker.addData(theta,rad,0) maker.makeCalculations() s = self.filterSelectorChoices[self.filterSelector.GetSelection()] dilate = 2 if s == 'Latex Bead': dilate = 0 for theta in range(size): theta += 0 theta*=math.pi*2/rangeSize targets.append((point[0]+(dilate+maker.getData(theta))*math.cos(theta),point[1]+(dilate+maker.getData(theta))*math.sin(theta))) self.addPolyParticle(targets) #this section draws all of the contours that the algorithm considers - useful for debugging '''
def getGradientVideo(I, IDims, sigma = 1): GV = np.zeros(I.shape) for i in range(I.shape[0]): X = np.reshape(I[i, :], IDims) G = rgb2gray(X, False) GM = gaussian_gradient_magnitude(G, sigma) F = np.zeros(IDims) for k in range(F.shape[2]): F[:, :, k] = GM GV[i, :] = F.flatten() return GV
def generate_wordcloud(text_list, mask_image_path=None, max_words=3000, random_state=5): ''' Generates a WordCloud for the given text, and using any specified custom settings. ''' # configure image masking mask = None if mask_image_path: image_mask = np.array(Image.open(mask_image_path)) transformed_mask = image_mask.copy() transformed_mask[transformed_mask.sum(axis=2) == 0] = 255 # enforce boundaries between colors so they get less washed out edges = np.mean([ gaussian_gradient_magnitude(image_mask[:, :, i] / 255., 2) for i in range(3) ], axis=0) transformed_mask[edges > .08] = 255 # generate a WordCloud aggregated_text = " ".join(text_list) wordcloud = WordCloud(max_words=max_words, mask=transformed_mask, random_state=random_state).generate(aggregated_text) # recolor the WordCloud based on the original image's colors if mask_image_path: image_colors = ImageColorGenerator(image_mask) wordcloud.recolor(color_func=image_colors) ''' # plot the generated image plt.figure(figsize=(10, 10)) plt.imshow(wordcloud, interpolation='bilinear') # Original image plt.figure(figsize=(10, 10)) plt.title("Original Image") plt.imshow(image_mask) # Edge Map plt.figure(figsize=(10, 10)) plt.title("Edge map") plt.imshow(edges) # display image plt.axis("off") plt.show() plt.close() ''' return wordcloud
def filterImage(self, filtername, sigma):
    grey = self.workingimg
    sigma = int(sigma)
    if (filtername == '1' or filtername == 'gauf'):
        greyg = ndi.gaussian_filter(grey, sigma=sigma)
        self.workingimg = greyg
    if (filtername == '2' or filtername == 'gagm'):
        greyg = ndi.gaussian_gradient_magnitude(grey, sigma=sigma)
        self.workingimg = greyg
def hipass(image, gaussrad, dilrat):
    # A derivative of gaussian (suggested radii of 0.5) to get edges
    # followed by a morphological dilate to widen the effect and pick up extra pixels on edges
    # This can be used as a mask for
    timg = np.copy(image).reshape(96, 96)
    gradim = ndimage.gaussian_gradient_magnitude(timg, gaussrad)
    digradim = ndimage.morphology.grey_dilation(gradim, size=(dilrat, dilrat))
    return digradim.flatten()
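hipass above expects a flattened 96x96 image and returns a flattened, dilated edge map; a minimal call (assumes the function and its numpy/scipy imports are in scope):

import numpy as np

img = np.zeros((96, 96))
img[30:60, 30:60] = 1.0                     # bright square
out = hipass(img.flatten(), gaussrad=0.5, dilrat=3)
print(out.shape, out.max() > 0)             # (9216,) True: edges of the square survive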
def test_multiple_modes_gaussian_gradient_magnitude():
    # Test gaussian_gradient_magnitude filter for multiple
    # extrapolation modes
    arr = np.array([[1., 0., 0.],
                    [1., 1., 0.],
                    [0., 0., 0.]])

    expected = np.array([[0.04928965, 0.09745625, 0.06405368],
                         [0.23056905, 0.14025305, 0.04550846],
                         [0.19894369, 0.14950060, 0.06796850]])

    modes = ['reflect', 'wrap']
    calculated = sndi.gaussian_gradient_magnitude(arr, 1, mode=modes)

    assert_almost_equal(expected, calculated)
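The per-axis extrapolation modes exercised by the tests above: mode may be a single string or one string per array axis.

import numpy as np
from scipy import ndimage as sndi

arr = np.arange(16, dtype=float).reshape(4, 4)
out = sndi.gaussian_gradient_magnitude(arr, sigma=1, mode=['reflect', 'wrap'])
print(out.shape)                            # (4, 4), same shape as the input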
def wall_points_pix_3(img, refsx, axisy, sigma): N = 2 refs = np.array([]) axisy = np.array([113, 115]) for i, refx in enumerate(refsx): prof = img[:, refx] mid = axisy[i] # mid = len(prof)/2 filtered = ndimage.gaussian_gradient_magnitude(ndimage.sobel(prof), sigma) refy = np.asarray((np.argmax(filtered[:mid]), np.argmax(filtered[mid:]) + mid)) dref = np.asarray([PIX_ERR, 0]) rx = np.tile(refx, N) xy = np.column_stack((refy, rx)) # .flatten() drefe = np.repeat(np.expand_dims(dref, 0), N, 0) ref = np.concatenate((xy, drefe), 1) refs = np.append(refs, ref) return refs.reshape(-1, 2, 2)
def task2_4(): img = normalize_intensity(imread(CAMERAMAN)) img = img[30:95, [i for i in range(80, 160)]] # select subsection of image vel_x, vel_y = gradient(f=img) magn_img = gaussian_gradient_magnitude(img, 3) output_path = os.path.join(OUTPUT_DIR, "2_4_gradien_magnitude_" + os.path.split(CAMERAMAN)[-1]) imsave(output_path, magn_img) dim_x, dim_y = len(img[0]), len(img) x, y = range(dim_x), range(dim_y) x, y = meshgrid(x, y) plt.figure() imgplot = plt.imshow(img) imgplot.set_cmap('gray') plt.ylim(dim_y, 0) plt.quiver(x, y, vel_x, vel_y, pivot='middle') plt.show()
def fitFirstCTFNode(pow, rpixelsize, defocus, ht): filter = ndimage.gaussian_filter(pow,3) grad = ndimage.gaussian_gradient_magnitude(filter,3) thr = imagefun.threshold(grad,grad.mean()+3*grad.std()) if defocus: z = abs(defocus) s = calculateFirstNode(ht,z) dmean = max(0.8*s/rpixelsize, 30) else: shape = pow.shape r = 20 center = ( shape[0] / 2, shape[1] / 2 ) grad[center[0]-r: center[0]+r, center[1]-r: center[1]+r] = 0 peak = ndimage.maximum_position(grad) dmean = math.hypot(peak[0] - center[0], peak[1] - center[1]) drange = max(dmean / 4, 10) eparams = find_ast_ellipse(grad,thr,dmean,drange) if eparams: z0, zast, ast_ratio, alpha = getAstigmaticDefocii(eparams,rpixelsize, ht) return z0,zast,ast_ratio, alpha, eparams
def predict_proba_image(self, array_data, w_x, w_y): """Predict class probabilities for X""" all_proba = [] if(self.use_geodesic): pan = array_data[0] self.geodesic_cost = nd.gaussian_gradient_magnitude(pan, self.geodesic_sigma) if(self.n_steps_simple is None and self.n_steps_proba is None): for i in range(self.n_forests): if ((i != 0) and self.add_previous_prob): if(self.use_geodesic): proba = self.geodesic(proba) array_data = numpy.concatenate((array_data,proba)) proba = self.forests_[i].predict_proba_image(array_data, w_x, w_y) all_proba.append(proba) else: i = 0 done = True for step_proba in range(self.n_steps_proba): for step_simple in range(self.n_steps_simple): if (step_proba != 0) and (step_simple == 0): proba = self.forests_[i].predict_proba_image(array_data, w_x, w_y) if(self.use_geodesic): proba = self.geodesic(proba) #if use_geodesic array_data = numpy.concatenate((array_data,proba)) #if (step_proba != 0) and (step_simple=0): proba = self.forests_[i].predict_proba_image(array_data, w_x, w_y) i +=1 #for step_simple #for step_proba if(self.fusion == "mean"): for j in range(1, len(all_proba)): proba += all_proba[j] return proba / self.n_forests else: # (fusion =="last"): return proba
def gdt(binaryImage, image, voxelSpacing=(1.0, 1.0, 1.0), subtractInside=True, includeEDT=True, gamma=32, numIterations=3, boundingMask=None, magnitudes=None): """ Runs geodesic distance transform based on gradient magnitues as the geodesic cost @param boundingMask is a mask provided where its bounding box will be used to restrict where the distance transform is run for performance gain """ if numpy.count_nonzero(binaryImage) == 0: print "[WARN]--->No binary data provided to GDT" distanceMap = numpy.zeros(binaryImage.shape) distanceMap[:] = numpy.NaN return distanceMap if magnitudes is None: image = numpy.asarray(image, dtype=numpy.float64) magnitudes = gaussian_gradient_magnitude(image, 0.5) if includeEDT and gamma != 1: magnitudes *= gamma minBounds, maxBounds = None, None if boundingMask is not None: minBounds, maxBounds = get_bounding_box(boundingMask) binaryImage = binaryImage[minBounds[0]:maxBounds[0], minBounds[1]:maxBounds[1], minBounds[2]:maxBounds[2]] magnitudes = magnitudes[minBounds[0]:maxBounds[0], minBounds[1]:maxBounds[1], minBounds[2]:maxBounds[2]] distanceMap = geodesic_distance_transform(binaryImage, magnitudes, numIterations=numIterations, spacing=voxelSpacing, includeEDT=includeEDT) if subtractInside: distanceMap -= geodesic_distance_transform(logical_not(binaryImage), magnitudes, numIterations=numIterations, spacing=voxelSpacing, includeEDT=includeEDT) if magnitudes is None and includeEDT and gamma != 1: # make it more comparable when different gamma are used distanceMap /= gamma if boundingMask is not None: temp = numpy.zeros(boundingMask.shape) temp[minBounds[0]:maxBounds[0], minBounds[1]:maxBounds[1], minBounds[2]:maxBounds[2]] = distanceMap distanceMap = temp return distanceMap
def wall_points_pix_2(img, refsx, sigma): """ @param img: @param refsx: """ N = 2 # number of walls to find refs = np.array([]) for refx in refsx: prof = img[:, refx] gradprof = ndimage.gaussian_gradient_magnitude(prof, sigma) start, end = split_two_peaks(gradprof, 1) if start > end: start, end = end, start refy = split_two_peaks(prof[start:end], -1) + start dref = np.asarray([PIX_ERR, 0]) rx = np.tile(refx, N) xy = np.column_stack((refy, rx)) # .flatten() drefe = np.repeat(np.expand_dims(dref, 0), N, 0) ref = np.concatenate((xy, drefe), 1) refs = np.append(refs, ref) return refs.reshape(-1, 2, 2)
def test_gaussian_truncate(): # Test that Gaussian filters can be truncated at different widths. # These tests only check that the result has the expected number # of nonzero elements. arr = np.zeros((100, 100), float) arr[50, 50] = 1 num_nonzeros_2 = (sndi.gaussian_filter(arr, 5, truncate=2) > 0).sum() assert_equal(num_nonzeros_2, 21**2) num_nonzeros_5 = (sndi.gaussian_filter(arr, 5, truncate=5) > 0).sum() assert_equal(num_nonzeros_5, 51**2) # Test truncate when sigma is a sequence. f = sndi.gaussian_filter(arr, [0.5, 2.5], truncate=3.5) fpos = f > 0 n0 = fpos.any(axis=0).sum() # n0 should be 2*int(2.5*3.5 + 0.5) + 1 assert_equal(n0, 19) n1 = fpos.any(axis=1).sum() # n1 should be 2*int(0.5*3.5 + 0.5) + 1 assert_equal(n1, 5) # Test gaussian_filter1d. x = np.zeros(51) x[25] = 1 f = sndi.gaussian_filter1d(x, sigma=2, truncate=3.5) n = (f > 0).sum() assert_equal(n, 15) # Test gaussian_laplace y = sndi.gaussian_laplace(x, sigma=2, truncate=3.5) nonzero_indices = np.nonzero(y != 0)[0] n = nonzero_indices.ptp() + 1 assert_equal(n, 15) # Test gaussian_gradient_magnitude y = sndi.gaussian_gradient_magnitude(x, sigma=2, truncate=3.5) nonzero_indices = np.nonzero(y != 0)[0] n = nonzero_indices.ptp() + 1 assert_equal(n, 15)
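The truncate behaviour the test above relies on: the support of the filtered impulse spans 2 * int(truncate * sigma + 0.5) + 1 samples.

import numpy as np
from scipy import ndimage as sndi

x = np.zeros(51)
x[25] = 1
y = sndi.gaussian_gradient_magnitude(x, sigma=2, truncate=3.5)
idx = np.nonzero(y)[0]
print(idx.max() - idx.min() + 1)            # 15 == 2 * int(3.5 * 2 + 0.5) + 1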
def gdt( img, mask, includeEDT=True, l=1.0 ): if mask.sum() == 0: return irtk.zeros(img.get_header()) voxelSpacing = img.header['pixelSize'][:3][::-1] grad = irtk.Image( nd.gaussian_gradient_magnitude(img, 0.5), img.get_header() ) #irtk.imwrite("gradBefore.nii.gz",grad) grad = l*grad.saturate().rescale(0.0,1.0).as3D() #irtk.imwrite("gradAfter.nii.gz",grad) # distanceMap = geodesic_distance_transform( mask, # grad, # numIterations=3, # spacing=voxelSpacing, # includeEDT=includeEDT ) # distanceMap -= geodesic_distance_transform( logical_not(mask), # grad, # numIterations=3, # spacing=voxelSpacing, # includeEDT=includeEDT ) # return irtk.Image(distanceMap,img.get_header()) # distanceMaps = Parallel(n_jobs=-1)(delayed(_geodesic_distance_transform)( m, # grad, # numIterations=3, # spacing=voxelSpacing, # includeEDT=includeEDT ) # for m in [mask, # logical_not(mask)] # ) # res = irtk.Image(distanceMaps[0]-distanceMaps[1],img.get_header()) res = irtk.Image( _geodesic_distance_transform( mask, grad, numIterations=3, spacing=voxelSpacing, includeEDT=includeEDT ), img.get_header() ).as3D() return res
def multi_label_gdt(labelsData, imageData, specificLabels=None, gamma=32, erosion=0, imageBoundaryClipping=0, voxelSize=(1., 1., 1.), subtractInside=True, includeEDT=True, is2D=False, boundingMask=None, makeIsotropic=False, numJobs=None): """ handles if labelsData.shape != imageData.shape by interpolating """ imageData = numpy.asarray(imageData, dtype=numpy.float64) # make a copy so as not to change original label data replacementValue = 0 if labelsData.min() == -1: replacementValue = -1 labelsData = zero_out_boundary(labelsData.copy(), imageBoundaryClipping, replacementValue=replacementValue) voxelSize = numpy.asarray(voxelSize, dtype=numpy.float64) if makeIsotropic: if not numpy.all(voxelSize == voxelSize[0]): #make isotropic minVSize = min(voxelSize) multipliers = voxelSize / minVSize newShape = numpy.asarray(imageData.shape) * multipliers newShape = tuple(numpy.int16(numpy.round(newShape))) # print "making image isotropic...", imageData.shape, "to", newShape imageData = interpolate_to_shape(imageData, newShape, interpolationType="cubic") voxelSize = (minVSize, minVSize, minVSize) originalDataShape = None if boundingMask is not None and boundingMask != imageData: boundingMask = interpolate_to_shape(boundingMask, imageData.shape) if labelsData.shape != imageData.shape: originalDataShape = labelsData.shape labelsData = interpolate_to_shape(labelsData, imageData.shape, interpolationType="NN") if specificLabels is None: specificLabels = auto_non_background_labels(labelsData) if numJobs is None: numJobs = len(specificLabels) gradientMagnitudes = gaussian_gradient_magnitude(imageData, 0.5) if includeEDT and gamma != 1: gradientMagnitudes *= gamma # handle background label different if its included distanceMaps = [] if 0 in specificLabels: binaryImage = labelsData != 0 binaryImage = dilate_mask(binaryImage, dilation=erosion, is2D=is2D) binaryImage = logical_not(binaryImage) distMap = gdt(binaryImage, imageData, subtractInside=subtractInside, includeEDT=includeEDT, boundingMask=boundingMask, magnitudes=gradientMagnitudes) distanceMaps = [distMap] distanceMaps += Parallel(numJobs)( delayed(eroded_gdt)(labelsData == label, erosion, imageData, spacing=voxelSize, boundingMask=boundingMask, subtractInside=subtractInside, includeEDT=includeEDT, is2D=is2D, magnitudes=gradientMagnitudes) for label in specificLabels if label != 0) if originalDataShape is not None: distanceMaps = Parallel(numJobs)( delayed(resample_data_to_shape)(distanceMaps[i], originalDataShape, interpolationType="linear") for i in range(len(distanceMaps))) distanceMaps = numpy.asarray(distanceMaps) if includeEDT and gamma != 1: distanceMaps /= gamma return distanceMaps
def draw(asw): print asw N,R = 100,50 if sim[asw][0] == 'quasar': flag,R = 'Q',20 if sim[asw][0] == 'galaxy': flag,R = 'G',20 if sim[asw][0] == 'cluster': flag,R = 'C',50 x = linspace(-R,R,N) y = 1*x kappa,arriv = grids(asw,x,y) fig = figure() panel = fig.add_subplot(1,1,1) panel.set_aspect('equal') lev = linspace(0,10,41) pc = panel.contour(x,y,kappa,lev) panel.clabel(pc, inline=1, fontsize=10) savefig(folder+asw+flag+'_kappa.png') fig = figure() panel = fig.add_subplot(1,1,1) rad = linspace(0,R,20)[1:] radq = rad*rad sum = 0*rad for i in range(len(x)): for j in range(len(y)): rsq = x[i]**2 + y[j]**2 for k in range(len(rad)): if rsq < radq[k]: sum[k] += kappa[j,i] dx = x[1]-x[0] for k in range(len(rad)): sum[k] *= dx*dx/(pi*radq[k]) fil = open('figs/'+asw+'.txt','w') for k in range(len(rad)): fil.write('%9.2e %9.2e\n' % (rad[k],sum[k])) fil.close() panel.scatter(rad,sum) panel.set_xlabel('radius in pixels') panel.set_ylabel('average interior $\kappa$') savefig(folder+asw+flag+'_menc.png') #arriv = upsample(x,y,arriv, upsample=10) x1, y1 = gradient(arriv) #print 'x1',x1 #x1, y1 = np.abs(x1), np.abs(y1) sig = 1 x1 = ndimage.gaussian_filter(x1, sigma=sig, mode='wrap') y1 = ndimage.gaussian_filter(y1, sigma=sig, mode='wrap') z1 = x1*x1+y1*y1 #z1 = ndimage.gaussian_filter(z1, sigma=3) z1 = ndimage.gaussian_gradient_magnitude(arriv, sigma=sig, mode='nearest') #z2 = ndimage.laplace(arriv) #zmin = amin(z1) * 42. # because it's 42 #print 'zmin',zmin #mask = z1<zmin mask, ids = detect_local_minima(np.log(z1)) typestr = { -2: 'bg', -1: 'udef', 0: 'sad', 1: 'min', 2: 'max', 3: 'canc'} types, s1, s2 = def_type(ids, x1,y1) #print types ids, types, s1, s2 = filter_types(ids, types, s1, s2, thres=10) #print types pnttype = np.ones(mask.shape) * -2 xids, yids = ids #print ids for i, xx in enumerate(xids): yy=yids[i] t=types[i] print i, xx, yy, t, typestr[t], s1[i], s2[i] pnttype[xx,yy]=t #return mask if False: fig = figure() panel = fig.add_subplot(2,2,1) panel.imshow(np.log(z1),origin='lower',interpolation='nearest') #panel.streamplot(x-np.min(x),y-np.min(y),y1,x1, density=5, minlength=0.01) panel = fig.add_subplot(2,2,2) panel.imshow(mask,origin='lower', vmin=0, vmax=1,interpolation='nearest') panel = fig.add_subplot(2,2,3) panel.imshow(x1,origin='lower',interpolation='nearest') panel = fig.add_subplot(2,2,4) panel.imshow(pnttype,origin='lower',interpolation='nearest') show() #savefig(folder+asw+flag+'_derr.png') fig = figure() panel = fig.add_subplot(1,1,1) panel.imshow(np.log(z1),origin='lower',interpolation='nearest', cmap='binary') cmap1 = mpl.colors.LinearSegmentedColormap.from_list('my_cmap',['black','blue','yellow', 'red', 'green', 'magenta'],6) cmap1._init() cmap1._lut[:,-1] = np.array([0,1,1,1,1,1,1,1,1]) panel.imshow(pnttype,origin='lower',interpolation='nearest', cmap=cmap1, vmin=-2, vmax=+3) os.mkdir(os.path.join(folder,asw)) savefig(os.path.join(folder,asw,'extr_points.png')) ''' fig = figure() panel = fig.add_subplot(1,1,1) #panel.imshow(mask,origin='lower', vmin=0, vmax=1, interpolation='nearest') panel.imshow(np.log(z1),origin='lower',interpolation='nearest') # panel = fig.add_subplot(2,1,2) # panel.imshow(y) #show() savefig(folder+asw+flag+'_derr.png') ''' fig = figure() panel = fig.add_subplot(1,1,1) panel.set_aspect('equal') lo,hi = amin(arriv), amax(arriv) lev = linspace(lo,lo+.2*(hi-lo),100) panel.contour(x,y,arriv,lev) savefig(folder+asw+flag+'_arriv.png') if True: fig = figure() panel = fig.add_subplot(1,1,1) panel.set_aspect('equal') lo,hi = amin(arriv), amax(arriv) lev = 
linspace(lo,lo+.2*(hi-lo),100) f = 2.5 if not flag=='C' else 1 panel.imshow(np.log(z1),origin='lower',interpolation='nearest', cmap='binary') panel.contour((x+R)*f,(y+R)*f,arriv,lev) panel.imshow(pnttype,origin='lower',interpolation='nearest', cmap=cmap1, vmin=-2, vmax=+3) #show() savefig(folder+asw+flag+'_all.png')
def run(self, workspace): # # Get the input and output image names. You need to get the .value # because otherwise you'll get the setting object instead of # the string name. # input_image_name = self.input_image_name.value output_image_name = self.output_image_name.value # # Get the image set. The image set has all of the images in it. # image_set = workspace.image_set # # Get the input image object. We want a grayscale image here. # The image set will convert a color image to a grayscale one # and warn the user. # input_image = image_set.get_image(input_image_name, must_be_grayscale = True) # # Get the pixels - these are a 2-d Numpy array. # pixels = input_image.pixel_data # # Get the smoothing parameter # if self.automatic_smoothing: # Pick the mode of the power spectrum - obviously this # is pretty hokey, not intended to really find a good number. # fft = np.fft.fft2(pixels) power2 = np.sqrt((fft * fft.conjugate()).real) mode = np.argwhere(power2 == power2.max())[0] scale = np.sqrt(np.sum((mode+.5)**2)) else: scale = self.scale.value g = gaussian_gradient_magnitude(pixels, scale) if self.gradient_choice == GRADIENT_MAGNITUDE: output_pixels = g else: # Numpy uses i and j instead of x and y. The x axis is 1 # and the y axis is 0 x = correlate1d(g, [-1, 0, 1], 1) y = correlate1d(g, [-1, 0, 1], 0) norm = np.sqrt(x**2+y**2) if self.gradient_choice == GRADIENT_DIRECTION_X: output_pixels = .5 + x / norm / 2 else: output_pixels = .5 + y / norm / 2 # # Make an image object. It's nice if you tell CellProfiler # about the parent image - the child inherits the parent's # cropping and masking, but it's not absolutely necessary # output_image = cpi.Image(output_pixels, parent_image = input_image) image_set.add(output_image_name, output_image) # # Save intermediate results for display if the window frame is on # if self.show_window: workspace.display_data.input_pixels = pixels workspace.display_data.gradient = g workspace.display_data.output_pixels = output_pixels
def gborders(img, alpha=1.0, sigma=1.0): """Stopping criterion for image borders.""" # The norm of the gradient. gradnorm = gaussian_gradient_magnitude(img, sigma, mode='constant') return 1.0/np.sqrt(1.0 + alpha*gradnorm)
return labels == best_label def background_distance(img,metric='geodesic',includeEDT=True): background = get_background(img) if metric == "euclidean": distanceMap = edt( img, background ) elif metric == "geodesic": distanceMap = gdt( img, background, includeEDT ) else: raise ValueError("Unknown metric: "+ metric) return irtk.Image(distanceMap,img.get_header()) if __name__ == "__main__": img = irtk.imread( sys.argv[1], dtype="float64" ) #filtered = nd.minimum_filter(img,5) filtered = nd.gaussian_gradient_magnitude(img,0.5) img = irtk.Image(filtered,img.get_header()) irtk.imwrite("test2.nii.gz",img) exit(0) img = world_align(img,pixelSize=[2,2,2,1]) irtk.imwrite("distanceEDT.nii.gz",background_distance(img,metric="euclidean")) irtk.imwrite( "distanceGDT.nii.gz", background_distance(img,metric="geodesic"))
def ROIstats(img1, img2, img, mask, polyXpoints, polyYpoints): # os.chdir('/Users/m131199/Documents/testData/testLGGdata/') # imgVol = nib.load('registered113.nii') # imgVol_data = imgVol.get_data() # img = imgVol_data[:,:,11] # (r,c) = img.shape # img = ndimage.rotate(img, -90, reshape=False) (r,c) = img.shape x = np.arange(0,r) y = np.arange(0,c) xx, yy = np.meshgrid(x, y) f1 = interpolate.interp2d(x, y, img, kind='cubic') f2 = interpolate.interp2d(x, y, mask.astype(int), kind='linear') f3 = interpolate.interp2d(x, y, img1, kind='cubic') f4 = interpolate.interp2d(x, y, img2, kind='cubic') minX = min(polyXpoints) maxX = max(polyXpoints) minY = min(polyYpoints) maxY = max(polyYpoints) centerX = round((minX+maxX)/2) centerY = round((minY+maxY)/2) # centerX = 95 # centerY = 110 d = np.round(np.sqrt((maxX-minX)**2 + (maxY-minY)**2)/2) # d = 50 theta = 36 xray = np.arange(centerX, centerX+d) # yray = centerY+(xray-centerX)*np.float(np.tan(np.pi/3)) # (indX,indY) = np.where(mask==True) # maskImg1Val = img1[indX,indY] # maskImg2Val = img2[indX,indY] # fig = plt.gcf() # fig.clf() # ax1 = fig.add_subplot(1,1,1) # plt.scatter(maskImg1Val,maskImg2Val,alpha = 0.5) # ax1.set_ylabel('T2 Image') # ax1.set_xlabel('T1C Image') # ax1.set_title('Joint Instensity Histogram for Tumor ROI') # plt.show() # # fig = plt.gcf() # fig.clf() # ax1 = fig.add_subplot(1,1,1) # plt.scatter(img1,img2,alpha = 0.5) # ax1.set_ylabel('T2 Image') # ax1.set_xlabel('T1C Image') # ax1.set_title('Joint Instensity Histogram (T1C and T2)') # plt.show() # plt.imshow(img,cmap = 'gray') # plt.figure() # fig = plt.gcf() # fig.clf() # ax1 = fig.add_subplot(1,1,1) # ax1.imshow(mask,cmap = 'gray') # fig.canvas.draw() fig = plt.gcf() fig.clf() ax1 = fig.add_subplot(1,1,1) ax1.imshow(img,cmap = 'gray') ax1.hold(True) alpha=100 sigma = 3 marginX = 3 gradMat = [] # plt.imshow(img,cmap = 'gray') # plt.hold(True) for phi in range(0,360,theta): if phi<90: endX = abs(np.round(d*np.cos(np.deg2rad(phi)))) xray = (np.arange(centerX,centerX+endX)) yray = (centerY-(xray-centerX)*np.float(np.tan(np.deg2rad(phi)))) resImgMatrix = np.rot90(f1(xray,yray),3) vecImageValues = np.diag(resImgMatrix) resMaskMatrix = np.rot90(f2(xray,yray),3) vecMaskValues = list(np.diag(resMaskMatrix)) ind = vecMaskValues.index(0) gradVec = gaussian_gradient_magnitude(vecImageValues[0:ind], sigma, mode='mirror') gradVec = 1.0/np.sqrt(1.0 + alpha*gradVec) gradMat.append(list(gradVec)) # ax1.hold(True) ax1.plot(xray[0:ind],yray[0:ind],'b-') fig.canvas.draw() # plt.show() # resImgMatrix1 = np.rot90(f3(xray,yray),3) # resImgMatrix2 = np.rot90(f4(xray,yray),3) # vecImageValues1 = np.diag(resImgMatrix1) # vecImageValues2 = np.diag(resImgMatrix2) # fig = plt.gcf() # fig.clf() # ax1 = fig.add_subplot(1,1,1) # plt.plot(vecImageValues1[0:ind],'b') # plt.plot(vecImageValues2[0:ind],'r') # ax1.set_ylabel('T2 Image') # ax1.set_xlabel('T1C Image') # ax1.set_title('Intensity profile of a ray') # plt.show() # # fig = plt.gcf() # fig.clf() # ax1 = fig.add_subplot(1,1,1) # plt.scatter(vecImageValues1[0:ind],vecImageValues2[0:ind], alpha=0.5) # ax1.set_ylabel('T2 Image') # ax1.set_xlabel('T1C Image') # ax1.set_title('Joint Intensity histogram for a ray') # plt.show() # elif phi>90 and phi<=180: endX = abs(np.round(d*np.cos(np.deg2rad(phi))))+marginX xray = (np.arange(centerX,centerX-endX,-1)) yray = (centerY-(xray-centerX)*np.float(np.tan(np.deg2rad(phi)))) resImgMatrix = f1(xray,yray) vecImageValues = np.diag(resImgMatrix) vecImageValues = vecImageValues[np.arange(len(vecImageValues)-1,0,-1)] 
            resMaskMatrix = f2(xray, yray)
            vecMaskValues = list(np.diag(resMaskMatrix))
            ind = endX - vecMaskValues.index(1)
            gradVec = gaussian_gradient_magnitude(vecImageValues[0:ind], sigma, mode='mirror')
            gradVec = 1.0/np.sqrt(1.0 + alpha*gradVec)
            gradMat.append(list(gradVec))
#            ax1.hold(True)
            ax1.plot(xray[0:ind], yray[0:ind], 'b-')
            fig.canvas.draw()
#            resImgMatrix1 = np.rot90(f3(xray,yray))
#            resImgMatrix2 = np.rot90(f4(xray,yray))
#            vecImageValues1 = np.diag(resImgMatrix1)
#            vecImageValues1 = vecImageValues1[np.arange(len(vecImageValues1)-1,0,-1)]
#            vecImageValues2 = np.diag(resImgMatrix2)
#            vecImageValues2 = vecImageValues2[np.arange(len(vecImageValues2)-1,0,-1)]
#            plt.plot(np.arange(0,ind), vecImageValues1[0:ind],'b')
#            plt.plot(np.arange(0,ind), vecImageValues2[0:ind],'r')
#            plt.show()
#
#            plt.scatter(vecImageValues1[0:ind],vecImageValues2[0:ind], alpha=0.5)
#            plt.show()
#
        elif phi > 180 and phi <= 270:
            endX = int(abs(np.round(d*np.cos(np.deg2rad(phi))))) + marginX
            xray = np.arange(centerX, centerX - endX, -1)
            yray = centerY - (xray - centerX)*float(np.tan(np.deg2rad(phi)))
            resImgMatrix = np.rot90(f1(xray, yray), 1)
            vecImageValues = np.diag(resImgMatrix)
            resMaskMatrix = np.rot90(f2(xray, yray))
            vecMaskValues = list(np.diag(resMaskMatrix))
            ind = vecMaskValues.index(0)
            gradVec = gaussian_gradient_magnitude(vecImageValues[0:ind], sigma, mode='mirror')
            gradVec = 1.0/np.sqrt(1.0 + alpha*gradVec)
            gradMat.append(list(gradVec))
#            ax1.hold(True)
            ax1.plot(xray[0:ind], yray[0:ind], 'b-')
            fig.canvas.draw()
#            resImgMatrix1 = np.rot90(f3(xray,yray),1)
#            resImgMatrix2 = np.rot90(f4(xray,yray),1)
#            vecImageValues1 = np.diag(resImgMatrix1)
#            vecImageValues2 = np.diag(resImgMatrix2)
#            plt.plot(vecImageValues1[0:ind],'b')
#            plt.plot(vecImageValues2[0:ind],'r')
#            plt.show()
#
#            plt.scatter(vecImageValues1[0:ind],vecImageValues2[0:ind], alpha=0.5)
#            plt.show()
        elif phi > 270 and phi < 360:
            endX = int(abs(np.round(d*np.cos(np.deg2rad(phi))))) + marginX
            xray = np.arange(centerX, centerX + endX)
            yray = centerY - (xray - centerX)*float(np.tan(np.deg2rad(phi)))
            resImgMatrix = f1(xray, yray)
            vecImageValues = np.diag(resImgMatrix)
            resMaskMatrix = f2(xray, yray)
            vecMaskValues = list(np.diag(resMaskMatrix))
            ind = vecMaskValues.index(0)
            gradVec = gaussian_gradient_magnitude(vecImageValues[0:ind], sigma, mode='mirror')
            gradVec = 1.0/np.sqrt(1.0 + alpha*gradVec)
            gradMat.append(list(gradVec))
#            ax1.hold(True)
            ax1.plot(xray[0:ind], yray[0:ind], 'b-')
            fig.canvas.draw()
#            resImgMatrix1 = f3(xray,yray)
#            resImgMatrix2 = f4(xray,yray)
#            vecImageValues1 = np.diag(resImgMatrix1)
#            vecImageValues2 = np.diag(resImgMatrix2)
#            plt.plot(vecImageValues1[0:ind],'b')
#            plt.plot(vecImageValues2[0:ind],'r')
#            plt.show()
#
#            plt.scatter(vecImageValues1[0:ind],vecImageValues2[0:ind], alpha=0.5)
#            plt.show()
#    GUI_Starter.Main.ui.figure1.canvas.ax.plot(xray[0:d],yray[0:d],'b-')
#    GUI_Starter.Main.ui.figure1.canvas.ax.draw()
#    GUI_Starter.Ui_MainWindow.figure1.canvas.ax.plot(xray[0:d],yray[0:d],'b-')
#    GUI_Starter.Ui_MainWindow.figure1.canvas.ax.draw()
#    normGrad = []
    minGradVec = []
#    minNormGrad = []
#    minG = []
    # Threshold derived from the per-ray minima of the edge-stopping profiles.
    for i in range(len(gradMat)):
        minGradVec.append(min(gradMat[i]))
#    minG = min(minGradVec)
#    for i in range(len(gradMat)):
##        normGrad.append(np.divide(gradMat[i],minG))
#        minNormGrad.append(min(normGrad[i]))
    th = np.mean(minGradVec) + 4*np.std(minGradVec)
#    th = np.percentile(minGradVec, 100)
#    plt.show()
    return th, centerX, centerY
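ROIstats above resamples the image along rays cast from the ROI centre and converts each 1-D gradient-magnitude profile into the edge-stopping term 1.0/np.sqrt(1.0 + alpha*gradVec). The same per-ray step can be written more compactly with map_coordinates; this is only a sketch of that idea, not the author's exact resampling scheme, and the function name, defaults and linear interpolation are assumptions.

import numpy as np
from scipy.ndimage import gaussian_gradient_magnitude, map_coordinates

def ray_edge_indicator(image, center, angle_deg, length, alpha=100, sigma=3):
    # Sample `image` along a ray of `length` pixels starting at `center` (row, col),
    # then turn the 1-D gradient magnitude into 1/sqrt(1 + alpha*|grad|).
    t = np.arange(length)
    rows = center[0] - t * np.sin(np.deg2rad(angle_deg))
    cols = center[1] + t * np.cos(np.deg2rad(angle_deg))
    profile = map_coordinates(image, np.vstack([rows, cols]), order=1, mode='nearest')
    grad = gaussian_gradient_magnitude(profile, sigma, mode='mirror')
    return 1.0 / np.sqrt(1.0 + alpha * grad)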
sizes4 = np.load(args.detector + '/sizes4.npy')
offsets5 = np.load(args.detector + '/offsets5.npy')
offsets6 = np.load(args.detector + '/offsets6.npy')
clf_heart = joblib.load(args.detector + '/clf_heart')
reg_heart = joblib.load(args.detector + '/reg_heart')
clf_heart.set_params(n_jobs=args.n_jobs)
reg_heart.set_params(n_jobs=args.n_jobs)

print("done loading detectors")

print("preprocessing...")
img = irtk.imread(args.input, dtype='float32', force_neurological=True)
grad = irtk.Image(nd.gaussian_gradient_magnitude(img, 0.5), img.get_header())
sat = integral_image(img)
sat_grad = integral_image(grad)

blurred_img = nd.gaussian_filter(img, 0.5)
gradZ = nd.sobel(blurred_img, axis=0).astype('float32')
gradY = nd.sobel(blurred_img, axis=1).astype('float32')
gradX = nd.sobel(blurred_img, axis=2).astype('float32')

irtk.imwrite(args.output + "/img.nii.gz", img)
irtk.imwrite(args.output + "/grad.nii.gz", grad)
print("done preprocessing")
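The preprocessing above mixes irtk I/O with the actual filtering. For clarity, the gradient-related part on a plain NumPy volume would look roughly like the helper below; the function name, the default sigma and returning a tuple are assumptions, and the integral images and NIfTI output are intentionally left out.

import numpy as np
from scipy import ndimage as nd

def precompute_gradients(volume, sigma=0.5):
    # Gaussian gradient magnitude plus per-axis Sobel derivatives of a lightly blurred volume.
    grad = nd.gaussian_gradient_magnitude(volume, sigma)
    blurred = nd.gaussian_filter(volume, sigma)
    gradZ = nd.sobel(blurred, axis=0).astype('float32')
    gradY = nd.sobel(blurred, axis=1).astype('float32')
    gradX = nd.sobel(blurred, axis=2).astype('float32')
    return grad, gradZ, gradY, gradX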
def getRBSTim(labim, im):
    """Compute the RBST (rubber-band straightening transform) image.

    Parameters
    ----------
    labim : label image
    im : original image
    """
    height = labim.shape[0] - 1
    width = labim.shape[1] - 1
    # get contour
    contour = np.floor(find_contours(labim, 0.5, fully_connected='low',
                                     positive_orientation='low')[0])
    contour = np.int_(contour)
    contour = getUniqueContour(contour)
    slop_r = 3
    normal_r = 40
    length = contour.shape[0]
    radius = 2
    RBST = np.zeros((normal_r, length))
    RBSTim = np.zeros(labim.shape, np.double)
    contim = np.zeros(labim.shape, np.double)
    for i in range(length):
        # local slope of the contour from the neighbourhood around point i
        p = contour[i]
        if p[0] == 0 or p[0] == height or p[1] == 0 or p[1] == width:
            continue
        contim[p[0], p[1]] = i
        p1 = np.double(contour[(i - slop_r + length) % length] - p)
        p2 = np.double(contour[(i + slop_r) % length] - p)
        if p1[0] == p2[0]:
            if labim[p[0] + 1, p[1]] == 1:
                for t in range(-normal_r, 0):
                    j = -normal_r - t
                    cord_x = p[0] + j
                    cord_y = p[1]
                    if cord_x < 0 or cord_y < 0 or cord_x > height or cord_y > width:
                        break
                    if labim[cord_x, cord_y] == 1:
                        break
                    RBSTim[cord_x, cord_y] = 1
                    bound_x = np.arange(max(cord_x - radius, 0), min(cord_x + radius, height))
                    bound_y = np.arange(max(cord_y - radius, 0), min(cord_y + radius, width))
                    RBST[abs(j), i] = np.mean(im[bound_x, :][:, bound_y])
            else:
                for j in range(1, normal_r):
                    cord_x = p[0] + j
                    cord_y = p[1]
                    if cord_x < 0 or cord_y < 0 or cord_x > height or cord_y > width:
                        break
                    if labim[cord_x, cord_y] == 1:
                        break
                    RBSTim[cord_x, cord_y] = 1
                    bound_x = np.arange(max(cord_x - radius, 0), min(cord_x + radius, height))
                    bound_y = np.arange(max(cord_y - radius, 0), min(cord_y + radius, width))
                    RBST[j, i] = np.mean(im[bound_x, :][:, bound_y])
        if p1[1] == p2[1]:
            if labim[p[0], p[1] + 1] == 1:
                for t in range(-normal_r, 0):
                    j = -normal_r - t
                    cord_x = p[0]
                    cord_y = p[1] + j
                    if cord_x < 0 or cord_y < 0 or cord_x > height or cord_y > width:
                        break
                    if labim[cord_x, cord_y] == 1:
                        break
                    RBSTim[cord_x, cord_y] = 1
                    bound_x = np.arange(max(cord_x - radius, 0), min(cord_x + radius, height))
                    bound_y = np.arange(max(cord_y - radius, 0), min(cord_y + radius, width))
                    RBST[abs(j), i] = np.mean(im[bound_x, :][:, bound_y])
            else:
                for j in range(1, normal_r):
                    cord_x = p[0]
                    cord_y = p[1] + j
                    if cord_x < 0 or cord_y < 0 or cord_x > height or cord_y > width:
                        break
                    if labim[cord_x, cord_y] == 1:
                        break
                    RBSTim[cord_x, cord_y] = 1
                    bound_x = np.arange(max(cord_x - radius, 0), min(cord_x + radius, height))
                    bound_y = np.arange(max(cord_y - radius, 0), min(cord_y + radius, width))
                    RBST[j, i] = np.mean(im[bound_x, :][:, bound_y])
        if p1[0] != p2[0] and p1[1] != p2[1]:
            gr = np.double(p2[0] - p1[0]) / np.double(p2[1] - p1[1])
            if abs(gr) < 1:
                for t in range(-70, 0):
                    x = -70 - t
                    y = gr * x
                    dist = math.sqrt(y**2 + x**2)
                    cord_x = p[0] + int(x)
                    cord_y = p[1] - int(y)
                    if cord_x < 0 or cord_y < 0 or cord_x > height or cord_y > width:
                        break
                    if dist < normal_r:
                        if labim[cord_x, cord_y] == 1:
                            break
                        RBSTim[cord_x, cord_y] = 1
                        bound_x = np.arange(max(cord_x - radius, 0), min(cord_x + radius, height))
                        bound_y = np.arange(max(cord_y - radius, 0), min(cord_y + radius, width))
                        RBST[int(dist), i] = np.mean(im[bound_x, :][:, bound_y])
                for x in range(1, 70):
                    y = gr * x
                    dist = math.sqrt(y**2 + x**2)
                    cord_x = p[0] + int(x)
                    cord_y = p[1] - int(y)
                    if cord_x < 0 or cord_y < 0 or cord_x > height or cord_y > width:
                        break
                    if dist < normal_r:
                        if labim[cord_x, cord_y] == 1:
                            break
                        RBSTim[cord_x, cord_y] = 1
                        bound_x = np.arange(max(cord_x - radius, 0), min(cord_x + radius, height))
                        bound_y = np.arange(max(cord_y - radius, 0), min(cord_y + radius, width))
                        RBST[int(dist), i] = np.mean(im[bound_x, :][:, bound_y])
            else:
                for x in range(-70, 0):
                    y = x / gr
                    dist = math.sqrt(y**2 + x**2)
                    cord_x = p[0] + int(y)
                    cord_y = p[1] - int(x)
                    if cord_x < 0 or cord_y < 0 or cord_x > height or cord_y > width:
                        continue
                    if dist < normal_r:
                        if labim[cord_x, cord_y] == 1:
                            break
                        RBSTim[cord_x, cord_y] = 1
                        bound_x = np.arange(max(cord_x - radius, 0), min(cord_x + radius, height))
                        bound_y = np.arange(max(cord_y - radius, 0), min(cord_y + radius, width))
                        RBST[int(dist), i] = np.mean(im[bound_x, :][:, bound_y])
                for x in range(1, 70):
                    y = x / gr
                    dist = math.sqrt(y**2 + x**2)
                    cord_x = p[0] + int(y)
                    cord_y = p[1] - int(x)
                    if cord_x < 0 or cord_y < 0 or cord_x > height or cord_y > width:
                        break
                    if dist < normal_r:
                        if labim[cord_x, cord_y] == 1:
                            break
                        RBSTim[cord_x, cord_y] = 1
                        bound_x = np.arange(max(cord_x - radius, 0), min(cord_x + radius, height))
                        bound_y = np.arange(max(cord_y - radius, 0), min(cord_y + radius, width))
                        RBST[int(dist), i] = np.mean(im[bound_x, :][:, bound_y])
    # fill gaps in the RBST strip from the neighbouring samples
    for i in range(1, normal_r):
        for j in range(1, length):
            if RBST[i, j] == 0:
                RBST[i, j] = RBST[i - 1, j]
            if RBST[i, j] == 0:
                RBST[i, j] = RBST[i, j - 1]
    # count how often the column-averaged gradient magnitude crosses the threshold
    RBSTgr = np.zeros(RBST.shape, np.double)
    gaussian_gradient_magnitude(RBST, sigma=1.5, output=RBSTgr, mode='constant')
    h_gr = np.mean(RBSTgr, 0)
    h_gr = np.tile(h_gr, (normal_r, 1))
    thresh = 3200
    mask = h_gr > thresh
    h_gr_bin = np.zeros(h_gr.shape)
    h_gr_bin[mask] = 1
    ocross = 0
    for i in range(1, h_gr.shape[1]):
        if h_gr_bin[0, i] - h_gr_bin[0, i - 1] == 1:
            ocross = ocross + 1
    return ocross
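The final stage of getRBSTim reduces the RBST strip to a single count of threshold crossings of the column-averaged gradient magnitude. Stated on its own, that step is roughly the helper below; the threshold 3200 and sigma 1.5 come from the code above, while the helper name and the vectorised crossing count are illustrative.

import numpy as np
from scipy.ndimage import gaussian_gradient_magnitude

def count_gradient_crossings(rbst, thresh=3200, sigma=1.5):
    # Column-averaged gradient magnitude of the strip, binarised at `thresh`,
    # then count the 0 -> 1 transitions (the `ocross` value above).
    grad = gaussian_gradient_magnitude(rbst, sigma=sigma, mode='constant')
    profile = grad.mean(axis=0) > thresh
    return int(np.count_nonzero(profile[1:] & ~profile[:-1]))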
def create_locator_inplace(data):
    # Smooth `data`, then write the Gaussian gradient magnitude of the result
    # back into the same array.
    ndimage.gaussian_filter(data, 4, output=data)
    ndimage.gaussian_gradient_magnitude(data, 8, output=data)
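A minimal usage sketch for the in-place helper above; the array name, shape and values are illustrative, and the input array is overwritten with the filtered result.

import numpy as np
from scipy import ndimage

data = np.random.rand(128, 128)
create_locator_inplace(data)
print(data.min(), data.max())  # `data` has been modified in place by the two filters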