def _region_features_for(histone, dna, region):
    pixels0 = histone[region].ravel()
    pixels1 = dna[region].ravel()
    bin0 = pixels0 > histone.mean()
    bin1 = pixels1 > dna.mean()
    overlap = [np.corrcoef(pixels0, pixels1)[0, 1],
               (bin0 & bin1).mean(),
               (bin0 | bin1).mean()]
    spi = mh.sobel(histone, just_filter=1)
    sp = spi[mh.erode(region)]
    sdi = mh.sobel(dna, just_filter=1)
    sd = sdi[mh.erode(region)]
    sobels = [
        np.dot(sp, sp) / len(sp),
        np.abs(sp).mean(),
        np.dot(sd, sd) / len(sd),
        np.abs(sd).mean(),
        np.corrcoef(sp, sd)[0, 1],
        np.corrcoef(sp, sd)[0, 1] ** 2,
        sp.std(),
        sd.std(),
    ]
    return np.concatenate([
        [region.sum()],
        haralick(histone * region, ignore_zeros=True).mean(0),
        haralick(dna * region, ignore_zeros=True).mean(0),
        overlap,
        sobels,
        haralick(mh.stretch(sdi * region), ignore_zeros=True).mean(0),
        haralick(mh.stretch(spi * region), ignore_zeros=True).mean(0),
    ])
def test_se_zeros():
    np.random.seed(35)
    f = np.random.random((128, 128)) > 0.9
    f2 = np.dstack([f, f, f])
    mahotas.erode(f, np.zeros((3, 3)))
    mahotas.dilate(f, np.zeros((3, 3)))
    mahotas.erode(f2[:, :, 1], np.zeros((3, 3)))
    mahotas.dilate(f2[:, :, 1], np.zeros((3, 3)))
def test_close_holes_simple():
    img = np.zeros((64, 64), bool)
    img[16:48, 16:48] = True
    holed = (img - mahotas.erode(mahotas.erode(img)))
    assert np.all(mahotas.close_holes(holed) == img)
    holed[12, 12] = True
    img[12, 12] = True
    assert np.all(mahotas.close_holes(holed) == img)
    assert sys.getrefcount(holed) == 2
def test_cerode():
    from mahotas.tests.pymorph_copy import erode as slow_erode
    from mahotas.tests.pymorph_copy import dilate as slow_dilate
    np.random.seed(334)
    f = np.random.random_sample((128, 128))
    f = (f > .9)
    assert np.all(mahotas.erode(f) == mahotas.cerode(f, np.zeros_like(f)))
def test_dilate_erode():
    A = np.zeros((128, 128), dtype=bool)
    Bc = np.array([
        [0, 1, 0],
        [1, 1, 1],
        [0, 1, 0]], bool)
    A[32, 32] = True
    origs = []
    for i in range(12):
        origs.append(A.copy())
        A = mahotas.dilate(A, Bc)
    for i in range(12):
        A = mahotas.erode(A, Bc)
        assert np.all(A == origs[-i - 1])
def test_fast_binary():
    # This test is based on an internal code decision: the fast code is only triggered for CARRAYs
    # Therefore, we test to see if both paths lead to the same result
    np.random.seed(34)
    for i in xrange(8):
        f = np.random.random((128, 128)) > .9
        f2 = np.dstack([f, f, f])
        SEs = [
            np.ones((3, 3)),
            np.ones((5, 5)),
            np.array([
                [0, 1, 0],
                [0, 0, 0],
                [0, 0, 0]]),
            np.array([
                [0, 0, 0],
                [1, 0, 0],
                [0, 0, 0]]),
            np.array([
                [1, 0, 0],
                [1, 0, 0],
                [0, 0, 0]]),
            np.array([
                [1, 1, 1],
                [1, 1, 1],
                [1, 1, 0]]),
            np.array([
                [1, 1, 1],
                [0, 1, 1],
                [1, 1, 0]]),
        ]
        for Bc in SEs:
            assert np.all(mahotas.erode(f, Bc=Bc) == mahotas.erode(f2[:, :, 1], Bc=Bc))
            # For dilate, the border conditions are different;
            # This is not great, but it's actually the slow implementation
            # which has the most unsatisfactory behaviour:
            assert np.all(mahotas.dilate(f, Bc=Bc)[1:-1, 1:-1] == mahotas.dilate(f2[:, :, 1], Bc=Bc)[1:-1, 1:-1])
def test_dilate_erode():
    A = np.zeros((100, 100))
    Bc = np.array([
        [0, 1, 0],
        [1, 1, 1],
        [0, 1, 0]], bool)
    A[30, 30] = 1
    A = (A != 0)
    orig = A.copy()
    for i in xrange(12):
        A = mahotas.dilate(A, Bc)
    for i in xrange(12):
        A = mahotas.erode(A, Bc)
    assert np.all(A == orig)
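# The two round-trip tests above rely on the fact that, for an isolated point
# and a symmetric structuring element, n dilations followed by n erosions
# recover the original image. A minimal standalone sketch of the same idea,
# assuming only numpy and mahotas are available (the array and element names
# below are illustrative, not taken from the original test suite):
import numpy as np
import mahotas

A = np.zeros((64, 64), bool)
A[32, 32] = True                      # single foreground pixel
Bc = np.array([[0, 1, 0],
               [1, 1, 1],
               [0, 1, 0]], bool)      # 4-connected cross element

grown = A.copy()
for _ in range(5):
    grown = mahotas.dilate(grown, Bc)   # grows a diamond around the point
shrunk = grown.copy()
for _ in range(5):
    shrunk = mahotas.erode(shrunk, Bc)  # shrinks it back, one layer per step
assert np.all(shrunk == A)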
def mahotas_clean_up_seg(input_jacobian, frame_num):
    import mahotas as mh
    dsk = mh.disk(7)
    thresh_r = 0.1
    thresh_g = 1
    size_cutoff = 200
    thresholded_jacobian = (np.int32(np.log(1 + input_jacobian[frame_num][0]) > thresh_r) +
                            np.int32(np.log(1 + input_jacobian[frame_num][1]) > thresh_g)) > 0
    thresholded_jacobian = mh.close_holes(thresholded_jacobian)
    thresholded_jacobian = mh.erode(thresholded_jacobian, dsk)
    labeled = mh.label(thresholded_jacobian)[0]
    sizes = mh.labeled.labeled_size(labeled)
    too_small = np.where(sizes < size_cutoff)
    labeled = mh.labeled.remove_regions(labeled, too_small)
    thresholded_jacobian = labeled > 0
    thresholded_jacobian = mh.dilate(thresholded_jacobian, dsk)
    return thresholded_jacobian
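# mahotas_clean_up_seg above uses a common mahotas idiom: label the binary
# mask, measure region sizes, and drop regions below a cutoff before
# continuing. A minimal sketch of just that size-filtering step, assuming
# `mask` is any 2-D boolean array (the helper name and cutoff here are
# illustrative):
import numpy as np
import mahotas as mh

def remove_small_regions(mask, size_cutoff=200):
    labeled, n = mh.label(mask)                    # connected components
    sizes = mh.labeled.labeled_size(labeled)       # pixel count per label (index 0 is background)
    too_small = np.where(sizes < size_cutoff)      # labels to discard
    labeled = mh.labeled.remove_regions(labeled, too_small)
    return labeled > 0                             # back to a boolean mask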
def test_grey_erode():
    from mahotas.tests.pymorph_copy import erode as slow_erode
    from mahotas.tests.pymorph_copy import dilate as slow_dilate
    np.random.seed(334)
    for i in range(8):
        f = np.random.random_sample((128, 128))
        f *= 255
        f = f.astype(np.uint8)
        B = (np.random.random_sample((3, 3)) * 255).astype(np.uint8)
        B //= 4
        fast = mahotas.erode(f, B)
        slow = slow_erode(f, B)
        # mahotas & pymorph use different border conventions.
        assert np.all(fast[1:-1, 1:-1] == slow[1:-1, 1:-1])

        fast = mahotas.dilate(f, B)
        slow = slow_dilate(f, B)
        # mahotas & pymorph use different border conventions.
        assert np.all(fast[1:-1, 1:-1] == slow[1:-1, 1:-1])
def create_membrane_and_background_images():
    for purpose in ['train', 'validate', 'test']:
        #img_search_string = '/media/vkaynig/NewVolume/IAE_ISBI2012/ground_truth/' + purpose + '/*.tif'
        img_search_string = '/media/vkaynig/Data1/Cmor_paper_data/labels/' + purpose + '/*.tif'
        # img_gray_search_string = '/media/vkaynig/NewVolume/IAE_ISBI2012/images/' + purpose + '/*.tif'
        # img_gray_search_string = '/media/vkaynig/NewVolume/Cmor_paper_data/images/' + purpose + '/*.tif'
        img_files = sorted(glob.glob(img_search_string))
        # img_gray_files = sorted(glob.glob(img_gray_search_string))

        for img_index in xrange(np.shape(img_files)[0]):
            print 'reading image ' + img_files[img_index] + '.'
            label_img = mahotas.imread(img_files[img_index])
            # gray_img = mahotas.imread(img_gray_files[img_index])
            #boundaries = label_img==0
            boundaries = label_img == -1
            boundaries[0:-1, :] = np.logical_or(boundaries[0:-1, :], np.diff(label_img, axis=0) != 0)
            boundaries[:, 0:-1] = np.logical_or(boundaries[:, 0:-1], np.diff(label_img, axis=1) != 0)
            boundaries = 1 - boundaries

            shrink_radius = 10
            y, x = np.ogrid[-shrink_radius:shrink_radius + 1, -shrink_radius:shrink_radius + 1]
            disc = x * x + y * y <= (shrink_radius ** 2)

            background = boundaries
            membranes = 1 - background
            #membranes = boundaries
            background = 1 - (mahotas.erode(boundaries, disc) + 1)

            img_file_name = os.path.basename(img_files[img_index])
            #outputPath = '/media/vkaynig/NewVolume/IAE_ISBI2012/labels/'
            outputPath = '/media/vkaynig/Data1/Cmor_paper_data/labels/'
            print 'writing image' + img_file_name
            mahotas.imsave(outputPath + 'background_nonDilate/' + purpose + '/' + img_file_name,
                           np.uint8(background * 255))
            mahotas.imsave(outputPath + 'membranes_nonDilate/' + purpose + '/' + img_file_name,
                           np.uint8(membranes * 255))
def test_signed():
    A = np.array([0, 0, 1, 1, 1, 0, 0, 0], dtype=np.int32)
    B = np.array([0, 1, 0])
    assert np.min(mahotas.erode(A, B)) == -1
def test_erode_slice():
    np.random.seed(30)
    for i in xrange(16):
        f = (np.random.random_sample((256, 256)) * 255).astype(np.uint8)
        assert np.all(mahotas.erode(f[:3, :3]) == mahotas.erode(f[:3, :3].copy()))
def fix_single_merge(cnn, cropped_image, cropped_prob, cropped_binary, N=10, invert=True, dilate=True, border_seeds=True, erode=False, debug=False, before_merge_error=None, real_border=np.zeros((1, 1)), oversampling=False, crop=True): ''' invert: True/False for invert or gradient image ''' bbox = mh.bbox(cropped_binary) orig_cropped_image = np.array(cropped_image) orig_cropped_prob = np.array(cropped_prob) orig_cropped_binary = np.array(cropped_binary) speed_image = None if invert: speed_image = Util.invert(cropped_image, smooth=True, sigma=2.5) else: speed_image = Util.gradient(cropped_image) dilated_binary = np.array(cropped_binary, dtype=np.bool) if dilate: for i in range(20): dilated_binary = mh.dilate(dilated_binary) # Util.view(dilated_binary, large=True) borders = np.zeros(cropped_binary.shape) best_border_prediction = np.inf best_border_image = np.zeros(cropped_binary.shape) original_border = mh.labeled.border(cropped_binary, 1, 0, Bc=mh.disk(3)) results_no_border = [] predictions = [] for n in range(N): ws = Util.random_watershed(dilated_binary, speed_image, border_seeds=border_seeds, erode=erode) if ws.max() == 0: continue ws_label1 = ws.max() ws_label2 = ws.max() - 1 border = mh.labeled.border(ws, ws_label1, ws_label2) # Util.view(ws, large=True) # Util.view(border, large=True) # print i, len(border[border==True]) # # remove parts of the border which overlap with the original border # ws[cropped_binary == 0] = 0 # Util.view(ws, large=False, color=False) ws_label1_array = Util.threshold(ws, ws_label1) ws_label2_array = Util.threshold(ws, ws_label2) eroded_ws1 = np.array(ws_label1_array, dtype=np.bool) eroded_ws2 = np.array(ws_label2_array, dtype=np.bool) if erode: for i in range(5): eroded_ws1 = mh.erode(eroded_ws1) # Util.view(eroded_ws, large=True, color=False) dilated_ws1 = np.array(eroded_ws1) for i in range(5): dilated_ws1 = mh.dilate(dilated_ws1) for i in range(5): eroded_ws2 = mh.erode(eroded_ws2) # Util.view(eroded_ws, large=True, color=False) dilated_ws2 = np.array(eroded_ws2) for i in range(5): dilated_ws2 = mh.dilate(dilated_ws2) new_ws = np.zeros(ws.shape, dtype=np.uint8) new_ws[dilated_ws1 == 1] = ws_label1 new_ws[dilated_ws2 == 1] = ws_label2 ws = new_ws # Util.view(new_ws, large=True, color=True) # ws[original_border == 1] = 0 prediction = Patch.grab_group_test_and_unify( cnn, cropped_image, cropped_prob, ws, ws_label1, ws_label2, oversampling=oversampling) if prediction == -1: # invalid continue # if (prediction < best_border_prediction): # best_border_prediction = prediction # best_border_image = border # print 'new best', n, prediction best_border_image = border borders += (border * prediction) result = np.array(cropped_binary) best_border_image[result == 0] = 0 result[best_border_image == 1] = 2 result = skimage.measure.label(result) result_no_border = np.array(result) result_no_border[best_border_image == 1] = 0 predictions.append(prediction) results_no_border.append(result_no_border) # result = np.array(cropped_binary) # best_border_image[result==0] = 0 # result[best_border_image==1] = 2 # result = skimage.measure.label(result) # result_no_border = np.array(result) # result_no_border[best_border_image==1] = 0 # result_no_border = mh.croptobbox(result_no_border) # if before_merge_error == None: # continue # print result_no_border.shape, before_merge_error.shape # if before_merge_error.shape[0] != result_no_border.shape[0] or before_merge_error.shape[1] != result_no_border.shape[1]: # result_no_border = np.resize(result_no_border, before_merge_error.shape) # 
print 'vi', Util.vi(before_merge_error.astype(np.uint8), result_no_border.astype(np.uint8)) # if debug: # Util.view(ws, text=str(i) + ' ' + str(prediction)) result = np.array(cropped_binary) best_border_image[result == 0] = 0 result[best_border_image == 1] = 2 result = skimage.measure.label(result) result_no_border = np.array(result) result_no_border[best_border_image == 1] = 0 return borders, best_border_image, result, result_no_border, results_no_border, predictions
orig_filtered = np.real(np.fft.ifft2((np.multiply(orig, expo)))) PST_Kernel_1 = np.multiply(np.dot(rho, W), np.arctan(np.dot(rho, W))) - 0.5 * np.log(1 + np.power(np.dot(rho, W), 2)) PST_Kernel = PST_Kernel_1 / np.max(PST_Kernel_1) * S temp = np.multiply(np.fft.fftshift(np.exp(-1j * PST_Kernel)), np.fft.fft2(orig_filtered)) temp = np.multiply(np.fft.fftshift(np.exp(-1j * PST_Kernel)), np.fft.fft2(Image_orig_filtered)) orig_filtered_PST = np.fft.ifft2(temp) PHI_features = np.angle(Image_orig_filtered_PST) features = np.zeros((PHI_features.shape[0], PHI_features.shape[1])) features[PHI_features > Threshold_max] = 1 features[PHI_features < Threshold_min] = 1 features[I < (np.amax(I) / 20)] = 0 out = features out = mh.thin(out, 1) out = mh.bwperim(out, 4) out = mh.thin(out, 1) out = mh.erode(out, np.ones((1, 1))) def phase_stretch_transform(img, LPF, S, W, threshold_min, threshold_max, flag): L = 0.5 x = np.linspace(-L, L, img.shape[0]) y = np.linspace(-L, L, img.shape[1]) [X1, Y1] = (np.meshgrid(x, y)) X = X1.T Y = Y1.T theta, rho = cart2pol(X, Y) orig = ((np.fft.fft2(img))) expo = np.fft.fftshift(np.exp(-np.power((np.divide(rho, math.sqrt((LPF ** 2) / np.log(2)))), 2))) orig_filtered = np.real(np.fft.ifft2((np.multiply(orig, expo)))) PST_Kernel_1 = np.multiply(np.dot(rho, W), np.arctan(np.dot(rho, W))) - 0.5 * np.log( 1 + np.power(np.dot(rho, W), 2)) PST_Kernel = PST_Kernel_1 / np.max(PST_Kernel_1) * S
def segment_layer(filename, params):
    '''
    Segment one layer in a stack
    '''
    start = time.time()
    #extract pixel size in xy and z
    xsize, zsize = extract_zoom(params.folder)

    #load image
    img = tifffile.imread(params.inputfolder + params.folder + filename)

    #normalize image
    img = ndimage.median_filter(img, 3)
    img = img * 255. / img.max()

    ##segment kidney tissue
    sizefactor = 10.
    small = ndimage.interpolation.zoom(img, 1. / sizefactor)  #scale the image to a smaller size
    imgf = ndimage.gaussian_filter(small, 3. / xsize)  #Gaussian filter
    median = np.percentile(imgf, 40)  #40-th percentile for thresholding
    kmask = imgf > median * 1.5  #thresholding
    kmask = mahotas.dilate(kmask, mahotas.disk(5))
    kmask = mahotas.close_holes(kmask)  #closing holes
    kmask = mahotas.erode(kmask, mahotas.disk(5)) * 255

    #remove objects that are darker than 2*percentile
    l, n = ndimage.label(kmask)
    llist = np.unique(l)
    if len(llist) > 2:
        means = ndimage.mean(imgf, l, llist)
        bv = llist[np.where(means < median * 2)]
        ix = np.in1d(l.ravel(), bv).reshape(l.shape)
        kmask[ix] = 0

    kmask = ndimage.interpolation.zoom(kmask, sizefactor)  #scale back to normal size
    kmask = normalize(kmask)
    kmask = (kmask > mahotas.otsu(kmask.astype(np.uint8))) * 255.  #remove artifacts of interpolation

    #save indices of the kidney mask
    ind = np.where(kmask > 0)
    ind = np.array(ind)
    np.save(params.inputfolder + '../segmented/masks/kidney/' + params.folder + filename[:-4] + '.npy', ind)
    skimage.io.imsave(params.inputfolder + '../segmented/masks/kidney/' + params.folder + filename[:-4] + '.tif',
                      (kmask > 0).astype(np.uint8) * 255)

    #segment glomeruli, if there is a kidney tissue
    if kmask.max() > 0:
        #remove all intensity variations larger than maximum radius of a glomerulus
        d = mahotas.disk(int(float(params.maxrad) / xsize))
        img = img - mahotas.open(img.astype(np.uint8), d)
        img = img * 255. / img.max()
        ch = img[np.where(kmask > 0)]

        #segment glomeruli by otsu thresholding only if this threshold is
        #higher than the 75-th percentile in the kidney mask
        t = mahotas.otsu(img.astype(np.uint8))
        if t > np.percentile(ch, 75) * 1.5:
            cells = img > t
            cells[np.where(kmask == 0)] = 0
            cells = mahotas.open(cells, mahotas.disk(int(float(params.minrad) / 2. / xsize)))
        else:
            cells = np.zeros_like(img)
    else:
        cells = np.zeros_like(img)

    #save indices of the glomeruli mask
    ind = np.where(cells > 0)
    ind = np.array(ind)
    np.save(params.inputfolder + '../segmented/masks/glomeruli/' + params.folder + filename[:-4] + '.npy', ind)
    skimage.io.imsave(params.inputfolder + '../segmented/masks/glomeruli/' + params.folder + filename[:-4] + '.tif',
                      (cells > 0).astype(np.uint8) * 255)
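# segment_layer above thresholds with mahotas.otsu, which expects an
# unsigned-integer image and returns a scalar threshold. A minimal sketch of
# that single step on an illustrative uint8 image (the array here is random,
# purely for demonstration):
import numpy as np
import mahotas

img = (np.random.random((64, 64)) * 255).astype(np.uint8)  # illustrative image
t = mahotas.otsu(img)   # Otsu threshold value
mask = img > t          # binary foreground mask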
def fix_single_merge(cnn, cropped_image, cropped_prob, cropped_binary, N=10, invert=True, dilate=True, border_seeds=True, erode=False, debug=False, before_merge_error=None, real_border=np.zeros((1,1)), oversampling=False, crop=True): ''' invert: True/False for invert or gradient image ''' bbox = mh.bbox(cropped_binary) orig_cropped_image = np.array(cropped_image) orig_cropped_prob = np.array(cropped_prob) orig_cropped_binary = np.array(cropped_binary) speed_image = None if invert: speed_image = Legacy.invert(cropped_image, smooth=True, sigma=2.5) else: speed_image = Legacy.gradient(cropped_image) Util.view(speed_image, large=False, color=False) dilated_binary = np.array(cropped_binary, dtype=np.bool) if dilate: for i in range(20): dilated_binary = mh.dilate(dilated_binary) Util.view(dilated_binary, large=False, color=False) borders = np.zeros(cropped_binary.shape) best_border_prediction = np.inf best_border_image = np.zeros(cropped_binary.shape) original_border = mh.labeled.border(cropped_binary, 1, 0, Bc=mh.disk(3)) results_no_border = [] predictions = [] borders = [] results = [] for n in range(N): ws = Legacy.random_watershed(dilated_binary, speed_image, border_seeds=border_seeds, erode=erode) if ws.max() == 0: continue ws_label1 = ws.max() ws_label2 = ws.max()-1 border = mh.labeled.border(ws, ws_label1, ws_label2) # Util.view(ws, large=True) # Util.view(border, large=True) # print i, len(border[border==True]) # # remove parts of the border which overlap with the original border # ws[cropped_binary == 0] = 0 # Util.view(ws, large=False, color=False) ws_label1_array = Util.threshold(ws, ws_label1) ws_label2_array = Util.threshold(ws, ws_label2) eroded_ws1 = np.array(ws_label1_array, dtype=np.bool) eroded_ws2 = np.array(ws_label2_array, dtype=np.bool) if erode: for i in range(5): eroded_ws1 = mh.erode(eroded_ws1) # Util.view(eroded_ws, large=True, color=False) dilated_ws1 = np.array(eroded_ws1) for i in range(5): dilated_ws1 = mh.dilate(dilated_ws1) for i in range(5): eroded_ws2 = mh.erode(eroded_ws2) # Util.view(eroded_ws, large=True, color=False) dilated_ws2 = np.array(eroded_ws2) for i in range(5): dilated_ws2 = mh.dilate(dilated_ws2) new_ws = np.zeros(ws.shape, dtype=np.uint8) new_ws[dilated_ws1 == 1] = ws_label1 new_ws[dilated_ws2 == 1] = ws_label2 ws = new_ws # Util.view(new_ws, large=True, color=True) # ws[original_border == 1] = 0 prediction = Patch.grab_group_test_and_unify(cnn, cropped_image, cropped_prob, ws, ws_label1, ws_label2, oversampling=oversampling) if prediction == -1 or prediction >= .5: # invalid continue # here we have for one border # the border # the prediction # borders.append(border) # predictions.append(prediction) results.append((prediction, border)) return results
circle = plt.Circle((jj * b + dw / 2 + b / 2, ii * b + dh / 2 + b / 2), 1.0, color='r')
fig.gca().add_artist(circle)
plt.show()

svm = sklearn.svm.LinearSVC(class_weight='balanced')
print sklearn.cross_validation.cross_val_score(svm, desc, labels)
svm.fit(desc, labels)
#%%
plt.imshow(mahotas.distance(lg) > 80)
#%%
plt.imshow(im)
plt.imshow(mahotas.erode(mahotas.erode(mahotas.erode(mahotas.erode(mahotas.erode(mahotas.erode(lg[:, :])))))),
           alpha=0.5)
#%%
for i in range(40, 60, 1):
    #im = skimage.io.imread('labeled/{0}.bmp'.format(i))
    im = skimage.io.imread('screencast_frames/videoframe{0:0>5d}.bmp'.format(i))
    im = im[:(im.shape[0] / b) * b, :(im.shape[1] / b) * b]

    tmp = time.time()
    descriptors, (nhd, nwd), (dh, dw) = build_descriptors(im, b)
    nlabels = svm.predict(descriptors)
    nlabels = nlabels.reshape((nhd, nwd))
    print 'time', time.time() - tmp
    #nlabels = numpy.tile(nlabels, [1, 1, b, b])
    #nlabels = nlabels.reshape(im.shape[0:2])
input_vol[:, :, zoffset] = normalize_image(mahotas.imread(
    'D:\\dev\\datasets\\isbi\\train-input\\train-input_{0:04d}.tif'.format(imgi - zrad + zoffset)))
#input_vol[:,:,zoffset] = mahotas.imread('D:\\dev\\datasets\\isbi\\train-input\\train-input_{0:04d}.tif'.format(imgi - zrad + zoffset))

blur_img = scipy.ndimage.gaussian_filter(input_img, gblur_sigma)

boundaries = label_img == 0
boundaries[0:-1, :] = np.logical_or(boundaries[0:-1, :], diff(label_img, axis=0) != 0)
boundaries[:, 0:-1] = np.logical_or(boundaries[:, 0:-1], diff(label_img, axis=1) != 0)

# erode to be sure we include at least one membrane
inside = mahotas.erode(boundaries == 0, shrink_disc)

#display = input_img.copy()
#display[np.nonzero(inside)] = 0
#figure(figsize=(20,20))
#imshow(display, cmap=cm.gray)

seeds = label_img.copy()
seeds[np.nonzero(inside == 0)] = 0

grow = mahotas.cwatershed(255 - blur_img, seeds)

membrane = np.zeros(input_img.shape, dtype=uint8)
membrane[0:-1, :] = diff(grow, axis=0) != 0
membrane[:, 0:-1] = np.logical_or(membrane[:, 0:-1], diff(grow, axis=1) != 0)
def multi_erode(img, x):
    img = img.astype(bool)
    for i in range(x):
        img = mh.erode(img)
    return img
combined_distances = smooth_distances * smoothing_factor + \
    gap_completion_distances * gap_completion_factor
combined_outflow = outflow_smooth * smoothing_factor + \
    outflow_gap * gap_completion_factor
combined_inflow = inflow_smooth * smoothing_factor + \
    inflow_gap * gap_completion_factor

# Find ignorable nodes - those where the full 3x3 neighborhood has
# source_sink_cap > 0 and the sum of source_sink_cap in the
# neighborhood (except center) is greater than the neighborhood
# outflow. Similar for inflow.
hollow = np.ones((3, 3))
hollow[1, 1] = 0
ignorable = np.logical_and(mahotas.erode(source_sink_cap > 0, np.ones((3, 3))),
                           (mahotas.convolve(source_sink_cap, hollow, mode='ignore') > combined_outflow))
np.logical_or(ignorable,
              np.logical_and(mahotas.erode(source_sink_cap < 0, np.ones((3, 3))),
                             (mahotas.convolve(source_sink_cap, hollow, mode='ignore') < - combined_inflow)),  # careful with signs
              out=ignorable)
print "Can ignore {0} of {1}".format(np.sum(ignorable), ignorable.size)

## Load the adjacency matrix
for di, direction in enumerate(directions[:4]):
    dest_coords = shift_coords(coords, direction)
    source_coords, dest_coords = validate_and_broadcast(coords, dest_coords)
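# The `hollow` kernel above is a 3x3 kernel with its center zeroed, so
# convolving with it sums the 8 neighbors of each pixel while excluding the
# pixel itself; the erosion of the sign mask then tests a condition over the
# full 3x3 neighborhood. A small self-contained sketch of that pairing,
# using an illustrative array and the default border mode:
import numpy as np
import mahotas

cap = np.arange(25, dtype=float).reshape(5, 5)       # illustrative capacities
hollow = np.ones((3, 3))
hollow[1, 1] = 0
neighbor_sum = mahotas.convolve(cap, hollow)          # sum of the 8 neighbors of each pixel
all_positive = mahotas.erode(cap > 0, np.ones((3, 3)))  # True only where the whole 3x3 block is > 0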
def addCell(self, eventTuple): if self.maskOn: if self.data.ndim == 2: self.aveData = self.data.copy() else: self.aveData = self.data.mean(axis=2) x, y = eventTuple localValue = self.currentMask[x, y] print str(self.mode) + " " + "x: " + str(x) + ", y: " + str(y) + ", mask val: " + str(localValue) # ensure mask is uint16 self.currentMask = self.currentMask.astype("uint16") sys.stdout.flush() ########## NORMAL MODE if self.mode is None: if localValue > 0 and localValue != self.currentMaskNumber: print "we are altering mask at at %d, %d" % (x, y) # copy the old mask newMask = self.currentMask.copy() # make a labeled image of the current mask labeledCurrentMask = mahotas.label(newMask)[0] roiNumber = labeledCurrentMask[x, y] # set that ROI to zero newMask[labeledCurrentMask == roiNumber] = self.currentMaskNumber newMask = newMask.astype("uint16") self.listOfMasks.append(newMask) self.currentMask = self.listOfMasks[-1] elif localValue > 0 and self.data.ndim == 3: # update info panel labeledCurrentMask = mahotas.label(self.currentMask.copy())[0] roiNumber = labeledCurrentMask[x, y] self.updateInfoPanel(ROI_number=roiNumber) elif localValue == 0: xmin = int(x - self.diskSize) xmax = int(x + self.diskSize) ymin = int(y - self.diskSize) ymax = int(y + self.diskSize) sub_region_image = self.aveData[xmin:xmax, ymin:ymax].copy() # threshold = mahotas.otsu(self.data[xmin:xmax, ymin:ymax].astype('uint16')) # do a gaussian_laplacian filter to find the edges and the center g_l = nd.gaussian_laplace( sub_region_image, 1 ) # second argument is a free parameter, std of gaussian g_l = mahotas.dilate(mahotas.erode(g_l >= 0)) g_l = mahotas.label(g_l)[0] center = g_l == g_l[g_l.shape[0] / 2, g_l.shape[0] / 2] # edges = mahotas.dilate(mahotas.dilate(mahotas.dilate(center))) - center newCell = np.zeros_like(self.currentMask) newCell[xmin:xmax, ymin:ymax] = center newCell = mahotas.dilate(newCell) if self.useNMF: modes, thresh_modes, fit_data, this_cell, is_cell, nmf_limits = self.doLocalNMF(x, y, newCell) for mode, mode_thresh, t, i in zip(modes, thresh_modes, this_cell, is_cell): # need to place it in the right place # have x and y mode_width, mode_height = mode_thresh.shape mode_thresh_fullsize = np.zeros_like(newCell) mode_thresh_fullsize[ nmf_limits[0] : nmf_limits[1], nmf_limits[2] : nmf_limits[3] ] = mode_thresh # need to add all modes belonging to this cell first, # then remove the ones nearby. 
if i: if t: valid_area = np.logical_and( mahotas.dilate( mahotas.dilate(mahotas.dilate(mahotas.dilate(newCell.astype(bool)))) ), mode_thresh_fullsize, ) newCell = np.logical_or(newCell.astype(bool), valid_area) else: newCell = np.logical_and( newCell.astype(bool), np.logical_not(mahotas.dilate(mode_thresh_fullsize)) ) newCell = mahotas.close_holes(newCell.astype(bool)) self.excludePixels(newCell, 2) newCell = newCell.astype(self.currentMask.dtype) # remove all pixels in and near current mask and filter for ROI size newCell[mahotas.dilate(self.currentMask > 0)] = 0 newCell = self.excludePixels(newCell, 10) newMask = (newCell * self.currentMaskNumber) + self.currentMask newMask = newMask.astype("uint16") self.listOfMasks.append(newMask.copy()) self.currentMask = newMask.copy() elif self.mode is "OGB": # build structuring elements se = pymorph.sebox() se2 = pymorph.sedisk(self.cellRadius, metric="city-block") seJunk = pymorph.sedisk(max(np.floor(self.cellRadius / 4.0), 1), metric="city-block") seExpand = pymorph.sedisk(self.diskSize, metric="city-block") # add a disk around selected point, non-overlapping with adjacent cells dilatedOrignal = mahotas.dilate(self.currentMask.astype(bool), Bc=se) safeUnselected = np.logical_not(dilatedOrignal) # tempMask is tempMask = np.zeros_like(self.currentMask, dtype=bool) tempMask[x, y] = True tempMask = mahotas.dilate(tempMask, Bc=se2) tempMask = np.logical_and(tempMask, safeUnselected) # calculate the area we should add to this disk based on % of a threshold cellMean = self.aveData[tempMask == 1.0].mean() allMeanBw = self.aveData >= (cellMean * float(self.contrastThreshold)) tempLabel = mahotas.label(np.logical_and(allMeanBw, safeUnselected).astype(np.uint16))[0] connMeanBw = tempLabel == tempLabel[x, y] connMeanBw = np.logical_and(np.logical_or(connMeanBw, tempMask), safeUnselected).astype(np.bool) # erode and then dilate to remove sharp bits and edges erodedMean = mahotas.erode(connMeanBw, Bc=seJunk) dilateMean = mahotas.dilate(erodedMean, Bc=seJunk) dilateMean = mahotas.dilate(dilateMean, Bc=seExpand) modes, thresh_modes, fit_data, this_cell, is_cell, limits = self.doLocaNMF(x, y) newCell = np.logical_and(dilateMean, safeUnselected) newMask = (newCell * self.currentMaskNumber) + self.currentMask newMask = newMask.astype("uint16") self.listOfMasks.append(newMask.copy()) self.currentMask = newMask.copy() ########## SQUARE MODE elif self.mode is "square": self.modeData.append((x, y)) if len(self.modeData) == 2: square_mask = np.zeros_like(self.currentMask) xstart = self.modeData[0][0] ystart = self.modeData[0][1] xend = self.modeData[1][0] yend = self.modeData[1][1] square_mask[xstart:xend, ystart:yend] = 1 # check if square_mask interfers with current mask, if so, abort if np.any(np.logical_and(square_mask, self.currentMask)): return None # add square_mask to mask newMask = (square_mask * self.currentMaskNumber) + self.currentMask newMask = newMask.astype("uint16") self.listOfMasks.append(newMask) self.currentMask = self.listOfMasks[-1] # clear current mode data self.clearModeData() ########## CIRCLE MODE elif self.mode is "circle": # make a strel and move it in place to make circle_mask if self.diskSize < 1: return None if self.diskSize is 1: se = np.ones((1, 1)) elif self.diskSize is 2: se = pymorph.secross(r=1) else: se = pymorph.sedisk(r=(self.diskSize - 1)) se_extent = int(se.shape[0] / 2) circle_mask = np.zeros_like(self.currentMask) circle_mask[x - se_extent : x + se_extent + 1, y - se_extent : y + se_extent + 1] = se * 1.0 circle_mask = 
circle_mask.astype(bool) # check if circle_mask interfers with current mask, if so, abort if np.any(np.logical_and(circle_mask, mahotas.dilate(self.currentMask.astype(bool)))): return None # add circle_mask to mask newMask = (circle_mask * self.currentMaskNumber) + self.currentMask newMask = newMask.astype("uint16") self.listOfMasks.append(newMask) self.currentMask = self.listOfMasks[-1] ########## POLY MODE elif self.mode is "poly": self.modeData.append((x, y)) sys.stdout.flush() self.makeNewMaskAndBackgroundImage()
def split_label(image, binary): bbox = mh.bbox(binary) sub_image = np.array(image[bbox[0]:bbox[1], bbox[2]:bbox[3]]) sub_binary = np.array(binary[bbox[0]:bbox[1], bbox[2]:bbox[3]]) sub_binary_border = mh.labeled.borders(sub_binary, Bc=mh.disk(3)) sub_binary = mh.erode(sub_binary.astype(np.bool)) for e in range(15): sub_binary = mh.erode(sub_binary) # # sub_binary = mh.erode(sub_binary) if sub_binary.shape[0] < 2 or sub_binary.shape[1] < 2: return np.zeros(binary.shape, dtype=np.bool), np.zeros(binary.shape, dtype=np.bool) # # smooth the image # sub_image = mh.gaussian_filter(sub_image, 3.5) grad_x = np.gradient(sub_image)[0] grad_y = np.gradient(sub_image)[1] grad = np.add(np.abs(grad_x), np.abs(grad_y)) grad -= grad.min() grad /= grad.max() grad *= 255 grad = grad.astype(np.uint8) coords = zip(*np.where(sub_binary == 1)) if len(coords) < 2: # print 'STRAAAAANGE' return np.zeros(binary.shape, dtype=np.bool), np.zeros(binary.shape, dtype=np.bool) seed1 = random.choice(coords) seed2 = random.choice(coords) seeds = np.zeros(sub_binary.shape, dtype=np.uint64) seeds[seed1] = 1 seeds[seed2] = 2 for i in range(10): seeds = mh.dilate(seeds) ws = mh.cwatershed(grad, seeds) ws[sub_binary == 0] = 0 # ws_relabeled = skimage.measure.label(ws.astype(np.uint8)) # ws_relabeled[sub_binary==0] = 0 # max_label = ws_relabeled.max() # plt.figure() # imshow(ws) binary_mask = Util.threshold(ws, ws.max()) border = mh.labeled.border(ws, ws.max(), ws.max() - 1, Bc=mh.disk(2)) border[sub_binary_border == 1] = 0 # remove any "real" border pixels # plt.figure() # imshow(binary_mask) # plt.figure() # imshow(border) # at this point, there can be multiple borders and labels labeled_border = skimage.measure.label(border) labeled_binary_mask = skimage.measure.label(binary_mask) # .. and we are going to select only the largest largest_border_label = Util.get_largest_label( labeled_border.astype(np.uint16), True) largest_binary_mask_label = Util.get_largest_label( labeled_binary_mask.astype(np.uint16), True) # .. filter out everything else border[labeled_border != largest_border_label] = 0 binary_mask[labeled_binary_mask != largest_binary_mask_label] = 0 large_label = np.zeros(binary.shape, dtype=np.bool) large_border = np.zeros(binary.shape, dtype=np.bool) large_label[bbox[0]:bbox[1], bbox[2]:bbox[3]] = binary_mask large_border[bbox[0]:bbox[1], bbox[2]:bbox[3]] = border return large_label, large_border
def show_overlay(image, segmentation, borders=np.zeros((1, 1)), labels=np.zeros((1, 1)), mask=None): b = np.zeros((image.shape[0], image.shape[1], 4), dtype=np.uint8) c = np.zeros((image.shape[0], image.shape[1], 4), dtype=np.uint8) b[:, :, 0] = image[:] b[:, :, 1] = image[:] b[:, :, 2] = image[:] b[:, :, 3] = 255 # from PIL import Image # def alpha_composite(src, dst): # ''' # Return the alpha composite of src and dst. # Parameters: # src -- PIL RGBA Image object # dst -- PIL RGBA Image object # The algorithm comes from http://en.wikipedia.org/wiki/Alpha_compositing # ''' # # http://stackoverflow.com/a/3375291/190597 # # http://stackoverflow.com/a/9166671/190597 # src = np.asarray(src) # dst = np.asarray(dst) # out = np.empty(src.shape, dtype = 'float') # alpha = np.index_exp[:, :, 3:] # rgb = np.index_exp[:, :, :3] # src_a = src[alpha]/255.0 # dst_a = dst[alpha]/255.0 # out[alpha] = src_a+dst_a*(1-src_a) # old_setting = np.seterr(invalid = 'ignore') # out[rgb] = (src[rgb]*src_a + dst[rgb]*dst_a*(1-src_a))/out[alpha] # np.seterr(**old_setting) # out[alpha] *= 255 # np.clip(out,0,255) # # astype('uint8') maps np.nan (and np.inf) to 0 # out = out.astype('uint8') # out = Image.fromarray(out, 'RGBA') # return out if not labels.shape[0] > 1: # c[segmentation==1] = (00,0,200,130) # c[segmentation==2] = (0,150,00,130) # c[mask!=0] = (0,0,200,130) c[segmentation == 1] = (0, 150, 0, 130) c[segmentation == 2] = (200, 0, 000, 130) c[segmentation == 3] = (100, 100, 00, 130) c[segmentation == 4] = (0, 0, 200, 130) if borders.shape[0] > 1: borders[mh.erode(mh.erode(mh.erode(segmentation))) == 0] = 0 c[borders == borders.max()] = (0, 255, 0, 255) c[borders == borders.max() - 1] = (255, 0, 0, 255) elif labels.shape[0] > 1: c[mask != 0] = (0, 0, 200, 130) c[labels == 1] = (0, 150, 0, 130) c[labels == 2] = (200, 0, 000, 130) c[labels == 3] = (100, 100, 00, 130) c[labels == 4] = (0, 0, 200, 130) return b, c
def PST(I, LPF=0.21, Phase_strength=0.48, Warp_strength=12.14,
        Threshold_min=-1, Threshold_max=0.0019, Morph_flag=1):
    # I: image
    # Gaussian Low Pass Filter
    #   LPF = 0.21
    # PST parameters:
    #   Phase_strength = 0.48
    #   Warp_strength = 12.14
    # Thresholding parameters (for post processing after the edge is computed)
    #   Threshold_min = -1
    #   Threshold_max = 0.0019
    # To compute analog edge, set Morph_flag = 0 and to compute digital edge, set Morph_flag = 1
    #   Morph_flag = 1
    I_initial = I
    if (len(I.shape) == 3):
        I = I.mean(axis=2)

    L = 0.5
    x = np.linspace(-L, L, I.shape[0])
    y = np.linspace(-L, L, I.shape[1])
    [X1, Y1] = (np.meshgrid(x, y))
    X = X1.T
    Y = Y1.T
    [THETA, RHO] = cart2pol(X, Y)

    # Apply localization kernel to the original image to reduce noise
    Image_orig_f = ((np.fft.fft2(I)))
    expo = np.fft.fftshift(np.exp(-np.power((np.divide(RHO, math.sqrt((LPF ** 2) / np.log(2)))), 2)))
    Image_orig_filtered = np.real(np.fft.ifft2((np.multiply(Image_orig_f, expo))))

    # Constructing the PST Kernel
    PST_Kernel_1 = np.multiply(np.dot(RHO, Warp_strength), np.arctan(np.dot(RHO, Warp_strength))) \
        - 0.5 * np.log(1 + np.power(np.dot(RHO, Warp_strength), 2))
    PST_Kernel = PST_Kernel_1 / np.max(PST_Kernel_1) * Phase_strength

    # Apply the PST Kernel
    temp = np.multiply(np.fft.fftshift(np.exp(-1j * PST_Kernel)), np.fft.fft2(Image_orig_filtered))
    Image_orig_filtered_PST = np.fft.ifft2(temp)

    # Calculate phase of the transformed image
    PHI_features = np.angle(Image_orig_filtered_PST)

    if Morph_flag == 0:
        out = PHI_features
        return out
    else:
        # find image sharp transitions by thresholding the phase
        features = np.zeros((PHI_features.shape[0], PHI_features.shape[1]))
        features[PHI_features > Threshold_max] = 1  # Bi-threshold decision
        features[PHI_features < Threshold_min] = 1  # as the output phase has both positive and negative values
        features[I < (np.amax(I) / 20)] = 0  # Removing edges in the very dark areas of the image (noise)

        # apply binary morphological operations to clean the transformed image
        out = features
        out = mh.thin(out, 1)
        out = mh.bwperim(out, 4)
        out = mh.thin(out, 1)
        out = mh.erode(out, np.ones((1, 1)))

        Overlay = mh.overlay(I, out)
        return (out, Overlay)
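# A minimal usage sketch for the PST function defined above, assuming the
# module-level imports it relies on (numpy as np, math, mahotas as mh and a
# cart2pol helper) are present; the file name below is illustrative:
import mahotas as mh

img = mh.imread('example.png', as_grey=True)   # illustrative grayscale input
edges, overlay = PST(img, Morph_flag=1)        # binary edge map plus overlay image
# With Morph_flag=0 the raw phase image is returned instead:
phase = PST(img, Morph_flag=0)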
def _classify(path, name, frames, channels, target, choices, CellObject): gnp.free_reuse_cache() #GPU TO USE, WE HAVE 2, I PREFER IF YOU'RE USING GPU 0 #whole images take up a lot of memory so we need to coordinate this. # if you're not using the notebook or a script make sure to shutdown or restart the notebook # you can use nvidia-smi in terminal to see what process are running on the GPU gnp._useGPUid = 0 #protein localization categories localizationTerms = [ 'ACTIN', 'BUDNECK', 'BUDTIP', 'CELLPERIPHERY', 'CYTOPLASM', 'ENDOSOME', 'ER', 'GOLGI', 'MITOCHONDRIA', 'NUCLEARPERIPHERY', 'NUCLEI', 'NUCLEOLUS', 'PEROXISOME', 'SPINDLE', 'SPINDLEPOLE', 'VACUOLARMEMBRANE', 'VACUOLE' ] #normalization values (don't need to change) norm_vals = np.load( '/home/morphology/mpg4/OrenKraus/Data_Sets/Yeast_Protein_Localization/Yolanda_Chong/overal_mean_std_for_single_cell_crops_based_on_Huh.npz' ) #may change to better model (constatly training bgnumpy.track_memory_usage=Trueetter networks) model_path = '/home/okraus/mil_models_backup/mil_models/Yeast_Protein_Localization/Yeast_NAND_a_10_scratch_Dropout_v5_MAP_early_stopping_best_model.npz' #load model and set evaluation type (MIL convolves across whole images) #change size curImages, sizes = getImageData(path, frames, channels) curImages = normalize_by_constant_values(curImages, norm_vals['means'], norm_vals['stdevs']) sizeX = sizes[1] sizeY = sizes[0] nn = modelEvalFunctions.loadResizedModel(model_path, sizeY, sizeX) model = modelEvalFunctions.evaluateModel_MIL(nn, localizationTerms, outputLayer='loc') nn.ForwardProp({'X0': gnp.garray(curImages)}) # GET RATIOS OF CLASSES #values of prediction maps above pred_maps = nn._layers['MIL_pool'].Z[target - 1].as_numpy_array() #calculate relative activation of each map area = pred_maps.sum(1).sum(1) / pred_maps.sum() #calculate absolute area of each map (optional) area2 = pred_maps.sum(1).sum(1) / (pred_maps.shape[1] * pred_maps.shape[2]) #plot relative activations per class, use area or area2 area_lib = {} jacobian = getJacobian(nn, frames) plt.imshow(jacobian[target - 1, 0]) loc = str(settings.MEDIA_ROOT + '/classes/' + name.split('.')[0] + "_FULL0") save(loc) mahotas_segmentation = mahotas_clean_up_seg(jacobian, target - 1) plt.imshow(mahotas_segmentation) loc = str(settings.MEDIA_ROOT + '/classes/' + name.split('.')[0] + "_FULL1") save(loc) show_segmentation_boundaries(curImages, mahotas_segmentation, target - 1, sizeX, sizeY) loc = str(settings.MEDIA_ROOT + '/classes/' + name.split('.')[0] + "_FULL2") save(loc) top5indices = np.argsort(area)[::-1][:5] del jacobian del mahotas_segmentation for i in range(len(localizationTerms)): if i in top5indices: area_lib[localizationTerms[i]] = area[i] jacobian_per_class = getJacobian_per_class(nn, i, frames) im2show = mahotas_clean_up_seg(jacobian_per_class, target - 1) overlay(curImages, im2show, target - 1, sizeX, sizeY) loc = str(settings.MEDIA_ROOT + '/classes/' + name.split('.')[0] + "_" + localizationTerms[i]) save(loc) np.save(loc, im2show) continue if localizationTerms[i] not in choices: continue area_lib[localizationTerms[i]] = area[i] jacobian_per_class = getJacobian_per_class(nn, i, frames)[target - 1] im2show = np.int8( np.log(1 + jacobian_per_class[0]) > 0.1 + np.int8(np.log(1 + jacobian_per_class[1]) > 1)) > 0 im2show = mh.dilate( mh.dilate(mh.dilate(mh.erode(mh.erode(mh.erode(im2show > 0)))))) overlay(curImages, im2show, target - 1, sizeX, sizeY) loc = str(settings.MEDIA_ROOT + '/classes/' + name.split('.')[0] + "_" + localizationTerms[i]) save(loc) 
np.save(loc, im2show) del nn del model gnp.free_reuse_cache() f = [['Class', 'Area']] for key in area_lib: f.append([str(key), area_lib[key]]) CellObject.activations = f CellObject.save() from openpyxl import Workbook wb = Workbook() ws = wb.active for arr in f: ws.append(arr) wb.save(settings.MEDIA_ROOT + '/classes/' + name.split('.')[0] + '.xlsx') if CellObject.email != '': send_mail( 'Deep Cell Vision', 'Your image has been classified. Go to http://deepcellvision.com/results/' + CellObject.name + ' to see your results', '*****@*****.**', [CellObject.email], fail_silently=False) return
def predict(self, filenames_list): if not isinstance(filenames_list, list): raise Exception('Input list of files is not a list actually') rectangles = [] for filename in filenames_list: img_rgb = imread(filename) img_hsv = rgb2hsv(img_rgb) img_nrgb = color.normalize_RGBratio(img_rgb) img_lab = rgb2lab(img_rgb) img_h = img_hsv[:, :, 0] img_h[img_h < 0.4] = 1 - img_h[img_h < 0.4] # TODO add normalized RGB and opposite RGB channel = { 'r': img_rgb[:, :, 0], 'g': img_rgb[:, :, 1], 'b': img_rgb[:, :, 2], 'h': img_hsv[:, :, 0], 's': img_hsv[:, :, 1], 'v': img_hsv[:, :, 2], 'nr': img_nrgb[:, :, 0], 'ng': img_nrgb[:, :, 1], 'nb': img_nrgb[:, :, 2], 'l': img_lab[:, :, 0], 'a': img_lab[:, :, 1], 'b': img_lab[:, :, 2] } ### Selects best channel if self.combine: chan = 1. for x in self.candidates: chan *= color.scaler(channel[x]) else: chan = noise.least_noise([channel[x] for x in self.candidates]) (h, w) = chan.shape chan = color.scaler(chan) ### Executes further denoising, which helps later on #chan = noise.denoise(chan, mode=self.denoise_mode) chan = gaussian(chan, 5) ### Finding binary edges in the smoothed image #chan = rank.gradient(chan, disk(int(h*w/55756.))) chan = color.scaler(chan) if self.threshold == 'otsu': chan = chan > threshold_otsu(chan) elif self.threshold == 'gauss': n_dist = 3 if channel['s'].std() > 0.05 else 4 chan = chan > gauss.threshold_gauss(chan, n_dist) elif self.threshold == 'kmeans': n_dist = 3 if channel['s'].std() > 0.05 else 4 chan = chan > gauss.threshold_kmeans(chan, n_dist) ### Transforming contours into shapes at last by closing gaps # For sample3.jpg, these are the total time for each function: # skimage's dilation: 11 s # scipy's dilation: 7 s # mahota's dilation: 3 s chan = mahotas.dilate(chan, disk(h / 46)) chan = mahotas.erode(chan, disk(h / 46)) chan = ndi.binary_fill_holes(chan) ### Selects largest contour, supposedly to be the whale label_objects, nb_labels = ndi.label(chan) sizes = np.bincount(label_objects.ravel()) mask_sizes = sizes == np.sort(sizes)[-2] chan = mask_sizes[label_objects] ### Draw boundary rectangle rectangles.append(blob.bound_rect(chan)) return rectangles
def split_new(image, binary): ''' ''' bbox = mh.bbox(binary) sub_image = np.array(image[bbox[0]:bbox[1], bbox[2]:bbox[3]]) sub_binary = np.array(binary[bbox[0]:bbox[1], bbox[2]:bbox[3]]) sub_binary_border = mh.labeled.borders(sub_binary, Bc=mh.disk(3)) sub_binary = mh.erode(sub_binary.astype(np.bool)) for e in range(5): sub_binary = mh.erode(sub_binary) # sub_binary = mh.erode(sub_binary) if sub_image.shape[0] < 2 or sub_image.shape[1] < 2: return np.zeros(binary.shape, dtype=np.bool), np.zeros(binary.shape, dtype=np.bool) # # smooth the image # sub_image = mh.gaussian_filter(sub_image, 3.5) grad_x = np.gradient(sub_image)[0] grad_y = np.gradient(sub_image)[1] grad = np.add(np.abs(grad_x), np.abs(grad_y)) grad -= grad.min() grad /= grad.max() grad *= 255 grad = grad.astype(np.uint8) coords = zip(*np.where(sub_binary==1)) if len(coords) < 2: print 'STRAAAAANGE' return np.zeros(binary.shape, dtype=np.bool), np.zeros(binary.shape, dtype=np.bool) seed1 = random.choice(coords) seed2 = random.choice(coords) seeds = np.zeros(sub_binary.shape, dtype=np.uint64) seeds[seed1] = 1 seeds[seed2] = 2 for i in range(10): seeds = mh.dilate(seeds) ws = mh.cwatershed(grad, seeds) ws[sub_binary==0] = 0 # ws_relabeled = skimage.measure.label(ws.astype(np.uint8)) # ws_relabeled[sub_binary==0] = 0 # max_label = ws_relabeled.max() # plt.figure() # imshow(ws) binary_mask = Util.threshold(ws, ws.max()) border = mh.labeled.border(ws, ws.max(), ws.max()-1, Bc=mh.disk(2)) # border[sub_binary_border == 1] = 0 # remove any "real" border pixels # plt.figure() # imshow(binary_mask) # plt.figure() # imshow(border) large_label = np.zeros(binary.shape, dtype=np.bool) large_border = np.zeros(binary.shape, dtype=np.bool) large_label[bbox[0]:bbox[1], bbox[2]:bbox[3]] = binary_mask large_border[bbox[0]:bbox[1], bbox[2]:bbox[3]] = border return large_label, large_border
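# Both split helpers above (split_label and split_new) follow the same
# recipe: build a gradient-based height map, place two seeds inside the
# eroded object, and let a seeded watershed carve the object in two. A
# compact sketch of that core step, assuming `sub_image` is a 2-D intensity
# crop and `sub_binary` the matching boolean mask (names and the helper are
# illustrative):
import numpy as np
import mahotas as mh

def two_seed_split(sub_image, sub_binary, seed1, seed2):
    grad_x, grad_y = np.gradient(sub_image.astype(float))
    grad = np.abs(grad_x) + np.abs(grad_y)
    grad = (255 * (grad - grad.min()) / (grad.ptp() + 1e-9)).astype(np.uint8)

    seeds = np.zeros(sub_binary.shape, dtype=np.uint64)
    seeds[seed1] = 1
    seeds[seed2] = 2

    ws = mh.cwatershed(grad, seeds)   # flood the gradient surface from the two seeds
    ws[sub_binary == 0] = 0           # keep only pixels of the original object
    return ws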
def doLocalNMF(self, x, y, roi, n_comp=7, diskSizeMultiplier=3): # do NMF decomposition n = NMF(n_components=n_comp, tol=1e-1) xmin_nmf = max(0, int(x - self.diskSize * diskSizeMultiplier)) xmax_nmf = min(int(x + self.diskSize * diskSizeMultiplier), self.data.shape[0]) ymin_nmf = max(0, int(y - self.diskSize * diskSizeMultiplier)) ymax_nmf = min(int(y + self.diskSize * diskSizeMultiplier), self.data.shape[1]) xcenter_nmf = (xmax_nmf - xmin_nmf) / 2 ycenter_nmf = (ymax_nmf - ymin_nmf) / 2 reshaped_sub_region_data = self.data_white[xmin_nmf:xmax_nmf, ymin_nmf:ymax_nmf, :].reshape( xmax_nmf - xmin_nmf * ymax_nmf - ymin_nmf, self.data.shape[2] ) n.fit(reshaped_sub_region_data - reshaped_sub_region_data.min()) transformed_sub_region_data = n.transform(reshaped_sub_region_data - reshaped_sub_region_data.min()) modes = transformed_sub_region_data.reshape(xmax_nmf - xmin_nmf, ymax_nmf - ymin_nmf, n_comp).copy() modes = [m for m in np.rollaxis(modes, 2, 0)] params = [] this_cell = [] is_cell = [] thresh_modes = [] fit_data = [] for i, mode in enumerate(modes): # threshold mode uint16_mode = (mode / mode.max() * 2 ** 16).astype("uint16") uint16_mode = mahotas.dilate(mahotas.erode(uint16_mode)) uint16_mode = nd.gaussian_filter(uint16_mode, 1) thresh_mode = uint16_mode > mahotas.otsu(uint16_mode) # exclude all pixels less than 75% of typical size smallest_roi = 0.75 * self.diskSize * self.diskSize * np.pi thresh_mode = self.excludePixels(thresh_mode, smallest_roi).astype(int) thresh_modes.append(thresh_mode) # thresh_mode = (mode.astype('uint16') > mahotas.otsu(mode.astype('uint16'))).astype(int) # fit thresholded mode fit_parameters = self.fitgaussian(thresh_mode) fit_height, fit_xcenter, fit_ycenter, fit_xwidth, fit_ywidth = fit_parameters params.append(fit_parameters) # is cell-like? if 1 <= np.abs(fit_xwidth) <= 2 * self.diskSize and 1 <= np.abs(fit_ywidth) <= 2 * self.diskSize: if 0.02 <= thresh_mode.sum() / float(thresh_mode.size) <= 0.40: is_cell.append(True) else: is_cell.append(False) else: is_cell.append(False) # is this cell? if ( np.linalg.norm(np.array([xcenter_nmf, ycenter_nmf]) - np.array([fit_xcenter, fit_ycenter])) < self.diskSize * 1.5 ): this_cell.append(True) else: this_cell.append(False) fit_gaussian = self.gaussian(*fit_parameters) xcoords = np.mgrid[0 : xmax_nmf - xmin_nmf, 0 : ymax_nmf - ymin_nmf][0] ycoords = np.mgrid[0 : xmax_nmf - xmin_nmf, 0 : ymax_nmf - ymin_nmf][1] fit_data.append(fit_gaussian(xcoords, ycoords)) # print 'this cell', this_cell # print 'is cell', is_cell # print ' ' return ( modes, thresh_modes, fit_data, np.array(this_cell), np.array(is_cell), (xmin_nmf, xmax_nmf, ymin_nmf, ymax_nmf), )
#ilastik_filename = img_filename.replace('.png', '.png_processed.h5')
#prob_file = h5py.File('Thousands_mito_em_s1152.png_processed (1).h5', 'r')
#label_index = 1
#mito_prob = prob_file['/volume/prediction'][0,0,:,:,label_index]
#prob_file.close()

# load the results
ilastik_filename = img_filename.replace('.png', '.png_processed.h5')

prob_file = h5py.File('Thousands_mito_em_s1150.png_processed.h5', 'r')
label_index = 1
mito_prob = prob_file['/volume/prediction'][0, 0, :, :, label_index]
prob_file.close()

blur_img = scipy.ndimage.gaussian_filter(mito_prob, 13)
mito_pred2 = blur_img < .85
mito_pred2 = mahotas.erode(mito_pred2, disc)

prob_file2 = h5py.File('Thousands_mito_em_s1151.png_processed.h5', 'r')
label_index = 1
mito_prob2 = prob_file2['/volume/prediction'][0, 0, :, :, label_index]
prob_file2.close()

blur_img2 = scipy.ndimage.gaussian_filter(mito_prob2, 13)
mito_pred22 = blur_img2 < .85
mito_pred22 = mahotas.erode(mito_pred22, disc)

prob_file3 = h5py.File('Thousands_mito_em_s1152.png_processed.h5', 'r')
label_index = 1
mito_prob3 = prob_file3['/volume/prediction'][0, 0, :, :, label_index]
prob_file3.close()
input_vol = zeros((input_img.shape[0], input_img.shape[1], zd), dtype=uint8)
for zoffset in range(zd):
    if zd == zrad:
        input_vol[:, :, zoffset] = input_img
    else:
        input_vol[:, :, zoffset] = normalize_image(mahotas.imread(
            'D:\\dev\\datasets\\isbi\\train-input\\train-input_{0:04d}.tif'.format(imgi - zrad + zoffset)))
        #input_vol[:,:,zoffset] = mahotas.imread('D:\\dev\\datasets\\isbi\\train-input\\train-input_{0:04d}.tif'.format(imgi - zrad + zoffset))

blur_img = scipy.ndimage.gaussian_filter(input_img, gblur_sigma)

boundaries = label_img == 0
boundaries[0:-1, :] = np.logical_or(boundaries[0:-1, :], diff(label_img, axis=0) != 0)
boundaries[:, 0:-1] = np.logical_or(boundaries[:, 0:-1], diff(label_img, axis=1) != 0)

# erode to be sure we include at least one membrane
inside = mahotas.erode(boundaries == 0, shrink_disc)

#display = input_img.copy()
#display[np.nonzero(inside)] = 0
#figure(figsize=(20,20))
#imshow(display, cmap=cm.gray)

seeds = label_img.copy()
seeds[np.nonzero(inside == 0)] = 0

grow = mahotas.cwatershed(255 - blur_img, seeds)

membrane = np.zeros(input_img.shape, dtype=uint8)
membrane[0:-1, :] = diff(grow, axis=0) != 0
membrane[:, 0:-1] = np.logical_or(membrane[:, 0:-1], diff(grow, axis=1) != 0)
#display[np.nonzero(membrane)] = 2
# Values for the erode/dilate functions
radius = 1.5
y, x = np.ogrid[-radius:radius + 1, -radius:radius + 1]
disc = x * x + y * y <= radius * radius

##
# Erode makes everything smaller and removes small objects
mito_pred2 = mahotas.erode(mito_pred2, disc)

##
# Dilate makes everything bigger and removes small holes
# mito_pred2 = mahotas.dilate(mito_pred2, disc)

# Predictions
pylab.imshow(mito_pred2)
pylab.gray()
pylab.show()

# Display the target output
pylab.imshow(mito_img)
pylab.gray()
pylab.show()

# Measure the result
true_positives_h5 = np.sum(np.logical_and(mito_pred2 > 0, mito_img > 0))
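# The hand-built `disc` above (an np.ogrid circle of radius 1.5) plays the
# same role as a mahotas disk structuring element, and erode followed by
# dilate with the same element is a morphological opening, which removes
# objects smaller than the element. A short sketch of the same cleanup using
# mh.disk on an illustrative boolean prediction mask:
import numpy as np
import mahotas as mh

pred = np.random.random((128, 128)) > 0.7    # illustrative noisy mask
se = mh.disk(2)                              # round structuring element, radius 2
opened = mh.dilate(mh.erode(pred, se), se)   # opening: drops specks smaller than the disk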