def combine_sensor_sources(self):
    """Fuse the static map, laser scan, and other-robot layers into one image.

    Each layer is inflated (label expansion) by the safety distance converted
    to pixels — other robots get twice the margin — then the layers are
    summed, smoothed with a Gaussian, and clamped to the [0, 100] range.
    """
    res = self.raw_map.info.resolution
    # Other robots are inflated by twice the safety margin.
    robots_layer = segmentation.expand_labels(
        self.other_robot_image(), distance=2.0 * self.safety_dist / res)
    map_layer = segmentation.expand_labels(
        self.map_image, distance=self.safety_dist / res)
    scan_layer = segmentation.expand_labels(
        self.scan_image(), distance=self.safety_dist / res)
    # Sum the layers (map + robots + scan, matching the original order) and blur.
    fused = gaussian_filter(map_layer + robots_layer + scan_layer, 1)
    # Clamp to the occupancy-grid value range.
    fused[fused > 100] = 100
    fused[fused <= 0] = 0
    return fused
def area_extract(array, coordinates, otsu_bins=20, expand_distance=5):
    """Extract labeled foreground areas from detection boxes.

    For each box, pixels at or above the box-local Otsu threshold are seeded
    with a unique positive label; the seeds are then grown by
    ``expand_distance`` and each grown region is re-split with a multi-Otsu
    threshold (middle class -> negative label, top class -> positive label).

    Parameters
    ----------
    array : 2-D ndarray of intensities.
    coordinates : iterable of (x, y, w, h, _) box tuples (center + size).
    otsu_bins : histogram bins for the per-box Otsu threshold.
    expand_distance : pixel distance used to grow the seeded labels.

    Returns
    -------
    ndarray of the same shape, with signed per-region labels (0 = background).
    """
    h_upper, w_upper = array.shape
    labeled_area = np.zeros_like(array)
    # Extracting the available area from the given boxes.
    for lbl, (x, y, w, h, _) in enumerate(coordinates):
        # Notice: the 'lbl' used here is different from the 'label' used in
        # object annotating during the object recognition processing; it is
        # only for pixel labeling.
        xmin = np.clip(x - w / 2, 0, None).astype(int)
        xmax = np.clip(x + w / 2, 0, w_upper).astype(int)
        ymin = np.clip(y - h / 2, 0, None).astype(int)
        ymax = np.clip(y + h / 2, 0, h_upper).astype(int)
        # Extract available areas from the box and threshold them locally.
        strict_area = array[ymin:ymax, xmin:xmax]
        otsu_thres = threshold_otsu(strict_area, nbins=otsu_bins)
        # Label the above-threshold pixels (labels are 1-based; 0 = background).
        y_index, x_index = (strict_area >= otsu_thres).nonzero()
        labeled_area[y_index + ymin, x_index + xmin] = lbl + 1
    # Expand the extracted areas.
    expanded_labels = expand_labels(labeled_area, distance=expand_distance)
    # Re-threshold the expanded areas with a multi-Otsu split.
    # BUGFIX: range upper bound is now max+1 so the highest label is processed
    # too (the original `range(1, np.max(...))` silently skipped it).
    for lbl in range(1, int(np.max(expanded_labels)) + 1):
        lbl_area = (array - np.min(array)) * (expanded_labels == lbl)
        regions = np.digitize(lbl_area, bins=threshold_multiotsu(lbl_area))
        expanded_labels[regions == 1] = -lbl
        expanded_labels[regions == 2] = lbl
    return expanded_labels
def convert(img, mask, enlarge=0):
    """Overlay a colorized label mask (with marked boundaries) on a dimmed image.

    When ``enlarge`` is non-zero, every label is first grown by that many
    pixels. The base image is scaled to half brightness in [0, 1] and the
    colored mask pixels are painted on top of it.
    """
    if enlarge != 0:
        mask = expand_labels(mask, enlarge).astype('uint8')
    # Colorize the labels and draw their boundaries.
    overlay = segmentation.mark_boundaries(
        label2rgb(mask, bg_label=0), mask.astype('uint8'))
    # Dim the source image to half brightness in [0, 1].
    base = 0.5 * np.array(img) / 255.
    colored = overlay != 0
    base[colored] = overlay[colored]
    return base
def extract_ring(labels):
    """Build a per-nucleus ring label image.

    Each nucleus label is expanded twice: by 3 px (a safety margin around the
    nucleus) and by 10 px (the ring's outer bound). The ring for a label is
    the set of pixels inside the outer expansion but outside the inner one.

    Parameters
    ----------
    labels : integer label image; 0 is background.

    Returns
    -------
    int ndarray of the same shape with each ring carrying its nucleus' label.
    """
    # Safety margin: 3 px; outer expansion: 10 px.
    # (Fixed a stale comment/name: the outer distance is 10, not 7/13.)
    inner = expand_labels(labels, distance=3)
    outer = expand_labels(labels, distance=10)
    labels_ring = np.zeros(np.shape(labels))
    # Iterate through each nucleus, starting from 1 as label 0 is background.
    for label in range(1, np.max(labels) + 1):
        # The ring is where the expanded nucleus is NOT.
        mask_ring = np.logical_and(outer == label, ~(inner == label))
        labels_ring[mask_ring] = label
    return labels_ring.astype(int)
def __init__(self, frame=None, MD=None, pth=None, Pos=None, acq=None,
             Zindex=0, register=True, periring=False, periringsize=5,
             NucChannel='DeepBlue', cytoplasm=False, CytoChannel=None,
             zernike=False, segment_type='watershed', **kwargs):
    """Segment one position/frame and compute per-cell region properties.

    Loads every channel for (Pos, frame, Zindex) from the metadata, runs the
    segmentation function selected by ``segment_type`` on the (summed)
    nuclear channel image, then builds a regionprops DataFrame with
    per-channel intensity statistics, optional Zernike features, optional
    peri-nuclear "ring" statistics, and optionally drift-registered
    centroids. The final table is stored on ``self.regionprops``.

    Parameters
    ----------
    frame : frame identifier; must exist in the metadata.
    MD : Metadata object; constructed from ``pth`` when omitted.
    pth : dataset path; defaults to ``MD.base_pth`` when MD is supplied.
    Pos : position name; must exist in the metadata.
    acq : acquisition name. NOTE(review): this parameter is never read —
        ``self.acq`` is looked up from the metadata instead.
    Zindex : z-slice index forwarded to ``MD.stkread``.
    register : when True, shift centroids by the stored drift transform.
    periring : when True, also measure a ring of ``periringsize`` pixels
        around each labeled object.
    periringsize : ring width in pixels (``expand_labels`` distance).
    NucChannel : nuclear channel name or list of names; summed for segmentation.
    cytoplasm : placeholder flag; its branch is currently a no-op (``pass``).
    CytoChannel : optional cytoplasm channel name(s).
    zernike : when True, add Zernike-moment features per region.
    segment_type : key mapped to a segmentation function by
        ``segmentation.segtype_to_segfun``.
    **kwargs : forwarded to the segmentation function.

    Raises
    ------
    ValueError : when path, position, or frame is missing.
    AssertionError : empty metadata, unknown position, or unknown frame.
    """
    # Derive the path from the metadata object when only MD was given.
    if pth is None and MD is not None:
        pth = MD.base_pth
    if any([Pos is None, pth is None, frame is None]):
        raise ValueError('Please input path, position, and frame')
    self.pth = pth
    if MD is None:
        MD = Metadata(pth)
    # MD() returns the underlying DataFrame-like table.
    if MD().empty:
        raise AssertionError('No metadata found in supplied path')
    if Pos not in MD.posnames:
        raise AssertionError('Position does not exist in dataset')
    self.posname = Pos
    if frame not in MD.frames:
        raise AssertionError('Frame does not exist in dataset')
    self.frame = frame
    # Resolve the segmentation callable from its string key.
    self._seg_fun = segmentation.segtype_to_segfun(segment_type)
    self.channels = MD.unique('Channel', Position=Pos, frame=frame)
    self.acq = MD.unique('acq', Position=Pos, frame=frame)
    self.XY = MD().at[MD.unique('index', Position=Pos, frame=frame)[0], 'XY']
    self._pixelsize = MD()['PixelSize'][0]
    # Normalize channel arguments to lists so they can be summed below.
    NucChannel = NucChannel if isinstance(NucChannel, list) else [NucChannel]
    CytoChannel = CytoChannel if isinstance(CytoChannel, list) else [CytoChannel]
    # Load one 2-D image per channel.
    Data = {}
    for ch in self.channels:
        Data[ch] = np.squeeze(
            MD.stkread(Channel=ch, frame=frame, Position=Pos,
                       Zindex=Zindex, verbose=False))
        assert Data[
            ch].ndim == 2, "channel/position/frame/Zindex did not return unique result"
    self.imagedims = np.shape(Data[NucChannel[0]])
    # Introspect the segmentation function's signature to record the
    # effective parameter set (defaults overridden by kwargs).
    # NOTE(review): assumes every parameter after the first two has a
    # default — TODO confirm against the segmentation functions.
    nargs = self._seg_fun.__code__.co_argcount
    args = [self._seg_fun.__code__.co_varnames[i] for i in range(2, nargs)]
    defaults = list(self._seg_fun.__defaults__)
    input_dict = {args[i]: defaults[i] for i in range(0, nargs - 2)}
    input_dict = {
        **input_dict,
        **kwargs,
        'nucchannel': NucChannel,
        'cytochannel': CytoChannel
    }
    self._seg_params = input_dict
    # Sum the cytoplasm channels; when CytoChannel is absent (e.g. [None])
    # the lookup fails and an empty-string sentinel is used instead.
    # NOTE(review): bare except hides unrelated errors — consider
    # `except (KeyError, TypeError)`.
    try:
        imgCyto = np.sum([Data[ch] for ch in CytoChannel], axis=0)
    except:
        imgCyto = ''
    imgNuc = np.sum([Data[ch] for ch in NucChannel], axis=0)
    # Run segmentation: L is the integer label image.
    L = self._seg_fun(img=imgNuc, imgCyto=imgCyto, **kwargs)
    # Base regionprops on the first nuclear channel; per-channel intensity
    # columns are added below, so drop the generic intensity columns.
    props = measure.regionprops(L, intensity_image=Data[NucChannel[0]])
    props_df = regionprops_to_df(props)
    props_df.drop(['mean_intensity', 'max_intensity', 'min_intensity'],
                  axis=1, inplace=True)
    if zernike:
        # Zernike features of the z-scored nuclear intensity image.
        # NOTE(review): coeff_fast is evaluated twice per region here —
        # indices [1] and [2] of its result are stored as 'L' and 'K'.
        L1 = [
            list(Zernike.coeff_fast(stats.zscore(r.intensity_image)))[1]
            for r in props
        ]
        K1 = [
            list(Zernike.coeff_fast(stats.zscore(r.intensity_image)))[2]
            for r in props
        ]
        props_df['L'] = L1
        props_df['K'] = K1
    # Per-channel intensity statistics over the nuclear labels.
    for ch in self.channels:
        props_channel = measure.regionprops(L, intensity_image=Data[ch])
        mean_channel = [r.mean_intensity for r in props_channel]
        max_channel = [r.max_intensity for r in props_channel]
        min_channel = [r.min_intensity for r in props_channel]
        Ninty_channel = [
            np.percentile(r.intensity_image, 90) for r in props_channel
        ]
        median_channel = [
            np.median(r.intensity_image) for r in props_channel
        ]
        props_df['mean_' + ch] = mean_channel
        props_df['max_' + ch] = max_channel
        props_df['min_' + ch] = min_channel
        props_df['90th_' + ch] = Ninty_channel
        props_df['median_' + ch] = median_channel
        if zernike:
            c1 = [
                list(Zernike.coeff_fast(stats.zscore(
                    r.intensity_image)))[0] for r in props_channel
            ]
            props_df['zernike_' + ch] = c1
    if periring:
        #from skimage.morphology import disk, dilation
        from skimage.segmentation import expand_labels
        # Ring mask: grow each label by periringsize px, then subtract the
        # original labels so only the surrounding band remains.
        Lperi = expand_labels(L, distance=periringsize) - L
        # Same per-channel statistics, measured on the ring labels.
        for ch in self.channels:
            props_channel = measure.regionprops(Lperi,
                                                intensity_image=Data[ch])
            mean_channel = [r.mean_intensity for r in props_channel]
            max_channel = [r.max_intensity for r in props_channel]
            min_channel = [r.min_intensity for r in props_channel]
            Ninty_channel = [
                np.percentile(r.intensity_image, 90) for r in props_channel
            ]
            median_channel = [
                np.median(r.intensity_image) for r in props_channel
            ]
            props_df['mean_' + ch + '_periring'] = mean_channel
            props_df['max_' + ch + '_periring'] = max_channel
            props_df['min_' + ch + '_periring'] = min_channel
            props_df['90th_' + ch + '_periring'] = Ninty_channel
            props_df['median_' + ch + '_periring'] = median_channel
    if cytoplasm:
        # Placeholder: cytoplasm measurements are not implemented yet.
        pass
    if register:
        # Shift centroids by the per-frame drift transform (elements 6:8
        # of driftTform hold the x/y offset) so coordinates are comparable
        # across frames.
        ind = MD.unique('index', Position=Pos, frame=frame,
                        Channel=NucChannel)
        Tforms = MD().at[ind[0], 'driftTform']
        if Tforms is not None:
            for i in np.arange(props_df.index.size):
                props_df.at[i, 'centroid'] = tuple(
                    np.add(props_df.at[i, 'centroid'], Tforms[6:8]))
                props_df.at[i, 'weighted_centroid'] = tuple(
                    np.add(props_df.at[i, 'weighted_centroid'],
                           Tforms[6:8]))
            #print('\nRegistered centroids')
        else:
            print('No drift correction found')
    self.regionprops = props_df
# NOTE(review): fragment of a larger evaluation loop — `preds`, `lbls`,
# `idx`, `count`, `label_arr`, the accumulators (`num_img`, `dist_sum`,
# `dev_sum`, `cor_sum`), `show_processed_image`, `seg1`, and `thresh_img`
# are defined in the enclosing scope, which is not visible here.
preds[idx] = count
lbls[idx] = label_arr[idx]
# Analysis of result: accumulate per-image error statistics.
num_img = num_img + 1
# Absolute error between predicted count and ground-truth label.
dist = np.abs(np.subtract(count, label_arr[idx]))
dist_sum = dist_sum + dist
# Relative deviation of the prediction (1 - count/label); accumulated as
# an absolute value. NOTE(review): divides by label_arr[idx] — presumably
# labels are non-zero; verify against the caller.
dev = 1 - np.divide(count, label_arr[idx])
dev_sum = dev_sum + np.abs(dev)
# Count the image as correct when the rounded prediction matches the
# rounded label.
if round(count) == round(label_arr[idx]):
    cor_sum = cor_sum + 1
# Show the segmentation: original watershed labels vs. an 8-px expansion.
if show_processed_image:
    expanded = expand_labels(seg1, distance=8)
    fig, axes = plt.subplots(nrows=1, ncols=2, figsize=(9, 5),
                             sharex=True, sharey=True)
    color1 = label2rgb(seg1, image=thresh_img, bg_label=0)
    axes[0].imshow(color1)
    axes[0].set_title('Sobel+Watershed')
    color2 = label2rgb(expanded, image=thresh_img, bg_label=0)
    axes[1].imshow(color2)
    axes[1].set_title('Expanded labels')
    for a in axes:
        a.axis('off')
    fig.tight_layout()
    plt.show()