def plot_cup(img, bbox):
    img = FC.resize_image(img)
    lab_img = FC.apply_color_model(img)
    labels1, labels2 = FC.segment_image(img, lab_img)
    cup_labels = FC.select_cup_segment(labels2, bbox)
    # out3 = skic.label2rgb(cup_labels, img, kind='avg')
    out3 = cup_labels
    neg_segments = FC.select_negative_segments(cup_labels, labels2)
    out4 = skic.label2rgb(neg_segments, img, kind='avg')

    plt.figure()
    plt.imshow(img)
    plt.title("Image")
    plt.figure()
    plt.imshow(skic.label2rgb(labels1, img, kind='avg'))
    plt.title("Plain SLIC result")
    plt.figure()
    plt.imshow(skic.label2rgb(labels2, img, kind='avg'))
    plt.title("Merged segments")
    plt.figure()
    plt.imshow(out3)
    if np.max(out3.ravel()) == 1:
        plt.title("Cup only")
    else:
        plt.title("Rejected cup")
    plt.figure()
    plt.imshow(out4)
    plt.title("Negative segments")
def logo_iterate(labels, image, fns=d + 'logo-%03i.png'):
    height, width = labels.shape
    background = (labels == 0)
    foreground = ~background
    counter = it.count()
    # part one: just foreground/background
    colorcombos = it.permutations(colors, 2)
    lab2 = np.zeros(labels.shape, np.uint8)
    lab2[foreground] = 1
    for cs in colorcombos:
        img = color.label2rgb(lab2, image, colors=cs)
        io.imsave(fns % next(counter), img)
    # part two: background split
    splits = np.arange(500, 1600, 100).astype(int)
    colorcombos = it.permutations(colors, 3)
    for s, cs in it.product(splits, colorcombos):
        im, lab = _split_img_horizontal(image, lab2, background, s)
        img = color.label2rgb(lab, im, colors=cs)
        io.imsave(fns % next(counter), img)
    # part three: foreground split
    colorcombos = it.permutations(colors, 3)
    for cs in colorcombos:
        img = color.label2rgb(labels, image, colors=cs)
        io.imsave(fns % next(counter), img)
    # part four: both split
    colorcombos = it.permutations(colors, 4)
    for s, cs in it.product(splits, colorcombos):
        im, lab = _split_img_horizontal(image, labels, background, s)
        img = color.label2rgb(lab, im, colors=cs)
        io.imsave(fns % next(counter), img)
def normcut_segmentations(img):
    # labels1 = segmentation.slic(img, compactness=3, n_segments=50)
    labels1 = segmentation.slic(img, compactness=3, n_segments=20)
    out1 = color.label2rgb(labels1, img)  # kind='avg'
    # return labels1
    g = graph.rag_mean_color(img, labels1, mode='similarity')
    labels2 = graph.cut_normalized(labels1, g)
    out2 = color.label2rgb(labels2, img, image_alpha=0.2)  # kind='avg'
    return (labels1, labels2)
def number_nucleus(image):
    elevation_map = sobel(image)
    markers = np.zeros_like(image)
    markers[image < 250] = 1
    markers[image > 2000] = 2
    segmentation = watershed(elevation_map, markers)
    label_img = label(segmentation)
    prop = regionprops(label_img)

    width, height = plt.rcParams['figure.figsize']
    plt.rcParams['image.cmap'] = 'gray'
    image_label_overlay = label2rgb(label_img, image=image)

    fig, (ax1, ax2) = plt.subplots(ncols=2, nrows=1, figsize=(15, 8))
    ax1.imshow(image_label_overlay)
    ax2.imshow(image, cmap=plt.cm.gray, interpolation='nearest')

    # keep only regions with area > 5000
    image_labeled = [region for region in prop if region.area > 5000]
    return len(image_labeled)
def show_all(fname, images, titles, numsegs=1):
    num_images = len(images)
    num_titles = len(titles)
    titles += [''] * (num_images - num_titles)
    fig, axes = plt.subplots(ncols=num_images, figsize=(9, 2.5))
    im = images[0]
    for i in range(numsegs):
        axes[i].imshow(images[i])
        axes[i].set_title(titles[i])
        print(titles[i])
    for j in range(numsegs, num_images):
        segimg = label2rgb(images[j], image=im, image_alpha=0.5)
        axes[j].imshow(segimg, interpolation='nearest')
        axes[j].set_title(titles[j])
        print(titles[j])
    for ax in axes:
        ax.axis('off')
    fig.subplots_adjust(hspace=0.01, wspace=0.01, top=1, bottom=0,
                        left=0, right=1)
    plt.show()
    # plt.savefig(fname + "_seg.jpg")
    print(fname + "_seg.jpg")
    return True
def build_region(self):
    start_time = time.time()
    labels = segmentation.slic(self.q_frame, self.num_superpixels,
                               self.compactness, convert2lab=True,
                               multichannel=True)
    _num_superpixels = np.max(labels) + 1
    self.s_frame = color.label2rgb(labels, self.q_frame, kind='avg')
    self.freq = np.array([np.sum(labels == label)
                          for label in range(_num_superpixels)])
    self.mean = np.array([region['centroid']
                          for region in regionprops(labels + 1)],
                         dtype=np.int16)
    self.color_data = np.array([np.sum(self.q_frame[np.where(labels == label)], 0)
                                for label in range(_num_superpixels)])
    _inv_freq = 1 / (self.freq + 1e-7)
    self.color_data = self.color_data * _inv_freq[:, None]

    gray_frame = cv2.cvtColor(self.q_frame, cv2.COLOR_RGB2GRAY)

    def texture_prop(label, patch_size=5):
        _mean_min = self.mean[label] - patch_size
        _mean_max = self.mean[label] + patch_size
        glcm = greycomatrix(gray_frame[_mean_min[0]:_mean_max[0],
                                       _mean_min[1]:_mean_max[1]],
                            [3], [0], 256, symmetric=True, normed=True)
        _dis = greycoprops(glcm, 'dissimilarity')[0, 0]
        _cor = greycoprops(glcm, 'correlation')[0, 0]
        return (_dis, _cor)

    self.texture_data = np.array([texture_prop(label)
                                  for label in range(_num_superpixels)])
    self.data = np.hstack((self.color_data, self.texture_data))
    cv2.imwrite('outs.png', self.s_frame)
    print("Build region (preprocess) : ", time.time() - start_time)
    return (labels, _num_superpixels)
def roofRegion(image):
    """Estimate roof regions from edges in the image."""
    # apply threshold
    thresh = threshold_otsu(image)
    bw = closing(image > thresh, square(3))

    # remove artifacts connected to image border
    cleared = bw.copy()
    clear_border(cleared)

    # label image regions
    label_image = label(cleared)
    borders = np.logical_xor(bw, cleared)
    label_image[borders] = -1
    image_label_overlay = label2rgb(label_image, image=image)

    fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
    ax.imshow(image_label_overlay)

    for region in regionprops(label_image):
        # skip small regions
        if region.area < 100:
            continue
        # draw rectangle around each segmented region
        minr, minc, maxr, maxc = region.bbox
        rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                  fill=False, edgecolor='red', linewidth=2)
        ax.add_patch(rect)
    plt.show()
def __build_region__(self, q_frame):
    start_time = time.time()
    regions = segmentation.slic(q_frame, self.props.num_superpixels,
                                self.props.compactness,
                                convert2lab=self.props.useLAB,
                                multichannel=True)
    num_regions = len(np.unique(regions))
    s_frame = color.label2rgb(regions, q_frame, kind='avg')
    mean = np.array([region['centroid']
                     for region in regionprops(regions + 1)])
    freq = np.array([np.sum(regions == region)
                     for region in range(num_regions)])
    region_props = (mean, freq)

    if self.props.useColor:
        color_data = self.__extract_color__(q_frame, regions, region_props)
    if self.props.useTexture:
        texture_data = self.__extract_texture__(q_frame, regions, region_props)

    if self.props.useTexture and self.props.useColor:
        data = np.hstack((color_data, texture_data))
    elif self.props.useTexture:
        data = texture_data
    else:
        data = color_data

    if self.props.doProfile:
        cv2.imwrite(self.PROFILE_PATH + self.method + '_s.png', s_frame)
    print("Build region (preprocess) : ", time.time() - start_time)
    return (num_regions, regions, region_props, data)
def get_cells(image):
    """Get cells from the polygon."""
    new_image = np.ones([3, image.shape[0], image.shape[1]], dtype=float)
    # apply threshold
    thresh = threshold_otsu(image)
    bw = image

    # remove artifacts connected to image border
    cleared = bw.copy()
    clear_border(cleared)

    # label image regions
    label_image = label(cleared)  # skimage.measure.label
    borders = np.logical_xor(bw, cleared)
    label_image[borders] = -1
    image_label_overlay = label2rgb(label_image, image=image)

    # extract the regions and get a polygon per region
    polygons = []
    for i, region in enumerate(regionprops(label_image)):
        # skip small regions
        if region.area < 100:
            continue
        # polygons.append(matplotlib.path.Path(region.coords))
        print(region.coords.shape)
        a = np.zeros(region.coords.shape)
        a[:, 0] = region.coords[:, 1]
        a[:, 1] = region.coords[:, 0]
        polygons.append(a)
    return polygons
def _apply(self, img_msg, label_msg):
    bridge = cv_bridge.CvBridge()
    img = bridge.imgmsg_to_cv2(img_msg)
    label_img = bridge.imgmsg_to_cv2(label_msg)
    # publish only valid label region
    applied = img.copy()
    applied[label_img == 0] = 0
    applied_msg = bridge.cv2_to_imgmsg(applied, encoding=img_msg.encoding)
    applied_msg.header = img_msg.header
    self.pub_img.publish(applied_msg)
    # publish visualized label
    if img_msg.encoding in {'16UC1', '32SC1'}:
        # do dynamic scaling to make it look nicely
        min_value, max_value = img.min(), img.max()
        img = (img - min_value) / (max_value - min_value) * 255
        img = gray2rgb(img)
    label_viz_img = label2rgb(label_img, img, bg_label=0)
    label_viz_img = mark_boundaries(label_viz_img, label_img, (1, 0, 0))
    label_viz_img = (label_viz_img * 255).astype(np.uint8)
    label_viz_msg = bridge.cv2_to_imgmsg(label_viz_img, encoding='rgb8')
    label_viz_msg.header = img_msg.header
    self.pub_label_viz.publish(label_viz_msg)
    # publish mask
    if self._publish_mask:
        bg_mask = (label_img == 0)
        fg_mask = ~bg_mask
        bg_mask = (bg_mask * 255).astype(np.uint8)
        fg_mask = (fg_mask * 255).astype(np.uint8)
        fg_mask_msg = bridge.cv2_to_imgmsg(fg_mask, encoding='mono8')
        fg_mask_msg.header = img_msg.header
        bg_mask_msg = bridge.cv2_to_imgmsg(bg_mask, encoding='mono8')
        bg_mask_msg.header = img_msg.header
        self.pub_fg_mask.publish(fg_mask_msg)
        self.pub_bg_mask.publish(bg_mask_msg)
def detectOpticDisc(image):
    labels = segmentation.slic(image, n_segments=70)
    out = color.label2rgb(labels, image, kind='avg')
    gray = cv2.cvtColor(out, cv2.COLOR_RGB2GRAY)
    # the optic disc is the brightest averaged segment
    maximum = np.max(gray)
    image[gray == maximum] = 255
    return image
def plot_preprocessed_image(self):
    """
    Plots the pre-processed image. The plotted image is the same as the one
    obtained at the end of the get_text_candidates method.
    """
    image = restoration.denoise_tv_chambolle(self.image, weight=0.1)
    thresh = threshold_otsu(image)
    bw = closing(image > thresh, square(2))
    cleared = bw.copy()

    label_image = measure.label(cleared)
    borders = np.logical_xor(bw, cleared)
    label_image[borders] = -1
    image_label_overlay = label2rgb(label_image, image=image)

    fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(12, 12))
    ax.imshow(image_label_overlay)

    for region in regionprops(label_image):
        if region.area < 10:
            continue
        minr, minc, maxr, maxc = region.bbox
        rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                  fill=False, edgecolor='red', linewidth=2)
        ax.add_patch(rect)
    plt.show()
def SuperPixel(self, Image):
    # show the output of SLIC
    segments = slic(Image, n_segments=20, sigma=5)
    # shift labels by 1 so that no labelled region is 0 and ignored by regionprops
    segments = segments + 1
    label_rgb = color.label2rgb(segments, Image, kind='avg')
    return label_rgb
def detectOpticDisc(image):
    kernel = octagon(10, 10)
    thresh = threshold_otsu(image[:, :, 1])
    binary = image > thresh
    print(binary.dtype)
    luminance = convertToHLS(image)[:, :, 2]
    t = threshold_otsu(luminance)
    t = erosion(luminance, kernel)

    labels = segmentation.slic(image[:, :, 1], n_segments=3)
    out = color.label2rgb(labels, image[:, :, 1], kind='avg')
    skio.imshow(out)

    x, y = computeCentroid(t)
    print(x, y)
    rows, cols, _ = image.shape
    p1 = closing(image[:, :, 1], kernel)
    p2 = opening(p1, kernel)
    p3 = reconstruction(p2, p1, 'dilation')
    p3 = p3.astype(np.uint8)
    # g = dilation(p3, kernel) - erosion(p3, kernel)
    # g = rank.gradient(p3, disk(5))
    g = cv2.morphologyEx(p3, cv2.MORPH_GRADIENT, kernel)
    # markers = rank.gradient(p3, disk(5)) < 10
    markers = drawCircle(rows, cols, x, y, 85)
    # markers = ndimage.label(markers)[0]
    # skio.imshow(markers)
    g = g.astype(np.uint8)
    # g = cv2.cvtColor(g, cv2.COLOR_GRAY2RGB)
    w = watershed(g, markers)
    print(np.max(w), np.min(w))
    w = w.astype(np.uint8)
    # skio.imshow(w)
    return w
def view_dataset(self):
    for datum in self.val:
        rgb, label = self.load_datum(datum, train=False)
        label_viz = label2rgb(label, rgb, bg_label=-1)
        label_viz[label == 0] = 0
        plt.imshow(label_viz)
        plt.show()
def get_cells(image):
    """Get cells from the polygon."""
    # apply threshold
    thresh = threshold_otsu(image)
    binary = image > thresh
    bw = binary
    plt.imshow(bw)

    # remove artifacts connected to image border
    cleared = bw.copy()
    clear_border(cleared)

    # label image regions
    label_image = skimage.measure.label(cleared)  # find_contours
    borders = np.logical_xor(bw, cleared)
    label_image[borders] = -1
    image_label_overlay = label2rgb(label_image, image=image)

    # extract the regions and get a polygon per region
    polygons = []
    for i, region in enumerate(regionprops(label_image)):
        # skip small regions
        if region.area < 100:
            continue
        a = np.zeros([len(region.coords), 2])
        # note: the inner loop variable is renamed to j so it no longer
        # shadows the enumerate index i
        for j in range(len(region.coords)):
            a[j, :] = [region.coords[j][0], region.coords[j][1]]
        polygons.append(a)
    return polygons
def getRegions():
    """Geocode address and retrieve image centered around lat/long."""
    address = request.args.get('address')
    results = Geocoder.geocode(address)
    lat, lng = results[0].coordinates
    zip_code = results[0].postal_code

    map_url = ('https://maps.googleapis.com/maps/api/staticmap?center={0},{1}'
               '&size=640x640&zoom=19&sensor=false&maptype=roadmap'
               '&&style=visibility:simplified|gamma:0.1')
    request_url = map_url.format(lat, lng)
    req = urllib.urlopen(request_url)
    img = io.imread(req.geturl(), flatten=True)

    labels, numobjects = ndimage.label(img)
    image = filter.canny(img, sigma=3)
    thresh = threshold_otsu(image)
    bw = closing(image > thresh, square(3))

    # remove artifacts connected to image border
    cleared = bw.copy()
    clear_border(cleared)

    # label image regions
    label_image = label(cleared)
    borders = np.logical_xor(bw, cleared)
    label_image[borders] = -1
    image_label_overlay = label2rgb(label_image, image=image)

    fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
    ax.imshow(image_label_overlay)
def _callback(self, img_msg, mask_msg):
    bridge = cv_bridge.CvBridge()
    bgr_img = bridge.imgmsg_to_cv2(img_msg, desired_encoding='bgr8')
    mask_img = bridge.imgmsg_to_cv2(mask_msg, desired_encoding='mono8')
    if mask_img.size < 1:
        logwarn_throttle(10, 'Too small sized image')
        return
    logwarn_throttle(10, '[FCNMaskForLabelNames] >> Start Processing <<')
    if mask_img.ndim == 3 and mask_img.shape[2] == 1:
        mask_img = mask_img.reshape(mask_img.shape[:2])
    if mask_img.shape != bgr_img.shape[:2]:
        jsk_logwarn('Size of mask and color image is different. '
                    'Resizing.. mask {0} to {1}'
                    .format(mask_img.shape, bgr_img.shape[:2]))
        mask_img = resize(mask_img, bgr_img.shape[:2],
                          preserve_range=True).astype(np.uint8)

    blob = bgr_img - self.mean_bgr
    blob = blob.transpose((2, 0, 1))
    x_data = np.array([blob], dtype=np.float32)
    if self.gpu != -1:
        x_data = cuda.to_gpu(x_data, device=self.gpu)
    x = Variable(x_data, volatile=True)
    self.model(x)
    pred_datum = cuda.to_cpu(self.model.score.data[0])

    candidate_labels = [self.target_names.index(name)
                        for name in self.tote_contents]
    label_pred_in_candidates = pred_datum[candidate_labels].argmax(axis=0)
    label_pred = np.zeros_like(label_pred_in_candidates)
    for idx, label_val in enumerate(candidate_labels):
        label_pred[label_pred_in_candidates == idx] = label_val
    label_pred[mask_img == 0] = 0  # set bg_label

    label_viz = label2rgb(label_pred, bgr_img, bg_label=0)
    label_viz = (label_viz * 255).astype(np.uint8)
    debug_msg = bridge.cv2_to_imgmsg(label_viz, encoding='rgb8')
    debug_msg.header = img_msg.header
    self.pub_debug.publish(debug_msg)

    output_mask = np.ones(mask_img.shape, dtype=np.uint8)
    output_mask *= 255
    for label_val, label_name in enumerate(self.target_names):
        if label_name in self.label_names:
            assert label_name == 'kleenex_paper_towels'
            assert label_val == 21
            label_mask = ((label_pred == label_val) * 255).astype(np.uint8)
            contours, hierachy = cv2.findContours(
                label_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
            cv2.drawContours(output_mask, contours, -1, 255, -1)
            # output_mask[label_pred == label_val] = False
            # output_mask = output_mask.astype(np.uint8)
            # output_mask[output_mask == 1] = 255
    output_mask[mask_img == 0] = 0
    output_mask_msg = bridge.cv2_to_imgmsg(output_mask, encoding='mono8')
    output_mask_msg.header = img_msg.header
    self.pub.publish(output_mask_msg)
    logwarn_throttle(10, '[FCNMaskForLabelNames] >> Finished processing <<')
def validate(self):
    """Validate training with data."""
    log_templ = ('{i_iter}: type={type}, loss={loss}, acc={acc}, '
                 'acc_cls={acc_cls}, iu={iu}, fwavacc={fwavacc}')
    type = 'val'
    self.model.train = False
    N_data = len(self.dataset.val)
    result = defaultdict(list)
    desc = '{0}: validating'.format(self.i_iter)
    for indice in tqdm.tqdm(xrange(N_data), ncols=80, desc=desc):
        loss, acc, acc_cls, iu, fwavacc = self._iterate_once(
            type=type, indices=[indice])
        result['loss'].append(loss)
        result['acc'].append(acc)
        result['acc_cls'].append(acc_cls)
        result['iu'].append(iu)
        result['fwavacc'].append(fwavacc)

    # visualize predicted label
    blob = cuda.to_cpu(self.model.x.data)[0]
    label_true = cuda.to_cpu(self.model.t.data)[0]
    img = self.dataset.datum_to_img(blob)
    label_true_viz = label2rgb(label_true, img, bg_label=0)
    label_true_viz[label_true == 0] = 0
    label_true_viz = (label_true_viz * 255).astype(np.uint8)
    label = cuda.to_cpu(self.model.score.data)[0].argmax(axis=0)
    label_viz = label2rgb(label, img, bg_label=0)
    label_viz[label == 0] = 0
    label_viz = (label_viz * 255).astype(np.uint8)
    hline = np.zeros((5, img.shape[1], 3), dtype=np.uint8)
    hline.fill(255)
    imsave(
        osp.join(self.log_dir, 'visualize_{0}.jpg'.format(self.i_iter)),
        np.vstack([img, hline, label_true_viz, hline, label_viz, hline]))

    log = dict(
        i_iter=self.i_iter,
        type=type,
        loss=np.array(result['loss']).mean(),
        acc=np.array(result['acc']).mean(),
        acc_cls=np.array(result['acc_cls']).mean(),
        iu=np.array(result['iu']).mean(),
        fwavacc=np.array(result['fwavacc']).mean(),
    )
    print(log_templ.format(**log))
    self.logfile.write(
        '{i_iter},{type},{loss},{acc},{acc_cls},{iu},{fwavacc}\n'
        .format(**log))
def forward(self, bottom, top):
    # bottom[0]: images N*3*W*H
    # bottom[1]: prediction N*1*W*H
    n = bottom[0].data.shape[0]
    for i in range(n):
        labels = segmentation.slic(
            bottom[0].data[i].transpose((1, 2, 0)),
            compactness=self.compactness, n_segments=self.n_segs)
        top[0].data[i, ...] = color.label2rgb(
            labels, bottom[1].data[i].transpose((1, 2, 0)),
            kind='avg').transpose((2, 0, 1))
        # .reshape(top[0].data[i].shape)
def fig_label_segments(fig, image, segments, label):
    labels, _ = ndimage.label(segments)
    image_label_overlay = label2rgb(labels, image=image)
    # note: the next line overwrites the label2rgb overlay above; possibly
    # mark_boundaries(image_label_overlay, segments) was intended
    image_label_overlay = mark_boundaries(image, segments)
    fig.set_title(label)
    fig.axis('off')
    fig.imshow(image_label_overlay)
    print("%s number of segments: %d" % (label, len(np.unique(segments))))
def agglomerativeClusteringFeatures(image):
    connectivity = grid_to_graph(*image[:, :, 2].shape)
    X = np.reshape(image[:, :, 2], (-1, 1))
    ward = AgglomerativeClustering(n_clusters=150, linkage='ward',
                                   connectivity=connectivity).fit(X)
    labels = np.reshape(ward.labels_, image[:, :, 2].shape)
    averageIntensity = color.label2rgb(labels, image[:, :, 2], kind='avg')
    # areas = getAreas(labels)
    return averageIntensity
def build_region(self):
    start_time = time.time()
    labels = segmentation.slic(
        self.q_cur_frame, self.num_superpixels, self.compactness,
        convert2lab=True, multichannel=True
    )
    self.s_frame = color.label2rgb(labels, self.q_cur_frame, kind="avg")
    cv2.imwrite("outs.png", self.s_frame)
    print("Slic time : ", time.time() - start_time)
    return labels
def segment_image(image, n_segments=400, compactness=30, sigma=5, verbose=True):
    if verbose:
        print("segmenting image")
        print("n_segments=%d, compactness=%d, sigma=%d"
              % (n_segments, compactness, sigma))
    image = image.astype("float64")
    labels = slic(image, compactness=compactness, n_segments=n_segments,
                  sigma=sigma)
    segmented = color.label2rgb(labels, image, kind="avg")
    return segmented.astype("uint8")
def spectral_cluster(filename, compactness_val=30, n=6):
    img = misc.imread(filename)
    labels1 = segmentation.slic(img, compactness=compactness_val, n_segments=n)
    out1 = color.label2rgb(labels1, img, kind='overlay',
                           colors=['red', 'green', 'blue',
                                   'cyan', 'magenta', 'yellow'])
    fig, ax = plt.subplots()
    ax.imshow(out1, interpolation='nearest')
    ax.set_title("Compactness: {} | Segments: {}".format(compactness_val, n))
    plt.show()
def createImage(thres):
    # do simple filter based on color value
    filtered_image = np.zeros_like(image)  # set up all-zero image
    filtered_image[image > thres] = 1      # filtered values set to 1
    # label features and convert to rgb image
    labeled_particles, num_features = ndi.label(filtered_image)
    image_label_overlay = label2rgb(labeled_particles, image=image, bg_label=0)
    return image_label_overlay
def getArea(address):
    """Geocode address and retrieve image centered around lat/long."""
    results = Geocoder.geocode(address)
    lat, lng = results[0].coordinates
    zip_code = results[0].postal_code

    map_url = ('https://maps.googleapis.com/maps/api/staticmap?center={0},{1}'
               '&size=640x640&zoom=19&sensor=false&maptype=roadmap'
               '&&style=visibility:simplified|gamma:0.1')
    request_url = map_url.format(lat, lng)
    req = urllib.urlopen(request_url)
    img = io.imread(req.geturl(), flatten=True)

    labels, numobjects = ndimage.label(img)
    image = filter.canny(img, sigma=3)
    thresh = threshold_otsu(image)
    bw = closing(image > thresh, square(3))

    # remove artifacts connected to image border
    cleared = bw.copy()
    clear_border(cleared)

    # label image regions
    label_image = label(cleared)
    borders = np.logical_xor(bw, cleared)
    label_image[borders] = -1
    image_label_overlay = label2rgb(label_image, image=image)

    fig, ax = plt.subplots(ncols=1, nrows=1, figsize=(6, 6))
    ax.imshow(image_label_overlay)

    # keep mid-sized regions and find the one closest to the image center
    dist = []
    rp = regionprops(label_image)
    rp = [x for x in rp if 100 < x.area <= 900]
    for region in rp:
        dist.append(sqrt((320 - region.centroid[0]) ** 2 +
                         (320 - region.centroid[1]) ** 2))

    # draw rectangle around the selected roof region
    roof_index = dist.index(min(dist))
    minr, minc, maxr, maxc = rp[roof_index].bbox
    rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                              fill=False, edgecolor='red', linewidth=2)
    ax.add_patch(rect)

    img = StringIO()
    fig.savefig(img)
    img.seek(0)
    session['roof_area'] = rp[roof_index].area
    roof_area = rp[roof_index].area * 12
    return roof_area
def overlay_cells_on_image(image, boundries, cells, offset=(0, 0)):
    mask = numpy.zeros(image.shape[:2])
    boundries -= numpy.asarray(tuple(offset) * 2)
    label = 1
    for cell in filter_cells_by_boundry(cells, boundries):
        math.modify_with_bounding_box(cell.bbox, mask,
                                      cell.mask.astype(numpy.uint16) * label)
        label += 1
    return label2rgb(mask, image=image, bg_label=0)
def parse_image(self):
    self.m_print("Parsing panel image", 0)
    thresh = threshold_otsu(self.i_file)
    bw = closing(self.i_file > thresh, square(3))
    # remove artifacts connected to image border
    cleared = bw.copy()
    clear_border(cleared)
    # label image regions
    label_image = label(cleared)
    borders = np.logical_xor(bw, cleared)
    label_image[borders] = -1
    return label2rgb(label_image, image=self.i_file), label_image
def build_region(self):
    start_time = time.time()
    labels = segmentation.slic(self.q_cur_frame, self.num_superpixels,
                               self.compactness, convert2lab=True,
                               multichannel=True)
    _num_superpixels = np.max(labels) + 1
    self.s_frame = color.label2rgb(labels, self.q_cur_frame, kind='avg')
    self.mean = np.array([region['centroid']
                          for region in regionprops(labels + 1)])
    self.color_data = np.array([np.sum(self.q_cur_frame[np.where(labels == label)], 0)
                                for label in range(_num_superpixels)])
    self.freq = np.array([np.sum(labels == label)
                          for label in range(_num_superpixels)])
    _inv_freq = 1 / (self.freq + 1e-7)
    self.color_data = self.color_data * _inv_freq[:, None]
    cv2.imwrite('outs.png', self.s_frame)
    print("Build region (preprocess) : ", time.time() - start_time)
    return (labels, _num_superpixels)
def image_pre_process(image):
    # Apply multi-Otsu filter to find the thresholds
    print('starting with multi otsu filter')
    thresholds = threshold_multiotsu(image, classes=THO_N + 1)
    print('thresholds are', thresholds)
    regions = np.digitize(image, bins=thresholds)
    otsu_regions_img = img_as_ubyte(regions)

    # Binary threshold methods
    # https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_thresholding/py_thresholding.html
    ret, img_binary = cv2.threshold(image, thresholds[0], 255,
                                    cv2.THRESH_BINARY)

    # Dilation and erosion
    # https://www.youtube.com/watch?v=WQK_oOWW5Zo
    kernel = np.ones((3, 3), np.uint8)
    erosion = cv2.erode(img_binary, kernel, iterations=1)
    dilation = cv2.dilate(img_binary, kernel, iterations=1)
    opening = cv2.morphologyEx(img_binary, cv2.MORPH_OPEN, kernel)

    # Label image
    # https://www.youtube.com/watch?v=u3nG5_EjfM0&list=PLZsOBAyNTZwbIjGnolFydAN33gyyGP7lT&index=119
    # label_image = measure.label(otsu_regions_img, connectivity=img_binary.ndim)
    label_image = measure.label(opening, connectivity=img_binary.ndim)
    image_label_overlay = label2rgb(label_image, image=otsu_regions_img)

    # Get different regions from the labeled image
    # https://scikit-image.org/docs/dev/auto_examples/segmentation/plot_label.html
    i = 0
    regions = []
    bbx = []
    for region in measure.regionprops(label_image):
        i += 1
        print('i is', i)
        print('region is', region.image)
        regions.append(region.image)
        bbx.append(region.bbox)
        figure = plt.figure()
        plt.imshow(region.image)
        plt.title('region ' + str(i))
        plt.savefig('G:\\My Drive\\Project\\IntraOral Scanner Registration\\'
                    'Results\\Segmentation test\\region ' + str(i))
        plt.close()
    print('shape of regions is', np.shape(regions))

    for bbx_ind in bbx:
        # figure = plt.figure()
        # plt.imshow(regions[-3])
        # plt.title('label image check')
        print('region bounding box is', bbx_ind)

    # Edge detection
    # https://www.youtube.com/watch?v=Oy4duAOGdWQ&list=PLZsOBAyNTZwbIjGnolFydAN33gyyGP7lT&index=105
    robert_image = roberts(img_binary)
    sobel_image = sobel(img_binary)
    scharr_image = scharr(img_binary)
    prewitt_image = prewitt(img_binary)
    farid_image = farid(img_binary)
    print('detected edge is', sobel_image)

    return otsu_regions_img, image_label_overlay, sobel_image
def run(self, ips, snap, img, para=None):
    lab = segmentation.felzenszwalb(snap, para['scale'], para['sigma'],
                                    para['min_size'])
    return color.label2rgb(lab, snap, kind='avg')
def run(self, ips, snap, img, para=None):
    lab = segmentation.quickshift(snap, para['ratio'], para['kernel_size'],
                                  para['max_dist'], para['sigma'])
    return color.label2rgb(lab, snap, kind='avg')
def labelsConnection(data):
    # connectivity selects the neighborhood: 1 means 4-connectivity,
    # 2 means 8-connectivity
    labels = measure.label(data, connectivity=2)
    # color each connected region differently
    dst = color.label2rgb(labels)
    # print('regions number:', labels.max() + 1)  # number of connected
    # regions (labels are counted from 0)
    return dst
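# A toy demonstration of the connectivity argument (my own example, not part
# of the snippet above): two pixels that touch only at a corner form two
# regions under 4-connectivity but a single region under 8-connectivity.
import numpy as np
from skimage import measure

data = np.array([[1, 0],
                 [0, 1]], dtype=np.uint8)
print(measure.label(data, connectivity=1).max())  # 2 regions (4-connectivity)
print(measure.label(data, connectivity=2).max())  # 1 region  (8-connectivity)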
So you will reduce this image from 265 × 191 = 50,615 pixels down to 400 regions.

Young woman
Already preloaded as face_image.
The show_image() function has been preloaded for you as well.

Instructions
100 XP
- Import the slic() function from the segmentation module.
- Import the label2rgb() function from the color module.
- Obtain the segmentation with 400 regions using slic().
- Put segments on top of the original image to compare with label2rgb().
'''

SOLUTION

# Import the slic function from the segmentation module
from skimage.segmentation import slic
# Import the label2rgb function from the color module
from skimage.color import label2rgb

# Obtain the segmentation with 400 regions
segments = slic(face_image, n_segments=400)

# Put segments on top of the original image to compare
segmented_image = label2rgb(segments, face_image, kind='avg')

# Show the segmented image
show_image(segmented_image, "Segmented image, 400 superpixels")
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 2 22:18:57 2019

@author: dtket
"""
import imageio as iio
from skimage import filters
from skimage.color import rgb2gray  # only needed for incorrectly saved images
from skimage.measure import regionprops
import matplotlib.pyplot as plt
from skimage.color import label2rgb

image = rgb2gray(iio.imread('cube.png'))
threshold_value = filters.threshold_otsu(image)
labeled_foreground = (image > threshold_value).astype(int)
properties = regionprops(labeled_foreground, image)
center_of_mass = properties[0].centroid
weighted_center_of_mass = properties[0].weighted_centroid

colorized = label2rgb(labeled_foreground, image, colors=['black'], alpha=0.1)
fig, ax = plt.subplots()
ax.imshow(colorized)
# Note the inverted coordinates because plt uses (x, y) while NumPy uses
# (row, column)
ax.scatter(center_of_mass[1], center_of_mass[0], s=160, c='C0', marker='+')
plt.show()
from skimage import data, segmentation, color
from skimage.future import graph
from matplotlib import pyplot as plt

img = data.coffee()

labels1 = segmentation.slic(img, compactness=30, n_segments=400)
out1 = color.label2rgb(labels1, img, kind='avg')

g = graph.rag_mean_color(img, labels1, mode='similarity')
labels2 = graph.cut_normalized(labels1, g)
out2 = color.label2rgb(labels2, img, kind='avg')

plt.imshow(out1)
plt.show()
icount = len(image_set)
start_time = datetime.datetime.now()
# start removing some of the noise
for ii, image_name in enumerate(image_set):
    # image_name = '003.png'
    image = cv2.imread(os.path.join(image_predict_dir, image_name))
    image2 = image[:, :, 0]  # a single channel is enough
    # the raw mask contains many different nonzero values, but the model
    # expects binary input
    image2[image2 > 0] = 1
    # use skimage.measure to find every connected region
    labeled_img, num = measure.label(image2, background=0, return_num=True)
    dst = color.label2rgb(labeled_img)
    classes = np.unique(labeled_img)
    classes = classes[1:]  # drop the background class 0
    for c in classes:
        inds = np.where(labeled_img == c)
        inds = np.array(inds).T
        # ignore connected regions made up of too few points
        if len(inds[:, 1]) < 1500:
            image2[labeled_img == c] = 0
        else:
            # run PCA on the coordinates of all points in this connected region
            trans_pca = PCA(n_components=2).fit(inds)
            # the two principal components give the major and minor directions
            # of the point set
            pcas = trans_pca.components_
            # for the major direction, compute the angle theta so the region
            # can be rotated to horizontal
            pca1 = pcas[0]
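# The snippet above cuts off before the rotation angle is computed; a minimal
# sketch of that missing step (my own illustration, reusing the variable names
# above): take the angle of the first principal component and rotate the mask
# so the region's major axis becomes horizontal.
theta = np.degrees(np.arctan2(pca1[0], pca1[1]))  # pca1 is (row, col)
center = tuple(np.mean(inds, axis=0)[::-1])  # cv2 expects the center as (x, y)
M = cv2.getRotationMatrix2D(center, theta, 1.0)
rotated = cv2.warpAffine(image2, M, (image2.shape[1], image2.shape[0]))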
ax = axes.ravel()
ax[0].imshow(camera, cmap=cm.gray)
ax[0].set_title('Input image')

ax[1].imshow(edges, cmap=cm.gray)
ax[1].set_title('Canny edges')

ax[2].imshow(edges * 0)
for line in lines:
    p0, p1 = line
    ax[2].plot((p0[0], p1[0]), (p0[1], p1[1]))
ax[2].set_xlim((0, camera2.shape[1]))
ax[2].set_ylim((camera2.shape[0], 0))
ax[2].set_title('Probabilistic Hough')
'''

camera = label2rgb(camera2, image=camera1)
fig, ax = plt.subplots(figsize=(10, 6))
ax.imshow(camera, cmap='gray', interpolation='nearest')
# ax.imshow(camera, cmap='gray', interpolation='nearest')

count = 0
regions = regionprops(camera2)
for region in regions:
    # take regions with large enough areas
    if region.area >= 650:
        # draw rectangle around segmented coins
        minr, minc, maxr, maxc = region.bbox
        slice_hei = int((maxr - minr) * 0.1)
        # print(minr, minc, maxr, maxc)
        croped = camera1[minr - slice_hei:maxr + slice_hei, minc:maxc]
        binary_crop = cam_clear[minr - slice_hei:maxr + slice_hei, minc:maxc]
        # print("Here", camera.shape)
# Binary threshold methods
# https://opencv-python-tutroals.readthedocs.io/en/latest/py_tutorials/py_imgproc/py_thresholding/py_thresholding.html
ret, img_binary = cv2.threshold(img_segment, thresholds[0], 255,
                                cv2.THRESH_BINARY)

# Dilation and erosion
# https://www.youtube.com/watch?v=WQK_oOWW5Zo
kernel = np.ones((3, 3), np.uint8)
erosion = cv2.erode(img_binary, kernel, iterations=1)
dilation = cv2.dilate(img_binary, kernel, iterations=1)

# Label image
# https://www.youtube.com/watch?v=u3nG5_EjfM0&list=PLZsOBAyNTZwbIjGnolFydAN33gyyGP7lT&index=119
label_image = measure.label(output, connectivity=img_binary.ndim)
image_label_overlay = label2rgb(label_image, image=output)

# Edge detection
# https://www.youtube.com/watch?v=Oy4duAOGdWQ&list=PLZsOBAyNTZwbIjGnolFydAN33gyyGP7lT&index=105
robert_image = roberts(img_binary)
sobel_image = sobel(img_binary)
scharr_image = scharr(img_binary)
prewitt_image = prewitt(img_binary)
farid_image = farid(img_binary)

# img_mutso = multiOtsu(3, img)

plt.figure()
plt.imshow(img_gray, cmap='gray')
plt.title("Grayscale original image")
plt.figure()
def augment_and_show(aug, image, mask=None, bboxes=[], categories=[],
                     category_id_to_name=[], filename=None,
                     font_scale_orig=0.35, font_scale_aug=0.35,
                     show_title=True, **kwargs):
    augmented = aug(image=image, mask=mask, bboxes=bboxes,
                    category_id=categories)

    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    image_aug = cv2.cvtColor(augmented['image'], cv2.COLOR_BGR2RGB)

    for bbox in bboxes:
        visualize_bbox(image, bbox, **kwargs)
    for bbox in augmented['bboxes']:
        visualize_bbox(image_aug, bbox, **kwargs)

    if show_title:
        for bbox, cat_id in zip(bboxes, categories):
            visualize_titles(image, bbox, category_id_to_name[cat_id],
                             font_scale=font_scale_orig, **kwargs)
        for bbox, cat_id in zip(augmented['bboxes'], augmented['category_id']):
            visualize_titles(image_aug, bbox, category_id_to_name[cat_id],
                             font_scale=font_scale_aug, **kwargs)

    if mask is None:
        f, ax = plt.subplots(1, 2, figsize=(16, 8))
        ax[0].imshow(image)
        ax[0].set_title('Original image')
        ax[1].imshow(image_aug)
        ax[1].set_title('Augmented image')
    else:
        f, ax = plt.subplots(2, 2, figsize=(16, 16))
        if len(mask.shape) != 3:
            mask = label2rgb(mask, bg_label=0)
            mask_aug = label2rgb(augmented['mask'], bg_label=0)
        else:
            mask = cv2.cvtColor(mask, cv2.COLOR_BGR2RGB)
            mask_aug = cv2.cvtColor(augmented['mask'], cv2.COLOR_BGR2RGB)
        ax[0, 0].imshow(image)
        ax[0, 0].set_title('Original image')
        ax[0, 1].imshow(image_aug)
        ax[0, 1].set_title('Augmented image')
        ax[1, 0].imshow(mask, interpolation='nearest')
        ax[1, 0].set_title('Original mask')
        ax[1, 1].imshow(mask_aug, interpolation='nearest')
        ax[1, 1].set_title('Augmented mask')

    f.tight_layout()
    if filename is not None:
        f.savefig(filename)

    return augmented['image'], augmented['mask'], augmented['bboxes']
def image_segmentation(in_file_name, out_file_name, show_image):
    # example_ni1 = os.path.join(data_path, in_file_name)
    n1_img = nib.load(in_file_name)
    img_data = n1_img.get_data()
    print(img_data.shape)
    # define the output path up front; it is used by nib.save at the end
    save_example_ni1 = os.path.join(data_path, out_file_name)

    slice = np.zeros((176, 176, 208))
    segm = np.zeros((176, 176, 208))
    for i in range(175):
        slice[i] = img_data[:, :, i, 0]
        slice[i] = exposure.rescale_intensity(slice[i], out_range=(0, 256))
        img = color.gray2rgb(slice[i])
        if img.min() >= 0:
            labels1 = segmentation.slic(img, compactness=30, n_segments=200,
                                        multichannel=False)
            out1 = color.label2rgb(labels1, img, kind='avg')
            # g = graph.rag_mean_color(img, labels1, mode='similarity')
            # labels2 = graph.cut_normalized(labels1, g)
            # out2 = color.label2rgb(labels2, img, kind='avg')
            segm[i] = color.rgb2gray(out1)
            # segm[i] = out1

    if show_image:
        show_slices([slice[100], slice[110], slice[120]])
        plt.suptitle("slices")

    for i in range(175):
        img_data[:, :, i, 0] = segm[i]

    if show_image:
        # display results
        fig, (ax1, ax2, ax3) = plt.subplots(nrows=1, ncols=3, figsize=(8, 3),
                                            sharex=True, sharey=True)
        ax1.imshow(img_data[:, :, 100, 0])
        ax1.axis('off')
        ax1.set_title('image 100', fontsize=20)
        ax2.imshow(img_data[:, :, 110, 0])
        ax2.axis('off')
        ax2.set_title('image 110', fontsize=20)
        ax3.imshow(img_data[:, :, 120, 0])
        ax3.axis('off')
        ax3.set_title('image 120', fontsize=20)
        fig.subplots_adjust(wspace=0.02, hspace=0.02, top=0.9, bottom=0.02,
                            left=0.02, right=0.98)
        plt.show()

    save_img = nib.Nifti1Image(img_data, np.eye(4))
    nib.save(save_img, save_example_ni1)
import numpy as np
from skimage import data, util, filters, color
from skimage.segmentation import watershed
import matplotlib.pyplot as plt

coins = data.coins()
edges = filters.sobel(coins)

grid = util.regular_grid(coins.shape, n_points=468)
seeds = np.zeros(coins.shape, dtype=int)
seeds[grid] = np.arange(seeds[grid].size).reshape(seeds[grid].shape) + 1

w0 = watershed(edges, seeds)
w1 = watershed(edges, seeds, compactness=0.01)

fig, (ax0, ax1) = plt.subplots(1, 2)
ax0.imshow(color.label2rgb(w0, coins, bg_label=-1))
ax0.set_title('Classical watershed')
ax1.imshow(color.label2rgb(w1, coins, bg_label=-1))
ax1.set_title('Compact watershed')
plt.show()
def extract(self, data):
    path = data.basePath + "\\" + data.name
    print(path)
    img = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
    # print(img)
    treshold = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY)[1]
    dots = treshold > treshold.mean()
    dots_labels = measure.label(dots, background=1)
    image_label_overlay = label2rgb(dots_labels, image=treshold)

    max_area = 0
    total_area = 0
    count_connected_group = 0
    average = 0.0
    for region in regionprops(dots_labels):
        if region.area > 10:
            total_area += region.area
            count_connected_group += 1
        if region.area >= 250:
            if region.area > max_area:
                max_area = region.area
    average = total_area / count_connected_group
    a4_constant = ((average / 84.0) * 250.0) + 100
    b = morphology.remove_small_objects(dots_labels, a4_constant)

    if not os.path.isdir(data.basePath + '\\outputs'):
        os.mkdir(data.basePath + '\\outputs')
    plt.imsave(data.basePath + '\\outputs\\pre_version.png', b)

    img = cv2.imread(data.basePath + '\\outputs\\pre_version.png',
                     cv2.IMREAD_GRAYSCALE)
    img = cv2.threshold(img, 0, 255,
                        cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
    cv2.imwrite(data.basePath + "\\outputs\\output.png", img)
    print('Signature Extraction Success!')

    image = cv2.imread(data.basePath + '\\outputs\\output.png')
    result = image.copy()
    image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
    lower = np.array([0, 0, 0])
    upper = np.array([255, 255, 200])
    mask = cv2.inRange(image, lower, upper)
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))
    close = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel, iterations=1)
    contours = cv2.findContours(close, cv2.RETR_EXTERNAL,
                                cv2.CHAIN_APPROX_SIMPLE)
    contours = contours[0] if len(contours) == 2 else contours[1]

    boxes = []
    for c in contours:
        (x, y, w, h) = cv2.boundingRect(c)
        boxes.append([x, y, x + w, y + h])
    boxes = np.asarray(boxes)
    left = np.min(boxes[:, 0])
    top = np.min(boxes[:, 1])
    right = np.max(boxes[:, 2])
    bottom = np.max(boxes[:, 3])

    result[close == 0] = (255, 255, 255)
    ROI = result[top:bottom, left:right].copy()
    path = data.basePath + '\\result_' + data.name
    cv2.imwrite(path, ROI)
    # print('Signature Capture Success')
    return path
# Voila!
#
# This solution works reasonably well. Of course, there is still a lot of room
# for improvement.
#
# For cells that get oversegmented, we should implement a function that glues
# those oversegmented cells together rather than dropping them.
#
# But that I will leave for you to create!
#
# If you liked the solution and would like to see how it works as part of the
# end-to-end pipeline, please go to:
#
# https://github.com/neptune-ml/data-science-bowl-2018
#
# To stay up to date with new features and the fully open-sourced solution
# (with predictions), read our data science bowl journal thread:
# https://www.kaggle.com/c/data-science-bowl-2018/discussion/47590
#
# Cheers and good luck!

# LB Test set predictions

# In[102]:

test_masks = joblib.load('../input/test-predictions/test_masks.pkl')

# In[103]:

from skimage.color import label2rgb

for mask in test_masks[:5]:
    plt.imshow(label2rgb(mask))
    plt.show()
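# A minimal sketch of one way to glue oversegmented fragments back together
# (my own illustration, not part of the original notebook): absorb each
# labelled fragment smaller than `min_size` into its most frequent touching
# neighbor. `labels` is an integer mask with background == 0.
import numpy as np
from scipy import ndimage as ndi

def glue_small_fragments(labels, min_size=100):
    out = labels.copy()
    for lab in np.unique(out):
        if lab == 0:  # skip background
            continue
        region = out == lab
        if region.sum() >= min_size:
            continue
        # look one pixel outwards to find the labels this fragment touches
        ring = ndi.binary_dilation(region) & ~region
        neighbors = out[ring]
        neighbors = neighbors[neighbors != 0]
        if neighbors.size:
            # absorb the fragment into its most frequent neighbor
            out[region] = np.bincount(neighbors).argmax()
    return out

# e.g. plt.imshow(label2rgb(glue_small_fragments(mask)))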
# LOCAL SOURCE
image_rgb = io.imread('examples/leds_off.jpg')
image = rgb2gray(image_rgb)

# apply threshold
bw = closing(image > detect_th, square(3))

# remove artifacts connected to image border
cleared = clear_border(bw)

# label image regions
label_image = label(cleared)
# to make the background transparent, pass the value of `bg_label`,
# and leave `bg_color` as `None` and `kind` as `overlay`
image_label_overlay = label2rgb(label_image, image=image, bg_label=0)

fig, ax = plt.subplots(figsize=(10, 6))
ax.imshow(image_label_overlay)

leds = {}  # led objects
for region in regionprops(label_image):
    # take regions with large enough areas
    if region.area > 1000:
        # draw rectangle around segmented item
        minr, minc, maxr, maxc = region.bbox
        cropped = image_rgb[minr:maxr, minc:maxc]
        skimage.io.imsave("temp/" + str(region.label) + ".jpg", cropped)
def cConexas2(image):
    label_image = label(image)
    # note: the original passed `image` as the label array here, which defeats
    # the overlay; the labelled image is what should be colorized
    image_label_overlay = label2rgb(label_image, image=image)
    fig, ax = plt.subplots(figsize=(10, 6))
    imageComponentesConexas = np.zeros(image.shape)
    ax.imshow(image_label_overlay)

    colores = ['red', 'green', 'blue', 'yellow']
    i = 0
    regions = []
    for region in regionprops(label_image):
        regions.append(region)
        # draw rectangle around each connected component, cycling through
        # red, green, blue, yellow, then purple for the rest
        minr, minc, maxr, maxc = region.bbox
        edgecolor = colores[i] if i < len(colores) else 'purple'
        rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                  fill=False, edgecolor=edgecolor, linewidth=2)
        i = i + 1
        ax.add_patch(rect)
        imageComponentesConexas = newImage(imageComponentesConexas,
                                           region.coords)

    ax.set_axis_off()
    plt.tight_layout()
    plt.imshow(imageComponentesConexas, cmap='gray')
    plt.title("CC Finales")
    return imageComponentesConexas, regions
def MERGEIAMGES(self, displaygui, CH1, RGB_Channels):
    if displaygui.Ch1CheckBox.isChecked() == True:
        ch1_rgb = np.stack((CH1,) * 3, axis=-1)
    else:
        ch1_rgb = np.zeros(RGB_Channels.shape, dtype=np.uint8)
    All_Channels = cv2.addWeighted(ch1_rgb, 1, RGB_Channels, 1, 0)
    height, width, ch = np.shape(All_Channels)
    totalBytes = All_Channels.nbytes
    # print(self.AnalysisGui.NucleiChannel.currentIndex().dtype)

    if displaygui.NuclMaskCheckBox.isChecked() == True:
        self.input_image = self.IMAGE_TO_BE_MASKED(displaygui)
        bound, filled_res = ImageAnalyzer.neuceli_segmenter(self.input_image)
        # cv2.imwrite('mask_saved.jpg', bound)
        if displaygui.NucPreviewMethod.currentText() == "Boundary":
            All_Channels[bound != 0] = [255, 0, 0]
        if displaygui.NucPreviewMethod.currentText() == "Area":
            labeled_array, num_features = label(filled_res)
            rgblabel = label2rgb(labeled_array, alpha=0.1, bg_label=0)
            rgblabel = cv2.normalize(rgblabel, None, 0, 255, cv2.NORM_MINMAX,
                                     dtype=cv2.CV_8U)
            image_input_stack = np.stack((self.input_image,) * 3, axis=-1)
            All_Channels = cv2.addWeighted(rgblabel, 0.2, ch1_rgb, 1, 0)

    if displaygui.SpotsCheckBox.isChecked() == True:
        ch1_spots_log, ch2_spots_log, ch3_spots_log, ch4_spots_log = \
            self.IMAGE_FOR_SPOT_DETECTION(displaygui)
        if displaygui.SpotPreviewMethod.currentText() == "Dots":
            if ch1_spots_log != []:
                All_Channels[ch1_spots_log != 0] = [255, 255, 255]
            if ch2_spots_log != []:
                All_Channels[ch2_spots_log != 0] = [255, 0, 0]
            if ch3_spots_log != []:
                All_Channels[ch3_spots_log != 0] = [0, 255, 0]
            if ch4_spots_log != []:
                All_Channels[ch4_spots_log != 0] = [0, 0, 255]
        if displaygui.NucPreviewMethod.currentText() == "Cross":
            pass

    self.SHOWIMAGE(displaygui, All_Channels, width, height, totalBytes)
def show(*obj, file_name=None, overlay=False, pred=False, show_bbox=True,
         figsize=(10, 10), cmap='binary_r', **kwargs):
    "Show image, mask, and weight (optional)"
    if len(obj) == 3:
        img, msk, weight = obj
    elif len(obj) == 2:
        img, msk = obj
        weight = None
    elif len(obj) == 1:
        img = obj[0]
        msk, weight = None, None
    else:
        raise ValueError(f'Function not defined for {len(obj)} arguments.')

    # Image preprocessing
    img = np.array(img)
    # Swap axis to channels last
    if img.shape[0] < 20:
        img = np.moveaxis(img, 0, -1)
    # One-channel images
    if img.ndim == 3 and img.shape[-1] == 1:
        img = img[..., 0]

    # Mask preprocessing
    if msk is not None:
        msk = np.array(msk)
        # Remove background class from masks
        if msk.shape[0] == 2:
            msk = msk[1, ...]
        # Create bbox
        pad = (np.array(img.shape[:2]) - np.array(msk.shape)) // 2
        bbox = Rectangle((pad[0] - 1, pad[1] - 1),
                         img.shape[0] - 2 * pad[0] + 1,
                         img.shape[0] - 2 * pad[0] + 1,
                         edgecolor='r', linewidth=1, facecolor='none')
        # Padding mask and weights
        msk = np.pad(msk, pad, 'constant', constant_values=(0))
        if cmap is None:
            cmap = 'binary_r' if msk.max() == 1 else cmap

    # Weights preprocessing
    if weight is not None:
        weight = np.array(weight)
        weight = np.pad(weight, pad, 'constant', constant_values=(0))

    ncol = 1 if msk is None else 2
    ncol = ncol if weight is None else ncol + 1
    fig, ax = plt.subplots(1, ncol, figsize=figsize)
    img_ax = ax[0] if ncol > 1 else ax

    # Plot img
    img_ax.imshow(img, cmap=cmap)
    if file_name is not None:
        img_ax.set_title('Image {}'.format(file_name))
    else:
        img_ax.set_title('Image')
    img_ax.set_axis_off()

    # Plot img and mask
    if msk is not None:
        if overlay:
            label_image = label(msk)
            img_l2o = label2rgb(label_image, image=img, bg_label=0,
                                alpha=.8, image_alpha=1)
            ax[1].set_title('Image + Mask (#ROIs: {})'.format(label_image.max()))
            ax[1].imshow(img_l2o)
        else:
            ax[1].imshow(msk, cmap=cmap)
            ax[1].set_title('Mask')
        if show_bbox:
            ax[1].add_patch(copy(bbox))
        ax[1].set_axis_off()

    # Plot weights
    if weight is not None:
        max_w = weight.max()
        vmax_w = max(1, max_w)
        ax[2].imshow(weight, vmax=vmax_w, cmap=cmap)
        if pred:
            ax[2].set_title('Prediction')
        else:
            ax[2].set_title('Weights (max value: {:.{p}f})'.format(max_w, p=1))
        if show_bbox:
            ax[2].add_patch(copy(bbox))
        ax[2].set_axis_off()

    # ax.set_axis_off()
    plt.tight_layout()
    plt.show()
axes.imshow(image, cmap="gray")
axes.set_title('Gray image')
print(image.shape)

global_thresh = threshold_otsu(image)
binary_global = image < global_thresh
fig, axes = plt.subplots(nrows=1, figsize=(40, 40))
axes.imshow(binary_global, cmap="gray")
axes.set_title('Binary image')

label_image, n = measure.label(binary_global, neighbors=8, background=0,
                               return_num=True)
image_label_overlay = label2rgb(label_image, image=binary_global)
fig, axes = plt.subplots(nrows=1, figsize=(40, 40))
axes.imshow(image_label_overlay)
axes.set_title('Connected components')

area_thresh = 10000  #@param {type:"slider", min:0, max:1000000, step:1}
seg_prop = 0.011  #@param {type:"slider", min:0.001, max:100.0, step:0.01}
selected_components = np.zeros(label_image.shape, dtype=int)
selected_components_slic = np.zeros(label_image.shape, dtype=int)
properties = regionprops(label_image, intensity_image=binary_global)
for region in properties:
    if region['convex_area'] > area_thresh:
def overlay_labels(image, lbp, labels):
    mask = np.logical_or.reduce([lbp == each for each in labels])
    return label2rgb(mask, image=image, bg_label=0, alpha=0.5)
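# A short usage sketch for overlay_labels (my own illustration, following the
# scikit-image local-binary-pattern example; `image` is assumed to be a 2-D
# grayscale array, and the label band below is illustrative, not canonical):
import matplotlib.pyplot as plt
from skimage.feature import local_binary_pattern

radius = 3
n_points = 8 * radius
lbp = local_binary_pattern(image, n_points, radius, method='uniform')

# highlight the uniform-LBP codes around n_points / 2, which respond to edges
edge_labels = range(n_points // 2 - 1, n_points // 2 + 2)
plt.imshow(overlay_labels(image, lbp, edge_labels))
plt.show()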
from skimage import io, data, segmentation, color
from skimage.future import graph

img = io.imread("../pants.jpg")

img_segments = segmentation.slic(img, compactness=30, n_segments=200)
out1 = color.label2rgb(img_segments, img, kind='avg')

segment_graph = graph.rag_mean_color(img, img_segments, mode='similarity')
img_cuts = graph.cut_normalized(img_segments, segment_graph)
normalized_cut_segments = color.label2rgb(img_cuts, img, kind='avg')

io.imshow(normalized_cut_segments)
io.show()
def preview(self, ips, para):
    lab = segmentation.quickshift(ips.snap, para['ratio'],
                                  para['kernel_size'], para['max_dist'],
                                  para['sigma'])
    ips.img[:] = color.label2rgb(lab, ips.snap, kind='avg')
def preview(self, ips, para):
    lab = segmentation.felzenszwalb(ips.snap, para['scale'], para['sigma'],
                                    para['min_size'])
    ips.img[:] = color.label2rgb(lab, ips.snap, kind='avg')
def run(self, ips, snap, img, para=None):
    lab = segmentation.slic(snap, para['n_segments'], para['compactness'],
                            para['max_iter'], para['sigma'])
    return color.label2rgb(lab, snap, kind='avg')
from minisom import MiniSom
import numpy as np
import argparse
from skimage import io, segmentation, color

parser = argparse.ArgumentParser()
parser.add_argument('input_image', type=str, help='input image path')
parser.add_argument('num_superpixel', type=int, help='number of segments')
parser.add_argument('compactness', type=int, help='compactness param of SLIC')
args = parser.parse_args()

# img = data.coffee()
img = io.imread(args.input_image)
labels = segmentation.slic(img, n_segments=args.num_superpixel,
                           compactness=args.compactness)
out1 = color.label2rgb(labels, img, kind='avg')
io.imshow(out1)
io.show()

pixels = np.reshape(out1, (out1.shape[0] * out1.shape[1], 3)) / 255

print('training...')
som = MiniSom(2, 1, 3, sigma=1., learning_rate=0.2,
              neighborhood_function='bubble')
som.random_weights_init(pixels)
starting_weights = som.get_weights().copy()  # saving the starting weights
import numpy as np
import matplotlib.pyplot as plt
from skimage import io, data, util, filters, color
from skimage.morphology import watershed

kitten = color.rgb2gray(io.imread("../images/kitten.jpeg"))
kitten_edge = filters.sobel(kitten)  # use edge detection algo before watershed

# find 300 points evenly spaced in the image
grid = util.regular_grid(kitten.shape, n_points=300)

# The seed matrix is the same shape as the original image, and it contains
# integers in the range [1, size of image]
seeds = np.zeros(kitten.shape, dtype=int)
seeds[grid] = np.arange(seeds[grid].size).reshape(seeds[grid].shape) + 1

w0 = watershed(kitten_edge, seeds)
water_classic = color.label2rgb(w0, kitten, alpha=0.4, kind="overlay")

plt.figure(figsize=(8, 8))
plt.imshow(water_classic)
def prepareDataset(basedir='WeizmannSingleScale/horses/training/images/',
                   labeldir='WeizmannSingleScale/horses/training/masks/'):
    global pixelClasses
    dataSetX = []
    dataSetX_layer2 = []
    dataSetY = []
    datasetGroundTruth = []
    for (dirpath, dirnames, filenames) in walk(basedir):
        n = 0
        for imageFilename in filenames:
            # if n >= 1:
            #     break
            print(imageFilename)
            n = n + 1

            # Read RGB and label image
            image = img_as_float(skimageIO.imread(basedir + imageFilename))
            bgrImage = cv2.imread(basedir + imageFilename, cv2.IMREAD_COLOR)
            # bgrImage = cv2.fastNlMeansDenoisingColored(bgrImage)
            # bgrImage = exposure.adjust_sigmoid(bgrImage)
            labelImage = img_as_float(skimageIO.imread(
                labeldir + imageFilename.replace('image', 'mask')))
            if len(image.shape) == 2:
                image = color.gray2rgb(image)

            # Resize
            # image = resize(image, (120, 120), preserve_range=True)
            # bgrImage = resize(bgrImage, (120, 120), preserve_range=True)
            # labelImage = resize(labelImage, (120, 120), preserve_range=True)

            # Scan label image for additional classes
            if len(labelImage.shape) == 2:
                labelImageRGB = color.gray2rgb(labelImage)
            else:
                labelImageRGB = labelImage
            # for i in range(0, labelImageRGB.shape[0]):
            #     for j in range(0, labelImageRGB.shape[1]):
            #         if len(pixelClasses) == 0:
            #             pixelClasses.append(labelImageRGB[i][j])
            #         else:
            #             isAlreadyAPixelClass = False
            #             for pixelClass in pixelClasses:
            #                 if numpy.array_equal(pixelClass, labelImageRGB[i][j]):
            #                     isAlreadyAPixelClass = True
            #                     break
            #             if not isAlreadyAPixelClass:
            #                 pixelClasses.append(labelImageRGB[i][j])
            # print(pixelClasses)

            # Derive superpixels and get their average RGB component
            segments = slic(image, n_segments=500, sigma=1.0)
            rgb_segments = img_as_ubyte(mark_boundaries(image, segments))
            label_segments = img_as_ubyte(mark_boundaries(labelImageRGB,
                                                          segments))
            avg_rgb = color.label2rgb(segments, image, kind='avg')
            avg_label = color.label2rgb(segments, labelImageRGB, kind='avg')
            # avg_cie_rgb = color.rgb2lab(avg_rgb)
            # avg_cie_label = color.rgb2lab(avg_label)

            # Create graph of superpixels and compute their centers
            vertices, edges = make_graph(segments)
            gridx, gridy = numpy.mgrid[:segments.shape[0], :segments.shape[1]]
            centers = dict()
            for v in vertices:
                centers[v] = [gridy[segments == v].mean(),
                              gridx[segments == v].mean()]
            # print vertices / edges / centers as needed

            # Build training instances
            n_features = []
            edge_features = []
            n_labels = []

            # Compute image centers
            centerX = labelImageRGB.shape[1] / 2.0
            centerY = labelImageRGB.shape[0] / 2.0

            for v in vertices:
                # unary features layer 1 - average rgb of superpixel, histogram
                # of patch surrounding center, and CNN features
                avg_rgb2 = avg_rgb[int(centers[v][1])][int(centers[v][0])]
                hist, hogFeatures = getHistogramFeatures(
                    bgrImage, int(centers[v][1]), int(centers[v][0]),
                    forUnaryFeature=True)
                # relativeX = (centers[v][1] - centerX) / centerX
                # relativeY = (centers[v][0] - centerY) / centerY
                node_feature = numpy.concatenate([avg_rgb2, hist, hogFeatures])
                n_features.append(node_feature)

                # label: set the label of the superpixel to the pixelClass
                # with minimum euclidean distance
                minEuclideanDistance = numpy.inf  # simulate infinity
                pixelClass = -1
                for i in range(0, len(pixelClasses)):
                    dist = numpy.linalg.norm(
                        avg_label[int(centers[v][1])][int(centers[v][0])]
                        - pixelClasses[i])
                    if dist < minEuclideanDistance:
                        pixelClass = i
                        minEuclideanDistance = dist
                n_labels.append(pixelClass)

            histogramCache = {}
            for e in edges:
                # pairwise feature layer 1 - histogram distance, avg RGB
                # euclidean distance of adjacent superpixels, texture similarity
                dist = numpy.linalg.norm(
                    avg_rgb[int(centers[e[0]][1])][int(centers[e[0]][0])]
                    - avg_rgb[int(centers[e[1]][1])][int(centers[e[1]][0])])
                if e[0] not in histogramCache:
                    hist1, lbphist1 = getHistogramFeatures(
                        bgrImage, int(centers[e[0]][1]), int(centers[e[0]][0]))
                    histogramCache[e[0]] = {'hist': hist1, 'lbphist': lbphist1}
                else:
                    hist1 = histogramCache[e[0]]['hist']
                    lbphist1 = histogramCache[e[0]]['lbphist']
                if e[1] not in histogramCache:
                    hist2, lbphist2 = getHistogramFeatures(
                        bgrImage, int(centers[e[1]][1]), int(centers[e[1]][0]))
                    histogramCache[e[1]] = {'hist': hist2, 'lbphist': lbphist2}
                else:
                    hist2 = histogramCache[e[1]]['hist']
                    lbphist2 = histogramCache[e[1]]['lbphist']

                # Bhattacharyya distance
                histogramDist = cv2.compareHist(hist1, hist2, 3)
                # KL divergence
                textureSimilarity = kullback_leibler_divergence(lbphist1,
                                                                lbphist2)

                pairwise_feature = numpy.array(
                    [dist, histogramDist, textureSimilarity])
                edge_features.append(pairwise_feature)

            # Add to dataset
            dataSetX.append((numpy.array(n_features), numpy.array(edges),
                             numpy.array(edge_features)))
            dataSetY.append(numpy.array(n_labels))

    return dataSetX, dataSetY
def test_rrssd_box_coder_synthetic():
    label_image = np.zeros((768, 768), dtype=np.uint8)
    # the synthetic rotated boxes as (center, size, angle) for cv2.boxPoints
    boxes = [
        ((100, 100), (100, 20), 0),
        ((200, 100), (100, 20), 45),
        ((100, 200), (100, 20), 90),
        ((200, 200), (100, 20), 135),
        ((100 + 200, 100), (20, 100), 0),
        ((200 + 200, 100), (20, 100), 45),
        ((100 + 200, 200), (20, 100), 90),
        ((200 + 200, 200), (20, 100), 135),
        ((100, 105 + 200), (100, 20), 45. / 2),
        ((200, 100 + 200), (16, 4), 49),
        ((200, 100 + 210), (100, 20), 49),
        ((200, 200 + 200), (100, 20), 165),
        ((300, 300), (10, 6), 49),
        ((300 + 50, 300), (200, 40), 90),
        ((300 + 70, 300), (100, 20), 90),
        ((500, 500), (2, 3), 9),
        ((510, 500), (2, 3), 19),
        ((540, 500), (4, 6), 29),
        ((560, 500), (2, 3), 39),
    ]
    # draw each rotated box as its own instance label (1..19)
    for idx, box in enumerate(boxes, start=1):
        cv2.fillConvexPoly(
            label_image,
            np.expand_dims(cv2.boxPoints(box), 1).astype(int),
            (idx, idx, idx))

    image = (label2rgb(label_image, bg_label=0) * 255).astype(np.uint8)

    # Test what happens if we rotate
    # image = np.rot90(image).copy()
    # label_image = np.rot90(label_image).copy()

    rbboxes = instance_mask_to_rbboxes(label_image)
    print(rbboxes)

    labels = np.zeros(len(rbboxes), dtype=np.intp)
    box_coder = RRNBoxCoder(768, 768)
    loc_targets, cls_targets, anchors = box_coder.encode(
        [], [], return_anchors=True)
    loc_targets, cls_targets, anchors = box_coder.encode(
        rbboxes, labels, return_anchors=True)
    print(loc_targets.shape, cls_targets.shape, anchors.shape)
    print('Object anchors', (cls_targets > 0).sum())
    print('Background anchors', (cls_targets == 0).sum())
    print('Ignore anchors', (cls_targets == -1).sum())
    # cls_targets = np.expand_dims(cls_targets)
    # print(cls_targets_one_hot.shape)

    dec_boxes, dec_scores = box_coder.decode(loc_targets, cls_targets)
    print(dec_boxes)
    print('Total anchors', len(anchors))

    for bbox in dec_boxes:
        visualize_rbbox(image, bbox, (255, 0, 255), thickness=3)
    for bbox in rbboxes:
        visualize_rbbox(image, bbox, (0, 255, 0), thickness=1)
    for bbox in anchors:
        visualize_rbbox(image, bbox, (255, 255, 255), thickness=1)

    cv2.imshow('overlays', image)
    cv2.waitKey(-1)