def _append_images(self, imgs):
    """Tile *imgs* into one BGR image, cache the canvas, and publish it.

    The tiled image is kept in ``self.cache_img`` so later calls can draw
    into the same canvas and the layout stays stable.  When the cached
    canvas can no longer hold the new images (e.g. their count or size
    changed, which makes get_tile_image raise ValueError), a fresh tile is
    generated and cached instead of crashing the callback.
    """
    if not imgs:
        # Nothing to tile; publishing an empty image would raise downstream.
        return
    if self.cache_img is None:
        out_bgr = jsk_recognition_utils.get_tile_image(imgs)
        self.cache_img = out_bgr
    else:
        try:
            out_bgr = jsk_recognition_utils.get_tile_image(
                imgs, tile_shape=None, result_img=self.cache_img)
        except ValueError:  # cache miss: cached canvas is incompatible
            out_bgr = jsk_recognition_utils.get_tile_image(imgs)
            self.cache_img = out_bgr
    bridge = cv_bridge.CvBridge()
    imgmsg = bridge.cv2_to_imgmsg(out_bgr, encoding="bgr8")
    self.pub_img.publish(imgmsg)
def main():
    """Write a tiled overview image of the objects in label_names.yaml."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--tile-shape', default='5x8',
                        help='Default: 5x8')
    args = parser.parse_args()
    # list() is required: on Python 3, map() returns a one-shot iterator,
    # and tile_shape is consumed twice below (tile call + filename format).
    tile_shape = list(map(int, args.tile_shape.split('x')))

    with open(osp.join(PKG_DIR, 'config/label_names.yaml')) as f:
        # safe_load: the file is plain data; avoid arbitrary object
        # construction that yaml.load permits.
        obj_names = yaml.safe_load(f)['label_names']
    # Drop first and last entries (presumably background/sentinel labels
    # that have no object image -- confirm against label_names.yaml).
    obj_names = obj_names[1:-1]

    imgs = []
    data_dir = osp.join(PKG_DIR, 'data')
    for i, obj in enumerate(obj_names):
        obj_id = i + 1  # ids are 1-based after dropping the first label
        img_file = osp.join(data_dir, 'objects', obj, 'top.jpg')
        img = cv2.imread(img_file)
        if img is None:
            # cv2.imread returns None on failure instead of raising;
            # fail loudly rather than with an opaque AttributeError below.
            raise IOError('Failed to read image: %s' % img_file)
        # Trim a 10px border, then draw the object id in the top-left corner.
        height, width = img.shape[:2]
        x1, y1, x2, y2 = 10, 10, width - 10, height - 10
        img = img[y1:y2, x1:x2]
        # cv2.FONT_HERSHEY_SIMPLEX exists in both OpenCV 2.x and 3.x+;
        # the old cv2.cv.CV_* constant was removed in OpenCV 3.
        cv2.putText(img, '%2d' % obj_id, (0, 60),
                    cv2.FONT_HERSHEY_SIMPLEX, 1.8, (255, 255, 255), 3)
        imgs.append(img)
    img_viz = jsk_recognition_utils.get_tile_image(imgs, tile_shape=tile_shape)
    out_file = osp.join(PKG_DIR, 'data/others',
                        'object_list_{0}x{1}.jpg'.format(*tile_shape))
    cv2.imwrite(out_file, img_viz)
    print('==> Wrote file: %s' % out_file)
def get_gallery():
    """Return README markdown with a tiled thumbnail gallery per package."""
    # Hoisted out of the loop: importing bs4 per package iteration was
    # needless work; kept function-local so bs4 stays an optional dependency.
    from bs4 import BeautifulSoup

    content = []
    for pkg in PACKAGES:
        # TODO(wkentaro): Scrape pacakge.xml only once by creating a class.
        with open(osp.join(pkg, 'package.xml')) as f:
            soup = BeautifulSoup(f.read(), 'lxml')
        # NOTE(review): assumes every package.xml has a <url type="website">
        # tag; soup.find returns None otherwise and .text below would fail.
        website_url = soup.find('url', type='website')
        imgs = []
        for ext in ['*.png', '*.jpg']:
            pattern = osp.join(here, 'doc', pkg, 'nodes/images', ext)
            for fname in glob.glob(pattern):
                if len(imgs) >= 15:
                    # Check before appending: the original post-append check
                    # let one extra image per extension slip past the cap.
                    break
                img = cv2.imread(fname)
                # Scale so each thumbnail covers roughly 200x200 pixels.
                scale = math.sqrt(1. * 200 * 200 / img.shape[0] / img.shape[1])
                img = cv2.resize(img, None, None, fx=scale, fy=scale)
                imgs.append(img)
        if not imgs:
            continue
        cols = 5
        # Ceiling division: floor division dropped up to cols-1 images from
        # a partially filled last row.
        rows = max((len(imgs) + cols - 1) // cols, 1)
        tiled = jsk_recognition_utils.get_tile_image(
            imgs, tile_shape=(cols, rows), margin_color=[255, 255, 255])
        fname = osp.join(here, '.readme/gallery_%s.jpg' % pkg)
        cv2.imwrite(fname, tiled)
        content.append('### [%s](%s)' % (pkg, website_url.text))
        content.append('')
        content.append('[![](%s)](%s)' % (osp.relpath(fname, here),
                                          website_url.text))
        content.append('')
    return '\n'.join(content)
def main():
    """Write a tiled overview image of all known objects with their ids."""
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--tile-shape', default='5x8',
                        help='Default: 5x8')
    args = parser.parse_args()
    # list() is required: on Python 3, map() returns a one-shot iterator,
    # and tile_shape is consumed twice below (tile call + filename format).
    tile_shape = list(map(int, args.tile_shape.split('x')))

    obj_names = jsk_arc2017_common.get_object_names()
    imgs = []
    data_dir = osp.join(PKG_DIR, 'data')
    for i, obj in enumerate(obj_names):
        obj_id = i + 1  # object ids are 1-based
        img_file = osp.join(data_dir, 'objects', obj, 'top.jpg')
        img = cv2.imread(img_file)
        if img is None:
            # cv2.imread returns None on failure instead of raising;
            # fail loudly rather than with an opaque AttributeError below.
            raise IOError('Failed to read image: %s' % img_file)
        # Trim a 10px border, then draw the object id in the top-left corner.
        height, width = img.shape[:2]
        x1, y1, x2, y2 = 10, 10, width - 10, height - 10
        img = img[y1:y2, x1:x2]
        # cv2.FONT_HERSHEY_SIMPLEX exists in both OpenCV 2.x and 3.x+;
        # the old cv2.cv.CV_* constant was removed in OpenCV 3.
        cv2.putText(img, '%2d' % obj_id, (0, 60),
                    cv2.FONT_HERSHEY_SIMPLEX, 1.8, (255, 255, 255), 3)
        imgs.append(img)
    img_viz = jsk_recognition_utils.get_tile_image(imgs, tile_shape=tile_shape)
    out_file = osp.join(PKG_DIR, 'data/others',
                        'object_list_{0}x{1}.jpg'.format(*tile_shape))
    cv2.imwrite(out_file, img_viz)
    print('==> Wrote file: %s' % out_file)
def _append_images(self, imgs):
    """Publish a tiled visualization of *imgs* as a bgr8 Image message.

    The previous tile is kept in ``self.cache_img`` and reused as the
    drawing canvas; when that canvas is incompatible with the new images
    (get_tile_image raises ValueError), a fresh tile is built and cached.
    """
    if not imgs:
        return
    out_bgr = None
    if self.cache_img is not None:
        try:
            out_bgr = jsk_recognition_utils.get_tile_image(
                imgs, tile_shape=None, result_img=self.cache_img)
        except ValueError:
            out_bgr = None  # cache miss: rebuild below
    if out_bgr is None:
        out_bgr = jsk_recognition_utils.get_tile_image(imgs)
        self.cache_img = out_bgr
    bridge = cv_bridge.CvBridge()
    self.pub_img.publish(bridge.cv2_to_imgmsg(out_bgr, encoding='bgr8'))
def _apply(self, *msgs):
    """Convert each incoming Image message to BGR, tile them, and publish."""
    bridge = cv_bridge.CvBridge()
    decoded = [bridge.imgmsg_to_cv2(m, desired_encoding='bgr8')
               for m in msgs]
    tiled = jsk_recognition_utils.get_tile_image(decoded)
    out_msg = bridge.cv2_to_imgmsg(tiled, encoding='bgr8')
    self.pub_img.publish(out_msg)
def _apply(self, *msgs):
    """Tile synchronized images, optionally stamping each with its topic name."""
    bridge = cv_bridge.CvBridge()
    tiles = []
    for msg, topic in zip(msgs, self.input_topics):
        frame = bridge.imgmsg_to_cv2(msg, desired_encoding='bgr8')
        if self.draw_topic_name:
            # Overlay the source topic in green near the top-left corner.
            cv2.putText(frame, topic, (0, 50),
                        cv2.FONT_HERSHEY_PLAIN, 4, (0, 255, 0), 4)
        tiles.append(frame)
    tiled = jsk_recognition_utils.get_tile_image(tiles)
    self.pub_img.publish(bridge.cv2_to_imgmsg(tiled, encoding='bgr8'))
def _append_images(self, imgs):
    """Tile *imgs* with the configured shape and publish as bgr8.

    ``self._shape`` is stored as (Y, X) but get_tile_image expects (X, Y),
    so it is reversed before use; when unset, passing None lets the tiling
    pick a near-square layout automatically.  The tile canvas is cached in
    ``self.cache_img`` and rebuilt on a cache miss.
    """
    if not imgs:
        return
    # (Y, X) -> (X, Y); None means auto (as square as possible).
    shape_xy = None if not self._shape else self._shape[::-1]
    out_bgr = None
    if self.cache_img is not None:
        try:
            out_bgr = jsk_recognition_utils.get_tile_image(
                imgs, tile_shape=shape_xy, result_img=self.cache_img)
        except ValueError:
            out_bgr = None  # cached canvas no longer fits
    if out_bgr is None:
        out_bgr = jsk_recognition_utils.get_tile_image(
            imgs, tile_shape=shape_xy)
        self.cache_img = out_bgr
    bridge = cv_bridge.CvBridge()
    self.pub_img.publish(bridge.cv2_to_imgmsg(out_bgr, encoding='bgr8'))
def _apply_tile(self, img_msg, label_msg):
    """Publish a tile of per-label crops extracted from *img_msg*.

    For every non-zero value in the label image, the input image is masked
    to that label, cropped to the mask's bounding rectangle, and added to
    the tile.  The published message reuses the input image's header.
    """
    bridge = cv_bridge.CvBridge()
    img = bridge.imgmsg_to_cv2(img_msg)
    label_img = bridge.imgmsg_to_cv2(label_msg)
    crops = []
    for label_value in np.unique(label_img):
        if label_value == 0:
            # should be skipped 0, because
            # 0 is to label image as black region to mask image
            continue
        mask = label_img == label_value
        masked = img.copy()
        masked[~mask] = 0
        crops.append(bounding_rect_of_mask(masked, mask))
    tile_img = get_tile_image(crops)
    tile_msg = bridge.cv2_to_imgmsg(tile_img, encoding='bgr8')
    tile_msg.header = img_msg.header
    self.pub_tile.publish(tile_msg)
def main():
    """Interactively browse a recorded dataset (image / label / depth)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset_dir', help='Dataset dir path')
    parser.add_argument('-s', '--start',
                        help='Start timestamp (ex. 2017-06-10T10:00:23)')
    args = parser.parse_args()

    dataset_dir = args.dataset_dir
    start = args.start
    if not osp.exists(dataset_dir):
        print('Please install dataset to: %s' % dataset_dir)
        quit(1)

    # Count annotated frames (those with a label.npz next to the image).
    label_files = []
    for stamp_dir in os.listdir(dataset_dir):
        stamp_dir = osp.join(dataset_dir, stamp_dir)
        label_file = osp.join(stamp_dir, 'label.npz')
        if osp.exists(label_file):
            label_files.append(label_file)
        else:
            label_files.append(None)
    print('==> Size of dataset: All: %d, Annotated: %d.' %
          (len(label_files), len(list(filter(None, label_files)))))

    PKG_DIR = rospkg.RosPack().get_path('jsk_arc2017_common')
    with open(osp.join(PKG_DIR, 'config/label_names.yaml')) as f:
        # safe_load: config is plain data.  Elsewhere in this project the
        # same file is read as a dict keyed by 'label_names', so accept
        # either a bare list or that dict shape.
        loaded = yaml.safe_load(f)
    object_names = loaded['label_names'] if isinstance(loaded, dict) else loaded
    object_names.append('__unlabeled__')

    print('==> Press keys: [q] to quit, [n] to go next, [p] to go previous')
    stamp_dirs = list(sorted(os.listdir(dataset_dir)))
    i = 0
    while True:
        # Dir names are nanosecond epoch stamps.
        stamp = datetime.datetime.fromtimestamp(int(stamp_dirs[i]) / 1e9)
        if start and stamp < dateutil.parser.parse(start):
            i += 1
            if i >= len(stamp_dirs):
                # --start is later than every frame; nothing to show.
                print('No frame after the requested start timestamp')
                break
            continue
        start = None

        stamp_dir = osp.join(dataset_dir, stamp_dirs[i])
        print('%s: %s' % (stamp.isoformat(), stamp_dir))

        img_file = osp.join(stamp_dir, 'image.jpg')
        img = skimage.io.imread(img_file)
        depth_file = osp.join(stamp_dir, 'depth.npz')
        depth = np.load(depth_file)['arr_0']
        depth_viz = colorize_depth(depth, min_value=0.4, max_value=1.0)

        label_file = osp.join(stamp_dir, 'label.npz')
        if osp.exists(label_file):
            label = np.load(label_file)['arr_0']
            # -1 marks unlabeled pixels; remap them to the appended
            # '__unlabeled__' class and show them as random-color noise.
            mask_unlabeled = label == -1
            label[mask_unlabeled] = object_names.index('__unlabeled__')
            img_labeled = img.copy()
            img_labeled[mask_unlabeled] = \
                np.random.randint(0, 255, (mask_unlabeled.sum(), 3))
            label_viz = label2rgb(
                lbl=label, img=img_labeled,
                label_names=dict(enumerate(object_names)))
        else:
            label_viz = np.zeros_like(img)

        viz = jsk_recognition_utils.get_tile_image([img, label_viz, depth_viz])
        cv2.imshow('view_jsk_v1', viz[:, :, ::-1])  # RGB -> BGR for cv2
        key = cv2.waitKey(0)
        if key == ord('q'):
            break
        elif key == ord('n'):
            if i == len(stamp_dirs) - 1:
                print('Reached the end edge of the dataset')
                continue
            i += 1
        elif key == ord('p'):
            if i == 0:
                print('Reached the start edge of the dataset')
                continue
            i -= 1
        else:
            continue
def main():
    """Interactively browse a recorded dataset (image / label / depth)."""
    parser = argparse.ArgumentParser()
    parser.add_argument('dataset_dir', help='Dataset dir path')
    parser.add_argument('-s', '--start',
                        help='Start timestamp (ex. 2017-06-10T10:00:23)')
    args = parser.parse_args()

    dataset_dir = args.dataset_dir
    start = args.start
    if not osp.exists(dataset_dir):
        print('Please install dataset to: %s' % dataset_dir)
        quit(1)

    # Count annotated frames (those with a label.npz next to the image).
    label_files = []
    for stamp_dir in os.listdir(dataset_dir):
        stamp_dir = osp.join(dataset_dir, stamp_dir)
        label_file = osp.join(stamp_dir, 'label.npz')
        if osp.exists(label_file):
            label_files.append(label_file)
        else:
            label_files.append(None)
    print('==> Size of dataset: All: %d, Annotated: %d.' %
          (len(label_files), len(list(filter(None, label_files)))))

    PKG_DIR = rospkg.RosPack().get_path('jsk_arc2017_common')
    with open(osp.join(PKG_DIR, 'config/label_names.yaml')) as f:
        # safe_load: config is plain data.  Elsewhere in this project the
        # same file is read as a dict keyed by 'label_names', so accept
        # either a bare list or that dict shape.
        loaded = yaml.safe_load(f)
    object_names = loaded['label_names'] if isinstance(loaded, dict) else loaded
    object_names.append('__unlabeled__')

    print('==> Press keys: [q] to quit, [n] to go next, [p] to go previous')
    stamp_dirs = list(sorted(os.listdir(dataset_dir)))
    i = 0
    while True:
        # Dir names are nanosecond epoch stamps.
        stamp = datetime.datetime.fromtimestamp(int(stamp_dirs[i]) / 1e9)
        if start and stamp < dateutil.parser.parse(start):
            i += 1
            if i >= len(stamp_dirs):
                # --start is later than every frame; nothing to show.
                print('No frame after the requested start timestamp')
                break
            continue
        start = None

        stamp_dir = osp.join(dataset_dir, stamp_dirs[i])
        print('%s: %s' % (stamp.isoformat(), stamp_dir))

        img_file = osp.join(stamp_dir, 'image.jpg')
        img = skimage.io.imread(img_file)
        depth_file = osp.join(stamp_dir, 'depth.npz')
        depth = np.load(depth_file)['arr_0']
        depth_viz = colorize_depth(depth, min_value=0.4, max_value=1.0)

        label_file = osp.join(stamp_dir, 'label.npz')
        if osp.exists(label_file):
            label = np.load(label_file)['arr_0']
            # -1 marks unlabeled pixels; remap them to the appended
            # '__unlabeled__' class and show them as random-color noise.
            mask_unlabeled = label == -1
            label[mask_unlabeled] = object_names.index('__unlabeled__')
            img_labeled = img.copy()
            img_labeled[mask_unlabeled] = \
                np.random.randint(0, 255, (mask_unlabeled.sum(), 3))
            label_viz = label2rgb(lbl=label, img=img_labeled,
                                  label_names=dict(enumerate(object_names)))
        else:
            label_viz = np.zeros_like(img)

        viz = jsk_recognition_utils.get_tile_image([img, label_viz, depth_viz])
        cv2.imshow('view_jsk_v1', viz[:, :, ::-1])  # RGB -> BGR for cv2
        key = cv2.waitKey(0)
        if key == ord('q'):
            break
        elif key == ord('n'):
            if i == len(stamp_dirs) - 1:
                print('Reached the end edge of the dataset')
                continue
            i += 1
        elif key == ord('p'):
            if i == 0:
                print('Reached the start edge of the dataset')
                continue
            i -= 1
        else:
            continue