def visualize_classification(image, post_processed, config):
    """Overlay the top-scoring class name and score on the input image.

    Args:
        image (np.ndarray): RGB image the inference was run on.
        post_processed (np.ndarray): One-batch model output with post process
            already applied. format is defined at
            https://github.com/blue-oil/blueoil/blob/master/lmnet/docs/specification/output_data.md
        config (EasyDict): Inference config.

    Returns:
        PIL.Image.Image: drawn image object.
    """
    colors = get_color_map(len(config.CLASSES))

    # Highest-probability class decides both the label text and its color.
    best = int(np.argmax(post_processed))
    best_score = float(np.max(post_processed))
    label = config.CLASSES[best]

    image_width = image.shape[1]
    pil_image = PIL.Image.fromarray(image)
    drawer = PIL.ImageDraw.Draw(pil_image)
    label_font = PIL.ImageFont.truetype(FONT, 16)

    caption = "{}\n{:.3f}".format(label, best_score)
    # Right-align the caption against the image's right edge (10px margin).
    caption_width, _ = drawer.multiline_textsize(caption, font=label_font)
    drawer.multiline_text(
        (image_width - caption_width - 10, 0),
        caption,
        fill=colors[best],
        font=label_font,
        align="right",
    )
    return pil_image
def __init__(self, *args, label_colors=None, **kwargs):
    """Initialize the parent, then pick the per-label drawing colors.

    Args:
        label_colors: Optional explicit color list; when omitted, one color
            per class is generated from the standard color map.
    """
    super().__init__(*args, **kwargs)
    # Fall back to the auto-generated palette sized by the class count
    # (self.num_classes is established by the parent initializer).
    self.label_colors = (
        get_color_map(self.num_classes) if label_colors is None else label_colors
    )
def run_sementic_segmentation(config):
    """Run the live semantic-segmentation camera demo until ESC is pressed.

    NOTE(review): "sementic" is a typo for "semantic", but renaming would
    break existing callers, so the name is kept.

    Args:
        config (dict): Inference config; only config['CLASSES'] is read here.
    """
    global nn
    camera_width = 320
    camera_height = 240
    window_name = "Segmentation Demo"
    vc = init_camera(camera_width, camera_height)
    # One worker process; nn.init runs once inside the worker at pool start.
    pool = Pool(processes=1, initializer=nn.init)
    result = None
    fps = 1.0
    # Two frame queues: q_save collects frames grabbed while inference runs;
    # q_show holds frames queued for display. They are swapped each cycle.
    q_save = Queue()
    q_show = Queue()
    grabbed, camera_img = vc.read()
    if not grabbed:
        # NOTE(review): on a failed grab camera_img is still used below —
        # presumably this only happens at startup; verify against init_camera.
        print("Frame is empty")
    q_show.put(camera_img.copy())
    input_img = camera_img.copy()
    colormap = np.array(get_color_map(len(config['CLASSES'])), dtype=np.uint8)
    while True:
        m1 = MyTime("1 loop of while(1) of main()")
        # Kick off inference asynchronously on the most recent frame.
        pool_result = pool.apply_async(_run_inference, (input_img, ))
        is_first = True
        while True:
            grabbed, camera_img = vc.read()
            if is_first:
                # The first frame grabbed after submitting becomes the
                # input for the next inference cycle.
                input_img = camera_img.copy()
                is_first = False
            q_save.put(camera_img.copy())
            if not q_show.empty():
                window_img = q_show.get()
                overlay_img = window_img
                if result is not None:
                    # Blend the most recent segmentation mask (from the
                    # previous cycle) over the current camera frame.
                    seg_img = label_to_color_image(result, colormap)
                    seg_img = cv2.resize(seg_img, dsize=(camera_width, camera_height))
                    overlay_img = cv2.addWeighted(window_img, 1, seg_img, 0.8, 0)
                overlay_img = add_fps(overlay_img, fps)
                cv2.imshow(window_name, overlay_img)
                key = cv2.waitKey(2)    # Wait for 2ms
                if key == 27:  # ESC to quit
                    return
            if pool_result.ready():
                break
        # Discard any frames not yet shown, then promote the frames saved
        # during this inference to the display queue for the next cycle.
        q_show = clear_queue(q_show)
        q_save, q_show = swap_queue(q_save, q_show)
        result, fps = pool_result.get()
        m1.show()
def visualize_object_detection(image, post_processed, config):
    """Draw predicted detection boxes and labels onto the input image.

    Args:
        image (np.ndarray): RGB image the inference was run on.
        post_processed (np.ndarray): One-batch model output with post process
            already applied. format is defined at
            https://github.com/blue-oil/blueoil/blob/master/lmnet/docs/specification/output_data.md
        config (EasyDict): Inference config.

    Returns:
        PIL.Image.Image: drawn image object.
    """
    colors = get_color_map(len(config.CLASSES))

    # Boxes are in network-input coordinates; rescale to the original image.
    h_scale = image.shape[0] / float(config.IMAGE_SIZE[0])
    w_scale = image.shape[1] / float(config.IMAGE_SIZE[1])
    boxes = np.copy(post_processed)
    boxes[:, 0] *= w_scale
    boxes[:, 1] *= h_scale
    boxes[:, 2] *= w_scale
    boxes[:, 3] *= h_scale

    pil_image = PIL.Image.fromarray(image)
    drawer = PIL.ImageDraw.Draw(pil_image)

    # Each row: [x, y, w, h, class_id, score].
    for row in boxes:
        class_id = int(row[4])
        x, y, w, h = row[0], row[1], row[2], row[3]
        color = tuple(colors[class_id])
        drawer.rectangle([x, y, x + w, y + h], outline=color)
        caption = "{:s}: {:.3f}".format(config.CLASSES[class_id], float(row[5]))
        # Label sits slightly left of the box's top-left corner.
        drawer.text([x - 10, y], caption, fill=color)
    return pil_image
def visualize_semantic_segmentation(image, post_processed, config):
    """Blend the colorized segmentation mask over the input image.

    Args:
        image (np.ndarray): RGB image the inference was run on.
        post_processed (np.ndarray): One-batch model output with post process
            already applied. format is defined at
            https://github.com/blue-oil/blueoil/blob/master/lmnet/docs/specification/output_data.md
        config (EasyDict): Inference config.

    Returns:
        PIL.Image.Image: drawn image object.
    """
    colors = np.array(get_color_map(len(config.CLASSES)), dtype=np.uint8)
    image_height, image_width = image.shape[0], image.shape[1]

    # Colorize the label map; the helper expects a leading batch axis.
    colored = label_to_color_image(np.expand_dims(post_processed, 0), colors)
    mask = PIL.Image.fromarray(colored).resize(size=(image_width, image_height))

    # 50/50 blend of the camera image and the colorized mask.
    return PIL.Image.blend(PIL.Image.fromarray(image), mask, 0.5)
def __init__(self, task, classes, image_size):
    """Store inference metadata along with a per-class color map.

    Args:
        task: Must be a member of Tasks.
        classes: Sequence of class names.
        image_size: Network input image size.
    """
    assert task in Tasks  # reject unknown task kinds up front
    self.task = task
    self.image_size = image_size
    self.classes = classes
    # One drawing color per class.
    self.color_maps = get_color_map(len(classes))
def test_get_color_map_with_large_length():
    """A request larger than the base palette still yields one color each."""
    palette = get_color_map(30)
    assert len(palette) == 30
    assert palette[0] == (192, 0, 128)
    assert palette[-1] == (128, 0, 192)
def test_get_color_map_with_small_length():
    """A small request yields exactly that many colors."""
    palette = get_color_map(5)
    assert len(palette) == 5
    assert palette[0] == (192, 0, 128)
    assert palette[-1] == (64, 0, 128)
def label_colors(self):
    """Return one drawing color per label parsed from the dataset."""
    return get_color_map(len(self.parse_labels()))