def do_detect_for_image(self, image):
    if image is not None:
        self.frame = image
        self.mask_frame = [image * self.mask]
        self.height, self.width, _ = image.shape
        self.data[self.frame_id] = []
        detection = self.detect(self.mask_frame)
        res = detection[0]
        res_count = len(res['scores'])
        class_masks = np.zeros((self.num_classes, self.height, self.width), dtype=np.bool_)
        for track_id in range(res_count):
            # Find the detection index i whose box matches this track's box.
            box = res['rois'][track_id]
            i = 0
            for k in range(len(res['class_ids'])):
                if Detector.check_boxes(res['rois'][k], box):
                    i = k
                    break
            class_id = self.track_per_class['classes'].get(track_id, None)
            score = res['scores'][i]
            if class_id is None:
                class_id = res['class_ids'][i]
                self.track_per_class['classes'][track_id] = class_id
                self.track_per_class['scores'][track_id] = {}
            self.track_per_class['scores'][track_id][class_id] = \
                self.track_per_class['scores'][track_id].get(class_id, 0) + score
            # Normalize the accumulated per-class scores and pick the most likely class for this track.
            s = sum(self.track_per_class['scores'][track_id].values())
            mx = 0
            mx_class_id = class_id
            for key in self.track_per_class['scores'][track_id]:
                self.track_per_class['scores'][track_id][key] /= s
                if mx < self.track_per_class['scores'][track_id][key]:
                    mx = self.track_per_class['scores'][track_id][key]
                    mx_class_id = key
            if mx_class_id != class_id:
                self.track_per_class['classes'][track_id] = mx_class_id
                class_id = mx_class_id
            class_id = int(class_id)
            mask = res['masks'][:, :, i]
            class_masks[class_id] += mask
            self.data[self.frame_id].append(
                Detection(box=box, track_id=track_id, class_id=class_id, score=score))
            cv2.putText(self.frame, str(track_id), (box[1] - 1, box[0] - 1),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 6)
            cv2.putText(self.frame, str(track_id), (box[1] - 3, box[0] - 3),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255), 2)
            visualize.draw_box(self.frame, box, Detection.get_hash_color(track_id))
        for class_id in range(self.num_classes):
            visualize.apply_mask(self.frame, class_masks[class_id], class_colors[class_id])
def merge_image_pred_gt(image, pred_mask, gt_mask, alpha=0.5):
    '''
    The GT-only area is drawn in blue, the overlap area in green,
    and the prediction-only area in red.
    :param image:
    :param pred_mask:
    :param gt_mask:
    :return: image after merge
    '''
    color_red = [1.0, 0.0, 0.0]
    color_green = [0.0, 1.0, 0.0]
    color_blue = [0.0, 0.0, 1.0]
    overlap_mask = np.logical_and(pred_mask, gt_mask)
    pred2_mask = np.logical_and(pred_mask, np.logical_not(gt_mask))
    gt2_mask = np.logical_and(gt_mask, np.logical_not(pred_mask))
    image2 = visualize.apply_mask(image, overlap_mask, color=color_green, alpha=alpha)
    image2 = visualize.apply_mask(image2, pred2_mask, color=color_red, alpha=alpha)
    image2 = visualize.apply_mask(image2, gt2_mask, color=color_blue, alpha=alpha)
    return image2
def run_model_for_paper():
    global target
    global source
    debug_imgs = Path(target, 'debug')
    debug_imgs.mkdir(exist_ok=True)
    files = [f for f in source.glob('*.png')]
    mask_sign = Path(target, 'signs')
    mask_sign.mkdir(exist_ok=True)
    mask_stamps = Path(target, 'stamps')
    mask_stamps.mkdir(exist_ok=True)
    colors = random_colors(3)
    for f in tqdm(files):
        img = cv2.imread(str(f))
        sign, stamps = detect_objects(img)
        debug_img = img.copy()
        for idx, roi in enumerate(sign['rois']):
            mask = sign['masks'][:, :, idx]
            masked = get_masked_img(img, mask, roi)
            mask_file = Path(mask_sign, f'{f.stem}_{str(idx)}_sign.png')
            cv2.imwrite(str(mask_file), masked)
            y1, x1, y2, x2 = roi
            debug_img = cv2.rectangle(debug_img, (x1, y1), (x2, y2),
                                      colors[0], thickness=2)
            debug_img = apply_mask(debug_img, mask, colors[0])
        for idx, roi in enumerate(stamps['rois']):
            mask = stamps['masks'][:, :, idx]
            masked = get_masked_img(img, mask, roi)
            mask_file = Path(mask_stamps, f'{f.stem}_{str(idx)}_stamp.png')
            cv2.imwrite(str(mask_file), masked)
            # cv2.rectangle(debug_img, (x1, y1), (x2, y2),
            #               (0, 255, 0), thickness=2)
            debug_img = apply_mask(debug_img, mask, colors[1])
        debug_file = Path(debug_imgs, f'{f.stem}.png')
        cv2.imwrite(str(debug_file), debug_img)
def makeMask(r, image, name):
    MASK_DIR = os.path.join(WORK_DIR, name, "mask")
    TMASK_DIR = os.path.join(WORK_DIR, name, "tmask")
    # Write an all-black mask as index 0 so later processing always has a background file.
    cv.imwrite(os.path.join(MASK_DIR, "0.png"), np.zeros(shape=image.shape, dtype=np.uint8))
    cv.imwrite(os.path.join(TMASK_DIR, "0.png"), np.zeros(shape=image.shape, dtype=np.uint8))
    maskCnt = 1
    for i in range(0, r["rois"].shape[0]):
        if r['scores'][i] >= 0.9 and r['class_ids'][i] == 1:
            savefile = str(maskCnt) + ".png"
            maskCnt += 1
            mask = r['masks'][:, :, i]
            image2 = image.copy()
            black_image = np.zeros(shape=image2.shape, dtype=np.uint8)
            black_image = visualize.apply_mask(black_image, mask, (1, 1, 1), alpha=1)
            img_gray = cv.cvtColor(black_image, cv.COLOR_BGR2GRAY)
            ret, img_binary = cv.threshold(img_gray, 127, 255, 0)
            _, contours, hierarchy = cv.findContours(img_binary, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
            # Thin contour outline for the regular mask, thick outline for the "t" mask.
            for cnt in contours:
                cv.drawContours(black_image, [cnt], 0, (255, 255, 255), 2)
            cv.imwrite(os.path.join(MASK_DIR, savefile), black_image)
            for cnt in contours:
                cv.drawContours(black_image, [cnt], 0, (255, 255, 255), 13)
            cv.imwrite(os.path.join(TMASK_DIR, savefile), black_image)
    png_convert(maskCnt, name)
    return maskCnt
def make_mask_images(OUTPUT_DIR2, file_names, IMAGE_DIR, WIDTH, HEIGHT):
    for i in range(0, len(file_names)):
        image = skimage.io.imread(os.path.join(IMAGE_DIR, file_names[i]))
        init_height, init_width = image.shape[:2]
        # Resize and center-crop so the output matches the WIDTH x HEIGHT target.
        if (init_height / init_width) > (HEIGHT / WIDTH):
            small_height = int(init_height * (WIDTH / init_width))
            image = cv2.resize(image, (WIDTH, small_height), interpolation=cv2.INTER_NEAREST)
            image = image[(small_height // 2 - HEIGHT // 2):(small_height // 2 + HEIGHT // 2), 0:WIDTH]
        else:
            small_width = int(init_width * (HEIGHT / init_height))
            image = cv2.resize(image, (small_width, HEIGHT), interpolation=cv2.INTER_NEAREST)
            image = image[0:HEIGHT, (small_width // 2 - WIDTH // 2):(small_width // 2 + WIDTH // 2)]

        # Run detection
        results = model.detect([image], verbose=1)
        r = results[0]

        # Prepare black image
        mask_base = np.zeros((image.shape[0], image.shape[1], image.shape[2]), np.uint8)
        after_mask_img = image.copy()
        color = (10, 10, 10)  # mask fill color
        number_of_objects = len(r['masks'][0, 0])
        mask_img = mask_base
        for j in range(0, number_of_objects):
            mask = r['masks'][:, :, j]
            mask_img = visualize.apply_mask(mask_base, mask, color, alpha=1)
        if not os.path.exists(OUTPUT_DIR2):
            os.makedirs(OUTPUT_DIR2)
        cv2.imwrite(OUTPUT_DIR2 + '/' + file_names[i], mask_img)
def color_splash(image, masks, boxes, class_id, colors):
    """Apply color splash effect.
    image: RGB image [height, width, 3]
    masks: instance segmentation masks [height, width, instance count]
    class_id: list of class ids per mask
    colors: list of colors per class
    Returns result image.
    """
    # The original grayscale-splash implementation (collapse all instance masks into
    # one layer and keep color only inside it) was replaced by drawing each instance
    # mask with its class color.
    if masks.shape[-1] > 0:
        num_inst = masks.shape[-1]
        for i in range(num_inst):
            color = colors[int(class_id[i])]
            mask = masks[:, :, i]
            cover = visualize.apply_mask(image, mask, color)
    else:
        # No detections: return the input image unchanged.
        cover = image
    return cover
def render(result, rgb_image, target):
    N = result['rois'].shape[0]  # number of detections
    result_image = rgb_image.copy()
    mask = None
    colors = visualize.random_colors(N)
    for i in range(N):
        # Process each detected object whose class is in `target`.
        if class_names[result['class_ids'][i]] in target:
            # Color
            color = colors[i]
            rgb = (round(color[0] * 255), round(color[1] * 255), round(color[2] * 255))
            font = cv2.FONT_HERSHEY_SIMPLEX
            # Bbox
            result_image = visualize.draw_box(result_image, result['rois'][i], rgb)
            # Class & Score
            text_top = f"ID{i:d} {class_names[result['class_ids'][i]]}: {result['scores'][i]:.3f}"
            result_image = cv2.putText(
                result_image, text_top,
                (result['rois'][i][1], result['rois'][i][0]),
                font, 0.7, rgb, 1, cv2.LINE_AA)
            # Mask
            mask = result['masks'][:, :, i]
            result_image = visualize.apply_mask(result_image, mask, color)
            # Log
            print(f"ID: {i} | {class_names[result['class_ids'][i]]}: {result['scores'][i]}")
    return result_image, mask
def make_mask_images(OUTPUT_DIR2, file_names, dataset, IMAGE_DIR, data_num):
    for i in range(0, len(file_names)):
        image = skimage.io.imread(os.path.join(IMAGE_DIR, file_names[i]))
        image = cv2.resize(image, (WIDTH, HEIGHT))

        # Run detection
        results = model.detect([image], verbose=1)
        r = results[0]

        # Prepare black image
        mask_base = np.zeros((image.shape[0], image.shape[1], image.shape[2]), np.uint8)
        after_mask_img = image.copy()
        color = (10, 10, 10)  # mask fill color
        number_of_objects = len(r['masks'][0, 0])
        mask_img = mask_base
        for j in range(0, number_of_objects):
            mask = r['masks'][:, :, j]
            mask_img = visualize.apply_mask(mask_base, mask, color, alpha=1)
        if not os.path.exists(OUTPUT_DIR2):
            os.makedirs(OUTPUT_DIR2)
        cv2.imwrite(OUTPUT_DIR2 + '/' + data_num + "_" + file_names[i] + '.jpg', mask_img)
def show_pred_rcnn(self, r, img):
    hsv = [(i / 2, 1, 1.0) for i in range(len(self.conf['CLASSES']))]
    colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
    classes = self.conf['CLASSES']
    for i in range(0, len(r["scores"])):
        (startY, startX, endY, endX) = r["rois"][i]
        classID = r['class_ids'][i]
        label = classes[classID]
        score = r['scores'][i]
        color = colors[classID]
        cv2.rectangle(img, (startX, startY), (endX, endY), color, 2)
        text = "{} : {:.3f}".format(label, score)
        y = startY - 10 if startY - 10 > 10 else startY + 10
        # cv2.putText(img, text, (startX, y), cv2.FONT_HERSHEY_SIMPLEX,
        #             0.6, color, 2)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    for i in range(0, r["rois"].shape[0]):
        classID = r['class_ids'][i]
        mask = r['masks'][:, :, i]
        color = colors[classID]
        img = visualize.apply_mask(img, mask, color, alpha=0.5)
    target_size = self.conf['OUTPUT_VIZU_SIZE']
    img = cv2.resize(img, (target_size[0], target_size[1]))
    cv2.imshow("Output image", img)
    cv2.waitKey()
def drawImage(image, boxes, masks, class_ids, class_names, scores,
              show_bbox=False, show_label=False, show_seg=True, show_center=True):
    N = boxes.shape[0]  # Number of instances
    if not N:
        print("\n*** No instances to display *** \n")
    else:
        assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
    masked_image = image.astype(np.uint32).copy()
    car_count = 0
    for i in range(N):
        class_id = class_ids[i]
        # Only keep vehicle classes (COCO ids 3, 6, 8).
        if class_id != 3 and class_id != 6 and class_id != 8:
            continue
        car_count += 1
        # Bounding box
        if not np.any(boxes[i]):
            # Skip this instance. Has no bbox. Likely lost in image cropping.
            continue
        y1, x1, y2, x2 = boxes[i]
        if show_bbox:
            cv2.rectangle(image, (x1, y1), (x2, y2), (0, 255, 0), 1, cv2.LINE_AA)
        if show_center:
            cv2.circle(image,
                       (x1 + int(math.fabs(float(x2 - x1)) / 2),
                        y1 + int(math.fabs(float(y2 - y1)) / 2)),
                       4, (0, 0, 255), -1)
        if show_label:
            score = scores[i] if scores is not None else None
            label = class_names[class_id]
            caption = "{} {:.3f}".format(label, score) if score else label
            font = cv2.FONT_HERSHEY_SIMPLEX
            cv2.putText(image, caption, (x1, y1 + 8), font, 1, (255, 255, 255), 1, cv2.LINE_AA)
        if show_seg:
            # Mask
            mask = masks[:, :, i]
            masked_image = visualize.apply_mask(masked_image, mask, (0, 0, 0))
            # Mask polyline.
            # Pad to ensure proper polygons for masks that touch image edges.
            padded_mask = np.zeros((mask.shape[0] + 2, mask.shape[1] + 2), dtype=np.uint8)
            padded_mask[1:-1, 1:-1] = mask
            contours = visualize.find_contours(padded_mask, 0.5)
            for verts in contours:
                # Subtract the padding and flip (y, x) to (x, y)
                verts = np.fliplr(verts) - 1
                cv2.polylines(image, np.int32([verts]), True, (0, 255, 255))
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(image, 'CAR: {}'.format(car_count), (5, 32), font, 1, (0, 0, 255), 2, cv2.LINE_AA)
def subplot_image_mask(image, mask):
    plt.figure(figsize=(15, 15))
    plt.subplot(1, 2, 1)
    plt.imshow(image)
    plt.subplot(1, 2, 2)
    color = visualize.random_colors(1, bright=True)
    plt.imshow(visualize.apply_mask(image, mask, color=color[0], alpha=0.5))
    plt.show()
def click_and_crop(event, x, y, flags, param):
    # Grab the clicked coordinates and the original image.
    global refPt_x, refPt_y, ori
    if event == cv.EVENT_LBUTTONDOWN:
        # On a left click, store the clicked coordinates.
        refPt_x = x
        refPt_y = y
        # Loop over every instance mask at the clicked pixel.
        for i in range(0, len(results[0]["masks"][refPt_y][refPt_x])):
            # Only proceed if the clicked pixel belongs to an object and that object is a person.
            if results[0]["masks"][refPt_y][refPt_x][i] == True:
                if results[0]["class_ids"][i] == 1:
                    mask = r['masks'][:, :, i]
                    color = COLORS[classID][::-1]  # mask color
                    image2 = ori.copy()
                    # Create an empty black image to hold the mask and apply the mask to it.
                    black_image = np.zeros(shape=image2.shape, dtype=np.uint8)
                    black_image = visualize.apply_mask(black_image, mask, (1, 1, 1), alpha=1)
                    # Convert black_image to grayscale and threshold it to extract contours.
                    img_gray = cv.cvtColor(black_image, cv.COLOR_BGR2GRAY)
                    ret, img_binary = cv.threshold(img_gray, 127, 255, 0)
                    contours, hierarchy = cv.findContours(
                        img_binary, cv.RETR_LIST, cv.CHAIN_APPROX_SIMPLE)
                    # Draw the contours on both images so a slightly wider band
                    # along the mask edge is also treated as masked.
                    for cnt in contours:
                        cv.drawContours(image2, [cnt], 0, (255, 255, 255), 1)
                    for cnt in contours:
                        cv.drawContours(black_image, [cnt], 0, (255, 255, 255), 1)
                    image2 = visualize.apply_mask(image2, mask, (1, 1, 1), alpha=1)
                    # Produce the PNG mask files used later for inpainting.
                    cv.destroyWindow("mask")
                    cv.imshow("mask", black_image)
                    # cv.imwrite("mask.png", black_image)
                    cv.destroyWindow("image2")
                    cv.imshow("image2", image2)
def save_segments(expanded_masks, input_dir, filename, output_dir, mask_expansion=True):
    '''This function takes the masks, the corresponding image and an output
    directory and saves the segmented image of a structure depiction.
    The mask_expansion attribute only changes the name of the output file.'''
    IMAGE_PATH = input_dir + "/" + filename
    mask = expanded_masks
    for i in range(mask.shape[2]):
        image = cv2.imread(os.path.join(IMAGE_PATH), -1)
        for j in range(image.shape[2]):
            image[:, :, j] = image[:, :, j] * mask[:, :, i]
        original = image.copy()
        # Remove unwanted background
        grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        _, thresholded = cv2.threshold(grayscale, 0, 255, cv2.THRESH_OTSU)
        bbox = cv2.boundingRect(thresholded)
        x, y, w, h = bbox
        foreground = image[y:y + h, x:x + w]
        masked_image = np.zeros(image.shape).astype(np.uint8)
        masked_image = visualize.apply_mask(masked_image, mask[:, :, i], [1, 1, 1])
        masked_image = Image.fromarray(masked_image)
        masked_image = masked_image.convert('RGB')
        im_gray = cv2.cvtColor(np.asarray(masked_image), cv2.COLOR_RGB2GRAY)
        (thresh, im_bw) = cv2.threshold(im_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        # Removal of transparent layer - black background
        _, alpha = cv2.threshold(im_bw, 0, 255, cv2.THRESH_BINARY)
        b, g, r = cv2.split(image)
        rgba = [b, g, r, alpha]
        dst = cv2.merge(rgba, 4)
        background = dst[y:y + h, x:x + w]
        trans_mask = background[:, :, 3] == 0
        background[trans_mask] = [255, 255, 255, 255]
        new_img = cv2.cvtColor(background, cv2.COLOR_BGRA2BGR)
        # Save segments
        if mask_expansion:
            output_image = output_dir + filename + "_segment_%d.png" % i
        else:
            output_image = output_dir + filename + "_segment_NOEXPANSION_%d.png" % i
        cv2.imwrite(output_image, new_img)
    return "Completed, segments saved inside the output folder!"
def custom_visualize(test_image, model, colors, classes, draw_bbox,
                     mrcnn_visualize, instance_segmentation):
    detections = model.detect([test_image], verbose=1)[0]
    if mrcnn_visualize:
        matplotlib.use('TkAgg')
        visualize.display_instances(test_image, detections['rois'], detections['masks'],
                                    detections['class_ids'], classes, detections['scores'])
        return
    if instance_segmentation:
        # One distinct hue per detected instance.
        hsv = [(i / len(detections['rois']), 1, 1.0) for i in range(len(detections['rois']))]
        colors = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
        random.seed(42)
        random.shuffle(colors)
    for i in range(0, detections["rois"].shape[0]):
        classID = detections["class_ids"][i]
        mask = detections["masks"][:, :, i]
        if instance_segmentation:
            color = colors[i][::-1]
        else:
            color = colors[classID][::-1]
        # To visualize the pixel-wise mask of the object
        test_image = visualize.apply_mask(test_image, mask, color, alpha=0.5)
    test_image = cv2.cvtColor(test_image, cv2.COLOR_RGB2BGR)
    if draw_bbox == 'True':
        for i in range(0, len(detections["scores"])):
            (startY, startX, endY, endX) = detections["rois"][i]
            classID = detections["class_ids"][i]
            label = classes[classID]
            score = detections["scores"][i]
            if instance_segmentation:
                color = [int(c) for c in np.array(colors[i]) * 255]
            else:
                color = [int(c) for c in np.array(colors[classID]) * 255]
            cv2.rectangle(test_image, (startX, startY), (endX, endY), color, 2)
            text = "{}: {:.2f}".format(label, score)
            y = startY - 10 if startY - 10 > 10 else startY + 10
            cv2.putText(test_image, text, (startX, y),
                        cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2)
    return test_image
def detect_object(self, image_url, classes, image_dir="/ml/image/temp.jpg"):
    image = skimage.io.imread(image_url)

    # Run detection
    results = self.model.detect([image], verbose=0)

    # Visualize results
    r = results[0]
    CLASS_NAMES = classes
    hsv = [(i / len(CLASS_NAMES), 1, 1.0) for i in range(len(CLASS_NAMES))]
    COLORS = list(map(lambda c: colorsys.hsv_to_rgb(*c), hsv))
    for i in range(0, r["rois"].shape[0]):
        # Extract the class ID and mask for the current detection, then
        # grab the color to visualize the mask (in BGR format).
        classID = r["class_ids"][i]
        mask = r["masks"][:, :, i]
        color = COLORS[classID][::-1]
        # Visualize the pixel-wise mask of the object.
        image = visualize.apply_mask(image, mask, color, alpha=0.5)
    # Convert the image back to BGR so we can use OpenCV's drawing functions.
    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
    # (Optional) bounding boxes, class labels, and scores could be drawn here
    # with cv2.rectangle / cv2.putText, as in the commented-out original.
    # Save the output image.
    cv2.imwrite(settings.STATIC_DIR + "/ml/temp.jpg", image)
    return r
def update(self, subject):
    image = subject.image
    masks = subject.masks
    rois = subject.rois
    colors = random_colors(masks.shape[2])
    for i in range(0, masks.shape[2]):
        mask = masks[:, :, i]
        image = apply_mask(image, mask, colors[i])
    # Display
    cv2.imshow("Mask RCNN", image)
    cv2.waitKey(1)
def display_instances(image, boxes, masks, class_ids, class_names, scores=None,
                      show_mask=True, show_bbox=True, colors=None, captions=None):
    # Number of instances
    N = boxes.shape[0]
    if not N:
        print("\n*** No instances to display *** \n")
    else:
        assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]

    # Generate random colors
    colors = colors or visualize.random_colors(N)

    masked_image = image.astype(np.uint8).copy()
    for i in range(N):
        color = colors[i]

        # Bounding box
        if not np.any(boxes[i]):
            # Skip this instance. Has no bbox. Likely lost in image cropping.
            continue
        y1, x1, y2, x2 = boxes[i]
        if show_bbox:
            cv2.rectangle(masked_image, (x1, y1), (x2, y2), color, 1)

        # Label
        if not captions:
            class_id = class_ids[i]
            score = scores[i] if scores is not None else None
            label = class_names[class_id]
            caption = "{} {:.3f}".format(label, score) if score else label
        else:
            caption = captions[i]
        cv2.putText(masked_image, str(caption), (x1, y1 + 8),
                    cv2.FONT_HERSHEY_SIMPLEX, .5, color, lineType=cv2.LINE_AA)

        # Mask
        mask = masks[:, :, i]
        if show_mask:
            masked_image = visualize.apply_mask(masked_image, mask, color)
    return masked_image
def save_segments(zipper):
    expanded_masks, IMAGE_PATH, output_directory = zipper
    mask = expanded_masks
    for i in range(mask.shape[2]):
        image = cv2.imread(os.path.join(IMAGE_PATH), -1)
        for j in range(image.shape[2]):
            image[:, :, j] = image[:, :, j] * mask[:, :, i]
        # Remove unwanted background
        grayscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        _, thresholded = cv2.threshold(grayscale, 0, 255, cv2.THRESH_OTSU)
        bbox = cv2.boundingRect(thresholded)
        x, y, w, h = bbox
        foreground = image[y:y + h, x:x + w]
        masked_image = np.zeros(image.shape).astype(np.uint8)
        masked_image = visualize.apply_mask(masked_image, mask[:, :, i], [1, 1, 1])
        masked_image = Image.fromarray(masked_image)
        masked_image = masked_image.convert('RGB')
        im_gray = cv2.cvtColor(np.asarray(masked_image), cv2.COLOR_RGB2GRAY)
        (thresh, im_bw) = cv2.threshold(im_gray, 128, 255, cv2.THRESH_BINARY | cv2.THRESH_OTSU)
        # Removal of transparent layer - black background
        _, alpha = cv2.threshold(im_bw, 0, 255, cv2.THRESH_BINARY)
        b, g, r = cv2.split(image)
        rgba = [b, g, r, alpha]
        dst = cv2.merge(rgba, 4)
        background = dst[y:y + h, x:x + w]
        trans_mask = background[:, :, 3] == 0
        background[trans_mask] = [255, 255, 255, 255]
        new_img = cv2.cvtColor(background, cv2.COLOR_BGRA2BGR)
        # Save segments
        # Make a directory for the segments if it does not exist yet.
        if os.path.exists(output_directory + "/segments"):
            pass
        else:
            os.system("mkdir " + str(os.path.normpath(output_directory + "/segments")))
        # Define the correct path to save the segments
        segment_dirname = os.path.normpath(output_directory + "/segments/")
        filename = str(IMAGE_PATH).replace("\\", "/").split("/")[-1][:-4] + "_%d.png" % i
        file_path = os.path.normpath(segment_dirname + "/" + filename)
        print(file_path)
        cv2.imwrite(file_path, new_img)
    return output_directory + "/segments/"
def imwrite_mask(image, masks, classes, savename, remove_inflamation=False, saveoriginal=False):
    """
    Saves an image with colored segments s.t. blue is PDL1+, red is PDL1- and green is inflammation.
    :param image: the image to be colored
    :param masks: masks as resulted from PDL1NetTester.test function
    :param classes: classes as resulted from PDL1NetTester.test function
    :param savename: save the image in the output folder using the given savename str as file name
    :param remove_inflamation: if True, do not color inflammation segments
    :param saveoriginal: if True, also save the unmodified image itself to the output folder
    """
    if len(image.shape) < 3:
        image = cvtColor(image, COLOR_GRAY2RGB)
    if any(classes):  # if classes is not an empty list
        if remove_inflamation:
            inflamation_num = 1
            classes[classes == inflamation_num] = 0
        classes = classes.reshape(1, 1, -1)
        # for i in range(len(classes)):
        #     mask[masks[:, :, i] is True] = (masks[:, :, i] * classes[i])[masks[:, :, i] is True]
        masks = masks * classes
        mask = np.max(masks, axis=2)
        class_to_color = {1: (0, 1., 0), 2: (1., 0, 0), 3: (0, 0, 1.)}
        edited_image = image.copy()
        for class_ in np.unique(classes.ravel()):
            edited_image = vis.apply_mask(edited_image, mask, class_to_color[class_],
                                          label=class_, alpha=0.5)
        edited_image = remove_black_frame(edited_image)
        if savename is not None:
            file_name = os.path.join(result_dir, "mask_" + savename + ".png")
            edited_image = resize(edited_image, masks.shape[:2])
            edited_image = cvtColor(edited_image, COLOR_BGR2RGB)
            imwrite(file_name, edited_image)
    else:
        if savename is not None:
            file_name = os.path.join(result_dir, "mask_" + savename + ".png")
            image_org = remove_black_frame(image)
            image_org = resize(image_org, masks.shape[:2])
            image_org = cvtColor(image_org, COLOR_BGR2RGB)
            imwrite(file_name, image_org)
    if saveoriginal:
        if savename is not None:
            file_name = os.path.join(result_dir, "org_" + savename + ".png")
            image_org = remove_black_frame(image)
            image_org = resize(image_org, masks.shape[:2])
            image_org = cvtColor(image_org, COLOR_BGR2RGB)
            imwrite(file_name, image_org)
def run(self):
    if not os.path.exists(self.__video_path):
        raise FileNotFoundError(f"File does not exist! Invalid video file path: {self.__video_path}")
    capture = cv2.VideoCapture(self.__video_path)
    width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = capture.get(cv2.CAP_PROP_FPS)
    print(f'Source file FPS: {fps}')
    # Define codec and create video writer
    writer = cv2.VideoWriter(self.__output_file, cv2.VideoWriter_fourcc(*'MJPG'),
                             fps, (width, height))
    count = 0
    success = True
    while success:
        print("Frame number: ", count)
        # Read the next frame.
        success, image = capture.read()
        if success:
            r = self.__model.detect([image], verbose=0)[0]
            boxes = r['rois']
            # If objects were found in the frame, visualize them.
            if len(r['class_ids']):
                for num, object_id in enumerate(r['class_ids']):
                    # Extract the detection confidence for the object.
                    score = r['scores'][num]
                    # Extract the box parameters of the detected object.
                    box = boxes[num]
                    start_x = box[1]
                    start_y = box[0]
                    end_x = box[3]
                    end_y = box[2]
                    # Pick the fill color.
                    color = [int(c) for c in COLORS[object_id]]
                    # Draw the rectangle on the image.
                    cv2.rectangle(image, (start_x, start_y), (end_x, end_y), color, 2)
                    mask = r['masks'][:, :, num]
                    image = apply_mask(image, mask, color)
                    # Build the label for the detected object.
                    label = f"{OBJECTS[object_id]}:{round(score * 100, 2)}"
                    # Add the object's label to the image.
                    cv2.putText(image, label, (box[1], box[0]),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.75 / 2, (0, 0, 0), 1)
            writer.write(image)
            count += 1
    writer.release()
def color_splash(image, mask, color):
    """Apply color splash effect.
    image: RGB image [height, width, 3]
    mask: instance segmentation mask [height, width, instance count]
    Returns result image.
    """
    # Collapse all instance masks into a single layer, then color it.
    mask = (np.sum(mask, -1, keepdims=True) >= 1)
    mask = np.squeeze(mask)
    splash = apply_mask(image, mask, color[0])
    print(splash.shape)
    return splash
def processImage(frame):
    image = modellib.load_image_single(frame, config)
    results = model.detect([image], verbose=0)
    r = results[0]
    masked_image = image.astype(np.uint32).copy()
    N = r['rois'].shape[0]  # find out how many different instances there are
    try:
        # If there is no mask, simply return the image frame.
        mask = r['masks'][:, :, 0]
        color = visualize.random_colors(N)  # generates random instance colors
        color = color[0]
        image = visualize.apply_mask(masked_image, mask, color)
        return image.astype(np.uint8)
    except Exception:
        return image
def apply_mask(img, r, COLORS):
    """Apply the detected masks to the image."""
    for i in range(0, r["rois"].shape[0]):
        # Extract the class ID and mask for the current detection, then
        # grab the color to visualize the mask (in BGR format).
        classID = r["class_ids"][i]
        mask = r["masks"][:, :, i]
        color = COLORS[classID][::-1]
        # Visualize the pixel-wise mask of the object.
        img = visualize.apply_mask(img, mask, color, alpha=0.5)
    return img
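# For reference: the snippets in this section all call visualize.apply_mask from the
# Matterport Mask R-CNN codebase. A minimal sketch of that helper is reproduced below
# (an assumption based on the commonly used implementation, not part of any snippet
# above): it alpha-blends `color`, given as an RGB triple in [0, 1], into `image`
# wherever `mask` is set, modifying the array in place. This is why several callers
# reuse the returned array interchangeably with the input, and why passing 0-255
# colors can overflow uint8 images.
import numpy as np

def _apply_mask_reference(image, mask, color, alpha=0.5):
    """Sketch of mrcnn's visualize.apply_mask (hypothetical name to avoid clashing
    with the wrappers above)."""
    for c in range(3):
        image[:, :, c] = np.where(mask == 1,
                                  image[:, :, c] * (1 - alpha) + alpha * color[c] * 255,
                                  image[:, :, c])
    return image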
def segmentation():
    image_file = request.files.get('image')
    image = np.array(Image.open(image_file))
    image = np.asarray(image, dtype=np.float32)
    if image.shape[2] == 4:
        image = cv2.cvtColor(image, cv2.COLOR_BGRA2BGR)
    if max(image.shape) > 3000:
        h, w, _ = image.shape
        # cv2.resize expects (width, height).
        image = cv2.resize(image, (int(w / 2), int(h / 2)))
    global sess
    global graph
    with graph.as_default():
        set_session(sess)
        results = model.detect([image], verbose=1)
    r = results[0]
    N = r['rois'].shape[0]
    if N == 0:
        return jsonify({"status": "object missing"}), 422
    for i in range(N):
        mask = r['masks'][:, :, i]
        base_image = image.astype(np.uint32).copy()
        masked_images = []
        # Render the mask in 60 hues to build an animated GIF.
        for j in range(60):
            tmp = base_image.copy()
            tmp = visualize.apply_mask(tmp, mask, colorsys.hsv_to_rgb(j / 60, 1, 1.0), 0.5)
            masked_images.append(Image.fromarray(tmp.astype('uint8')))
        img_id = str(uuid.uuid4())
        out_gif = img_id + '.gif'
        base_image = Image.fromarray(base_image.astype('uint8'))
        base_image.save(out_gif, save_all=True, append_images=masked_images, loop=0)
        shutil.move(out_gif, os.path.join('/tmp', out_gif))
        out_gif = os.path.join('/tmp', out_gif)
        break
    try:
        return send_file(out_gif, attachment_filename='out.gif')
    except Exception as e:
        return str(e)
def detect(model):
    print("Running on {}".format(args.img))
    # Read image
    image = skimage.io.imread(args.img)
    # Detect objects
    r = model.detect([image], verbose=1)[0]
    for i in range(len(r['rois'])):
        image = visualize.draw_box(image, r['rois'][i], (255, 0, 0))
        # Masks are stacked along the last axis; apply_mask expects a color in [0, 1].
        image = visualize.apply_mask(image, r['masks'][:, :, i], (1.0, 0, 0))
    # Save output
    file_name = "splash_{:%Y%m%dT%H%M%S}.png".format(datetime.now())
    skimage.io.imsave(file_name, image)
def findPersonInPhoto(image, show, showMask):
    # Convert to an RGB image for the model.
    image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    # Perform a forward pass of the network.
    print("[INFO] making predictions with Mask R-CNN...")
    r = model.detect([image], verbose=1)[0]
    # Loop over the detected objects' bounding boxes and masks.
    for i in range(0, r["rois"].shape[0]):
        # Extract the class ID and mask.
        classID = r["class_ids"][i]
        # Ignore all non-person objects.
        if CLASS_NAMES[classID] != 'person':
            continue
        clone = image.copy()
        mask = r["masks"][:, :, i]
        # Visualize the pixel-wise mask of the object (`color` is defined globally).
        image = visualize.apply_mask(image, mask, color, alpha=0.5)
        # Convert the image to BGR for OpenCV use.
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        (startY, startX, endY, endX) = r["rois"][i]
        # Extract the ROI of the image and use foreground extraction for the mask.
        roi = clone[startY:endY, startX:endX]
        roiMask = extractMaskFromROI(roi)
        # Extract the mask produced by the CNN.
        visMask = (mask * 255).astype("uint8")
        visMask = visMask[startY:endY, startX:endX]
        # Keep only the overlapping regions of both masks to minimize mask errors.
        finalMask = cv2.bitwise_and(roiMask, visMask)
        if show or showMask:
            if show:
                cv2.namedWindow("ROI", cv2.WINDOW_NORMAL)
                cv2.imshow("ROI", roi)
                cv2.namedWindow("Output", cv2.WINDOW_NORMAL)
                cv2.imshow("Output", image)
                cv2.namedWindow("Mask", cv2.WINDOW_NORMAL)
                cv2.imshow("Mask", visMask)
                cv2.namedWindow('roi mask', cv2.WINDOW_NORMAL)
                cv2.imshow('roi mask', roiMask)
            if showMask:
                cv2.namedWindow('final mask', cv2.WINDOW_NORMAL)
                cv2.imshow('final mask', finalMask)
            cv2.waitKey(0)
        break
    return finalMask
def display_instances(image, boxes, masks, class_ids):
    masked_image = image.copy()
    # Number of instances
    N = boxes.shape[0]
    if not N:
        return masked_image
    else:
        assert boxes.shape[0] == masks.shape[-1] == class_ids.shape[0]
    # Generate random colors
    colors = visualize.random_colors(N)
    for i in range(N):
        color = colors[i]
        mask = masks[:, :, i]
        masked_image = visualize.apply_mask(masked_image, mask, color)
    return masked_image
def find_masks(pic_folder, model):
    image = skimage.io.imread(pic_folder + "init")
    r = model.detect([image])[0]
    N = len(r['rois'])
    colors = visualize.random_colors(N)
    # Save each mask and the masked image for display.
    for i in range(N):
        mask = r['masks'][:, :, i]
        maskImage = np.zeros(image.shape)
        maskImage[mask == True] = [255, 255, 255]
        kernel = np.ones((10, 10), np.uint8)
        maskImage = cv2.dilate(maskImage, kernel, iterations=1)
        cv2.imwrite(pic_folder + "mask_" + str(i) + ".jpg", maskImage)
        masked_image = visualize.apply_mask(np.copy(image), mask, colors[i])
        skimage.io.imsave(pic_folder + "mask_pic_" + str(i) + ".png", masked_image)
    return N
def draw_instances(image, boxes, masks, classes, scores, colors):
    # Number of instances
    N = boxes.shape[0]
    if not N:
        print("\n*** No instances to display *** \n")
    else:
        assert boxes.shape[0] == masks.shape[-1] == classes.shape[0]
    masked_image = image  # image.astype(np.uint32).copy()
    for i in range(N):
        color = colors[classes[i]]
        # Skip low-confidence detections.
        if scores[i] < 0.9:
            continue
        # Mask
        mask = masks[:, :, i]
        masked_image = visualize.apply_mask(masked_image, mask, color)
    return masked_image
def display_instances(image_ndarray, boxes, masks, class_ids, class_names, scores=None,
                      show_mask=True, show_bbox=True, show_title=True):
    # Get the number of instances.
    N = boxes.shape[0]
    # Generate random colors.
    colors = random_colors(N)
    # Loop over each instance.
    for i in range(N):
        color = colors[i]
        color = tuple([int(255 * k) for k in color])
        y1, x1, y2, x2 = boxes[i]
        leftTop_point = x1, y1
        rightDown_point = x2, y2
        # Draw the bounding box.
        if show_bbox:
            thickness = 3
            cv2.rectangle(image_ndarray, leftTop_point, rightDown_point, color, thickness)
        # Draw the title above the bounding box.
        if show_title:
            class_id = class_ids[i]
            title = '%s %.3f' % (class_names[class_id], scores[i])
            font = cv2.FONT_HERSHEY_SIMPLEX
            font_size = 0.7
            title_color = (0, 0, 255)
            thickness = 2
            cv2.putText(image_ndarray, title, leftTop_point, font, font_size, title_color, thickness)
        # Draw the mask.
        if show_mask:
            mask = masks[:, :, i]
            color = tuple([float(k / 255) for k in color])
            image_ndarray = apply_mask(image_ndarray, mask, color)
    return image_ndarray