def detect(self, image):
    if self.model is None:
        return {}

    # Run inference on a downscaled copy of the image, then map the
    # predicted masks back to the original resolution.
    image = image.convert('RGB')
    width, height = image.size
    image.thumbnail((1024, 1024))
    image = img_to_array(image)

    result = self.model.detect([image])[0]
    masks = result.get('masks')
    class_ids = result.get('class_ids')

    # Convert every predicted instance mask into an imantics annotation
    # and export the whole image as a COCO dict.
    coco_image = im.Image(width=width, height=height)
    for i in range(masks.shape[-1]):
        mask = resize(masks[..., i], (height, width))
        mask = im.Mask(mask)
        class_id = class_ids[i]
        class_name = CLASS_NAMES[class_id]
        category = im.Category(class_name)
        coco_image.add(mask, category=category)

    return coco_image.coco()
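
# Minimal, self-contained sketch of the Mask -> COCO pattern used in detect()
# above: a synthetic boolean mask is wrapped in imantics objects and exported
# as a COCO dict. The square "dummy" mask and the "object" category name are
# illustrative assumptions, not part of the original detector.
import numpy as np
import imantics as im

dummy = np.zeros((128, 128), dtype=bool)
dummy[32:96, 32:96] = True                       # one square instance

coco_image = im.Image(width=128, height=128)
coco_image.add(im.Mask(dummy), category=im.Category('object'))
print(coco_image.coco()['annotations'])          # COCO-style annotation entries
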
def process_image(file, image_id):
    """Assumes that the prediction and the RGB image file share a common prefix; see below."""
    prediction = cv2.imread(os.path.join(basedir, file), cv2.IMREAD_ANYDEPTH)

    # reduce noise (makes it easier to edit predictions)
    prediction = (prediction == 19).astype('int32')
    for _ in range(2):
        prediction = sp.ndimage.median_filter(prediction, size=12)

    # remove part where there is obvious street and ego-vehicle
    prediction[700:, 500:1750] = 0
    prediction[vehicle_mask != 0] = 1

    annotation = imantics.Image(width=2048, height=1024)
    for c in categories:
        annotation.add(imantics.Mask(prediction == c), category=categories[c])

    coco_dataset['images'].append({
        'id': image_id,
        'dataset_id': 1,
        'path': "/datasets/lostandfound/{}".format(file[:-10] + 'rgb.png'),
        'width': 2048,
        'height': 1024,
        'file_name': file[:-10] + 'rgb.png'
    })

    coco_annotations = annotation.coco()['annotations']
    for coco_annotation in coco_annotations:
        coco_annotation['id'] = 0
        coco_annotation['image_id'] = image_id
        coco_annotation['dataset_id'] = 1
    return coco_annotations
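
# A hypothetical driver sketch for process_image() above, assuming basedir,
# coco_dataset, categories and vehicle_mask are already set up as module-level
# state. The prediction-file suffix and the output path are placeholder
# assumptions; adjust them to the actual file naming.
import json

def build_coco_json(prediction_suffix, out_path='lostandfound_coco.json'):
    coco_dataset.setdefault('annotations', [])
    files = sorted(f for f in os.listdir(basedir) if f.endswith(prediction_suffix))
    for image_id, file in enumerate(files, start=1):
        coco_dataset['annotations'].extend(process_image(file, image_id))
    with open(out_path, 'w') as fp:
        json.dump(coco_dataset, fp)
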
def prepare_dataset(self, all_images=False):
    dataset = imantics.Dataset('INbreast')

    categories_names = list(self.annotations_df.category.unique())
    colors = {name: imantics.Color.random() for name in categories_names}
    categories = {
        name: imantics.Category(name, id=i, color=colors[name])
        for i, name in enumerate(categories_names)
    }

    total = len(self.annotations_df)
    prepared = 0
    for case_id, annotations_data in self.annotations_df.groupby('case_id'):
        width = annotations_data['width'].unique()[0]
        height = annotations_data['height'].unique()[0]

        image = imantics.Image(id=case_id, width=width, height=height)
        image.file_name = '%d.png' % case_id

        annotations = []
        for _, annotation_data in annotations_data.iterrows():
            annotation_id = annotation_data['annotation_id']
            category = categories[annotation_data['category']]
            color = colors[annotation_data['category']]
            points = annotation_data['points']
            annotation = imantics.Annotation(id=annotation_id,
                                             polygons=[points],
                                             category=category,
                                             color=color)
            annotations.append(annotation)

        image.add(annotations)
        dataset.add(image)
        prepared += 1
        print('\r%d %d/%d' % (case_id, prepared, total), end="")
    print('\rprepared %d images' % prepared)
    print()

    self.dataset = dataset
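
# A short export sketch to pair with prepare_dataset(): dumping the prepared
# dataset to a COCO JSON file. save_coco() and the output path are hypothetical,
# and Dataset.coco() is assumed to be available in the installed imantics
# version; if it is not, export per image via image.coco() as done elsewhere
# in this file.
import json

def save_coco(self, out_path='inbreast_coco.json'):
    with open(out_path, 'w') as fp:
        json.dump(self.dataset.coco(), fp)
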
def imageCallback(self, img_msg, info_msg):
    try:
        img = self.bridge.imgmsg_to_cv2(img_msg, "bgr8")
        header = img_msg.header
    except CvBridgeError as err:
        rospy.logerr(err)
        return

    # Restrict the segmentation to the board region, if a ROI is configured.
    board_mask = np.zeros_like(img[:, :, 0])
    if self.image_roi is None:
        board_mask[:, :] = 1
    else:
        board_mask[self.image_roi[0]:self.image_roi[1],
                   self.image_roi[2]:self.image_roi[3]] = 1
    board_size = board_mask.sum()

    # Segment the image and keep only markers of a plausible size.
    all_markers = segmentImage(img, self.filter_size, self.filter_const)
    markers_masked = (all_markers + 1) * board_mask
    filtered_idxs, filtered_counts = filterMarkers(markers_masked,
                                                   board_size / 250,
                                                   board_size / 3)
    markers_masked[np.isin(markers_masked, filtered_idxs, invert=True)] = 0

    # Wrap every surviving marker in an imantics annotation for visualisation.
    ann_img = imantics.Image(image_array=img)
    colors = [(255, 0, 0), (0, 255, 0), (0, 0, 255),
              (0, 255, 255), (255, 0, 255), (255, 255, 0)]
    for j, idx in enumerate(filtered_idxs):
        mask = imantics.Mask((markers_masked == idx).astype(np.uint8))
        # Cycle through the palette so more than six markers cannot raise an IndexError.
        color = imantics.Color(rgb=colors[j % len(colors)])
        ann = imantics.Annotation(image=ann_img,
                                  category=imantics.Category('obj_{}'.format(j), color=color),
                                  mask=mask,
                                  bbox=imantics.BBox.from_mask(mask))
        ann_img.add(ann)
    display_img = ann_img.draw(thickness=1, color_by_category=True)

    try:
        display_msg = self.bridge.cv2_to_imgmsg(display_img.astype(np.uint8),
                                                encoding="bgr8")
    except CvBridgeError as err:
        rospy.logerr(err)
        return
    display_msg.header = img_msg.header
    self.image_pub.publish(display_msg)
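
# A minimal node-setup sketch around imageCallback() above. The class name,
# topic names, parameter names and default values are assumptions for
# illustration; only the synchronised (Image, CameraInfo) callback signature
# is taken from the code.
import rospy
import message_filters
from cv_bridge import CvBridge
from sensor_msgs.msg import CameraInfo, Image

class MarkerAnnotatorNode:
    # imageCallback() from above is assumed to be a method of this class.
    def __init__(self):
        self.bridge = CvBridge()
        self.image_roi = rospy.get_param('~image_roi', None)   # [row0, row1, col0, col1]
        self.filter_size = rospy.get_param('~filter_size', 5)
        self.filter_const = rospy.get_param('~filter_const', 2)
        self.image_pub = rospy.Publisher('~annotated', Image, queue_size=1)
        self.image_sub = message_filters.Subscriber('image', Image)
        self.info_sub = message_filters.Subscriber('camera_info', CameraInfo)
        self.sync = message_filters.TimeSynchronizer([self.image_sub, self.info_sub], 5)
        self.sync.registerCallback(self.imageCallback)

if __name__ == '__main__':
    rospy.init_node('marker_annotator')
    node = MarkerAnnotatorNode()
    rospy.spin()
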
# resize
breast_mask_gray = cv2.resize(breast_mask_gray, scaled_dim)
pm_mask_gray = cv2.resize(pm_mask_gray, scaled_dim)

# save images
img_rescaled_fn = osp.join(out_images_dir, '%d.png' % case_id)
img_breast_mask_fn = osp.join(out_annotations_dir,
                              '%d_pixels%d.png' % (case_id, index_breast))
pm_mask_fn = osp.join(out_annotations_dir,
                      '%d_pixels%d.png' % (case_id, index_pectoral_muscle))
img_rescaled = cv2.resize(img, scaled_dim)
cv2.imwrite(img_rescaled_fn, img_rescaled)
cv2.imwrite(img_breast_mask_fn, breast_mask_gray)
cv2.imwrite(pm_mask_fn, pm_mask_gray)

# add to coco
coco_image = imantics.Image(id=case_id,
                            width=scaled_dim[1],
                            height=scaled_dim[0])
coco_image.file_name = '%d.png' % case_id
breast_mask_mask = imantics.Mask.create(breast_mask_gray)
pm_mask_mask = imantics.Mask.create(pm_mask_gray)
breast_annotation = imantics.Annotation(mask=breast_mask_mask,
                                        category=category_breast,
                                        color=category_breast.color)
pectoral_muscle_annotation = imantics.Annotation(
    mask=pm_mask_mask,
    category=category_pectoral_muscle,
    color=category_pectoral_muscle.color)
coco_image.add(breast_annotation)
coco_image.add(pectoral_muscle_annotation)
dataset.add(coco_image)

# show debug
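
# A hedged debug-display sketch to go with the "# show debug" step: overlay the
# two masks on the rescaled image using imantics' draw(), as the ROS callback
# above does. Converting img_rescaled from grayscale to BGR and the "debug"
# window name are assumptions; skip the conversion if the image is already BGR.
debug_array = cv2.cvtColor(img_rescaled, cv2.COLOR_GRAY2BGR)
debug_image = imantics.Image(image_array=debug_array)
debug_image.add(breast_mask_mask, category=category_breast)
debug_image.add(pm_mask_mask, category=category_pectoral_muscle)
cv2.imshow('debug', debug_image.draw(color_by_category=True).astype('uint8'))
cv2.waitKey(1)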