def polygon_loss_from_cuda_gt_mask(score_cuda, gt_mask_cuda):
    try:
        gt_mask = cuda.to_cpu(gt_mask_cuda)[0]
    except Exception:
        # gt_mask_cuda may be a chainer Variable; fall back to its .data
        gt_mask = cuda.to_cpu(gt_mask_cuda.data)[0]
    gt_mask = gt_mask.astype(bool)
    score1 = F.softmax(score_cuda)
    score1_cpu = cuda.to_cpu(score1.data)[0]
    building_mask_pred = (np.argmax(score1_cpu, axis=0) == 1)
    pred_polygons = Mask(building_mask_pred).polygons()
    n_pred_polygons = len(predict_polygons(pred_polygons))
    polygons_gt = Mask(gt_mask).polygons()
    n_gt_polygons = len(predict_polygons(polygons_gt))
    if n_gt_polygons == 0:
        # No ground-truth polygons: loss is 1 if anything was predicted, else 0.
        poly_loss = 0.0 if n_pred_polygons == 0 else 1.0
    else:
        poly_loss = abs(n_pred_polygons - n_gt_polygons) / n_gt_polygons
    return poly_loss
def test_subtract(self, array_a, array_b, e_subtract):
    mask_a = Mask(array_a)
    mask_b = Mask(array_b)
    assert mask_a.subtract(mask_b) == e_subtract
    assert mask_a - mask_b == e_subtract
    assert mask_a - np.array(array_b) == e_subtract
def test_contains_mask(self, array_a, array_b, e_contains):
    mask_a = Mask(array_a)
    mask_b = Mask(array_b)
    array_b = np.array(array_b)
    assert mask_a.contains(mask_b) == e_contains
    assert (mask_b in mask_a) == e_contains
    assert mask_a.contains(array_b) == e_contains
    assert (array_b in mask_a) == e_contains
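# A minimal usage sketch of the boolean Mask operations exercised by the tests
# above (union, intersect, subtract, contains). The toy arrays below are
# illustrative only, not taken from the test fixtures.
import numpy as np
from imantics import Mask

a = Mask(np.array([[1, 1, 0], [1, 1, 0], [0, 0, 0]]))
b = Mask(np.array([[0, 1, 1], [0, 1, 1], [0, 0, 0]]))

overlap = a * b    # same as a.intersect(b)
combined = a + b   # same as a.union(b)
remainder = a - b  # same as a.subtract(b)
print(overlap.array.sum(), combined.array.sum(), remainder.array.sum())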
def numpy_to_shp(n_class, tif_image: TifImage, inNumpyPath, outShpPath, code):
    if not os.path.exists(outShpPath):
        os.makedirs(outShpPath)
    xo = tif_image.xOffset
    yo = tif_image.yOffset
    xs = tif_image.xScale
    ys = tif_image.yScale
    whole_result = np.load(os.path.join(inNumpyPath, code + ".npz"))["pred"].astype(np.uint8)
    print(f'\t{whole_result.shape}')
    w = shapefile.Writer(os.path.join(outShpPath, code + ".shp"))
    w.field("DLBM", "C")
    w.field("DLMC", "C")
    for i in range(1, n_class):
        print(f"{i}/{n_class - 1}")
        if np.any(whole_result == i):
            mask = (whole_result == i).astype(np.uint8)
            polygons = Mask(mask).polygons()
            for pp in polygons.points:
                pp = pp.astype(np.float32)
                # Map pixel coordinates to geo coordinates via the GeoTIFF scale/offset.
                pp[:, 0] *= xs
                pp[:, 1] *= ys
                pp[:, 0] += xo
                pp[:, 1] += yo
                if cv2.contourArea(pp) < 1000:
                    continue
                # Points are reversed, presumably to get the ring winding shapefiles expect.
                w.poly([pp[::-1].tolist()])
                w.record(i, i)
    w.close()
def inference2(image, weights, mean):
    mean = np.load(mean)
    model = segmentation_cpu.SegmentationModel(weights, mean)
    image = np.array(Image.open(image))
    score = model.apply_segmentation(image)
    building_score = score[1]
    building_mask_pred = (np.argmax(score, axis=0) == 1)
    polygons = Mask(building_mask_pred).polygons()
    new_predictions = []
    for poly in polygons:
        if len(poly) >= 3:
            f = poly.reshape(-1, 2)
            simplified_vw = simplify_coords_vwp(f, .3)
            if len(simplified_vw) > 2:
                mpoly = []
                # Rebuilding the polygon in the way that PIL expects the values [(x1,y1),(x2,y2)]
                for i in simplified_vw:
                    mpoly.append((i[0], i[1]))
                # Adding the first point to the last to close the polygon
                mpoly.append((simplified_vw[0][0], simplified_vw[0][1]))
                new_predictions.append(mpoly)
    # Creating the json with the predicted and then adjusted polygons
    output_json = create_json(new_predictions)
    return output_json
def generate_polygon_images(num_images, IMAGE_DIR, ANNOTATION_DIR, RUST_DIR, BG_DIR):
    data = {
        'images': [],
        'annotations': [],
        'categories': [
            {
                'id': 1,
                'name': 'rust'
            },
        ]
    }
    for x in range(num_images):
        rust = random.choice(os.listdir(RUST_DIR))
        bg = random.choice(os.listdir(BG_DIR))
        num_locs = random.choice((1, 3, 5))
        image = Image(x)
        image.create_image('{}/{}'.format(RUST_DIR, rust),
                           '{}/{}'.format(BG_DIR, bg),
                           sigmas_x=(5, 40, 80),
                           sigmas_y=(5, 40, 80),
                           radius=3,
                           num_locs=num_locs,
                           num_points=500,
                           kernel=cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (20, 30)))
        im_data = image_data(image.filename, image.image_shape[0],
                             image.image_shape[1], image.id)
        data['images'].append(im_data)
        for mask in image.masks:
            segmentations = Mask(mask).polygons().segmentation
            filtered_segmentations = [s for s in segmentations if len(s) > 4]
            area = 0
            for segmentation in filtered_segmentations:
                # Renamed from x/y to avoid shadowing the image-index loop variable x.
                x_coords = segmentation[::2]
                y_coords = segmentation[1::2]
                area = area + get_poly_area(x_coords, y_coords)
            bbox = get_bbox(mask)
            if len(filtered_segmentations):
                an_data = poly_annotation_data(image.filename, filtered_segmentations,
                                               area, bbox, image.id)
                data['annotations'].append(an_data)
        cv2.imwrite('{}/{}'.format(IMAGE_DIR, image.filename), image.image)
        print('SAVED {} IN {}'.format(image.filename, IMAGE_DIR))
    with open('{}/{}'.format(ANNOTATION_DIR, 'annotations.json'), 'w') as f:
        json.dump(data, f)
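# get_poly_area is not defined in these snippets. A minimal sketch of what it
# likely computes, assuming x and y are the even/odd-indexed coordinate
# sequences sliced above: the shoelace formula for the area of a simple polygon.
import numpy as np

def get_poly_area(x, y):
    x = np.asarray(x, dtype=np.float64)
    y = np.asarray(y, dtype=np.float64)
    # Shoelace formula: 0.5 * |sum(x_i * y_{i-1} - y_i * x_{i-1})|
    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))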
def inference(image, score, output_file):
    building_score = score[1]
    building_mask_pred = (np.argmax(score, axis=0) == 1)
    polygons = Mask(building_mask_pred).polygons()
    new_predictions = []
    for poly in polygons:
        if len(poly) >= 3:
            f = poly.reshape(-1, 2)
            simplified_vw = simplify_coords_vwp(f, .3)
            if len(simplified_vw) > 2:
                mpoly = []
                # Rebuilding the polygon in the way that PIL expects the values [(x1,y1),(x2,y2)]
                for i in simplified_vw:
                    mpoly.append((i[0], i[1]))
                # Adding the first point to the last to close the polygon
                mpoly.append((simplified_vw[0][0], simplified_vw[0][1]))
                new_predictions.append(mpoly)
    # Creating the json with the predicted and then adjusted polygons
    output_json = create_json(new_predictions)
    with open(output_file, 'w') as out_file:
        json.dump(output_json, out_file)
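# A small round-trip sketch of the simplification step used above, assuming
# simplify_coords_vwp comes from the `simplification` package
# (from simplification.cutil import simplify_coords_vwp). The near-square ring
# below is illustrative only.
import numpy as np
from simplification.cutil import simplify_coords_vwp

ring = np.array([[0.0, 0.0], [5.0, 0.1], [10.0, 0.0],
                 [10.0, 10.0], [0.0, 10.0]])
# Visvalingam-Whyatt with the same epsilon as above drops near-collinear points.
print(simplify_coords_vwp(ring, .3))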
def get_hair_dicts_d(dir_path, dir_name="train", dataset_name="large", class_name="hair"):
    dataset_dicts = []
    imgs = [f for f in os.listdir(dir_path) if f.endswith(".jpg")]
    for idx in tqdm(range(len(imgs)), "Accessing json annotation: "):
        img_file = imgs[idx]
        img_path = os.path.join(dir_path, img_file)
        mask_path = img_path[:-4] + "!!.png"
        mask = cv2.imread(mask_path, cv2.IMREAD_UNCHANGED)
        bbox = cv2.boundingRect(cv2.findNonZero(mask))
        # only one object per image
        objs = [{
            "bbox": bbox,
            "bbox_mode": BoxMode.XYWH_ABS,
            "segmentation": Mask(mask).polygons().segmentation,
            "category_id": 0,
            "image_id": idx,
            "id": idx
        }]
        dataset_dicts.append({
            "file_name": img_path,
            "width": mask.shape[1],
            "height": mask.shape[0],
            "image_id": idx,
            "annotations": objs,
            "sem_seg_file_name": mask_path
        })
    return dataset_dicts
def hello_world():
    if request.method == "POST":
        start = time.time()
        content = request.json
        path = content["path"]
        points = np.array(content["points"])
        print(f"Processing Image: {path}")
        image = Image.open(path)
        print(f"Image Size: {image.size}", flush=True)
        # points come in [x,y] order; this must be flipped
        points = points[:, ::-1]
        mask = model.predict([image], [points])[0]
        mask_bin = mask >= 0.5
        polygons = Mask(mask_bin).polygons().points
        polygons = [polygon.tolist() for polygon in polygons if len(polygon) > 2]
        print(f"Result: {polygons}", flush=True)
        end = time.time()
        process_time = end - start
        print(f"Process Time: {process_time:9.3} seconds", flush=True)
        return jsonify({"polygons": polygons})
    return "<h4>DEXTR Action is running.</h4>"
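# A hedged client-side sketch for the endpoint above. The route and port are
# not shown in this snippet, so http://localhost:5000/ is an assumption; the
# payload mirrors the keys the handler reads ("path" and "points").
import requests

resp = requests.post(
    "http://localhost:5000/",
    json={
        "path": "/data/sample.jpg",  # hypothetical image path on the server
        "points": [[10, 20], [200, 20], [10, 180], [200, 180]],  # [x, y] clicks
    },
)
print(resp.json()["polygons"])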
def prepare_result(img: np.ndarray,
                   pred_probs: np.ndarray,
                   clicks: Clicker,
                   gt_mask_file,
                   tolerance: int = 1,
                   view_img: bool = False,
                   filename: str = None):
    """Prepare the prediction result.

    Args:
        img (np.ndarray): image as a numpy.ndarray
        pred_probs (np.ndarray): predicted probability maps from the model
        clicks (Clicker): Clicker object holding the click history
        gt_mask_file (FileStorage): ground truth mask file
        tolerance (int, optional): precision in pixels when converting the mask
            to a polygon. Defaults to 1.
        view_img (bool, optional): also save a result image and return its URL.
            Defaults to False.
        filename (str, optional): filename used when saving the result image.
    """
    # gen mask
    assert len(pred_probs) == 1, f'Only one output is expected, but got {len(pred_probs)}'
    pred_probs = pred_probs[0]
    pred_mask = pred_probs > MODEL_THRESH

    # convert mask to polygon
    regions = Mask(pred_mask).polygons().points
    polygons = []
    for polygon in regions:
        polygon2 = measure.approximate_polygon(polygon, tolerance)
        polygons.append(polygon2.tolist())
    results = {'polygons': polygons}

    # calculate iou
    if gt_mask_file:
        gt_mask = Image.open(gt_mask_file)
        mask_np = np.asarray(gt_mask, dtype=np.int32)
        if len(mask_np.shape) > 2:
            assert len(mask_np.shape) == 3
            mask_np = np.max(mask_np, axis=2)
        mask_np = np.where(mask_np > 0, 1, 0)
        iou = utils.get_iou(mask_np, pred_mask)
        results['iou'] = iou
        print(iou)

    # save img with minor delay
    if view_img:
        ext = filename.split('.')[-1]
        draw = vis.draw_with_blend_and_clicks(img, mask=pred_mask,
                                              clicks_list=clicks.clicks_list)
        filename = filename.split('.')[0] + f'[{len(clicks.clicks_list)}].jpg'
        result_path = TEMP_PATH + filename
        Image.fromarray(draw).save(result_path)
        # return send_file(result_path)
        results['result'] = filename

    return results
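# A small sketch of the polygon simplification used above, assuming `measure`
# is skimage.measure. The jagged contour here is illustrative only: points
# closer than `tolerance` to the simplified outline are dropped.
import numpy as np
from skimage import measure

contour = np.array([[0, 0], [1, 0.05], [2, 0], [2, 2], [0, 2]], dtype=float)
print(measure.approximate_polygon(contour, tolerance=1))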
def polygon_loss(score_cuda, n_gt_polygons):
    score1 = F.softmax(score_cuda)
    score1_cpu = cuda.to_cpu(score1.data)[0]
    building_mask_pred = (np.argmax(score1_cpu, axis=0) == 1)
    polygons_pred = Mask(building_mask_pred).polygons()
    n_polygons_pred = len(predict_polygons(polygons_pred))
    if n_gt_polygons == 0:
        # Avoid dividing by zero below: with no ground-truth polygons the loss
        # is 1 if anything was predicted and 0 otherwise.
        poly_loss = 1 if n_polygons_pred > 0 else 0
    else:
        poly_loss = abs(n_polygons_pred - n_gt_polygons) / n_gt_polygons
    return poly_loss, n_gt_polygons, n_polygons_pred
def generate_json(img_path, result, save_path):
    with open(img_path, 'rb') as r_obj:
        imgData = r_obj.read()
    imgData = base64.b64encode(imgData).decode('utf-8')
    img = cv2.imread(img_path)
    height = img.shape[0]
    width = img.shape[1]
    meta = dict(version='3.6.16',
                flags={},
                lineColor=[0, 255, 0, 128],
                fillColor=[255, 0, 0, 128],
                imagePath=img_path,
                imageData=imgData,
                imageHeight=height,
                imageWidth=width)
    shapes = []
    bbox_result, segm_result = result
    for category, segm in enumerate(segm_result):
        if len(segm) == 0:
            continue
        _mask = maskUtils.decode(segm).astype(bool)  # np.bool is removed in NumPy >= 1.24
        _mask = _mask[..., 0]
        polygons = Mask(_mask).polygons()
        for points in polygons.points:
            points_keep = []
            idx_remove = []
            ori_area = cv2.contourArea(points)
            if len(points) < 4 or ori_area < 0.001:
                continue
            # Drop vertices whose removal changes the contour area by less than 1%.
            for p in range(len(points)):
                index = list(range(len(points)))
                index.remove(p)
                for k in idx_remove:
                    index.remove(k)
                area = cv2.contourArea(points[index])
                if np.abs(ori_area - area) / ori_area > 0.01:
                    points_keep.append(points[p])
                else:
                    idx_remove.append(p)
            points_keep = np.array(points_keep)
            region_dict = dict(label=CLS_NAME[category],
                               line_color=COLOR_MASK[category],
                               fill_color=None,
                               points=points_keep.tolist(),
                               shape_type='polygon')
            shapes.append(region_dict)
    meta['shapes'] = shapes
    with open(save_path, 'w') as w_obj:
        json.dump(meta, w_obj, ensure_ascii=False, indent=2)
def generate_poly_images(num_images, BG_DIR, RUST_DIR, IMAGE_DIR, ANNOTATION_DIR):
    # Storing data in COCO format
    data = {
        'images': [],
        'annotations': [],
        'categories': [
            {
                'id': 1,
                'name': 'rust'
            },
        ],
    }
    for n in range(num_images):
        filename = '{}.jpg'.format(n)
        image_id = n
        # Initializing image object
        image = Image(BG_DIR, RUST_DIR)
        # Creating images with a random number of rust spots
        num_spots = np.random.choice((1, 2, 3))
        image.create_image(num_spots)
        for mask in image.masks:
            segmentations = Mask(mask).polygons().segmentation
            area = 0
            for segmentation in segmentations:
                x = segmentation[::2]
                y = segmentation[1::2]
                area = area + get_poly_area(x, y)
            bbox = get_bbox(mask)
            im_data = image_data(filename, image.background.shape[0],
                                 image.background.shape[1], image_id)
            an_data = poly_annotation_data(filename, segmentations, area,
                                           bbox, image_id)
            data['images'].append(im_data)
            data['annotations'].append(an_data)
        cv2.imwrite('{}/{}'.format(IMAGE_DIR, filename), image.background)
        print('{} saved in {}'.format(filename, IMAGE_DIR))
        # Checkpoint the annotation file periodically during generation.
        if n % 1000 == 0:
            print('---Saving Annotation Data---')
            with open('{}/{}'.format(ANNOTATION_DIR, 'annotations.json'), 'w') as f:
                json.dump(data, f)
    with open('{}/{}'.format(ANNOTATION_DIR, 'annotations.json'), 'w') as f:
        json.dump(data, f)
def test(model, data_loader_test, device, predict_path):
    class_name = {1: 'sidewalk_blocks', 2: 'alley_damaged', 3: 'sidewalk_damaged',
                  4: 'caution_zone_manhole', 5: 'braille_guide_blocks_damaged',
                  6: 'alley_speed_bump', 7: 'roadway_crosswalk', 8: 'sidewalk_urethane',
                  9: 'caution_zone_repair_zone', 10: 'sidewalk_asphalt',
                  11: 'sidewalk_other', 12: 'alley_crosswalk',
                  13: 'caution_zone_tree_zone', 14: 'caution_zone_grating',
                  15: 'roadway_normal', 16: 'bike_lane', 17: 'caution_zone_stairs',
                  18: 'alley_normal', 19: 'sidewalk_cement',
                  20: 'braille_guide_blocks_normal', 21: 'sidewalk_soil_stone'}
    model.to(device)
    model.eval()
    os.makedirs('./predictions', exist_ok=True)
    # Iterate over all images
    pred_xml = elemTree.Element('predictions')
    pred_xml.text = '\n '
    for idx, data in enumerate(data_loader_test):
        print('{} / {}'.format(idx + 1, len(data_loader_test)))
        images, target = data
        images = list(image.to(device) for image in images)
        outputs = model(images)
        output = outputs[0]
        masks, labels, scores = output['masks'], output['labels'], output['scores']
        # For a single image
        xml_image = elemTree.SubElement(pred_xml, 'image')
        xml_image.attrib['name'] = target[0]['image_id'].split('.')[0]
        xml_image.text = '\n '
        for index in range(len(masks)):
            mask, label, score = masks[index], int(labels[index]), scores[index]
            # class, score, x1, y1, x2, y2
            mask_arr = mask[0].cpu().detach().numpy()
            mask_bin = np.where(mask_arr > 0.3, True, False)
            polygons = Mask(mask_bin).polygons()
            points = polygons.points
            point = ''
            for p in points[0]:
                point += str(p[0]) + ',' + str(p[1]) + ';'
            xml_predict = elemTree.SubElement(xml_image, 'predict')
            xml_predict.tail = '\n '
            xml_predict.attrib['class_name'] = class_name[label]
            xml_predict.attrib['score'] = str(float(score))
            xml_predict.attrib['polygon'] = point
            if index == len(masks) - 1:
                xml_predict.tail = '\n '
        xml_image.tail = '\n '
        if idx == len(data_loader_test) - 1:
            xml_image.tail = '\n'
    pred_xml = elemTree.ElementTree(pred_xml)
    pred_xml.write('./predictions/' + predict_path + '.xml')
def createAnnots(self, dataset_dicts, mapping, sample_ratio=1):
    test_annots = []
    for idx, d in tqdm(enumerate(dataset_dicts[::sample_ratio]),
                       total=len(dataset_dicts[::sample_ratio])):
        im = cv2.imread(d['file_name'])
        outputs = self.predictor(im)
        pred_classes = outputs['instances'].get('pred_classes').cpu().numpy()
        masks = outputs['instances'].get('pred_masks').cpu().permute(1, 2, 0).numpy()
        image_name = d['file_name']
        annotations = []
        try:
            if masks.shape[2] != 0:
                for i in range(masks.shape[2]):
                    polygons = Mask(masks[:, :, i]).polygons()
                    annotation = remo.Annotation()
                    annotation.img_filename = image_name
                    annotation.classes = mapping[pred_classes[i]]
                    annotation.segment = polygons.segmentation[0]
                    annotations.append(annotation)
            elif masks.sum() == 0:
                continue
            else:
                polygons = Mask(masks[:, :, 0]).polygons()
                annotation = remo.Annotation()
                annotation.img_filename = image_name
                annotation.classes = mapping[pred_classes[0]]
                annotation.segment = polygons.segmentation[0]
                annotations.append(annotation)
        except IndexError:
            raise IndexError(
                f"No preds at idx: {idx} \n - instance: \n{d} \n - outputs: \n{outputs}")
        test_annots += annotations
    return test_annots
def test_intersect(self, array_a, array_b, e_intersect):
    mask_a = Mask(array_a)
    mask_b = Mask(array_b)
    assert mask_a.intersect(mask_b) == e_intersect
    assert mask_b.intersect(mask_a) == e_intersect
    assert mask_a * mask_b == e_intersect
    assert mask_a * np.array(array_b) == e_intersect
def test_union(self, array_a, array_b, e_union):
    mask_a = Mask(array_a)
    mask_b = Mask(array_b)
    assert mask_a.union(mask_b) == e_union
    assert mask_b.union(mask_a) == e_union
    assert mask_a + mask_b == e_union
    assert mask_a + np.array(array_b) == e_union
def labels_to_objects(labels, objects):
    res = {}
    for i_float in np.unique(labels):
        i = int(i_float)
        if i == 0:
            continue
        # Do this inside the loop because some objects aren't returned
        res[i - 1] = objects[i - 1]
        mask_points_nd = Mask(np.where(labels == i, 1, 0)).polygons().points
        mask_points = list(map(lambda x: x.tolist(), mask_points_nd))
        res[i - 1]["mask"] = mask_points
        res[i - 1]["type"] = "annotate"
    return res
def generate_json(self, bg_sample, image_index):
    raw_mask = bg_sample['fg_mask']
    json_data = {}
    json_data['file_name'] = self.prefix + '_' + str(image_index) + '.jpg'
    json_data['height'] = bg_sample['height']
    json_data['width'] = bg_sample['width']
    json_data['annotations'] = []
    inst_ids = np.unique(raw_mask)
    inst_ids = inst_ids[inst_ids >= 1]
    for inst_id in inst_ids:
        inst_anno = {}
        inst_mask = np.zeros_like(raw_mask)
        inst_mask[raw_mask == inst_id] = inst_id
        if inst_mask.sum() < 200:
            continue
        inst_poly = Mask(inst_mask).polygons()
        inst_anno['bbox'] = list(inst_poly.bbox())
        inst_anno['segmentation'] = [(np.array(poly) + 0.5).tolist()
                                     for poly in inst_poly.segmentation
                                     if len(poly) % 2 == 0 and len(poly) >= 6]
        # Adjust bbox for removed parts
        if len(inst_anno['segmentation']) != len(inst_poly.segmentation):
            min_x, min_y, max_x, max_y = float('inf'), float('inf'), 0, 0
            for poly in inst_anno['segmentation']:
                min_x = min(min_x, min(poly[0::2]))
                max_x = max(max_x, max(poly[0::2]))
                min_y = min(min_y, min(poly[1::2]))
                max_y = max(max_y, max(poly[1::2]))
            inst_anno['bbox'] = [int(min_x - 0.5), int(min_y - 0.5),
                                 int(max_x - 0.5), int(max_y - 0.5)]
        inst_anno['category_id'] = 0
        inst_anno['iscrowd'] = 0
        json_data['annotations'].append(inst_anno)
    return json_data
def export_results(ret, markers, out):
    """ write the resulting mask to a file """
    # should we save the segments as a mask or as bounding boxes?
    if out.endswith('.npy'):
        np.save(out, markers)
    elif out.endswith('.json'):
        # import extra required modules
        from imantics import Mask
        import import_labelme
        segments = [
            (int(i), largest_polygon(Mask(markers == i).polygons()).tolist())
            for i in (range(1, ret) if ret is not None
                      else np.unique(markers).astype(int))
        ]
        import_labelme.write(out, segments, args.ortho)
    else:
        raise Exception("Unsupported output file format.")
def export_results(mask, out):
    """ write the resulting mask to a file """
    ret, markers = cv.connectedComponents(mask.astype(np.uint8))
    # should we save the segments as a mask or as bounding boxes?
    if out.endswith('.npy'):
        np.save(out, markers)
    elif out.endswith('.json'):
        # import extra required modules
        from imantics import Mask
        import import_labelme
        segments = [
            (int(i), largest_polygon(Mask(markers == i).polygons()).tolist())
            for i in range(1, ret)
        ]
        import_labelme.write(out, segments, args.image)
    else:
        raise Exception("Unsupported output file format.")
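# largest_polygon is not defined in these snippets. A plausible sketch, assuming
# it picks the polygon with the largest enclosed area from an imantics Polygons
# object (the area criterion is an assumption; vertex count would also work):
import cv2
import numpy as np

def largest_polygon(polygons):
    """Return the point array of the polygon with the largest contour area."""
    pts = [np.asarray(p, dtype=np.float32) for p in polygons.points]
    return max(pts, key=cv2.contourArea)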
def to_struct(self):
    bbox = self._maybe_bbox(self.bbox, self.mask)
    mask_arr = []
    if self.mask is not None and isinstance(self.mask, np.ndarray):
        mask_arr = self.mask
    if self.mask is not None and isinstance(self.mask, torch.Tensor):
        mask_arr = self.mask.cpu().detach().numpy()
    mask_points_nd = Mask(mask_arr).polygons().points
    mask_points = list(map(lambda x: x.tolist(), mask_points_nd))
    return {
        "id": self.eid,
        "xyz": list(self.xyz),
        "bbox": bbox,
        "label": self.label,
        "properties": "\n ".join(self.properties if self.properties is not None else ""),
        "mask": mask_points,
    }
def save_json(image, bboxes, segm_result, labels, class_names,
              score_thr=0, out_file=None):
    assert bboxes.ndim == 2
    assert labels.ndim == 1
    assert bboxes.shape[0] == labels.shape[0]
    assert bboxes.shape[1] == 4 or bboxes.shape[1] == 5
    img = image.copy()
    if segm_result is not None:
        segms = mmcv.concat_list(segm_result)
        indxs = np.where(bboxes[:, -1] > score_thr)[0]
    if score_thr > 0:
        assert bboxes.shape[1] == 5
        scores = bboxes[:, -1]
        inds = scores > score_thr
        bboxes = bboxes[inds, :]
        labels = labels[inds]
    record = {}
    record['imgHeight'] = img.shape[0]
    record['imgWidth'] = img.shape[1]
    objects = []
    if out_file is not None:
        for i, (bbox, label, indx) in enumerate(zip(bboxes, labels, indxs)):
            bbox_int = bbox.astype(np.int32)
            mask = maskUtils.decode(segms[indx]).astype(bool)  # np.bool is removed in NumPy >= 1.24
            polygons = Mask(mask).polygons().segmentation
            label_text = class_names[label] if class_names is not None else 'cls {}'.format(label)
            objects.append({
                'label': label_text,
                'polygon': polygons,
                'bbox': bbox_int[:-1].tolist()
            })
        record['objects'] = objects
        with open(out_file.split('.')[0] + '.json', 'w') as f:
            json.dump(record, f, indent=2)
def post(self, image_id): """ COCO data test """ print("WTF") if not SUPER_LOADED: print("SSN not loaded") return {"disabled": True, "message": "DEXTR is disabled"}, 400 print("hannah du geiles super stücki") image_model = ImageModel.objects(id=image_id).first() if not image_model: return {"message": "Invalid image ID"}, 400 image = Image.open(image_model.path) result = superpixel.predict_mask(image) main() return {"segmentaiton": Mask(result).polygons().segmentation}
def test_fn(model: torch.nn.Module,
            data_loader: DataLoader,
            class_nums: Dict,
            device: torch.device):
    image_ids = [image_id.split('.')[1][-17:]
                 for image_id in data_loader.dataset.image_ids]
    xml_root = ET.Element('predictions')
    model.eval()
    batch_size = data_loader.batch_size
    with torch.no_grad():
        for i, (images, _) in tqdm(enumerate(data_loader)):
            images = list(image.to(device) for image in images)
            outputs = model(images)
            for j, output in enumerate(outputs):
                image_name = image_ids[i * batch_size + j]
                xml_image = ET.SubElement(xml_root, 'image', {'name': image_name})
                masks = output['masks'].detach().cpu().numpy()
                labels = output['labels'].detach().cpu().numpy()
                scores = output['scores'].detach().cpu().numpy()
                for mask, label, score in zip(masks, labels, scores):
                    mask_bin = np.where(mask[0] > 0.1, True, False)
                    polygons = Mask(mask_bin).polygons()
                    points = polygons.points
                    point = ''.join([str(p[0]) + ',' + str(p[1]) + ';'
                                     for p in points[0]])
                    attribs = {
                        'class_name': class_nums[label],
                        'score': str(float(score)),
                        'polygon': point,
                    }
                    ET.SubElement(xml_image, 'predict', attribs)
    indent(xml_root)
    tree = ET.ElementTree(xml_root)
    if not os.path.exists('./output/'):
        print('./output/ does not exist; creating the output folder...')
        os.mkdir('./output/')
    tree.write('./output/prediction.xml')
    print('Saved predicted labels to prediction.xml\n')
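# `indent` is not defined in this snippet; Python 3.9+ provides ET.indent for
# this purpose. For older versions, a minimal sketch of the recursive
# pretty-printer it presumably stands in for:
def indent(elem, level=0):
    """Recursively add newlines and indentation so the XML is human-readable."""
    pad = '\n' + (level + 1) * '  '
    if len(elem):
        if not elem.text or not elem.text.strip():
            elem.text = pad
        for child in elem:
            indent(child, level + 1)
            child.tail = pad
        # dedent after the last child so the parent's closing tag lines up
        elem[-1].tail = '\n' + level * '  '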
def result2dict(img, result, class_names, score_thr=0.3, out_file=None):
    assert isinstance(class_names, (tuple, list))
    img = mmcv.imread(img)
    if isinstance(result, tuple):
        bbox_result, segm_result = result
    else:
        bbox_result, segm_result = result, None
    bboxes = np.vstack(bbox_result)
    # draw segmentation masks
    arr_poly = []
    arr_masks = []
    if segm_result is not None:
        segms = mmcv.concat_list(segm_result)
        inds = np.where(bboxes[:, -1] > score_thr)[0]
        for i in inds:
            color_mask = np.random.randint(0, 256, (1, 3), dtype=np.uint8)
            mask = maskUtils.decode(segms[i]).astype(bool)  # np.bool is removed in NumPy >= 1.24
            # img[mask] = img[mask] * 0.5 + color_mask * 0.5
            polygons = Mask(mask).polygons()
            if polygons.points:
                arr_poly.append(polygons.points[0])
                arr_masks.append(mask)
            else:
                # Use None as the placeholder; the original compared against
                # np.empty([]) (uninitialized memory), which made the filter
                # below nondeterministic.
                arr_poly.append(None)
                arr_masks.append(None)
    # draw bounding boxes
    labels = [
        np.full(bbox.shape[0], i, dtype=np.int32)
        for i, bbox in enumerate(bbox_result)
    ]
    if not arr_poly:
        return None
    keep = np.array([i for i, val in enumerate(arr_poly) if val is not None])
    labels = np.concatenate(labels)
    ret_poly = [arr_poly[i].tolist() for i in keep]
    ret_mask = [arr_masks[i].tolist() for i in keep]
    return bboxes[keep].tolist(), ret_poly, \
        np.array([class_names[i] for i in labels])[keep].tolist(), ret_mask
def post(self, image_id): """ COCO data test """ if not DEXTR_LOADED: return {"disabled": True, "message": "DEXTR is disabled"}, 400 args = dextr_args.parse_args() points = args.get('points') padding = args.get('padding') threshold = args.get('threshold') if len(points) != 4: return {"message": "Invalid points entered"}, 400 image_model = ImageModel.objects(id=image_id).first() if not image_model: return {"message": "Invalid image ID"}, 400 image = Image.open(image_model.path) result = dextr.predict_mask(image, points) return {"segmentaiton": Mask(result).polygons().segmentation}
# Regions of interest
r['rois']
# Number of identified features
len(r['class_ids'])
# Scores of each feature
r['scores']
# ----------------------------------------------------
# ----------------------------------------------------
# Determine total area (fraction of image)
totalGeometry = []
for i in range(r['masks'].shape[-1]):
    polygons = Mask(r['masks'][:, :, i]).polygons()
    polygons = polygons.points[0]
    polygonF = Polygon(polygons)
    totalGeometry.append(polygonF)
totalGeometryPredicted = cascaded_union(totalGeometry)
boundaryP = gpd.GeoSeries(totalGeometryPredicted)
boundaryP.plot(color='red')
plt.show()
plt.close()
# To shapefile
# use the feature loop in case your polygon is a multipolygon
features = [i for i in range(len(totalGeometryPredicted))]
# add crs using wkt or EPSG to have a .prj file
def fillAnnotationsFile(path_dataset_created, augmented_dataset_name,
                        dataset_masks_name, annotations_file_name,
                        type_of_border):
    # Open the masks newly created by data augmentation and compute the box or
    # mask polygon to fill the annotations file
    file_annotations = open(
        path_dataset_created + augmented_dataset_name + '/' + annotations_file_name, 'a')
    paths_masks = list(
        paths.list_images(path_dataset_created + augmented_dataset_name + '/' +
                          dataset_masks_name))
    for path in paths_masks:
        mask = cv2.imread(path, 0)
        polygon = Mask(mask).polygons().points
        # The type of annotation is a mask
        if type_of_border == 'mask':
            str_polygon = '['
            for poly in polygon:
                str_polygon += '['
                for points in poly:
                    str_polygon = str_polygon + '[' + str(points[0]) + ',' + str(points[1]) + '],'
                str_polygon = str_polygon[:-1]
                str_polygon += '],'
            str_polygon = str_polygon[:-1] + ']'
        # The type of annotation is a box
        elif type_of_border == 'box':
            str_polygon = '['
            minX = sys.maxsize
            minY = sys.maxsize
            maxX = -sys.maxsize
            maxY = -sys.maxsize
            for poly in polygon:
                for point in poly:
                    if point[0] < minX:
                        minX = point[0]
                    if point[1] < minY:
                        minY = point[1]
                    if point[0] > maxX:
                        maxX = point[0]
                    if point[1] > maxY:
                        maxY = point[1]
                str_polygon += '[' + str(minX) + ',' + str(minY) + ',' + str(maxX) + ',' + str(maxY) + ',0],'
                # Reset the extrema before processing the next polygon
                minX = sys.maxsize
                minY = sys.maxsize
                maxX = -sys.maxsize
                maxY = -sys.maxsize
            str_polygon = str_polygon[:-1] + ']'
        # Wrong value for the type of annotation
        else:
            raise ValueError(
                'The type of border given does not exist (location: config.cfg - Annotations - type_of_border): ',
                type_of_border)
        mask_name = path.split('/')[-1]
        file_annotations.write(path_dataset_created + augmented_dataset_name + '/' +
                               dataset_images_name + '/' + mask_name + ' ' +
                               str_polygon + '\n')
    file_annotations.close()
    if display_logs:
        print('Filled annotations file with newly generated masks')
mask_image_path = csv_list[i][0]
label_name = csv_list[i][2]
x_min_rel = float(csv_list[i][4])
x_max_rel = float(csv_list[i][5])
y_min_rel = float(csv_list[i][6])
y_max_rel = float(csv_list[i][7])
x_min = x_min_rel * width
y_min = y_min_rel * height
x_max = x_max_rel * width
y_max = y_max_rel * height
mask_image = cv2.imread(
    'C:\\Users\\Admin\\Documents\\open-images-1\\masks' + '\\' + mask_image_path, 0)
idx = os.path.splitext(mask_image_path)[0]
print(idx)
polygons = Mask(mask_image).polygons()
coco_dict['annotations'].append({
    'id': idx,
    'image_id': image_id,
    'category_id': label_name,
    'segmentation': polygons.segmentation,
    # 'bbox': [float(x_min), float(y_min), bbox_width, bbox_height],
    'bbox': [float(x_min), float(y_min), float(x_max), float(y_max)],