def get_image(coco, map_source_class_id, class_ids, i, mask_shape, image_size):
    """Load COCO image *i* with its instance masks, resized and augmented.

    Args:
        coco: pycocotools COCO instance.
        map_source_class_id: mapping from COCO category_id to internal class id.
        class_ids: COCO category ids to load annotations for.
        i: COCO image id.
        mask_shape: side length of the square per-instance mini-mask.
        image_size: target square image side length.

    Returns:
        Tuple ``(image, class_ids, normalized_boxes, mini_masks)``, or ``None``
        when the image has no usable (non-empty, non-crowd) instance masks.
    """
    annotations = coco.loadAnns(
        coco.getAnnIds(imgIds=[i], catIds=class_ids, iscrowd=False))
    img_url = os.path.join(config.coco_image_dir, coco.imgs[i]["file_name"])
    instance_masks = []
    cls_ids = []
    for annotation in annotations:
        class_id = map_source_class_id[annotation['category_id']]
        m = annToMask(annotation, coco.imgs[i]["height"], coco.imgs[i]["width"])
        # Skip degenerate annotations that rasterize to an all-zero mask.
        if m.max() < 1:
            continue
        instance_masks.append(m)
        cls_ids.append(class_id)
    # BUGFIX: an image with no valid masks previously crashed in np.transpose
    # below; return None (as the other loaders do) so callers can skip it.
    if not instance_masks:
        return None
    img = cv2.imread(img_url)
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    cls_ids = np.asarray(cls_ids)
    image, window, scale, padding, crop = utils.resize_image(
        img, min_dim=image_size, max_dim=image_size)
    mask = np.asarray(instance_masks)
    mask = np.transpose(mask, axes=[1, 2, 0])  # (N, H, W) -> (H, W, N)
    mask = utils.resize_mask(mask, scale, padding, crop)
    image, mask = aug(image, mask)
    boxes = utils.extract_bboxes(mask)
    mask = utils.minimize_mask(boxes, mask, mini_shape=(mask_shape, mask_shape))
    # Normalize coordinates to [0, 1]; a single divisor is valid because the
    # resized image is square (min_dim == max_dim == image_size).
    boxes = boxes * 1.0 / image_size
    return image, cls_ids, boxes, mask
def pull_item(self, idx):
    """Load sample *idx*: image plus binary 'land' masks from its paired JSON.

    Returns:
        Tuple ``(image, normalized_boxes, class_ids, merged_mask)``, or
        ``None`` when the annotation contains no 'land' polygons.
    """
    json_pth = self.data[idx]
    image_pth = json_pth.replace('.json', '.png')
    ig_data = io.imread(image_pth)[:, :, 0:3]
    js_data = json.loads(open(json_pth).read())
    msk = []
    ids = []
    for b in js_data['boundary']:
        if b['correctionType'] != 'land':
            continue
        p = [[pp['pix_x'], pp['pix_y']] for pp in b['points']]
        poly_land = np.zeros(
            shape=[self.image_size[0], self.image_size[1], 3], dtype=np.uint8)
        # BUGFIX: np.int was removed in NumPy 1.20+; use the concrete int32.
        cv2.fillPoly(poly_land, np.asarray([p], np.int32), (255, 255, 255))
        msk.append(poly_land[:, :, 0:1])
        ids.append(0)  # single foreground class
    if len(msk) > 1:
        msk = np.concatenate(msk, axis=2)
    elif len(msk) == 1:
        msk = msk[0]
    else:
        return None
    ig, window, scale, padding, crop = utils.resize_image_fixed_size(
        ig_data, self.image_size)
    msk = utils.resize_mask(msk, scale, padding, crop)
    # BUGFIX: the old test `randint(0, 1) == 2` could never fire, and its body
    # augmented the pre-resize image (ig_data) that was then discarded.
    # Augment the resized image/mask pair together, 50% of the time, as the
    # sibling loaders do.
    if random.randint(0, 1) == 1:
        ag = self.aug.to_deterministic()
        ig = ag.augment_image(ig)
        msk = ag.augment_image(msk)
    box = utils.extract_bboxes(msk)
    ids = np.asarray(ids)
    # Collapse per-instance masks into one binary map, clamped to 255.
    mask = np.sum(msk, 2)
    mask[np.where(mask > 255)] = 255
    box = box / np.asarray([
        self.image_size[0], self.image_size[1],
        self.image_size[0], self.image_size[1]
    ])
    return ig, box, ids, mask
def pull_item(self, idx):
    """Load BDD sample *idx* with drivable-area instance masks.

    Class 0 = 'direct' drivable area, class 1 = 'alternative'.

    Returns:
        Tuple ``(image, class_ids, boxes, mini_masks)``, or ``None`` when the
        item contains no drivable-area polygons.
    """
    item_data = self.data[idx]
    image_name = item_data['name']
    labels = item_data['labels']
    ig_data = io.imread(os.path.join(self.image_dr, image_name))
    instance_masks = []
    cls_ids = []
    # Both area types rasterize identically; only the class id differs.
    area_classes = {'direct': 0, 'alternative': 1}
    for ll in labels:
        if ll['category'] != 'drivable area':
            continue
        area_type = ll['attributes']['areaType']
        if area_type not in area_classes:
            continue
        for x in ll['poly2d']:
            canvas = np.zeros(shape=ig_data.shape, dtype=np.uint8)
            # BUGFIX: np.int was removed in NumPy 1.20+; use np.int32.
            cv2.fillPoly(canvas, [np.asarray(x['vertices'], np.int32)],
                         (255, 255, 255))
            cls_ids.append(area_classes[area_type])
            instance_masks.append(canvas[:, :, 0])
    # BUGFIX: bail out BEFORE np.transpose — an empty mask stack crashed
    # there. The old `len(labels) == 0` test also missed items whose labels
    # contain no drivable-area entries.
    if not instance_masks:
        return None
    mask = np.asarray(instance_masks)
    ig, window, scale, padding, crop = utils.resize_image_fixed_size(
        ig_data, self.image_size)
    mask = np.transpose(mask, axes=[1, 2, 0])  # (N, H, W) -> (H, W, N)
    mask = utils.resize_mask(mask, scale, padding, crop)
    image, mask = coco_handler.aug(ig, mask)
    boxes = utils.extract_bboxes(mask)
    mask = utils.minimize_mask(boxes, mask,
                               mini_shape=(self.mask_shape, self.mask_shape))
    # NOTE(review): boxes are returned in pixel coordinates here (the
    # normalization line was commented out upstream) — confirm callers expect
    # that, since the sibling loaders normalize to [0, 1].
    # BUGFIX: return the augmented image; the original returned the
    # un-augmented `ig`, mismatching the augmented masks/boxes.
    return image, cls_ids, boxes, mask
def pull_item(self, index):
    """Load orchard sample *index*: image masked to orchard regions + tree boxes.

    Returns:
        Tuple ``(image, normalized_boxes, class_ids)``, or ``None`` when the
        JSON contains no 'tree' polygons.
    """
    image_path = self.images[index]
    json_path = image_path.replace('.png', '.json')
    bundry = json.loads(open(json_path).read().encode('utf8'))
    ig = cv2.imread(image_path)
    ig = cv2.cvtColor(ig, cv2.COLOR_BGR2RGB)
    ig = aug_utils.pytorch_aug_color(ig)
    shape = ig.shape[0:2]
    ids = []
    orchard = np.zeros(shape=(shape[0], shape[1], 3), dtype=np.uint8)
    msk = []

    def rasterize(boundary):
        # One filled polygon per annotation on a fresh uint8 canvas.
        canvas = np.zeros(shape=(shape[0], shape[1], 3), dtype=np.uint8)
        pts = np.array([[int(p['x']), int(p['y'])] for p in boundary],
                       np.int32)
        cv2.fillPoly(canvas, [pts], color=(255, 255, 255))
        return canvas

    for b in bundry:
        if b['correction_type'] == 'tree':
            m = rasterize(b['boundary'])
            msk.append(m[:, :, 0:1])
            ids.append(0)  # single 'tree' class
        elif b['correction_type'] == 'orchard':
            # BUGFIX: `orchard += mask` overflowed uint8 (255 + 255 -> 254)
            # wherever orchard polygons overlap, punching near-black holes in
            # the masked image. Maximum keeps a clean 0/255 map.
            orchard = np.maximum(orchard, rasterize(b['boundary']))
    # NOTE(review): if the JSON has no 'orchard' polygon, `orchard` stays all
    # zeros and the whole image is blanked here — presumably every file has
    # one; verify against the dataset.
    ig = ig * (orchard / 255)
    if len(msk) > 1:
        msk = np.concatenate(msk, axis=2)
    elif len(msk) == 1:
        msk = msk[0]
    else:
        return None
    ig, window, scale, padding, crop = utils.resize_image_fixed_size(
        ig, self.image_size)
    msk = utils.resize_mask(msk, scale, padding, crop)
    # 50% chance of a shared deterministic augmentation on image + masks.
    if random.randint(0, 1) != 1:
        ag = self.aug.to_deterministic()
        ig = ag.augment_image(ig)
        msk = ag.augment_image(msk)
    box = utils.extract_bboxes(msk)
    ids = np.asarray(ids)
    # Drop boxes smaller than (image_size / 32)^2 pixels.
    area = (box[:, 3] - box[:, 1]) * (box[:, 2] - box[:, 0])
    keep = np.where(area > self.image_size[0] * self.image_size[1] / 32 / 32)
    box = box[keep]
    ids = ids[keep]
    # Drop extreme aspect ratios: keep 0.25 < (x-extent / y-extent) < 4.
    ratio = (box[:, 3] - box[:, 1]) / (box[:, 2] - box[:, 0])
    keep = np.where(ratio > 0.25)
    box = box[keep]
    ids = ids[keep]
    ratio = (box[:, 3] - box[:, 1]) / (box[:, 2] - box[:, 0])
    keep = np.where(ratio < 4)
    box = box[keep]
    ids = ids[keep]
    box = box / np.asarray([
        self.image_size[0], self.image_size[1],
        self.image_size[0], self.image_size[1]
    ])
    return ig, box, ids
def pull_item(self, index):
    """Load orchard sample *index* with tree masks kept aligned to their boxes.

    Returns:
        Tuple ``(image, normalized_boxes, class_ids, merged_mask)``, or
        ``None`` when the JSON lacks either 'tree' or 'orchard' polygons.
    """
    image_path = self.images[index]
    json_path = image_path.replace('.png', '.json')
    bundry = json.loads(open(json_path).read().encode('utf8'))
    ig = cv2.imread(image_path)
    ig = cv2.cvtColor(ig, cv2.COLOR_BGR2RGB)
    shape = ig.shape[0:2]
    orchard = []
    msk = []
    ids = []
    for b in bundry:
        kind = b['correction_type']
        if kind not in ('tree', 'orchard'):
            continue
        mask = np.zeros(shape=(shape[0], shape[1], 3))
        pts = np.array([[int(p['x']), int(p['y'])] for p in b['boundary']],
                       np.int32)
        cv2.fillPoly(mask, [pts], color=(255, 255, 255))
        if kind == 'tree':
            msk.append(mask[:, :, 0:1])
            ids.append(0)  # single 'tree' class
        else:
            orchard.append(mask[:, :, 0:1])

    def stack(parts):
        # Join per-instance (H, W, 1) masks into (H, W, N); None when empty.
        if len(parts) > 1:
            return np.concatenate(parts, axis=2)
        if len(parts) == 1:
            return parts[0]
        return None

    msk = stack(msk)
    orchard = stack(orchard)
    # Samples without tree or orchard annotations are skipped entirely.
    if msk is None or orchard is None:
        return None
    ig, window, scale, padding, crop = utils.resize_image_fixed_size(
        ig, self.image_size)
    msk = utils.resize_mask(msk, scale, padding, crop)
    orchard = utils.resize_mask(orchard, scale, padding, crop)
    # 50% chance of a shared deterministic augmentation across all arrays.
    if random.randint(0, 1) == 1:
        ag = self.aug.to_deterministic()
        ig = ag.augment_image(ig)
        msk = ag.augment_image(msk)
        orchard = ag.augment_image(orchard)
    box = utils.extract_bboxes(msk)
    ids = np.asarray(ids)
    mask = np.transpose(msk, [2, 0, 1])  # (H, W, N) -> (N, H, W) for filtering

    def keep(cond):
        # Filter box/ids/mask together so they stay index-aligned — the
        # original repeated this triple four times by hand.
        nonlocal box, ids, mask
        sel = np.where(cond)
        box = box[sel]
        ids = ids[sel]
        mask = mask[sel]

    h = self.image_size[0]
    w = self.image_size[1]
    area = (box[:, 3] - box[:, 1]) * (box[:, 2] - box[:, 0])
    keep(area < h * w / 3 / 3)    # drop huge boxes (> 1/9 of the image)
    area = (box[:, 3] - box[:, 1]) * (box[:, 2] - box[:, 0])
    keep(area > h * w / 32 / 32)  # drop tiny boxes
    ratio = (box[:, 3] - box[:, 1]) / (box[:, 2] - box[:, 0])
    keep(ratio > 0.3)             # drop extreme aspect ratios: 0.3 < r < 3
    ratio = (box[:, 3] - box[:, 1]) / (box[:, 2] - box[:, 0])
    keep(ratio < 3)
    # BUGFIX(dead code): the original computed `orchard = np.sum(orchard, 2)`
    # here, but its only consumer (`ig = ig*(orchard/255)`) is commented out,
    # so the reduction is wasted work and has been removed.
    mask = np.transpose(mask, [1, 2, 0])  # back to (H, W, N)
    # Collapse surviving instances into one binary map, clamped to 255.
    mask = np.sum(mask, 2)
    mask[np.where(mask > 255)] = 255
    box = box / np.asarray([h, w, h, w])
    return ig, box, ids, mask