from pycocotools.coco import COCO
# cocoSegmentationToPng is provided by the COCO-Stuff helper tools (cocostuffhelper.py)
from pycocotools.cocostuffhelper import cocoSegmentationToPng


def cocoSegmentationToPngDemo(dataDir='/home/zyq/data/coco', dataTypeAnn='train2014', dataTypeRes='examples', \
        pngFolderName='export_png', isAnnotation=True, exportImageLimit=10):
    '''
    Converts COCO segmentation .json files (GT or results) to one .png file per image.
    :param dataDir: location of the COCO root folder
    :param dataTypeAnn: identifier of the ground-truth annotation file
    :param dataTypeRes: identifier of the result annotation file, if any (unused in this variant)
    :param pngFolderName: the name of the subfolder where we store .png images
    :param isAnnotation: whether the COCO file is a GT annotation or a result file (unused in this variant)
    :param exportImageLimit: maximum number of images to export (the cap below is commented out)
    :return: None
    '''

    # Define paths
    annPath = '%s/annotations/instances_%s.json' % (dataDir, dataTypeAnn)
    pngFolder = '%s/annotations/%s' % (dataDir, pngFolderName)

    # Initialize COCO ground-truth API
    coco = COCO(annPath)
    imgIds = coco.getImgIds()

    # if exportImageLimit < len(imgIds):
    #     imgIds = imgIds[0:exportImageLimit]
    txt_path = '%s/annotations/train_coco_seg.txt' % (dataDir)
    with open(txt_path, "w") as f:
        # Convert each image to a png
        imgCount = len(imgIds)
        for i in range(0, imgCount):
            imgId = imgIds[i]
            imgName = coco.loadImgs(ids=imgId)[0]['file_name'].replace(
                '.jpg', '')
            print('Exporting image %d of %d: %s' % (i + 1, imgCount, imgName))
            segmentationPath = '%s/%s.png' % (pngFolder, imgName)
            if cocoSegmentationToPng(coco, imgId, segmentationPath):
                f.write(imgName + "\n")
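
All of these snippets revolve around the same handful of pycocotools calls. As a reference point, here is a minimal, self-contained sketch of that access pattern (the annotation path is a placeholder, not one taken from the examples):

from pycocotools.coco import COCO

coco = COCO('annotations/instances_val2017.json')  # placeholder path
img_ids = coco.getImgIds()
info = coco.loadImgs(img_ids[0])[0]          # dict with 'file_name', 'height', 'width'
ann_ids = coco.getAnnIds(imgIds=img_ids[0])
anns = coco.loadAnns(ann_ids)                # each has 'bbox', 'category_id', 'iscrowd'
cats = coco.loadCats(coco.getCatIds())       # list of {'id', 'name', 'supercategory'}
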
Example #4
    def _load_all(self, anno_file, shuffle):
        """
        initialize all entries given annotation json file

        Parameters:
        ----------
        anno_file: str
            annotation json file
        shuffle: bool
            whether to shuffle image list
        """
        image_set_index = []
        labels = []
        coco = COCO(anno_file)
        img_ids = coco.getImgIds()
        # deal with class names
        cats = [cat['name'] for cat in coco.loadCats(coco.getCatIds())]
        class_to_coco_ind = dict(zip(cats, coco.getCatIds()))
        class_to_ind = dict(zip(self.classes, range(len(self.classes))))
        coco_ind_to_class_ind = dict([(class_to_coco_ind[cls], class_to_ind[cls])
                                      for cls in self.classes])
        for img_id in img_ids:
            # filename
            image_info = coco.loadImgs(img_id)[0]
            filename = image_info["file_name"]
            subdir = filename.split('_')[1]
            height = image_info["height"]
            width = image_info["width"]
            # label
            anno_ids = coco.getAnnIds(imgIds=img_id)
            annos = coco.loadAnns(anno_ids)
            label = []
            for anno in annos:
                cat_id = coco_ind_to_class_ind[anno['category_id']]
                bbox = anno["bbox"]
                assert len(bbox) == 4
                xmin = float(bbox[0]) / width
                ymin = float(bbox[1]) / height
                xmax = xmin + float(bbox[2]) / width
                ymax = ymin + float(bbox[3]) / height
                label.append([cat_id, xmin, ymin, xmax, ymax, 0])
            if label:
                labels.append(np.array(label))
                image_set_index.append(os.path.join(subdir, filename))

        if shuffle:
            import random
            indices = list(range(len(image_set_index)))
            random.shuffle(indices)
            image_set_index = [image_set_index[i] for i in indices]
            labels = [labels[i] for i in indices]
        # store the results
        self.image_set_index = image_set_index
        self.labels = labels
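
The label normalization above converts COCO's absolute [x, y, w, h] boxes into corner form scaled to [0, 1]. As a quick worked example, a bbox of [10, 20, 30, 40] in a 100x200 (width x height) image becomes:

width, height = 100, 200
x, y, w, h = 10, 20, 30, 40
xmin = x / width            # 0.1
ymin = y / height           # 0.1
xmax = xmin + w / width     # 0.4
ymax = ymin + h / height    # 0.3
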
Example #5
    def _load_all(self, anno_file, shuffle):
        """
        initialize all entries given annotation json file

        Parameters:
        ----------
        anno_file: str
            annotation json file
        shuffle: bool
            whether to shuffle image list
        """
        image_set_index = []
        labels = []
        coco = COCO(anno_file)
        img_ids = coco.getImgIds()
        for img_id in img_ids:
            # filename
            image_info = coco.loadImgs(img_id)[0]
            filename = image_info["file_name"]
            subdir = filename.split('_')[1]
            height = image_info["height"]
            width = image_info["width"]
            # label
            anno_ids = coco.getAnnIds(imgIds=img_id)
            annos = coco.loadAnns(anno_ids)
            label = []
            for anno in annos:
                cat_id = int(anno["category_id"])
                bbox = anno["bbox"]
                assert len(bbox) == 4
                xmin = float(bbox[0]) / width
                ymin = float(bbox[1]) / height
                xmax = xmin + float(bbox[2]) / width
                ymax = ymin + float(bbox[3]) / height
                label.append([cat_id, xmin, ymin, xmax, ymax, 0])
            if label:
                labels.append(np.array(label))
                image_set_index.append(os.path.join(subdir, filename))

        if shuffle:
            import random
            indices = list(range(len(image_set_index)))
            random.shuffle(indices)
            image_set_index = [image_set_index[i] for i in indices]
            labels = [labels[i] for i in indices]
        # store the results
        self.image_set_index = image_set_index
        self.labels = labels
Example #7
def load_coco_annotation(index, anns_path, img_folder, num_classes=1):
    """
        coco ann: [u'segmentation', u'area', u'iscrowd', u'image_id', u'bbox', u'category_id', u'id']
        iscrowd:
            crowd instances are handled by marking their overlaps with all categories to -1
            and later excluded in training
        bbox:
            [x1, y1, w, h]
        :param index: coco image id
        :return: roidb entry
        """
    coco = COCO(anns_path)
    im_ann = coco.loadImgs(index)[0]
    width = im_ann['width']
    height = im_ann['height']

    annIds = coco.getAnnIds(imgIds=index, iscrowd=False)
    objs = coco.loadAnns(annIds)

    # sanitize bboxes
    valid_objs = []
    for obj in objs:
        x, y, w, h = obj['bbox']
        x1 = np.max((0, x))
        y1 = np.max((0, y))
        x2 = np.min((width - 1, x1 + np.max((0, w - 1))))
        y2 = np.min((height - 1, y1 + np.max((0, h - 1))))
        if obj['area'] > 0 and x2 >= x1 and y2 >= y1:
            obj['clean_bbox'] = [x1, y1, x2, y2]
            valid_objs.append(obj)
    objs = valid_objs
    num_objs = len(objs)

    boxes = np.zeros((num_objs, 4), dtype=np.uint16)
    gt_classes = np.zeros((num_objs), dtype=np.int32)
    overlaps = np.zeros((num_objs, num_classes), dtype=np.float32)

    for ix, obj in enumerate(objs):
        cls = 0  # we only have 1 class
        boxes[ix, :] = obj['clean_bbox']
        gt_classes[ix] = cls
        if obj['iscrowd']:
            overlaps[ix, :] = -1.0
        else:
            overlaps[ix, cls] = 1.0

    roi_rec = {
        'image': os.path.join(img_folder,
                              coco.loadImgs(index)[0]['file_name']),
        'height': height,
        'width': width,
        'boxes': boxes,
        'gt_classes': gt_classes,
        'gt_overlaps': overlaps,
        'max_classes': overlaps.argmax(axis=1),
        'max_overlaps': overlaps.max(axis=1),
        'masks': rle_to_mask_arrs(coco, index),
        'flipped': False
    }
    return roi_rec
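
A hypothetical call might look like the following; the paths and image id are placeholders, and note that rle_to_mask_arrs is a project-specific helper that must be in scope:

roi = load_coco_annotation(139,                                             # placeholder image id
                           anns_path='annotations/instances_val2017.json',  # placeholder
                           img_folder='val2017')                            # placeholder
print(roi['boxes'].shape, roi['gt_classes'])
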
Example #8
    def _load_all(self, anno_file, shuffle):
        """
        initialize all entries given annotation json file

        Parameters:
        ----------
        anno_file: str
            annotation json file
        shuffle: bool
            whether to shuffle image list
        """
        image_set_index = []
        labels = []
        coco = COCO(anno_file)
        print(coco)
        img_ids = coco.getImgIds()
        #print (anno_file)
        subdir = anno_file.split('/')[-1].split('.')[0].split('_')[2]
        #print (subdir)
        print('img_ids_len:', len(img_ids))
        for img_id in img_ids:
            # filename
            image_info = coco.loadImgs(img_id)[0]
            filename = image_info["file_name"]
            #print(image_info)
            height = image_info["height"]
            width = image_info["width"]
            # label
            anno_ids = coco.getAnnIds(imgIds=img_id)
            #print(anno_ids)
            annos = coco.loadAnns(anno_ids)
            #print(annos)
            label = []
            for anno in annos:
                print(len(annos))
                cat_id = int(anno["category_id"])
                cat_id = labeltrans_dict[cat_id]
                #                print (cat_id)
                bbox = anno["bbox"]
                assert len(bbox) == 4
                xmin = float(bbox[0]) / width
                ymin = float(bbox[1]) / height
                xmax = xmin + float(bbox[2]) / width
                ymax = ymin + float(bbox[3]) / height
                label.append([cat_id, xmin, ymin, xmax, ymax, 0])
            if label:
                labels.append(np.array(label))
                image_set_index.append(os.path.join(subdir, filename))

        if shuffle:
            import random
            indices = list(range(len(image_set_index)))
            random.shuffle(indices)
            image_set_index = [image_set_index[i] for i in indices]
            labels = [labels[i] for i in indices]
        # store the results
        self.image_set_index = image_set_index
        self.labels = labels
Example #9
class COCOdataset(data.Dataset):
  def __init__(self, dataset_root, transform, subset, batchsize, netsize, istrain):
    self.dataset_root = dataset_root
    self.image_dir = "{}/images/{}2017".format(dataset_root, subset)
    self.coco = COCO("{}/annotations/instances_{}2017.json".format(dataset_root, subset))
    self.anchors = np.array(eval("COCO_ANCHOR_{}".format(netsize)))
    self.istrain = istrain
    self.netsize = netsize
    self.batch_size = batchsize
    # get the mapping from original category ids to labels
    self.cat_ids = self.coco.getCatIds()
    self.cat2label = {
      cat_id: i
      for i, cat_id in enumerate(self.cat_ids)
    }
    self.img_ids, self.img_infos = self._filter_imgs()
    self._transform = transform
    self.multisizes = TRAIN_INPUT_SIZES_COCO

  def _filter_imgs(self, min_size=32):
    # Filter images without ground truths.
    all_img_ids = list(set([_['image_id'] for _ in self.coco.anns.values()]))
    # Filter images too small.
    img_ids = []
    img_infos = []
    for i in all_img_ids:
      info = self.coco.loadImgs(i)[0]
      ann_ids = self.coco.getAnnIds(imgIds=i)
      ann_info = self.coco.loadAnns(ann_ids)
      ann = self._parse_ann_info(ann_info)
      if min(info['width'], info['height']) >= min_size and ann['labels'].shape[0] != 0:
        img_ids.append(i)
        img_infos.append(info)
    return img_ids, img_infos

  def _load_ann_info(self, idx):
    img_id = self.img_ids[idx]
    ann_ids = self.coco.getAnnIds(imgIds=img_id)
    ann_info = self.coco.loadAnns(ann_ids)
    return ann_info

  def _parse_ann_info(self, ann_info):
    gt_bboxes = []
    gt_labels = []
    gt_bboxes_ignore = []

    for i, ann in enumerate(ann_info):
      if ann.get('ignore', False):
        continue
      x1, y1, w, h = ann['bbox']
      if ann['area'] <= 0 or w < 1 or h < 1:
        continue
      bbox = [x1, y1, x1 + w, y1 + h]
      if ann['iscrowd']:
        gt_bboxes_ignore.append(bbox)
      else:
        gt_bboxes.append(bbox)
        gt_labels.append(self.cat2label[ann['category_id']])

    if gt_bboxes:
      gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
      gt_labels = np.array(gt_labels, dtype=np.int64)
    else:
      gt_bboxes = np.zeros((0, 4), dtype=np.float32)
      gt_labels = np.array([], dtype=np.int64)

    if gt_bboxes_ignore:
      gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
    else:
      gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)

    ann = dict(bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore)

    return ann

  def __len__(self):
    return len(self.img_infos) // self.batch_size

  def __getitem__(self, index):
    if self.istrain:
      trainsize = random.choice(self.multisizes)
    else:
      trainsize = self.netsize

    return self._load_batch(index, trainsize)

  def _load_batch(self, idx_batch, random_trainsize):
    img_batch = []
    imgpath_batch = []
    annpath_batch = []
    ori_shape_batch = []
    grid0_batch = []
    grid1_batch = []
    grid2_batch = []
    for idx in range(self.batch_size):
      img_info = self.img_infos[idx_batch * self.batch_size + idx]
      ann_info = self._load_ann_info(idx_batch * self.batch_size + idx)
      # load the image.
      img = cv2.imread(osp.join(self.image_dir, img_info['file_name']), cv2.IMREAD_COLOR)
      img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
      ori_shape = img.shape[:2][::-1] #yx-->xy

      # Load the annotation.
      ann = self._parse_ann_info(ann_info)
      bboxes = ann['bboxes']  # [x1,y1,x2,y2]
      labels = ann['labels']
      img, bboxes = self._transform(random_trainsize, random_trainsize, img, bboxes)
      list_grids = transform.preprocess(bboxes, labels, img.shape[:2], class_num=80, anchors=self.anchors)
      img_batch.append(img)
      imgpath_batch.append(osp.join(self.image_dir, img_info['file_name']))
      annpath_batch.append(osp.join(self.image_dir, img_info['file_name']))
      ori_shape_batch.append(ori_shape)
      grid0_batch.append(list_grids[0])
      grid1_batch.append(list_grids[1])
      grid2_batch.append(list_grids[2])

    return torch.from_numpy(np.array(img_batch).transpose((0,3,1,2)).astype(np.float32)), \
           imgpath_batch, \
           annpath_batch, \
           torch.from_numpy(np.array(ori_shape_batch).astype(np.float32)), \
           torch.from_numpy(np.array(grid0_batch).astype(np.float32)), \
           torch.from_numpy(np.array(grid1_batch).astype(np.float32)), \
           torch.from_numpy(np.array(grid2_batch).astype(np.float32))
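
Since __getitem__ here already assembles a full batch, a PyTorch loader wrapped around this dataset should not batch again. A minimal usage sketch under that assumption:

from torch.utils.data import DataLoader

# batch_size=None disables automatic batching; the dataset returns
# ready-made batches of size `batchsize` itself.
loader = DataLoader(dataset, batch_size=None, shuffle=False, num_workers=4)
for imgs, img_paths, ann_paths, ori_shapes, grid0, grid1, grid2 in loader:
    break  # imgs has shape (batchsize, 3, H, W)
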
Example #10
class EvaluatorCOCO(Evaluator):
    def __init__(self, anchors, cateNames, rootpath, score_thres, iou_thres):
        self.coco_imgIds = set([])
        self.coco_results = []
        # maps contiguous label indices ("0".."79") to official COCO category ids
        self.idx2cat = {
            "0": 1,
            "1": 2,
            "2": 3,
            "3": 4,
            "4": 5,
            "5": 6,
            "6": 7,
            "7": 8,
            "8": 9,
            "9": 10,
            "10": 11,
            "11": 13,
            "12": 14,
            "13": 15,
            "14": 16,
            "15": 17,
            "16": 18,
            "17": 19,
            "18": 20,
            "19": 21,
            "20": 22,
            "21": 23,
            "22": 24,
            "23": 25,
            "24": 27,
            "25": 28,
            "26": 31,
            "27": 32,
            "28": 33,
            "29": 34,
            "30": 35,
            "31": 36,
            "32": 37,
            "33": 38,
            "34": 39,
            "35": 40,
            "36": 41,
            "37": 42,
            "38": 43,
            "39": 44,
            "40": 46,
            "41": 47,
            "42": 48,
            "43": 49,
            "44": 50,
            "45": 51,
            "46": 52,
            "47": 53,
            "48": 54,
            "49": 55,
            "50": 56,
            "51": 57,
            "52": 58,
            "53": 59,
            "54": 60,
            "55": 61,
            "56": 62,
            "57": 63,
            "58": 64,
            "59": 65,
            "60": 67,
            "61": 70,
            "62": 72,
            "63": 73,
            "64": 74,
            "65": 75,
            "66": 76,
            "67": 77,
            "68": 78,
            "69": 79,
            "70": 80,
            "71": 81,
            "72": 82,
            "73": 84,
            "74": 85,
            "75": 86,
            "76": 87,
            "77": 88,
            "78": 89,
            "79": 90
        }
        self.cat2idx = {int(v): int(k) for k, v in self.idx2cat.items()}
        self.reset()
        super().__init__(anchors, cateNames, rootpath, score_thres, iou_thres)

    def reset(self):
        self.coco_imgIds = set([])
        self.coco_results = []
        self.visual_imgs = []

    def build_GT(self):
        self.cocoGt = COCO(
            os.path.join(self.dataset_root,
                         'annotations/instances_val2017.json'))

    def append(self,
               imgpath,
               annpath,
               nms_boxes,
               nms_scores,
               nms_labels,
               visualize=True):

        imgid = int(imgpath[-16:-4])
        if nms_boxes is not None:  #do have bboxes
            for i in range(nms_boxes.shape[0]):
                self.coco_imgIds.add(imgid)
                self.coco_results.append({
                    "image_id":
                    imgid,
                    "category_id":
                    self.idx2cat[str(nms_labels[i])],
                    "bbox": [
                        nms_boxes[i][0], nms_boxes[i][1],
                        nms_boxes[i][2] - nms_boxes[i][0],
                        nms_boxes[i][3] - nms_boxes[i][1]
                    ],
                    "score":
                    float(nms_scores[i])
                })
            if len(self.visual_imgs) < self.num_visual:
                annIDs = self.cocoGt.getAnnIds(imgIds=[imgid])
                boxGT = []
                labelGT = []
                for id in annIDs:
                    ann = self.cocoGt.anns[id]
                    x, y, w, h = ann['bbox']
                    boxGT.append([x, y, x + w, y + h])
                    labelGT.append(self.cat2idx[ann['category_id']])
                boxGT = np.array(boxGT)
                labelGT = np.array(labelGT)
                self.append_visulize(imgpath, nms_boxes, nms_labels,
                                     nms_scores, boxGT, labelGT)

    def evaluate(self):
        try:
            cocoDt = self.cocoGt.loadRes(self.coco_results)
        except Exception:
            print("no boxes detected, coco eval aborted")
            return 1
        cocoEval = COCOeval(self.cocoGt, cocoDt, "bbox")
        cocoEval.params.imgIds = list(self.coco_imgIds)
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        return cocoEval.stats
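
The 80-entry idx2cat table above is just the enumeration of the ground truth's category ids. A sketch that derives the same mapping from the loaded annotations instead of hardcoding it (assuming cocoGt is the COCO object built in build_GT):

cat_ids = cocoGt.getCatIds()  # sorted official ids: 1..90 with gaps
idx2cat = {str(i): cid for i, cid in enumerate(cat_ids)}
cat2idx = {cid: i for i, cid in enumerate(cat_ids)}
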
Example #12
class COCOdataset(data.Dataset):
    def __init__(self,
                 dataset_root,
                 transform,
                 subset,
                 batchsize,
                 trainsizes,
                 testsize,
                 istrain,
                 gt_pergrid=3):
        self.dataset_root = dataset_root
        self.image_dir = "{}/images/{}2017".format(dataset_root, subset)
        self.coco = COCO("{}/annotations/instances_{}2017.json".format(
            dataset_root, subset))
        self.istrain = istrain
        self.testsize = testsize
        self.batch_size = batchsize
        # get the mapping from original category ids to labels
        self.cat_ids = self.coco.getCatIds()
        self.numcls = len(self.cat_ids)
        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
        self.img_ids, self.img_infos = self._filter_imgs()
        self._transform = transform
        self.multisizes = trainsizes
        self.strides = np.array([8, 16, 32])
        self._gt_per_grid = gt_pergrid

    def _filter_imgs(self, min_size=32):
        # Filter images without ground truths.
        all_img_ids = list(
            set([_['image_id'] for _ in self.coco.anns.values()]))
        # Filter images too small.
        img_ids = []
        img_infos = []
        for i in all_img_ids:
            info = self.coco.loadImgs(i)[0]
            ann_ids = self.coco.getAnnIds(imgIds=i)
            ann_info = self.coco.loadAnns(ann_ids)
            ann = self._parse_ann_info(ann_info)
            if min(info['width'],
                   info['height']) >= min_size and ann['labels'].shape[0] != 0:
                img_ids.append(i)
                img_infos.append(info)
        return img_ids, img_infos

    def _load_ann_info(self, idx):
        img_id = self.img_ids[idx]
        ann_ids = self.coco.getAnnIds(imgIds=img_id)
        ann_info = self.coco.loadAnns(ann_ids)
        return ann_info

    def _parse_ann_info(self, ann_info):
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []

        for i, ann in enumerate(ann_info):
            if ann.get('ignore', False):
                continue
            x1, y1, w, h = ann['bbox']
            if ann['area'] <= 0 or w < 1 or h < 1:
                continue
            bbox = [x1, y1, x1 + w, y1 + h]
            if ann['iscrowd']:
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                gt_labels.append(self.cat2label[ann['category_id']])

        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)

        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)

        ann = dict(bboxes=gt_bboxes,
                   labels=gt_labels,
                   bboxes_ignore=gt_bboxes_ignore)

        return ann

    def __len__(self):
        return len(self.img_infos) // self.batch_size

    def __getitem__(self, index):
        if self.istrain:
            trainsize = random.choice(self.multisizes)
        else:
            trainsize = self.testsize

        return self._load_batch(index, trainsize)

    def _load_batch(self, idx_batch, random_trainsize):
        outputshapes = random_trainsize // self.strides

        batch_image = np.zeros(
            (self.batch_size, random_trainsize, random_trainsize, 3))
        batch_label_sbbox = np.zeros(
            (self.batch_size, outputshapes[0], outputshapes[0],
             self._gt_per_grid, 6 + self.numcls))
        batch_label_mbbox = np.zeros(
            (self.batch_size, outputshapes[1], outputshapes[1],
             self._gt_per_grid, 6 + self.numcls))
        batch_label_lbbox = np.zeros(
            (self.batch_size, outputshapes[2], outputshapes[2],
             self._gt_per_grid, 6 + self.numcls))
        temp_batch_sbboxes = []
        temp_batch_mbboxes = []
        temp_batch_lbboxes = []
        imgpath_batch = []
        orishape_batch = []
        max_sbbox_per_img = 0
        max_mbbox_per_img = 0
        max_lbbox_per_img = 0
        for idx in range(self.batch_size):
            img_info = self.img_infos[idx_batch * self.batch_size + idx]
            ann_info = self._load_ann_info(idx_batch * self.batch_size + idx)
            # load the image.
            img = cv2.imread(osp.join(self.image_dir, img_info['file_name']),
                             cv2.IMREAD_COLOR)
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            ori_shape = img.shape[:2]  # (h, w); not reversed here
            # Load the annotation.
            ann = self._parse_ann_info(ann_info)
            bboxes = ann['bboxes']  # [x1,y1,x2,y2]
            labels = ann['labels']
            img, bboxes = self._transform(random_trainsize, random_trainsize,
                                          img, bboxes)
            # data augmentation in original-strongeryolo
            # if self.istrain:
            #     img, bboxes = dataAug.random_horizontal_flip(np.copy(img), np.copy(bboxes))
            #     img, bboxes = dataAug.random_crop(np.copy(img), np.copy(bboxes))
            #     img, bboxes = dataAug.random_translate(np.copy(img), np.copy(bboxes))
            # img, bboxes = dataAug.img_preprocess2(np.copy(img), np.copy(bboxes),
            #                                       (random_trainsize, random_trainsize), True)

            label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes = \
                self.preprocess_anchorfree(bboxes, labels, outputshapes)
            batch_image[idx, :, :, :] = img
            batch_label_sbbox[idx, :, :, :, :] = label_sbbox
            batch_label_mbbox[idx, :, :, :, :] = label_mbbox
            batch_label_lbbox[idx, :, :, :, :] = label_lbbox

            zeros = np.zeros((1, 4), dtype=np.float32)
            sbboxes = sbboxes if len(sbboxes) != 0 else zeros
            mbboxes = mbboxes if len(mbboxes) != 0 else zeros
            lbboxes = lbboxes if len(lbboxes) != 0 else zeros
            temp_batch_sbboxes.append(sbboxes)
            temp_batch_mbboxes.append(mbboxes)
            temp_batch_lbboxes.append(lbboxes)
            max_sbbox_per_img = max(max_sbbox_per_img, len(sbboxes))
            max_mbbox_per_img = max(max_mbbox_per_img, len(mbboxes))
            max_lbbox_per_img = max(max_lbbox_per_img, len(lbboxes))
            imgpath_batch.append(
                osp.join(self.image_dir, img_info['file_name']))
            orishape_batch.append(ori_shape)

        batch_sbboxes = np.array([
            np.concatenate([
                sbboxes,
                np.zeros((max_sbbox_per_img + 1 - len(sbboxes), 4),
                         dtype=np.float32)
            ],
                           axis=0) for sbboxes in temp_batch_sbboxes
        ])
        batch_mbboxes = np.array([
            np.concatenate([
                mbboxes,
                np.zeros((max_mbbox_per_img + 1 - len(mbboxes), 4),
                         dtype=np.float32)
            ],
                           axis=0) for mbboxes in temp_batch_mbboxes
        ])
        batch_lbboxes = np.array([
            np.concatenate([
                lbboxes,
                np.zeros((max_lbbox_per_img + 1 - len(lbboxes), 4),
                         dtype=np.float32)
            ],
                           axis=0) for lbboxes in temp_batch_lbboxes
        ])

        return torch.from_numpy(np.array(batch_image).transpose((0, 3, 1, 2)).astype(np.float32)), \
               imgpath_batch, \
               torch.from_numpy(np.array(orishape_batch).astype(np.float32)), \
               torch.from_numpy(np.array(batch_label_sbbox).astype(np.float32)), \
               torch.from_numpy(np.array(batch_label_mbbox).astype(np.float32)), \
               torch.from_numpy(np.array(batch_label_lbbox).astype(np.float32)), \
               torch.from_numpy(np.array(batch_sbboxes).astype(np.float32)), \
               torch.from_numpy(np.array(batch_mbboxes).astype(np.float32)), \
               torch.from_numpy(np.array(batch_lbboxes).astype(np.float32))

    def preprocess_anchorfree(self, bboxes, labels, outputshapes):
        '''
        :param bboxes: (n, 4) boxes as x1, y1, x2, y2
        :param labels: (n,) class indices
        :param outputshapes: grid sizes for the three strides
        :return: per-branch labels and the raw boxes assigned to each branch
        '''
        label = [
            np.zeros((outputshapes[i], outputshapes[i], self._gt_per_grid,
                      6 + self.numcls)) for i in range(3)
        ]
        # the mixup weight slot defaults to 1.0
        for i in range(3):
            label[i][:, :, :, -1] = 1.0
        bboxes_coor = [[] for _ in range(3)]
        bboxes_count = [
            np.zeros((outputshapes[i], outputshapes[i])) for i in range(3)
        ]

        for bbox, bbox_class_ind in zip(bboxes, labels):
            # (1) corner coordinates of the bbox on the original image, its
            # class index, center coordinates, width/height and overall scale
            bbox_coor = bbox[:4]
            bbox_xywh = np.concatenate([(bbox_coor[2:] + bbox_coor[:2]) * 0.5,
                                        bbox_coor[2:] - bbox_coor[:2]],
                                       axis=-1)
            bbox_scale = np.sqrt(np.multiply.reduce(bbox_xywh[2:]))

            # label smooth
            onehot = np.zeros(self.numcls, dtype=float)  # np.float was removed in NumPy 1.24
            onehot[bbox_class_ind] = 1.0
            uniform_distribution = np.full(self.numcls, 1.0 / self.numcls)
            delta = 0.01
            smooth_onehot = onehot * (1 - delta) + delta * uniform_distribution

            if bbox_scale <= 30:
                match_branch = 0
            elif (30 < bbox_scale) and (bbox_scale <= 90):
                match_branch = 1
            else:
                match_branch = 2

            xind, yind = np.floor(1.0 * bbox_xywh[:2] /
                                  self.strides[match_branch]).astype(np.int32)
            gt_count = int(bboxes_count[match_branch][yind, xind])
            if gt_count < self._gt_per_grid:
                if gt_count == 0:
                    # first gt in this cell: write through a full slice so
                    # every slot is initialized
                    gt_count = slice(None)
                bbox_label = np.concatenate(
                    [bbox_coor, [1.0], smooth_onehot, [1]], axis=-1)
                label[match_branch][yind, xind, gt_count, :] = 0
                label[match_branch][yind, xind, gt_count, :] = bbox_label
                bboxes_count[match_branch][yind, xind] += 1
                bboxes_coor[match_branch].append(bbox_coor)
        label_sbbox, label_mbbox, label_lbbox = label
        sbboxes, mbboxes, lbboxes = bboxes_coor
        return label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes
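
For concreteness, the label-smoothing step above with numcls = 80 and delta = 0.01 assigns the true class a target of 0.99 + 0.01/80 = 0.990125 and every other class 0.01/80 = 0.000125; a quick standalone check:

import numpy as np

numcls, delta = 80, 0.01
onehot = np.zeros(numcls)
onehot[3] = 1.0
smooth = onehot * (1 - delta) + delta * np.full(numcls, 1.0 / numcls)
print(smooth[3], smooth[0])  # ~0.990125 and 0.000125
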
Example #14
class coco(IMDB):
    def __init__(self,
                 image_set,
                 root_path,
                 data_path,
                 result_path=None,
                 mask_size=-1,
                 binary_thresh=None,
                 load_mask=False):
        """
        fill basic information to initialize imdb
        :param image_set: train2014, val2014, test2015
        :param root_path: 'data', will write 'rpn_data', 'cache'
        :param data_path: 'data/coco'
        """
        super(coco, self).__init__('COCO', image_set, root_path, data_path,
                                   result_path)
        self.root_path = root_path
        self.data_path = data_path
        self.coco = COCO(self._get_ann_file())
        # train0829.txt
        print('>>>>>>>>>>> image set {}'.format(image_set))
        self.trainsetpath = '/private/luyujie/obstacle_detector/obstacle_detector/data/obstacle2d/ImageSets/train0829.txt'
        self.valsetpath = '/private/luyujie/obstacle_detector/obstacle_detector/data/obstacle2d/ImageSets/val0629.txt'
        self.imagepath = '/private/luyujie/obstacle_detector/obstacle_detector/data/obstacle2d/JPGImages'
        self.trainset = ['index0']
        self.valset = ['index0']
        with open(self.trainsetpath) as tsf:
            for line in tsf:
                self.trainset.append(
                    os.path.join(self.imagepath,
                                 line.strip() + '.jpg'))
        #print self.trainset[1]

        # val0629.txt
        with open(self.valsetpath) as vsf:
            for line in vsf:
                self.valset.append(
                    os.path.join(self.imagepath,
                                 line.strip() + '.jpg'))
        # deal with class names
        #print self.valset[2]
        cats = [
            cat['name'] for cat in self.coco.loadCats(self.coco.getCatIds())
        ]
        self.classes = ['__background__'] + cats
        #print('>>>> cats {}'.format(cats))
        self.num_classes = len(self.classes)
        self._class_to_ind = dict(zip(self.classes, range(self.num_classes)))
        self._class_to_coco_ind = dict(zip(cats, self.coco.getCatIds()))
        self._coco_ind_to_class_ind = dict([(self._class_to_coco_ind[cls],
                                             self._class_to_ind[cls])
                                            for cls in self.classes[1:]])

        # load image file names
        self.image_set_index = self._load_image_set_index()
        self.num_images = len(self.image_set_index)
        print('num_images', self.num_images)
        self.mask_size = mask_size
        self.binary_thresh = binary_thresh
        self.load_mask = load_mask

        # deal with data name
        view_map = {
            'minival2014': 'val2014',
            'sminival2014': 'val2014',
            'valminusminival2014': 'val2014',
            'test-dev2015': 'test2015',
            'test2015': 'test2015'
        }

        self.data_name = view_map[
            image_set] if image_set in view_map else image_set

    def _get_ann_file(self):
        """ self.data_path / annotations / instances_train2014.json """
        prefix = 'instances' if 'test' not in self.image_set else 'image_info'
        path = os.path.join(self.data_path, 'annotations',
                            prefix + '_' + self.image_set + '.json')
        print('>>>>>>>> get_ann_file {}'.format(path))
        return path

    def _load_image_set_index(self):
        """ image id: int """
        image_ids = self.coco.getImgIds()
        return image_ids

    def image_path_from_index(self, index):
        """ example: images / train2014 / COCO_train2014_000000119993.jpg """
        #self.data_name = 'train2014'
        #filename = 'COCO_%s_%012d.jpg' % (self.data_name, index)
        #data_name = self.data_name
        #if data_name == 'train1030':
        #    data_name = 'train2014'
        filename = 'COCO_%s_%012d.jpg' % (self.data_name, index)

        image_path = os.path.join(self.data_path, 'images', self.data_name,
                                  filename)
        #print '>> self.data_name'
        #print self.data_name

        #if self.data_name == 'train2014':
        #    image_path = self.trainset[index]
        #if self.data_name == 'val2014':
        #    image_path = self.valset[index]

        #print '>> index'
        #print index
        #print '>> self.trainsetpath'
        #print self.trainset[0]
        #print '>> self.valsetpath'
        #print self.valsetpath[0]
        #print '>> image_path'
        #print image_path
        assert os.path.exists(image_path), 'Path does not exist: {}'.format(
            image_path)
        return image_path

    def gt_roidb(self):
        cache_file = os.path.join(self.cache_path, self.name + '_gt_roidb.pkl')
        index_file = os.path.join(self.cache_path,
                                  self.name + '_index_roidb.pkl')
        sindex_file = os.path.join(self.cache_path,
                                   self.name + '_sindex_roidb.pkl')
        if os.path.exists(cache_file) and os.path.exists(index_file):
            with open(cache_file, 'rb') as fid:
                roidb = pickle.load(fid)
            with open(index_file, 'rb') as fid:
                self.image_set_index = pickle.load(fid)
            print('{} gt roidb loaded from {}'.format(self.name, cache_file))
            return roidb

        gt_roidb = []
        valid_id = []
        vids = []
        ct = 0
        for index in self.image_set_index:
            roientry, flag = self._load_coco_annotation(index)
            if flag:
                gt_roidb.append(roientry)
                valid_id.append(index)
                vids.append(ct)
            ct = ct + 1
        self.image_set_index = valid_id

        with open(cache_file, 'wb') as fid:
            pickle.dump(gt_roidb, fid, pickle.HIGHEST_PROTOCOL)
        with open(index_file, 'wb') as fid:
            pickle.dump(valid_id, fid, pickle.HIGHEST_PROTOCOL)
        with open(sindex_file, 'wb') as fid:
            pickle.dump(vids, fid, pickle.HIGHEST_PROTOCOL)

        print('wrote gt roidb to {}'.format(cache_file))
        return gt_roidb

    def _load_coco_annotation(self, index):
        def _polys2boxes(polys):
            boxes_from_polys = np.zeros((len(polys), 4), dtype=np.float32)
            for i in range(len(polys)):
                poly = polys[i]
                x0 = min(min(p[::2]) for p in poly)
                x1 = max(max(p[::2]) for p in poly)
                y0 = min(min(p[1::2]) for p in poly)
                y1 = max(max(p[1::2]) for p in poly)
                boxes_from_polys[i, :] = [x0, y0, x1, y1]
            return boxes_from_polys

        """
        coco ann: [u'segmentation', u'area', u'iscrowd', u'image_id', u'bbox', u'category_id', u'id']
        iscrowd:
            crowd instances are handled by marking their overlaps with all categories to -1
            and later excluded in training
        bbox:
            [x1, y1, w, h]
        :param index: coco image id
        :return: roidb entry
        """
        im_ann = self.coco.loadImgs(index)[0]
        width = im_ann['width']
        height = im_ann['height']

        annIds = self.coco.getAnnIds(imgIds=index, iscrowd=False)
        objs = self.coco.loadAnns(annIds)

        annIds = self.coco.getAnnIds(imgIds=index, iscrowd=True)
        objsc = self.coco.loadAnns(annIds)

        # sanitize bboxes
        valid_objs = []
        for obj in objs:
            x, y, w, h = obj['bbox']
            x1 = np.max((0, x))
            y1 = np.max((0, y))
            #yujie
            #x2 = np.min((width - 1, x1 + np.max((0, w - 1))))
            #y2 = np.min((height - 1, y1 + np.max((0, h - 1))))
            x2 = np.min((width, x1 + np.max((0, w))))
            y2 = np.min((height, y1 + np.max((0, h))))
            if obj['area'] > 0 and x2 >= x1 and y2 >= y1:
                obj['clean_bbox'] = [x1, y1, x2, y2]
                valid_objs.append(obj)

        valid_objsc = []
        for obj in objsc:
            x, y, w, h = obj['bbox']
            x1 = np.max((0, x))
            y1 = np.max((0, y))
            #yujie
            x2 = np.min((width, x1 + np.max((0, w))))
            y2 = np.min((height, y1 + np.max((0, h))))
            if obj['area'] > 0 and x2 >= x1 and y2 >= y1:
                obj['clean_bbox'] = [x1, y1, x2, y2]
                valid_objsc.append(obj)

        objs = valid_objs
        objsc = valid_objsc
        num_objs = len(objs)
        num_objsc = len(objsc)

        boxes = np.zeros((num_objs, 4), dtype=np.uint16)
        boxesc = np.zeros((num_objsc, 4), dtype=np.uint16)
        gt_classes = np.zeros((num_objs), dtype=np.int32)
        overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)

        #for ix, obj in enumerate(objsc):
        #    boxesc[ix, :] = obj['clean_bbox']

        for ix, obj in enumerate(objs):
            cls = self._coco_ind_to_class_ind[obj['category_id']]
            boxes[ix, :] = obj['clean_bbox']
            gt_classes[ix] = cls
            if obj['iscrowd']:
                overlaps[ix, :] = -1.0
            else:
                overlaps[ix, cls] = 1.0

        ws = boxes[:, 2] - boxes[:, 0]
        hs = boxes[:, 3] - boxes[:, 1]

        flag = True

        roi_rec = {
            'image': self.image_path_from_index(index),
            'height': height,
            'width': width,
            'boxes': boxes,
            'boxesc': boxesc,
            'gt_classes': gt_classes,
            'gt_overlaps': overlaps,
            'max_classes': overlaps.argmax(axis=1),
            'max_overlaps': overlaps.max(axis=1),
            'flipped': False
        }
        #print '>>>>>> roi_rec'
        #print roi_rec
        if self.load_mask:
            # we only care about valid polygons
            print('>>>>>>> load mask')
            segs = []
            for obj in objs:
                if not isinstance(obj['segmentation'], list):
                    # This is a crowd box
                    segs.append([])
                else:
                    segs.append([
                        np.array(p) for p in obj['segmentation'] if len(p) >= 6
                    ])

            roi_rec['gt_masks'] = segs

            # Uncomment if you need to compute gts based on segmentation masks
            # seg_boxes = _polys2boxes(segs)
            # roi_rec['mask_boxes'] = seg_boxes
        return roi_rec, flag

    def evaluate_detections(self,
                            detections,
                            ann_type='bbox',
                            all_masks=None,
                            extra_path=''):
        """ detections_val2014_results.json """
        res_folder = os.path.join(self.result_path + extra_path, 'results')
        if not os.path.exists(res_folder):
            os.makedirs(res_folder)
        res_file = os.path.join(res_folder,
                                'detections_%s_results.json' % self.image_set)
        print('>>>> res_file {}'.format(res_file))
        self._write_coco_results(detections, res_file, ann_type, all_masks)
        #yujie
        print('>>>>> evaluate_detections info_str')
        info_str = self._do_python_eval(res_file, res_folder, ann_type)
        return info_str
        '''
        if 'test' not in self.image_set:
            info_str = self._do_python_eval(res_file, res_folder, ann_type)
            return info_str
        '''

    def evaluate_sds(self, all_boxes, all_masks):
        #info_str = self.evaluate_detections(all_boxes, 'segm', all_masks)
        info_str = self.evaluate_detections(all_boxes, 'bbox', all_masks)
        return info_str

    def _write_coco_results(self, all_boxes, res_file, ann_type, all_masks):
        """ example results
        [{"image_id": 42,
          "category_id": 18,
          "bbox": [258.15,41.29,348.26,243.78],
          "score": 0.236}, ...]
        """
        all_im_info = [{
            'index': index,
            'height': self.coco.loadImgs(index)[0]['height'],
            'width': self.coco.loadImgs(index)[0]['width']
        } for index in self.image_set_index]
        print('>>>>>>>> _write_coco_results ann_type')
        print(ann_type)
        if ann_type == 'bbox':
            data_pack = [{
                'cat_id': self._class_to_coco_ind[cls],
                'cls_ind': cls_ind,
                'cls': cls,
                'ann_type': ann_type,
                'binary_thresh': self.binary_thresh,
                'all_im_info': all_im_info,
                'boxes': all_boxes[cls_ind]
            } for cls_ind, cls in enumerate(self.classes)
                         if not cls == '__background__']
        elif ann_type == 'segm':
            data_pack = [{
                'cat_id': self._class_to_coco_ind[cls],
                'cls_ind': cls_ind,
                'cls': cls,
                'ann_type': ann_type,
                'binary_thresh': self.binary_thresh,
                'all_im_info': all_im_info,
                'boxes': all_boxes[cls_ind],
                'masks': all_masks[cls_ind]
            } for cls_ind, cls in enumerate(self.classes)
                         if not cls == '__background__']
        else:
            print('unimplemented ann_type: ' + ann_type)
        # results = coco_results_one_category_kernel(data_pack[1])
        # print results[0]
        pool = mp.Pool(mp.cpu_count())
        results = pool.map(coco_results_one_category_kernel, data_pack)
        pool.close()
        pool.join()
        results = sum(results, [])
        print('Writing results json to %s' % res_file)
        with open(res_file, 'w') as f:
            json.dump(results, f, sort_keys=True, indent=4)

    def _do_python_eval(self, res_file, res_folder, ann_type):
        print('do python eval')
        coco_dt = self.coco.loadRes(res_file)
        #print('>>>> do python eval resfile {}'.format(res_file))
        coco_eval = COCOeval(self.coco, coco_dt)
        coco_eval.params.useSegm = (ann_type == 'segm')
        coco_eval.evaluate()
        coco_eval.accumulate()
        info_str = self._print_detection_metrics(coco_eval)

        eval_file = os.path.join(res_folder,
                                 'detections_%s_results.pkl' % self.image_set)
        with open(eval_file, 'wb') as f:
            pickle.dump(coco_eval, f, pickle.HIGHEST_PROTOCOL)
        print('coco eval results saved to %s' % eval_file)
        info_str += 'coco eval results saved to %s\n' % eval_file
        return info_str

    def _print_detection_metrics(self, coco_eval):
        info_str = ''
        IoU_lo_thresh = 0.4
        IoU_hi_thresh = 0.4

        def _get_thr_ind(coco_eval, thr):
            ind = np.where((coco_eval.params.iouThrs > thr - 1e-5)
                           & (coco_eval.params.iouThrs < thr + 1e-5))[0][0]
            iou_thr = coco_eval.params.iouThrs[ind]
            assert np.isclose(iou_thr, thr)
            return ind

        ind_lo = _get_thr_ind(coco_eval, IoU_lo_thresh)
        ind_hi = _get_thr_ind(coco_eval, IoU_hi_thresh)

        # precision has dims (iou, recall, cls, area range, max dets)
        # area range index 0: all area ranges
        # max dets index 2: 100 per image

        precision = \
            coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, :, 0, 2]

        recall = \
            coco_eval.eval['recall'][ind_lo:(ind_hi + 1), :, 0, 2]

        ap_default = np.mean(precision[precision > -1])
        ar_default = np.mean(recall[recall > -1])

        print('~~~~ Mean and per-category AP @ IoU=[%.2f,%.2f] ~~~~' % (
            IoU_lo_thresh, IoU_hi_thresh))
        info_str += '~~~~ Mean and per-category AP @ IoU=[%.2f,%.2f] ~~~~\n' % (
            IoU_lo_thresh, IoU_hi_thresh)
        print('%-15s %5.1f' % ('all', 100 * ap_default))
        info_str += '%-15s %5.1f\n' % ('all', 100 * ap_default)
        print('>>>> self.classes {}'.format(self.classes))
        for cls_ind, cls in enumerate(self.classes):
            if cls == '__background__':
                continue
            # minus 1 because of __background__
            precision = coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :,
                                                    cls_ind - 1, 0, 2]
            #print('>>>> coco eval precision {}'.format(coco_eval.eval['precision']))
            ap = np.mean(precision[precision > -1])
            print('%-15s %5.1f' % (cls, 100 * ap))
            info_str += '%-15s %5.1f\n' % (cls, 100 * ap)

        print('~~~~ Mean and per-category AR @ IoU=[%.2f,%.2f] ~~~~' % (
            IoU_lo_thresh, IoU_hi_thresh))
        info_str += '~~~~ Mean and per-category AR @ IoU=[%.2f,%.2f] ~~~~\n' % (
            IoU_lo_thresh, IoU_hi_thresh)
        print('%-15s %5.1f' % ('all', 100 * ar_default))
        info_str += '%-15s %5.1f\n' % ('all', 100 * ar_default)
        print('>>>> self.classes {}'.format(self.classes))
        for cls_ind, cls in enumerate(self.classes):
            if cls == '__background__':
                continue
            # minus 1 because of __background__
            recall = coco_eval.eval['recall'][ind_lo:(ind_hi + 1), cls_ind - 1,
                                              0, 2]
            #print('>>>> coco eval precision {}'.format(coco_eval.eval['precision']))
            ar = np.mean(recall[recall > -1])
            print('%-15s %5.1f' % (cls, 100 * ar))
            info_str += '%-15s %5.1f\n' % (cls, 100 * ar)

        print('~~~~ Summary metrics ~~~~')
        coco_eval.summarize()

        return info_str
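
Stripped of the custom reporting, the evaluation path in _do_python_eval is the standard pycocotools sequence. A minimal sketch with placeholder paths:

from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

cocoGt = COCO('annotations/instances_val2017.json')         # placeholder
cocoDt = cocoGt.loadRes('detections_val2017_results.json')  # placeholder
coco_eval = COCOeval(cocoGt, cocoDt, 'bbox')
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()
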
Example #15
class COCOdataset(BaseDataset):
    def __init__(self, cfg, subset, istrain):
        super().__init__(cfg, subset, istrain)
        self.image_dir = "{}/images/{}2017".format(self.dataset_root, subset)
        self.coco = COCO("{}/annotations/instances_{}2017.json".format(
            self.dataset_root, subset))
        # get the mapping from original category ids to labels
        self.cat_ids = self.coco.getCatIds()
        self.numcls = len(self.cat_ids)
        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
        self._ids, self.img_infos = self._filter_imgs()

    def _filter_imgs(self, min_size=32):
        # Filter images without ground truths.
        all_img_ids = list(
            set([_['image_id'] for _ in self.coco.anns.values()]))
        # Filter images too small.
        img_ids = []
        img_infos = []
        for i in all_img_ids:
            info = self.coco.loadImgs(i)[0]
            ann_ids = self.coco.getAnnIds(imgIds=i)
            ann_info = self.coco.loadAnns(ann_ids)
            ann = self._parse_ann_info(ann_info)
            if min(info['width'],
                   info['height']) >= min_size and ann['labels'].shape[0] != 0:
                img_ids.append(i)
                img_infos.append(info)
        return img_ids, img_infos

    def _load_ann_info(self, idx):
        img_id = self._ids[idx]
        ann_ids = self.coco.getAnnIds(imgIds=img_id)
        ann_info = self.coco.loadAnns(ann_ids)
        return ann_info

    def _parse_ann_info(self, ann_info):
        gt_bboxes = []
        gt_labels = []
        gt_bboxes_ignore = []

        for i, ann in enumerate(ann_info):
            if ann.get('ignore', False):
                continue
            x1, y1, w, h = ann['bbox']
            if ann['area'] <= 0 or w < 1 or h < 1:
                continue
            bbox = [x1, y1, x1 + w, y1 + h]
            if ann['iscrowd']:
                gt_bboxes_ignore.append(bbox)
            else:
                gt_bboxes.append(bbox)
                gt_labels.append(self.cat2label[ann['category_id']])

        if gt_bboxes:
            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
            gt_labels = np.array(gt_labels, dtype=np.int64)
        else:
            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
            gt_labels = np.array([], dtype=np.int64)

        if gt_bboxes_ignore:
            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
        else:
            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)

        ann = dict(bboxes=gt_bboxes,
                   labels=gt_labels,
                   bboxes_ignore=gt_bboxes_ignore)

        return ann

    def __len__(self):
        return len(self._ids) // self.batch_size

    def _parse_annotation(self, itemidx, random_trainsize):
        img_info = self.img_infos[itemidx]
        ann_info = self._load_ann_info(itemidx)
        ann = self._parse_ann_info(ann_info)
        bboxes = ann['bboxes']  # [x1,y1,x2,y2]
        labels = ann['labels']
        # load the image.
        imgpath = osp.join(self.image_dir, img_info['file_name'])
        img = cv2.imread(imgpath, cv2.IMREAD_COLOR)
        if self.istrain:
            img, bboxes = dataAug.random_horizontal_flip(
                np.copy(img), np.copy(bboxes))
            img, bboxes = dataAug.random_crop(np.copy(img), np.copy(bboxes))
            img, bboxes = dataAug.random_translate(np.copy(img),
                                                   np.copy(bboxes))
        ori_shape = img.shape[:2]
        img, bboxes = dataAug.img_preprocess2(
            np.copy(img), np.copy(bboxes),
            (random_trainsize, random_trainsize), True)
        return img, bboxes, labels, imgpath, ori_shape