Example 1
 def build_GT(self):
   # Collect one ground-truth record per object for every train annotation file.
   for file in glob.glob(os.path.join(self.dataset_root, 'train', 'labels', "*.xml")):
     _, boxGT, labelGT, difficults = PascalVocXmlParser(file, self.cateNames).parse(filterdiff=False)
     for box, label, difficult in zip(boxGT, labelGT, difficults):
       self.rec_gt[file].append({
         'label': label,
         'bbox': box,
         'difficult': difficult
       })
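The build_GT variants here append to rec_gt without first creating the key, which only works when rec_gt is a defaultdict(list); Example 7 below constructs it exactly that way. A minimal sketch of the assumed surrounding setup; the class name Evaluator is a placeholder, not from the source:

import glob
import os
from collections import defaultdict

class Evaluator:  # hypothetical owner of the build_GT method above
    def __init__(self, dataset_root, cateNames):
        self.dataset_root = dataset_root  # contains train/labels/*.xml
        self.cateNames = cateNames        # category names passed to the parser
        self.rec_gt = defaultdict(list)   # file -> [{'label','bbox','difficult'}, ...]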
Example 2
  def build_GT(self):
    # Read the VOC2007 test split ids, then parse each image's annotation file.
    filepath = os.path.join(self.dataset_root, 'VOC2007', 'ImageSets', 'Main', 'test.txt')
    with open(filepath, 'r') as f:
      filelist = f.readlines()

    filelist = [file.strip() for file in filelist]
    for file in filelist:
      _, boxGT, labelGT, difficults = PascalVocXmlParser(self._annopath.format(file), self.cateNames).parse(filterdiff=False)
      for box, label, difficult in zip(boxGT, labelGT, difficults):
        self.rec_gt[file].append({
          'label': label,
          'bbox': box,
          'difficult': difficult
        })
Example 3
 def _parse_annotation(self, itemidx, random_trainsize):
     rootpath, filename = self._ids[itemidx]
     annpath = self._annopath.format(rootpath, filename)
     imgpath = self._imgpath.format(rootpath, filename)
     fname, bboxes, labels = PascalVocXmlParser(annpath, self.labels).parse()
     img = cv2.imread(imgpath, cv2.IMREAD_COLOR)
     if self.istrain:
         # np.copy guards against the augmentation helpers mutating their inputs.
         img, bboxes = dataAug.random_horizontal_flip(np.copy(img), np.copy(bboxes))
         img, bboxes = dataAug.random_crop(np.copy(img), np.copy(bboxes))
         img, bboxes = dataAug.random_translate(np.copy(img), np.copy(bboxes))
     ori_shape = img.shape[:2]
     # Resize/pad the image and boxes to the sampled training resolution.
     img, bboxes = dataAug.img_preprocess2(np.copy(img), np.copy(bboxes),
                                           (random_trainsize, random_trainsize), True)
     return img, bboxes, labels, imgpath, ori_shape
Example 4
 def append(self, imgpath, annpath, nms_boxes, nms_scores, nms_labels, visualize=True):
   # Paths may arrive as bytes (common in data pipelines); decode to str first.
   imgpath = imgpath.decode('UTF-8')
   annpath = annpath.decode('UTF-8')
   if nms_boxes is not None:  # the image produced at least one detection
     for i in range(nms_boxes.shape[0]):
       rec = {
         "img_idx": imgpath.split('/')[-1].split('.')[0],
         "bbox": nms_boxes[i],
         "score": float(nms_scores[i])
       }
       self.rec_pred[nms_labels[i]].append(rec)
     # Keep a handful of images with predictions and ground truth drawn on.
     if visualize and len(self.visual_imgs) < self.num_visual:
       _, boxGT, labelGT, _ = PascalVocXmlParser(str(annpath), self.cateNames).parse()
       boxGT = np.array(boxGT)
       labelGT = np.array(labelGT)
       self.append_visulize(imgpath, nms_boxes, nms_labels, nms_scores, boxGT, labelGT)
Example 5
 def _load_batch(self, idx_batch, random_trainsize):
     img_batch = []
     imgpath_batch = []
     annpath_batch = []
     pad_scale_batch = []
     ori_shape_batch = []
     grid0_batch = []
     grid1_batch = []
     grid2_batch = []
     for idx in range(self.batch_size):
         rootpath, filename = self._ids[idx_batch * self.batch_size + idx]
         annpath = self._annopath.format(rootpath, filename)
         imgpath = self._imgpath.format(rootpath, filename)
         fname, bboxes, labels, _ = PascalVocXmlParser(
             annpath, self.labels).parse()
         img = cv2.imread(imgpath, cv2.IMREAD_COLOR)
         img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
         ori_shape = img.shape[:2]
          # Resize the image and rescale the boxes to the sampled training size.
          img, bboxes = self._transform(random_trainsize, random_trainsize,
                                        img, bboxes)
         list_grids = transform.preprocess(bboxes,
                                           labels,
                                           img.shape[:2],
                                           class_num=len(self.labels),
                                           anchors=self.anchors)
         pad_scale = (1, 1)
         img_batch.append(img)
         imgpath_batch.append(imgpath)
         annpath_batch.append(annpath)
         ori_shape_batch.append(ori_shape)
         pad_scale_batch.append(pad_scale)
         grid0_batch.append(list_grids[0])
         grid1_batch.append(list_grids[1])
         grid2_batch.append(list_grids[2])
     return np.array(img_batch).astype(np.float32), \
            imgpath_batch, \
            annpath_batch, \
            np.array(pad_scale_batch).astype(np.float32), \
            np.array(ori_shape_batch).astype(np.float32), \
            np.array(grid0_batch).astype(np.float32), \
            np.array(grid1_batch).astype(np.float32), \
            np.array(grid2_batch).astype(np.float32)
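The method above indexes self._ids by idx_batch * self.batch_size + idx, so a driver loop simply walks whole batches. A minimal sketch; the loop bounds and the 416 input size are assumptions for illustration, not taken from the source:

# Hypothetical driver for the _load_batch method above.
num_batches = len(dataset._ids) // dataset.batch_size
for idx_batch in range(num_batches):
    (imgs, imgpaths, annpaths, pad_scales, ori_shapes,
     grid0, grid1, grid2) = dataset._load_batch(idx_batch, random_trainsize=416)
    # imgs: (batch_size, 416, 416, 3) float32; grid0-2: per-scale targets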
Example 6
    def __getitem__(self, idx):
        fname, bboxes, labels = PascalVocXmlParser(self.anns[idx],
                                                   self.labels).parse()
        img = cv2.imread(fname, cv2.IMREAD_COLOR)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        ori_shape = img.shape[:2]

        # Resize the image and rescale the boxes to the network input size.
        img, bboxes = self._transform(img, bboxes)
        list_grids = transform.preprocess(bboxes,
                                          labels,
                                          img.shape[:2],
                                          class_num=1,
                                          anchors=self.anchors)

        pad_scale = (1, 1)
        return img.astype(np.float32), \
               fname, \
               np.array(pad_scale).astype(np.float32), \
               np.array(ori_shape).astype(np.float32), \
               list_grids[0].astype(np.float32), \
               list_grids[1].astype(np.float32), \
               list_grids[2].astype(np.float32)
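This __getitem__ matches the torch.utils.data.Dataset protocol (Example 8 shows the same project family using PyTorch), so the natural consumer is a DataLoader. A hedged sketch under that assumption; dataset stands in for an instance of the class that owns the method above:

from torch.utils.data import DataLoader

# "dataset" is assumed to be an instance of the Dataset subclass above.
loader = DataLoader(dataset, batch_size=8, shuffle=True, num_workers=4)
for img, fname, pad_scale, ori_shape, grid0, grid1, grid2 in loader:
    # img: (8, H, W, 3) float32; grid0-2: per-scale detection targets
    pass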
Example 7
cateNames = [
  # The head of this list was truncated in the source snippet; the first six
  # entries below are restored from the standard PASCAL VOC class list.
  "aeroplane",
  "bicycle",
  "bird",
  "boat",
  "bottle",
  "bus",
  "car",
  "cat",
  "chair",
  "cow",
  "diningtable",
  "dog",
  "horse",
  "motorbike",
  "person",
  "pottedplant",
  "sheep",
  "sofa",
  "train",
  "tvmonitor"
]
# `filelist` and `_annopath` are defined earlier in the original script.
filelist = [file.strip() for file in filelist]
rec_gt = defaultdict(list)
for file in filelist:
  _, boxGT, labelGT, difficults = PascalVocXmlParser(_annopath.format(file), cateNames).parse()
  for box, label, difficult in zip(boxGT, labelGT, difficults):
    rec_gt[file].append({
      'label': label,
      'bbox': box,
      'difficult': difficult
    })
cls = "person"
img_idxs = ['000001']
for imgidx in set(img_idxs):
  print(rec_gt[imgidx])
  # Keep only the ground-truth records for the requested class.
  _rec = [rec for rec in rec_gt[imgidx] if rec['label'] == cls]
  print(_rec)
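Records in this shape typically feed a VOC-style average-precision computation, where boxes flagged difficult are excluded from the recall denominator. A short sketch of that counting step, reusing rec_gt and cls from the snippet above:

# Count non-difficult ground truths of the target class across all images;
# VOC-style AP uses this as the recall denominator (sketch, not from the source).
npos = sum(1 for recs in rec_gt.values()
           for rec in recs
           if rec['label'] == cls and not rec['difficult'])
print(cls, 'non-difficult ground-truth boxes:', npos)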
Example 8
    def _load_batch(self, idx_batch, random_trainsize):
        outputshapes = random_trainsize // self.strides

        batch_image = np.zeros((self.batch_size, random_trainsize, random_trainsize, 3))
        batch_label_sbbox = np.zeros((self.batch_size, outputshapes[0], outputshapes[0],
                                      self._gt_per_grid, 6 + self.numcls))
        batch_label_mbbox = np.zeros((self.batch_size, outputshapes[1], outputshapes[1],
                                      self._gt_per_grid, 6 + self.numcls))
        batch_label_lbbox = np.zeros((self.batch_size, outputshapes[2], outputshapes[2],
                                      self._gt_per_grid, 6 + self.numcls))
        temp_batch_sbboxes = []
        temp_batch_mbboxes = []
        temp_batch_lbboxes = []
        imgpath_batch = []
        orishape_batch = []
        max_sbbox_per_img = 0
        max_mbbox_per_img = 0
        max_lbbox_per_img = 0
        for idx in range(self.batch_size):
            rootpath, filename = self._ids[idx_batch * self.batch_size + idx]
            annpath = self._annopath.format(rootpath, filename)
            imgpath = self._imgpath.format(rootpath, filename)
            fname, bboxes, labels = PascalVocXmlParser(annpath, self.labels).parse()
            img = cv2.imread(imgpath, cv2.IMREAD_COLOR)
            ori_shape = img.shape[:2]
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            # Resize the image and rescale the boxes to the sampled training size.
            img, bboxes = self._transform(random_trainsize, random_trainsize, img, bboxes)

            # data augmentation in original-strongeryolo
            # if self.istrain:
            #     img, bboxes = dataAug.random_horizontal_flip(np.copy(img), np.copy(bboxes))
            #     img, bboxes = dataAug.random_crop(np.copy(img), np.copy(bboxes))
            #     img, bboxes = dataAug.random_translate(np.copy(img), np.copy(bboxes))
            # img, bboxes = dataAug.img_preprocess2(np.copy(img), np.copy(bboxes),
            #                                       (random_trainsize, random_trainsize), True)

            label_sbbox, label_mbbox, label_lbbox, sbboxes, mbboxes, lbboxes = \
                self.preprocess_anchorfree(bboxes, labels, outputshapes)
            batch_image[idx, :, :, :] = img
            batch_label_sbbox[idx, :, :, :, :] = label_sbbox
            batch_label_mbbox[idx, :, :, :, :] = label_mbbox
            batch_label_lbbox[idx, :, :, :, :] = label_lbbox

            # Use a single all-zero box as a placeholder when a scale has no targets.
            zeros = np.zeros((1, 4), dtype=np.float32)
            sbboxes = sbboxes if len(sbboxes) != 0 else zeros
            mbboxes = mbboxes if len(mbboxes) != 0 else zeros
            lbboxes = lbboxes if len(lbboxes) != 0 else zeros
            temp_batch_sbboxes.append(sbboxes)
            temp_batch_mbboxes.append(mbboxes)
            temp_batch_lbboxes.append(lbboxes)
            max_sbbox_per_img = max(max_sbbox_per_img, len(sbboxes))
            max_mbbox_per_img = max(max_mbbox_per_img, len(mbboxes))
            max_lbbox_per_img = max(max_lbbox_per_img, len(lbboxes))
            imgpath_batch.append(imgpath)
            orishape_batch.append(ori_shape)

        # Pad each image's per-scale boxes to a common length; the +1 guarantees
        # at least one zero padding row even for the image with the most boxes.
        batch_sbboxes = np.array(
            [np.concatenate([sbboxes, np.zeros((max_sbbox_per_img + 1 - len(sbboxes), 4), dtype=np.float32)], axis=0)
             for sbboxes in temp_batch_sbboxes])
        batch_mbboxes = np.array(
            [np.concatenate([mbboxes, np.zeros((max_mbbox_per_img + 1 - len(mbboxes), 4), dtype=np.float32)], axis=0)
             for mbboxes in temp_batch_mbboxes])
        batch_lbboxes = np.array(
            [np.concatenate([lbboxes, np.zeros((max_lbbox_per_img + 1 - len(lbboxes), 4), dtype=np.float32)], axis=0)
             for lbboxes in temp_batch_lbboxes])
        return torch.from_numpy(np.array(batch_image).transpose((0, 3, 1, 2)).astype(np.float32)), \
               imgpath_batch, \
               torch.from_numpy(np.array(orishape_batch).astype(np.float32)), \
               torch.from_numpy(np.array(batch_label_sbbox).astype(np.float32)), \
               torch.from_numpy(np.array(batch_label_mbbox).astype(np.float32)), \
               torch.from_numpy(np.array(batch_label_lbbox).astype(np.float32)), \
               torch.from_numpy(np.array(batch_sbboxes).astype(np.float32)), \
               torch.from_numpy(np.array(batch_mbboxes).astype(np.float32)), \
               torch.from_numpy(np.array(batch_lbboxes).astype(np.float32))
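To close, a hedged sketch of how the nine returned values would be consumed in a training step; model and criterion are placeholders, not from the source:

# Hypothetical consumption of _load_batch's outputs (PyTorch training step).
(imgs, imgpaths, ori_shapes,
 label_sbbox, label_mbbox, label_lbbox,
 sbboxes, mbboxes, lbboxes) = dataset._load_batch(idx_batch, trainsize)
preds = model(imgs)  # imgs are already NCHW float32 tensors
loss = criterion(preds, label_sbbox, label_mbbox, label_lbbox,
                 sbboxes, mbboxes, lbboxes)
loss.backward()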