Example #1
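Builds the metric list only in eval mode: a COCOMetric (forwarding the model's mask_resolution when a mask post-process is present) or a VOCMetric, depending on cfg.metric; any other metric type just logs a warning and leaves the list empty.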
def _init_metrics(self):
    if self.mode == 'eval':
        if self.cfg.metric == 'COCO':
            mask_resolution = self.model.mask_post_process.mask_resolution if hasattr(
                self.model, 'mask_post_process') else None
            self._metrics = [
                COCOMetric(
                    anno_file=self.dataset.get_anno(),
                    with_background=self.cfg.with_background,
                    mask_resolution=mask_resolution)
            ]
        elif self.cfg.metric == 'VOC':
            self._metrics = [
                VOCMetric(
                    anno_file=self.dataset.get_anno(),
                    with_background=self.cfg.with_background,
                    class_num=self.cfg.num_classes,
                    map_type=self.cfg.map_type)
            ]
        else:
            logger.warn("Metric not support for metric type {}".format(
                self.cfg.metric))
            self._metrics = []
    else:
        self._metrics = []
Example #2
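A standalone evaluation loop over an exported static-graph model: the inference program is loaded into an Executor, each batch from the eval reader is fed by hand, and the fetched detections are accumulated into a COCOMetric.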
def eval():
    dataset = reader_cfg['EvalDataset']
    val_loader = create('TestReader')(dataset,
                                      reader_cfg['worker_num'],
                                      return_list=True)

    place = paddle.CUDAPlace(0) if args.devices == 'gpu' else paddle.CPUPlace()
    exe = paddle.static.Executor(place)

    val_program, feed_target_names, fetch_targets = paddle.fluid.io.load_inference_model(
        args.model_dir,
        exe,
        model_filename=args.model_filename,
        params_filename=args.params_filename)
    clsid2catid = {v: k for k, v in dataset.catid2clsid.items()}

    anno_file = dataset.get_anno()
    metric = COCOMetric(anno_file=anno_file,
                        clsid2catid=clsid2catid,
                        bias=0,
                        IouType='bbox')
    for batch_id, data in enumerate(val_loader):
        data_new = {k: np.array(v) for k, v in data.items()}
        outs = exe.run(val_program,
                       feed={
                           'image': data['image'],
                           'im_shape': data['im_shape'],
                           'scale_factor': data['scale_factor']
                       },
                       fetch_list=fetch_targets,
                       return_numpy=False)
        res = {}
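        # split the fetched outputs by rank: the multi-dimensional array holds
        # the predicted boxes, the 1-D array the per-image box counts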
        for out in outs:
            v = np.array(out)
            if len(v.shape) > 1:
                res['bbox'] = v
            else:
                res['bbox_num'] = v

        metric.update(data_new, res)
        if batch_id % 100 == 0:
            print('Eval iter:', batch_id)
    metric.accumulate()
    metric.log()
    metric.reset()
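For reference, COCOMetric only needs the network inputs carrying the image id plus the predicted boxes and their per-image counts; everything else comes from the annotation file. Below is a minimal sketch that is not taken from the examples above: the import path, the annotation path, and the [class_id, score, x0, y0, x1, y1] row layout of the bbox array are assumptions based on how the examples use the metric.

import numpy as np
from ppdet.metrics import COCOMetric  # assumed import path

# placeholder annotation file; any COCO-format instances file works
metric = COCOMetric(anno_file='annotations/instances_val2017.json', IouType='bbox')

# one synthetic batch: a single image (COCO im_id 1) with one predicted box
inputs = {'im_id': np.array([[1]])}
outputs = {
    'bbox': np.array([[0, 0.9, 10., 20., 110., 220.]], dtype=np.float32),  # assumed [class_id, score, x0, y0, x1, y1]
    'bbox_num': np.array([1], dtype=np.int32),  # boxes per image
}

metric.update(inputs, outputs)   # collect predictions batch by batch
metric.accumulate()              # run COCO evaluation over everything collected
metric.log()                     # print the AP summary
results = metric.get_results()   # dict keyed by IoU type, e.g. results['bbox']
metric.reset()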
Example #3
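A fuller _init_metrics variant: it supports validation during training (taking the annotation file from EvalDataset rather than the training dataset), optional class-wise reporting, a bias setting, output_eval, save_prediction_only, and an additional WiderFace branch.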
    def _init_metrics(self, validate=False):
        if self.mode == 'test' or (self.mode == 'train' and not validate):
            self._metrics = []
            return
        classwise = self.cfg['classwise'] if 'classwise' in self.cfg else False
        if self.cfg.metric == 'COCO':
            # TODO: bias should be unified
            bias = self.cfg['bias'] if 'bias' in self.cfg else 0
            output_eval = self.cfg['output_eval'] \
                if 'output_eval' in self.cfg else None
            save_prediction_only = self.cfg['save_prediction_only'] \
                if 'save_prediction_only' in self.cfg else False

            # pass clsid2catid info to the metric instance so the annotation
            # file does not have to be loaded multiple times
            clsid2catid = {v: k for k, v in self.dataset.catid2clsid.items()} \
                                if self.mode == 'eval' else None

            # when validating during training, the annotation file should be
            # taken from EvalReader instead of self.dataset (the TrainReader)
            anno_file = self.dataset.get_anno()
            if self.mode == 'train' and validate:
                eval_dataset = self.cfg['EvalDataset']
                eval_dataset.check_or_download_dataset()
                anno_file = eval_dataset.get_anno()

            self._metrics = [
                COCOMetric(anno_file=anno_file,
                           clsid2catid=clsid2catid,
                           classwise=classwise,
                           output_eval=output_eval,
                           bias=bias,
                           save_prediction_only=save_prediction_only)
            ]
        elif self.cfg.metric == 'VOC':
            self._metrics = [
                VOCMetric(label_list=self.dataset.get_label_list(),
                          class_num=self.cfg.num_classes,
                          map_type=self.cfg.map_type,
                          classwise=classwise)
            ]
        elif self.cfg.metric == 'WiderFace':
            multi_scale = self.cfg.multi_scale_eval if 'multi_scale_eval' in self.cfg else True
            self._metrics = [
                WiderFaceMetric(image_dir=os.path.join(
                    self.dataset.dataset_dir, self.dataset.image_dir),
                                anno_file=self.dataset.get_anno(),
                                multi_scale=multi_scale)
            ]
        else:
            logger.warn("Metric not support for metric type {}".format(
                self.cfg.metric))
            self._metrics = []
Example #4
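The minimal form: construct a COCOMetric or VOCMetric directly from the dataset annotations and the config, with no extra options.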
def _init_metrics(self):
    if self.cfg.metric == 'COCO':
        self._metrics = [COCOMetric(anno_file=self.dataset.get_anno())]
    elif self.cfg.metric == 'VOC':
        self._metrics = [
            VOCMetric(
                anno_file=self.dataset.get_anno(),
                class_num=self.cfg.num_classes,
                map_type=self.cfg.map_type)
        ]
    else:
        logger.warn("Metric not support for metric type {}".format(
            self.cfg.metric))
        self._metrics = []
Example #5
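Similar selection logic, but metrics are skipped entirely in test mode, and the mere presence of a bias key in the config switches COCOMetric to bias=1.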
def _init_metrics(self):
    if self.mode == 'test':
        self._metrics = []
        return
    if self.cfg.metric == 'COCO':
        # TODO: bias should be unified
        bias = 1 if 'bias' in self.cfg else 0
        self._metrics = [
            COCOMetric(anno_file=self.dataset.get_anno(), bias=bias)
        ]
    elif self.cfg.metric == 'VOC':
        self._metrics = [
            VOCMetric(anno_file=self.dataset.get_anno(),
                      class_num=self.cfg.num_classes,
                      map_type=self.cfg.map_type)
        ]
    else:
        logger.warn("Metric not support for metric type {}".format(
            self.cfg.metric))
        self._metrics = []
Example #6
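An eval_function callback: it runs a compiled test program over val_loader, accumulates the detections into a COCOMetric, and returns a single scalar (the COCO mAP) from get_results().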
def eval_function(exe, compiled_test_program, test_feed_names,
                  test_fetch_list):
    clsid2catid = {v: k for k, v in dataset.catid2clsid.items()}

    anno_file = dataset.get_anno()
    metric = COCOMetric(anno_file=anno_file,
                        clsid2catid=clsid2catid,
                        bias=1,
                        IouType='bbox')
    for batch_id, data in enumerate(val_loader):
        data_new = {k: np.array(v) for k, v in data.items()}
        outs = exe.run(compiled_test_program,
                       feed={
                           'image': data['image'],
                           'im_shape': data['im_shape'],
                           'scale_factor': data['scale_factor']
                       },
                       fetch_list=test_fetch_list,
                       return_numpy=False)
        res = {}
        for out in outs:
            v = np.array(out)
            if len(v.shape) > 1:
                res['bbox'] = v
            else:
                res['bbox_num'] = v

        metric.update(data_new, res)
        if batch_id % 100 == 0:
            print('Eval iter:', batch_id)
    metric.accumulate()
    metric.log()
    map_res = metric.get_results()
    metric.reset()
    return map_res['bbox'][0]
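Here map_res['bbox'][0] is the first entry of the COCO evaluation stats, i.e. the mAP averaged over IoU thresholds 0.50:0.95 under the usual pycocotools stats ordering, so the callback returns a single scalar score.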