Example #1
def _do_python_eval(self, _coco):
    coco_dt = _coco.loadRes(self._result_file)
    coco_eval = COCOeval(_coco, coco_dt)
    coco_eval.params.useSegm = False
    coco_eval.evaluate()
    coco_eval.accumulate()
    self._print_detection_metrics(coco_eval)
def _do_segmentation_eval(json_dataset, res_file, output_dir):
    coco_dt = json_dataset.COCO.loadRes(str(res_file))
    coco_eval = COCOeval(json_dataset.COCO, coco_dt, 'segm')
    coco_eval.evaluate()
    coco_eval.accumulate()
    _log_detection_eval_metrics(json_dataset, coco_eval)
    eval_file = os.path.join(output_dir, 'segmentation_results.pkl')
    robust_pickle_dump(coco_eval, eval_file)
    logger.info('Wrote json eval results to: {}'.format(eval_file))
def _do_detection_eval(json_dataset, res_file, output_dir):
    coco_dt = json_dataset.COCO.loadRes(str(res_file))
    coco_eval = COCOeval(json_dataset.COCO, coco_dt, 'bbox')
    coco_eval.evaluate()
    coco_eval.accumulate()
    _log_detection_eval_metrics(json_dataset, coco_eval)
    eval_file = os.path.join(output_dir, 'detection_results.pkl')
    save_object(coco_eval, eval_file)
    logger.info('Wrote json eval results to: {}'.format(eval_file))
    return coco_eval
Example #4
def _do_detection_eval(self, res_file, output_dir):
    ann_type = 'bbox'
    coco_dt = self._COCO.loadRes(res_file)
    coco_eval = COCOeval(self._COCO, coco_dt)
    coco_eval.params.useSegm = (ann_type == 'segm')
    coco_eval.evaluate()
    coco_eval.accumulate()
    self._print_detection_eval_metrics(coco_eval)
    eval_file = osp.join(output_dir, 'detection_results.pkl')
    with open(eval_file, 'wb') as fid:
        pickle.dump(coco_eval, fid, pickle.HIGHEST_PROTOCOL)
    print('Wrote COCO eval results to: {}'.format(eval_file))
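The pickled COCOeval object written above can be reloaded later to inspect the accumulated precision/recall arrays without re-running evaluation. A minimal sketch, assuming the 'detection_results.pkl' file produced above:

import pickle

with open('detection_results.pkl', 'rb') as fid:
    coco_eval = pickle.load(fid)
# coco_eval.eval['precision'] has dims (iou, recall, cls, area range, max dets)
print(coco_eval.eval['precision'].shape)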
Example #5
def evaluate_coco(model, dataset, coco, config, eval_type="bbox", limit=None, image_ids=None):
    """Runs official COCO evaluation.
    dataset: A Dataset object with validation data
    eval_type: "bbox" or "segm" for bounding box or segmentation evaluation
    """
    # Pick COCO images from the dataset
    image_ids = image_ids or dataset.image_ids

    # Limit to a subset
    if limit:
        image_ids = image_ids[:limit]
        
    # Get corresponding COCO image IDs.
    coco_image_ids = [dataset.image_info[id]["id"] for id in image_ids]

    t_prediction = 0
    t_start = time.time()

    results = []
    for i, image_id in enumerate(image_ids):
        if i % 10 == 0:
            print('Processed %d images' % i)
        # Load image
        image = dataset.load_image(image_id)
        # Run detection
        t = time.time()
        r = inference(image, model, config)
        t_prediction += (time.time() - t)

        # Convert results to COCO format
        image_results = build_coco_results(dataset, coco_image_ids[i:i + 1],
                                           r["rois"], r["class_ids"],
                                           r["scores"], r["masks"])
        results.extend(image_results)

    # Load results. This modifies results with additional attributes.
    coco_results = coco.loadRes(results)

    # Evaluate
    cocoEval = COCOeval(coco, coco_results, eval_type)
    cocoEval.params.imgIds = coco_image_ids
    # Only evaluate for person.
    cocoEval.params.catIds = coco.getCatIds(catNms=['person']) 
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()

    print("Prediction time: {}. Average {}/image".format(
        t_prediction, t_prediction / len(image_ids)))
    print("Total time: ", time.time() - t_start)
def _do_eval(res_file, output_dir, _COCO, classes):
    # The function is borrowed from https://github.com/rbgirshick/fast-rcnn/ and changed
    ann_type = 'bbox'
    coco_dt = _COCO.loadRes(res_file)
    coco_eval = COCOeval(_COCO, coco_dt)
    coco_eval.params.useSegm = (ann_type == 'segm')
    coco_eval.evaluate()
    coco_eval.accumulate()
    _print_eval_metrics(coco_eval, classes)
    # Write the result file; output_dir is used directly as the output .mat path
    eval_file = osp.join(output_dir)
    eval_result = {}
    eval_result['precision'] = coco_eval.eval['precision']
    eval_result['recall'] = coco_eval.eval['recall']
    sio.savemat(eval_file, eval_result)
    print('Wrote COCO eval results to: {}'.format(eval_file))
def evaluate():
    cocoGt = COCO('annotations.json')
    cocoDt = cocoGt.loadRes('detections.json')
    cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
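loadRes in the minimal example above expects 'detections.json' to hold a list of result dicts in the standard COCO results format, with boxes as [x, y, width, height] in absolute pixels. A sketch of producing such a file (the ids and box values are illustrative):

import json

detections = [
    {
        'image_id': 42,        # must match an image id in annotations.json
        'category_id': 1,      # must match a category id in annotations.json
        'bbox': [258.2, 41.3, 348.3, 243.6],  # [x, y, w, h]
        'score': 0.93,
    },
]

with open('detections.json', 'w') as f:
    json.dump(detections, f)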
Example #8
def coco_evaluate(json_dataset, res_file, image_ids):
    coco_dt = json_dataset.COCO.loadRes(str(res_file))
    coco_eval = COCOeval(json_dataset.COCO, coco_dt, 'bbox')
    coco_eval.params.imgIds = image_ids
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    return coco_eval
def cocoval(detected_json):
    eval_json = config.eval_json
    eval_gt = COCO(eval_json)

    eval_dt = eval_gt.loadRes(detected_json)
    cocoEval = COCOeval(eval_gt, eval_dt, iouType='bbox')

    # cocoEval.params.imgIds = eval_gt.getImgIds()
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
Example #10
def evaluate_detections(self, all_boxes, output_dir=None):
    resFile = self._write_coco_results_file(all_boxes)
    cocoGt = self._annotations
    cocoDt = cocoGt.loadRes(resFile)
    # running evaluation
    cocoEval = COCOeval(cocoGt, cocoDt)
    # useSegm should default to 0
    # cocoEval.params.useSegm = 0
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
Example #11
	def __init__(self, dataset_json, preds_json):
		# load dataset ground truths
		self.dataset = COCO(dataset_json)
		category_ids = self.dataset.getCatIds()
		categories = [x['name'] for x in self.dataset.loadCats(category_ids)]
		self.category_to_id_map = dict(zip(categories, category_ids))
		self.classes = ['__background__'] + categories
		self.num_classes = len(self.classes)

		# load predictions
		self.preds = self.dataset.loadRes(preds_json)
		self.coco_eval = COCOeval(self.dataset, self.preds, 'segm')
		self.coco_eval.params.maxDets = [1, 50, 255]
Example #12
    def compute_ap(self):
        coco_res = self.loader.coco.loadRes(self.filename)

        cocoEval = COCOeval(self.loader.coco, coco_res)
        cocoEval.params.imgIds = self.loader.get_filenames()
        cocoEval.params.useSegm = False

        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        return cocoEval
Example #13
def _do_coco_eval(self, dtFile, output_dir):
    """
    Evaluate using COCO API
    """
    if self._image_set == 'train' or self._image_set == 'val':
        cocoGt = self._coco[0]
        cocoDt = COCO(dtFile)
        E = COCOeval(cocoGt, cocoDt)
        E.evaluate()
        E.accumulate()
        E.summarize()
    def _update(self):
        """Use coco to get real scores. """
        if self._current_id != len(self._img_ids):
            warnings.warn(
                'Recorded {} out of {} validation images, incomplete results'.format(
                    self._current_id, len(self._img_ids)))
        import json
        try:
            with open(self._filename, 'w') as f:
                json.dump(self._results, f)
        except IOError as e:
            raise RuntimeError("Unable to dump json file, ignored. What(): {}".format(str(e)))

        pred = self.dataset.coco.loadRes(self._filename)
        gt = self.dataset.coco
        # lazy import pycocotools
        try_import_pycocotools()
        from pycocotools.cocoeval import COCOeval
        coco_eval = COCOeval(gt, pred, 'bbox')
        coco_eval.evaluate()
        coco_eval.accumulate()
        self._coco_eval = coco_eval
        return coco_eval
def _do_keypoint_eval(json_dataset, res_file, output_dir):
    ann_type = 'keypoints'
    imgIds = json_dataset.COCO.getImgIds()
    imgIds.sort()
    coco_dt = json_dataset.COCO.loadRes(res_file)
    coco_eval = COCOeval(json_dataset.COCO, coco_dt, ann_type)
    coco_eval.params.imgIds = imgIds
    coco_eval.evaluate()
    coco_eval.accumulate()
    eval_file = os.path.join(output_dir, 'keypoint_results.pkl')
    robust_pickle_dump(coco_eval, eval_file)
    logger.info('Wrote json eval results to: {}'.format(eval_file))
    coco_eval.summarize()
Example #16
def evaluate_coco(model, dataset, coco, eval_type="bbox", limit=0, image_ids=None):
    """Runs official COCO evaluation.
    dataset: A Dataset object with validation data
    eval_type: "bbox" or "segm" for bounding box or segmentation evaluation
    limit: if not 0, it's the number of images to use for evaluation
    """
    # Pick COCO images from the dataset
    image_ids = image_ids or dataset.image_ids

    # Limit to a subset
    if limit:
        image_ids = image_ids[:limit]

    # Get corresponding COCO image IDs.
    coco_image_ids = [dataset.image_info[id]["id"] for id in image_ids]

    t_prediction = 0
    t_start = time.time()

    results = []
    for i, image_id in enumerate(image_ids):
        # Load image
        image = dataset.load_image(image_id)

        # Run detection
        t = time.time()
        r = model.detect([image], verbose=0)[0]
        t_prediction += (time.time() - t)

        # Convert results to COCO format
        # Cast masks to uint8 because COCO tools errors out on bool
        image_results = build_coco_results(dataset, coco_image_ids[i:i + 1],
                                           r["rois"], r["class_ids"],
                                           r["scores"],
                                           r["masks"].astype(np.uint8))
        results.extend(image_results)

    # Load results. This modifies results with additional attributes.
    coco_results = coco.loadRes(results)

    # Evaluate
    cocoEval = COCOeval(coco, coco_results, eval_type)
    cocoEval.params.imgIds = coco_image_ids
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()

    print("Prediction time: {}. Average {}/image".format(
        t_prediction, t_prediction / len(image_ids)))
    print("Total time: ", time.time() - t_start)
Example #17
def validate(val_loader, model, i, silence=False):
    batch_time = AverageMeter()
    coco_gt = val_loader.dataset.coco
    coco_pred = COCO()
    coco_pred.dataset['images'] = [img for img in coco_gt.dataset['images']]
    coco_pred.dataset['categories'] = copy.deepcopy(coco_gt.dataset['categories'])
    id = 0

    # switch to evaluate mode
    model.eval()

    end = time.time()
    for batch_idx, (inputs, anns) in enumerate(val_loader):

        # forward images one by one (TODO: support batch mode later, or
        # multiprocess)
        for j, input in enumerate(inputs):
            input_anns = anns[j]  # anns of this input
            gt_bbox = np.vstack([ann['bbox'] + [ann['ordered_id']] for ann in input_anns])
            im_info = [[input.size(1), input.size(2),
                        input_anns[0]['scale_ratio']]]
            input_var = Variable(input.unsqueeze(0),
                                 requires_grad=False).cuda()

            cls_prob, bbox_pred, rois = model(input_var, im_info)
            scores, pred_boxes = model.interpret_outputs(cls_prob, bbox_pred, rois, im_info)
            print(scores, pred_boxes)
            # for i in range(scores.shape[0]):


        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

    coco_pred.createIndex()
    coco_eval = COCOeval(coco_gt, coco_pred, 'bbox')
    coco_eval.params.imgIds = sorted(coco_gt.getImgIds())
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()

    print('iter: [{0}] '
          'Time {batch_time.avg:.3f} '
          'Val Stats: {1}'
          .format(i, coco_eval.stats,
                  batch_time=batch_time))

    return coco_eval.stats[0]
def evaluate_predictions_on_coco(
    coco_gt, coco_results, json_result_file, iou_type="bbox"
):
    import json

    with open(json_result_file, "w") as f:
        json.dump(coco_results, f)

    from pycocotools.cocoeval import COCOeval

    coco_dt = coco_gt.loadRes(str(json_result_file))
    # coco_dt = coco_gt.loadRes(coco_results)
    coco_eval = COCOeval(coco_gt, coco_dt, iou_type)
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    return coco_eval
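A minimal usage sketch for evaluate_predictions_on_coco above, assuming a ground-truth annotation file and a coco_results list of COCO-format result dicts (the file names here are illustrative, not taken from this code base):

from pycocotools.coco import COCO

coco_gt = COCO('instances_val2017.json')  # ground-truth annotations
# coco_results: list of {'image_id', 'category_id', 'bbox', 'score'} dicts
coco_eval = evaluate_predictions_on_coco(
    coco_gt, coco_results, 'detections.json', iou_type='bbox')
print(coco_eval.stats[0])  # AP@[.50:.95] after summarize()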
def calc_coco_metrics(coco_annotations, predictions, classes):
  annotations = ObjectDetectorJson.convert_coco_to_toolbox_format(coco_annotations, classes)
  detections = []
  for annotation, prediction in zip(annotations, predictions):
    width, height = annotation['image_size']
    image_id = annotation['image_id']

    for obj_id, obj in enumerate(prediction):
      label = int(obj[1])
      score = float(obj[2])
      if obj_id != 0 and score == 0:  # keep at least one prediction per image (COCO API issue)
        continue
      bbox = (obj[3:]).tolist()
      bbox[::2] = [width * i for i in bbox[::2]]
      bbox[1::2] = [height * i for i in bbox[1::2]]

      xmin, ymin, xmax, ymax = bbox
      w_bbox = round(xmax - xmin, 1)
      h_bbox = round(ymax - ymin, 1)
      xmin, ymin = round(xmin, 1), round(ymin, 1)

      coco_det = {}
      coco_det['image_id'] = image_id
      coco_det['category_id'] = label
      coco_det['bbox'] = [xmin, ymin, w_bbox, h_bbox]
      coco_det['score'] = score
      detections.append(coco_det)

  coco_dt = coco_annotations.loadRes(detections)
  img_ids = sorted(coco_annotations.getImgIds())
  coco_eval = COCOeval(coco_annotations, coco_dt, 'bbox')
  coco_eval.params.imgIds = img_ids
  coco_eval.evaluate()
  coco_eval.accumulate()
  coco_eval.summarize()

  metrics = {}
  for metric_name, value in zip(METRICS_NAMES, coco_eval.stats):
    metrics[metric_name] = value

  return metrics
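After summarize(), coco_eval.stats is a fixed 12-element array, so the METRICS_NAMES constant used above must line up with it index-for-index. A sketch of such a list (the names are illustrative; the actual constant in this code base may differ):

# Order of COCOeval.stats for iouType 'bbox' or 'segm':
METRICS_NAMES = [
    'AP@[.50:.95]', 'AP@.50', 'AP@.75',
    'AP (small)', 'AP (medium)', 'AP (large)',
    'AR@1', 'AR@10', 'AR@100',
    'AR (small)', 'AR (medium)', 'AR (large)',
]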
Example #20
    dets = pd.read_csv(pred_path)

    for detid, row in tqdm(dets.iterrows()):

        entry = get_entry_dict()

        entry['image_id'] = imgIds.index(row['image_name'])
        entry['id'] = detid
        entry['bbox'] = [row['x1'], row['y1'], row['x2'], row['y2']]
        entry['area'] = (row['x2'] - row['x1']) * (row['y2'] - row['y1'])
        entry['score'] = row['score']

        dt_json.append(entry)

    return dt_json


if __name__ == "__main__":

    gt_json, dt_json = get_gt_json(), get_dt_json()

    from coco_custom import CustomCOCO
    gt_coco_format = CustomCOCO(gt_json)
    dt_coco_format = CustomCOCO(dt_json)

    # running evaluation
    cocoEval = COCOeval(gt_coco_format, dt_coco_format, iouType='bbox')
    cocoEval.params.imgIds = range(0, 100)
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
def evaluate_coco(generator, model, threshold=0.05):
    # start collecting results
    results = []
    image_ids = []
    for i in range(generator.size()):
        image = generator.load_image(i)
        image = generator.preprocess_image(image)
        image, scale = generator.resize_image(image)

        # run network
        _, _, detections = model.predict_on_batch(np.expand_dims(image, axis=0))

        # clip to image shape
        detections[:, :, 0] = np.maximum(0, detections[:, :, 0])
        detections[:, :, 1] = np.maximum(0, detections[:, :, 1])
        detections[:, :, 2] = np.minimum(image.shape[1], detections[:, :, 2])
        detections[:, :, 3] = np.minimum(image.shape[0], detections[:, :, 3])

        # correct boxes for image scale
        detections[0, :, :4] /= scale

        # change to (x, y, w, h) (MS COCO standard)
        detections[:, :, 2] -= detections[:, :, 0]
        detections[:, :, 3] -= detections[:, :, 1]

        # compute predicted labels and scores
        for detection in detections[0, ...]:
            positive_labels = np.where(detection[4:] > threshold)[0]

            # append detections for each positively labeled class
            for label in positive_labels:
                image_result = {
                    'image_id'    : generator.image_ids[i],
                    'category_id' : generator.label_to_coco_label(label),
                    'score'       : float(detection[4 + label]),
                    'bbox'        : (detection[:4]).tolist(),
                }

                # append detection to results
                results.append(image_result)

        # append image to list of processed images
        image_ids.append(generator.image_ids[i])

        # print progress
        print('{}/{}'.format(i, generator.size()), end='\r')

    if not len(results):
        return

    # write output
    json.dump(results, open('{}_bbox_results.json'.format(generator.set_name), 'w'), indent=4)
    json.dump(image_ids, open('{}_processed_image_ids.json'.format(generator.set_name), 'w'), indent=4)

    # load results in COCO evaluation tool
    coco_true = generator.coco
    coco_pred = coco_true.loadRes('{}_bbox_results.json'.format(generator.set_name))

    # run COCO evaluation
    coco_eval = COCOeval(coco_true, coco_pred, 'bbox')
    coco_eval.params.imgIds = image_ids
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
Example #22
def main(args):

    #pdb.set_trace()
    mscoco = dataset_feeder.MscocoFeeder(args, args.dataset_meta[0])
    nsamples = len(mscoco.samples)
    nclasses = len(mscoco.cat_names) + 1  # categories + background
    batch_size_per_gpu = args.batch_size
    batch_size = args.num_gpus * batch_size_per_gpu
    pretrained_model_path = args.pretrained_model_path
    smallest_ratio = [int(x) for x in args.smallest_ratio.split(',')]
    tfobj = preprocess.TFdata(args, nsamples, ssd_augmenter_vgg.augment)

    if args.minival_ids_file is not None:
        minival_ids = np.loadtxt(args.minival_ids_file, dtype=int).tolist()



    ANCHORS_MAP, NUM_ANCHORS = ssd_common.get_anchors(ANCHORS_STRIDE, ANCHORS_ASPECT_RATIOS, MIN_SIZE_RATIO, MAX_SIZE_RATIO, INPUT_DIM, smallest_ratio)
    #get_sample = mscoco.get_samples_fn()
    graph = tf.Graph()
    with graph.as_default(), tf.device('/cpu:0'):

        dataset = tfobj.create_tfdataset([mscoco], batch_size, args.num_epochs)
        iterator = dataset.make_one_shot_iterator()
        image_id, images, classes, boxes, scale, translation, filename = iterator.get_next()

        split_images = tf.split(images, args.num_gpus)
        split_classes = tf.split(classes, args.num_gpus)
        split_boxes = tf.split(boxes, args.num_gpus)

        pred_classes_array = []
        pred_boxes_array = []

        with tf.variable_scope(tf.get_variable_scope()):
            for i in range(args.num_gpus):
                with tf.name_scope('tower_%d' % (i)), tf.device('/gpu:%d' % i):
                    pred_classes, pred_boxes  = ssd300.net(split_images[i], nclasses, NUM_ANCHORS, False, 'my_vgg3')
                    pred_classes_array.append(pred_classes)
                    pred_boxes_array.append(pred_boxes)
                    tf.get_variable_scope().reuse_variables()


        pred_classes_array = tf.concat(values=pred_classes_array, axis=0)
        pred_boxes_array = tf.concat(values=pred_boxes_array, axis=0)

        topk_scores, topk_labels, topk_bboxes, _ = ssd300.detect(pred_classes_array, pred_boxes_array, ANCHORS_MAP, batch_size, nclasses, args.confidence_threshold)

        global_init = tf.global_variables_initializer()  # initializes all variables in the graph

        variables_to_restore = tf.contrib.framework.get_variables_to_restore()
        #variables_to_restore = [v for v  in tf.trainable_variables()]
        #other_variables = [v for v in tf.global_variables() if (v not in variables_to_restore and 'batch_normalization' in v.name and 'moving' in v.name)]
        #variables_to_restore.extend(other_variables)
        var_load_fn = tf.contrib.framework.assign_from_checkpoint_fn(pretrained_model_path,  variables_to_restore)


    #pdb.set_trace()
    with tf.Session(graph=graph) as sess:
        print('Start counting time ..')
        sess.run(global_init)
        var_load_fn(sess)
        #classes , boxes, split_classes, split_boxes = sess.run([classes,boxes,split_classes,split_boxes])
        nbatches = int(nsamples/batch_size)
        
        all_detections = []
        all_image_ids = []
        for bb in range(nbatches):
            #if bb>0 and (bb%100==0): print('batch = ',bb) 
            image_ids, imgnames, scores, classes, boxes = sess.run([image_id, filename, topk_scores, topk_labels, topk_bboxes])
            
            num_images = len(imgnames)
            for i in range(num_images):
                file_name = imgnames[i][0].decode('ascii') 

                if (args.minival_ids_file is not None) and (image_ids[i][0] not in minival_ids):
                    continue

                # print(file_name)
                num_detections = len(classes[i])

                input_image = np.array(Image.open(file_name))
                h, w = input_image.shape[:2]

                # COCO evaluation is based on per detection
                for d in range(num_detections):
                    box = boxes[i][d]
                    box = box * [float(w), float(h), float(w), float(h)]
                    box[0] = round(np.clip(box[0], 0, w),1)
                    box[1] = round(np.clip(box[1], 0, h),1)
                    box[2] = round(np.clip(box[2], 0, w),1)
                    box[3] = round(np.clip(box[3], 0, h),1)
                    box[2] = round(box[2] - box[0], 1)
                    box[3] = round(box[3] - box[1], 1)
                    result = {
                        "image_id": int(image_ids[i][0]),
                        "category_id": int(mscoco.class_id_to_category_id[classes[i][d]]),
                        "bbox": box.tolist(),
                        "score": float(scores[i][d])
                    }
                    all_detections.append(result)

                all_image_ids.append(image_ids[i][0])

        print('batch = ', bb)
        print('Finished prediction ... ')

        if args.output_file is not None:
            fid = open(args.output_file, 'wt')
            json.dump(all_detections, fid)
            fid.close()

        elif len(all_detections) > 0:
            annotation_file = os.path.join(args.dataset_dir, "annotations","instances_" + args.dataset_meta[0] + ".json") #"instances_" + DATASET_META + ".json")
            coco = COCO(annotation_file)

            coco_results = coco.loadRes(all_detections)

            cocoEval = COCOeval(coco, coco_results, "bbox")
            cocoEval.params.imgIds = all_image_ids
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()
Example #23
def coco_eval(result_files,
              result_types,
              coco,
              max_dets=(100, 300, 1000),
              classwise=False):
    for res_type in result_types:
        assert res_type in [
            'proposal', 'proposal_fast', 'bbox', 'segm', 'keypoints'
        ]

    if mmcv.is_str(coco):
        coco = COCO(coco)
    assert isinstance(coco, COCO)

    if result_types == ['proposal_fast']:
        ar = fast_eval_recall(result_files, coco, np.array(max_dets))
        for i, num in enumerate(max_dets):
            print('AR@{}\t= {:.4f}'.format(num, ar[i]))
        return

    for res_type in result_types:
        if isinstance(result_files, str):
            result_file = result_files
        elif isinstance(result_files, dict):
            result_file = result_files[res_type]
        else:
            raise TypeError('result_files must be a str or dict')
        assert result_file.endswith('.json')

        coco_dets = coco.loadRes(result_file)
        img_ids = coco.getImgIds()
        iou_type = 'bbox' if res_type == 'proposal' else res_type
        cocoEval = COCOeval(coco, coco_dets, iou_type)
        cocoEval.params.imgIds = img_ids
        if res_type == 'proposal':
            cocoEval.params.useCats = 0
            cocoEval.params.maxDets = list(max_dets)
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()

        if classwise:
            # Compute per-category AP
            # from https://github.com/facebookresearch/detectron2/blob/03064eb5bafe4a3e5750cc7a16672daf5afe8435/detectron2/evaluation/coco_evaluation.py#L259-L283 # noqa
            precisions = cocoEval.eval['precision']
            catIds = coco.getCatIds()
            # precision has dims (iou, recall, cls, area range, max dets)
            assert len(catIds) == precisions.shape[2]

            results_per_category = []
            for idx, catId in enumerate(catIds):
                # area range index 0: all area ranges
                # max dets index -1: typically 100 per image
                nm = coco.loadCats(catId)[0]
                precision = precisions[:, :, idx, 0, -1]
                precision = precision[precision > -1]
                ap = np.mean(precision) if precision.size else float('nan')
                results_per_category.append(
                    ('{}'.format(nm['name']),
                     '{:0.3f}'.format(float(ap * 100))))

            N_COLS = min(6, len(results_per_category) * 2)
            results_flatten = list(itertools.chain(*results_per_category))
            headers = ['category', 'AP'] * (N_COLS // 2)
            results_2d = itertools.zip_longest(
                *[results_flatten[i::N_COLS] for i in range(N_COLS)])
            table_data = [headers]
            table_data += [result for result in results_2d]
            table = AsciiTable(table_data)
            print(table.table)
Example #24
dataDir = '..'
resFile = '%s/results/instances_%s_fake%s100_results.json'
resFile = '/home/maheenrashid/Downloads/deep_proposals/coco-master/results/instances_val2014_maheen_results.json'
# resFile = resFile % (dataDir, dataType, annType)
print(len(resFile))
cocoDt = cocoGt.loadRes(resFile)


imgIds = sorted(cocoGt.getImgIds())
imgIds = imgIds[:5000]
print(cocoDt.dataset['annotations'][0], cocoDt.dataset.keys())  # inspect loaded detections
input()  # pause for inspection

# print imgIds

cocoEval = COCOeval(cocoGt, cocoDt)
cocoEval.params.imgIds = imgIds
cocoEval.params.useSegm = 0
cocoEval.params.useCats = 0
cocoEval.params.maxDets = list(range(1000))
cocoEval.params.recThrs = [1]

# COCOeval parameter reference:
#  imgIds     - [all] N img ids to use for evaluation
#  catIds     - [all] K cat ids to use for evaluation
#  iouThrs    - [.5:.05:.95] T=10 IoU thresholds for evaluation
#  recThrs    - [0:.01:1] R=101 recall thresholds for evaluation
#  areaRng    - [...] A=4 object area ranges for evaluation
#  maxDets    - [1 10 100] M=3 thresholds on max detections per image
#  useSegm    - [1] if true evaluate against ground-truth segments
#  useCats    - [1] if true use category labels for evaluation
# Note: if useSegm=0 the evaluation is run on bounding boxes.
# Note: if useCats=0 category labels are ignored as in proposal scoring.
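The parameter notes above map directly onto COCOeval.params. A minimal sketch of proposal-style scoring under those settings, assuming cocoGt and cocoDt are loaded as in the snippet above (in current pycocotools, the iouType argument replaces the deprecated useSegm flag):

import numpy as np

cocoEval = COCOeval(cocoGt, cocoDt, iouType='bbox')
cocoEval.params.useCats = 0             # ignore category labels, as in proposal scoring
cocoEval.params.maxDets = [1, 10, 100]  # thresholds on max detections per image
cocoEval.params.iouThrs = np.linspace(.5, .95, 10)  # default .5:.05:.95
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()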
    def evaluate(self, result_json, cls_ids, image_ids, gt_json=None):
        if self._split == "testdev":
            return None

        coco = self._coco if gt_json is None else COCO(gt_json)

        eval_ids = [self._coco_eval_ids[image_id] for image_id in image_ids]
        cat_ids  = [self._classes[cls_id] for cls_id in cls_ids]

        coco_dets = coco.loadRes(result_json)
        coco_eval = COCOeval(coco, coco_dets, "bbox")
        coco_eval.params.imgIds = eval_ids
        coco_eval.params.catIds = cat_ids
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()
        coco_eval.evaluate_fd()
        coco_eval.accumulate_fd()
        coco_eval.summarize_fd()
        return coco_eval.stats[0], coco_eval.stats[12:]
Example #26
    def evaluate(self, model, half=False, distributed=False):
        """
        COCO average precision (AP) Evaluation. Iterate inference on the test dataset
        and the results are evaluated by COCO API.
        Args:
            model : model object
        Returns:
            ap50_95 (float) : calculated COCO AP for IoU=50:95
            ap50 (float) : calculated COCO AP for IoU=50
        """
        if isinstance(model, apex.parallel.DistributedDataParallel):
            model = model.module

        model = model.eval()
        cuda = torch.cuda.is_available()
        if half:
            Tensor = torch.cuda.HalfTensor if cuda else torch.HalfTensor
        else:
            Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
        ids = []
        data_dict = []
        img_num = 0

        indices = list(range(self.num_images))
        if distributed:
            dis_indices = indices[distributed_util.get_rank()::distributed_util.get_world_size()]
        else:
            dis_indices = indices
        progress_bar = tqdm if distributed_util.is_main_process() else iter
        num_classes = 80 if not self.voc else 20

        inference_time = 0
        nms_time = 0
        n_samples = len(dis_indices) - 10

        for k, i in enumerate(progress_bar(dis_indices)):
            img, _, info_img, id_ = self.dataset[i]  # load a batch
            info_img = [float(info) for info in info_img]
            id_ = int(id_)
            ids.append(id_)
            with torch.no_grad():
                img = Variable(img.type(Tensor).unsqueeze(0))
                if k > 9:
                    start = time.time()

                if self.vis:
                    outputs,fuse_weights,fused_f = model(img)
                else:
                    outputs = model(img)

                if k > 9:
                    infer_end = time.time()
                    inference_time += (infer_end - start)

                outputs = postprocess(
                    outputs, num_classes, self.confthre, self.nmsthre)

                if k > 9:
                    nms_end = time.time()
                    nms_time += (nms_end - infer_end)

                if outputs[0] is None:
                    continue
                outputs = outputs[0].cpu().data

            bboxes = outputs[:, 0:4]
            bboxes[:, 0::2] *= info_img[0] / self.img_size[0]
            bboxes[:, 1::2] *= info_img[1] / self.img_size[1]
            bboxes[:, 2] = bboxes[:, 2] - bboxes[:, 0]
            bboxes[:, 3] = bboxes[:, 3] - bboxes[:, 1]
            cls = outputs[:, 6]
            scores = outputs[:, 4] * outputs[:, 5]
            for i in range(bboxes.shape[0]):
                label = self.dataset.class_ids[int(cls[i])]
                A = {"image_id": id_, "category_id": label, "bbox": bboxes[i].numpy().tolist(),
                 "score": scores[i].numpy().item(), "segmentation": []} # COCO json format
                data_dict.append(A)
            
            if self.vis:
                o_img,_,_,_  = self.dataset.pull_item(i)
                make_vis('COCO', i, o_img, fuse_weights, fused_f)
                class_names = self.dataset._classes
                make_pred_vis('COCO', i, o_img, class_names, bboxes, cls, scores)

            if DEBUG and distributed_util.is_main_process():
                o_img,_  = self.dataset.pull_item(i)
                class_names = self.dataset._classes
                make_pred_vis('COCO', i, o_img, class_names, bboxes, cls, scores)

        if distributed:
            distributed_util.synchronize()
            data_dict = _accumulate_predictions_from_multiple_gpus(data_dict)
            inference_time = torch.FloatTensor(1).type(Tensor).fill_(inference_time)
            nms_time = torch.FloatTensor(1).type(Tensor).fill_(nms_time)
            n_samples = torch.LongTensor(1).type(Tensor).fill_(n_samples)
            distributed_util.synchronize()
            torch.distributed.reduce(inference_time, dst=0)
            torch.distributed.reduce(nms_time, dst=0)
            torch.distributed.reduce(n_samples, dst=0)
            inference_time = inference_time.item()
            nms_time = nms_time.item()
            n_samples = n_samples.item()

        if not distributed_util.is_main_process():
            return 0, 0


        print('Main process Evaluating...')

        annType = ['segm', 'bbox', 'keypoints']
        a_infer_time = 1000 * inference_time / n_samples
        a_nms_time = 1000 * nms_time / n_samples

        print('Average forward time: %.2f ms, Average NMS time: %.2f ms, Average inference time: %.2f ms' %(a_infer_time, \
                a_nms_time, (a_infer_time+a_nms_time)))

        # Evaluate the Dt (detection) json comparing with the ground truth
        if len(data_dict) > 0:
            cocoGt = self.dataset.coco
            # workaround: temporarily write data to json file because pycocotools can't process dict in py36.
            if self.testset:
                json.dump(data_dict, open('yolov3_2017.json', 'w'))
                cocoDt = cocoGt.loadRes('yolov3_2017.json')
            else:
                _, tmp = tempfile.mkstemp()
                json.dump(data_dict, open(tmp, 'w'))
                cocoDt = cocoGt.loadRes(tmp)
            cocoEval = COCOeval(self.dataset.coco, cocoDt, annType[1])
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()
            return cocoEval.stats[0], cocoEval.stats[1]
        else:
            return 0, 0
Example #27
def evaluate_coco(model, dataset, coco, eval_type="bbox", limit=0, image_ids=None):
    """Runs official COCO evaluation.
    dataset: A Dataset object with validation data
    eval_type: "bbox" or "segm" for bounding box or segmentation evaluation
    limit: if not 0, it's the number of images to use for evaluation
    """
    # Pick COCO images from the dataset
    image_ids = image_ids or dataset.image_ids

    # Limit to a subset
    if limit:
        image_ids = image_ids[:limit]

    # Get corresponding COCO image IDs.
    print(len(image_ids))
    coco_image_ids = [dataset.image_info[id]["id"] for id in image_ids]
    kaggle_image_ids = [dataset.image_info[id]["path"].split('/')[-1].split('.')[0] for id in image_ids]
    
    t_prediction = 0
    t_start = time.time()

    rs, results = [], []
    prev_image = None
    for i, image_id in enumerate(image_ids):
        # Load image
        image = dataset.load_image(image_id)

        # Run detection
        t = time.time()
        if prev_image is None:
            prev_image = image
        r = model.detect([image, prev_image])[0]
        prev_image = image
        t_prediction += (time.time() - t)
        # rs.append(r)
        # create submission one by one
        print("detecting image with image_id = {} and kaggle_id = {}".format(image_id, kaggle_image_ids[i]))
        kaggle_results = transform_coco.transform_coco_results([copy.deepcopy(r)])
        transform_coco.create_submit_csv([kaggle_image_ids[i]], kaggle_results)

        # Convert results to COCO format
        image_results = build_coco_results(dataset, coco_image_ids[i:i + 1],
                                           r["rois"], r["class_ids"],
                                           r["scores"], r["masks"])
        results.extend(image_results)
        class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
               'bus', 'train', 'truck', 'boat', 'traffic light',
               'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
               'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
               'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
               'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
               'kite', 'baseball bat', 'baseball glove', 'skateboard',
               'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
               'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
               'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
               'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
               'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
               'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
               'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
               'teddy bear', 'hair drier', 'toothbrush']
        # print("{} {} {}".format(r['rois'].shape[0], r['masks'].shape[-1], r['class_ids'].shape[0]))
        visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
                            class_names, r['scores'])
        plt.show()
        plt.savefig('./coco2017_data/' + str(kaggle_image_ids[i]) + '.png')
    
    # Create submission for kaggle
    # kaggle_results = transform_coco.transform_coco_results(rs)
    # transform_coco.create_submit_csv(kaggle_image_ids, kaggle_results)

    # Load results. This modifies results with additional attributes.
    coco_results = coco.loadRes(results)

    # Evaluate
    cocoEval = COCOeval(coco, coco_results, "bbox")
    cocoEval.params.imgIds = coco_image_ids
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()

    cocoEval = COCOeval(coco, coco_results, "segm")
    cocoEval.params.imgIds = coco_image_ids
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()

    print("Prediction time: {}. Average {}/image".format(
        t_prediction, t_prediction / len(image_ids)))
    print("Total time: ", time.time() - t_start)
Example #28
def benchmark_model(frozen_graph,
                    images_dir,
                    annotation_path,
                    batch_size=1,
                    image_shape=None,
                    num_images=4096,
                    tmp_dir='.benchmark_model_tmp_dir',
                    remove_tmp_dir=True,
                    output_path=None,
                    display_every=100,
                    use_synthetic=False,
                    num_warmup_iterations=50):
    """Computes accuracy and performance statistics

    Computes accuracy and performance statistics by executing over many images
    from the MSCOCO dataset defined by images_dir and annotation_path.

    Args
    ----
        frozen_graph: A GraphDef representing the object detection model to
            test.  Alternatively, a string representing the path to the saved
            frozen graph.
        images_dir: A string representing the path of the COCO images
            directory.
        annotation_path: A string representing the path of the COCO annotation
            file.
        batch_size: An integer representing the batch size to use when feeding
            images to the model.
        image_shape: An optional tuple of integers representing a fixed shape
            to resize all images before testing. For synthetic data the default
            image_shape is [600, 600, 3]
        num_images: An integer representing the number of images in the
            dataset to evaluate with.
        tmp_dir: A string representing the path where the function may create
            a temporary directory to store intermediate files.
        output_path: An optional string representing a path to store the
            statistics in JSON format.
        display_every: int, print log every display_every iteration
        num_warmup_iterations: An integer representing the number of initial
            iterations that are not covered by the performance statistics
    Returns
    -------
        statistics: A named dictionary of accuracy and performance statistics
        computed for the model.
    """
    if os.path.exists(tmp_dir):
        if not remove_tmp_dir:
            raise RuntimeError('Temporary directory exists; %s' % tmp_dir)
        subprocess.call(['rm', '-rf', tmp_dir])
    if batch_size > 1 and image_shape is None:
        raise RuntimeError(
            'Fixed image shape must be provided for batch size > 1')

    if not use_synthetic:
        coco = COCO(annotation_file=annotation_path)

        # get list of image ids to use for evaluation
        image_ids = coco.getImgIds()
        if num_images > len(image_ids):
            print(
                'Num images provided %d exceeds number in dataset %d, using %d images instead'
                % (num_images, len(image_ids), len(image_ids)))
            num_images = len(image_ids)
        image_ids = image_ids[0:num_images]

    # load frozen graph from file if string, otherwise must be GraphDef
    if isinstance(frozen_graph, str):
        frozen_graph_path = frozen_graph
        frozen_graph = tf.GraphDef()
        with open(frozen_graph_path, 'rb') as f:
            frozen_graph.ParseFromString(f.read())
    elif not isinstance(frozen_graph, tf.GraphDef):
        raise TypeError('Expected frozen_graph to be GraphDef or str')

    tf_config = tf.ConfigProto()
    tf_config.gpu_options.allow_growth = True

    coco_detections = []  # list of all bounding box detections in coco format
    runtimes = []  # list of runtimes for each batch
    image_counts = []  # list of number of images in each batch

    with tf.Graph().as_default() as tf_graph:
        with tf.Session(config=tf_config) as tf_sess:
            tf.import_graph_def(frozen_graph, name='')
            tf_input = tf_graph.get_tensor_by_name(INPUT_NAME + ':0')
            tf_boxes = tf_graph.get_tensor_by_name(BOXES_NAME + ':0')
            tf_classes = tf_graph.get_tensor_by_name(CLASSES_NAME + ':0')
            tf_scores = tf_graph.get_tensor_by_name(SCORES_NAME + ':0')
            tf_num_detections = tf_graph.get_tensor_by_name(
                NUM_DETECTIONS_NAME + ':0')

            # load batches from coco dataset
            for image_idx in range(0, num_images, batch_size):
                if use_synthetic:
                    if image_shape is None:
                        batch_images = np.random.randint(0,
                                                         256,
                                                         size=(batch_size, 600,
                                                               600, 3))
                    else:
                        batch_images = np.random.randint(
                            0,
                            256,
                            size=(batch_size, image_shape[0], image_shape[1],
                                  3))
                else:
                    batch_image_ids = image_ids[image_idx:image_idx +
                                                batch_size]
                    batch_images = []
                    batch_coco_images = []
                    # read images from file
                    for image_id in batch_image_ids:
                        coco_img = coco.imgs[image_id]
                        batch_coco_images.append(coco_img)
                        image_path = os.path.join(images_dir,
                                                  coco_img['file_name'])
                        image = _read_image(image_path, image_shape)
                        batch_images.append(image)

                # run num_warmup_iterations outside of timing
                if image_idx < num_warmup_iterations:
                    boxes, classes, scores, num_detections = tf_sess.run(
                        [tf_boxes, tf_classes, tf_scores, tf_num_detections],
                        feed_dict={tf_input: batch_images})
                else:
                    # execute model and compute time difference
                    t0 = time.time()
                    boxes, classes, scores, num_detections = tf_sess.run(
                        [tf_boxes, tf_classes, tf_scores, tf_num_detections],
                        feed_dict={tf_input: batch_images})
                    t1 = time.time()

                    # log runtime and image count
                    runtimes.append(float(t1 - t0))
                    if len(runtimes) % display_every == 0:
                        print("    step %d/%d, iter_time(ms)=%.4f" %
                              (len(runtimes),
                               (num_images + batch_size - 1) / batch_size,
                               np.mean(runtimes) * 1000))
                    image_counts.append(len(batch_images))

                if not use_synthetic:
                    # add coco detections for this batch to running list
                    batch_coco_detections = []
                    for i, image_id in enumerate(batch_image_ids):
                        image_width = batch_coco_images[i]['width']
                        image_height = batch_coco_images[i]['height']

                        for j in range(int(num_detections[i])):
                            bbox = boxes[i][j]
                            bbox_coco_fmt = [
                                bbox[1] * image_width,  # x0
                                bbox[0] * image_height,  # y0
                                (bbox[3] - bbox[1]) * image_width,  # width
                                (bbox[2] - bbox[0]) * image_height,  # height
                            ]

                            coco_detection = {
                                'image_id': image_id,
                                'category_id': int(classes[i][j]),
                                'bbox': bbox_coco_fmt,
                                'score': float(scores[i][j])
                            }

                            coco_detections.append(coco_detection)

    if not use_synthetic:
        # write coco detections to file
        subprocess.call(['mkdir', '-p', tmp_dir])
        coco_detections_path = os.path.join(tmp_dir, 'coco_detections.json')
        with open(coco_detections_path, 'w') as f:
            json.dump(coco_detections, f)

        # compute coco metrics
        cocoDt = coco.loadRes(coco_detections_path)
        eval = COCOeval(coco, cocoDt, 'bbox')
        eval.params.imgIds = image_ids

        eval.evaluate()
        eval.accumulate()
        eval.summarize()

        statistics = {
            'map': eval.stats[0],
            'avg_latency_ms': 1000.0 * np.mean(runtimes),
            'avg_throughput_fps': np.sum(image_counts) / np.sum(runtimes),
            'runtimes_ms': [1000.0 * r for r in runtimes]
        }
    else:
        statistics = {
            'avg_latency_ms': 1000.0 * np.mean(runtimes),
            'avg_throughput_fps': np.sum(image_counts) / np.sum(runtimes),
            'runtimes_ms': [1000.0 * r for r in runtimes]
        }

    if output_path is not None:
        subprocess.call(['mkdir', '-p', os.path.dirname(output_path)])
        with open(output_path, 'w') as f:
            json.dump(statistics, f)
    subprocess.call(['rm', '-rf', tmp_dir])

    return statistics
Example #29
    from pycocotools.cocoeval import COCOeval
    from easydict import EasyDict as edict
    from apis import get_detector
    from tqdm import tqdm
    import json

    opt = edict()
    _coco = COCO(sys.argv[1] + '/annotations/instances_val2017.json')
    # _coco = COCO(sys.argv[1]+'/annotations/person_keypoints_val2017.json')
    opt.detector = sys.argv[2]
    opt.gpus = [0] if torch.cuda.device_count() >= 1 else [-1]
    opt.device = torch.device("cuda:" +
                              str(opt.gpus[0]) if opt.gpus[0] >= 0 else "cpu")
    image_ids = sorted(_coco.getImgIds())
    det_model = get_detector(opt)
    dets = []
    for entry in tqdm(_coco.loadImgs(image_ids)):
        abs_path = os.path.join(sys.argv[1], 'val2017', entry['file_name'])
        det = det_model.check_detector(abs_path)
        if det:
            dets += det
    result_file = 'results.json'
    json.dump(dets, open(result_file, 'w'))

    coco_results = _coco.loadRes(result_file)
    coco_eval = COCOeval(_coco, coco_results, 'bbox')
    coco_eval.params.imgIds = image_ids  # score only ids we've used
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
Example #30
    def evaluate(self, results, save_dir, rank=-1):
        results_json = self.results2json(results)
        if len(results_json) == 0:
            warnings.warn(
                "Detection result is empty! Please check whether "
                "training set is too small (need to increase val_interval "
                "in config and train more epochs). Or check annotation "
                "correctness."
            )
            empty_eval_results = {}
            for key in self.metric_names:
                empty_eval_results[key] = 0
            return empty_eval_results
        json_path = os.path.join(save_dir, "results{}.json".format(rank))
        json.dump(results_json, open(json_path, "w"))
        coco_dets = self.coco_api.loadRes(json_path)
        coco_eval = COCOeval(
            copy.deepcopy(self.coco_api), copy.deepcopy(coco_dets), "bbox"
        )
        coco_eval.evaluate()
        coco_eval.accumulate()

        # use logger to log coco eval results
        redirect_string = io.StringIO()
        with contextlib.redirect_stdout(redirect_string):
            coco_eval.summarize()
        logger.info("\n" + redirect_string.getvalue())

        # print per class AP
        headers = ["class", "AP50", "mAP"]
        columns = 6
        per_class_ap50s = []
        per_class_maps = []
        precisions = coco_eval.eval["precision"]
        # dimension of precisions: [TxRxKxAxM]
        # precision has dims (iou, recall, cls, area range, max dets)
        assert len(self.class_names) == precisions.shape[2]

        for idx, name in enumerate(self.class_names):
            # area range index 0: all area ranges
            # max dets index -1: typically 100 per image
            precision_50 = precisions[0, :, idx, 0, -1]
            precision_50 = precision_50[precision_50 > -1]
            ap50 = np.mean(precision_50) if precision_50.size else float("nan")
            per_class_ap50s.append(float(ap50 * 100))

            precision = precisions[:, :, idx, 0, -1]
            precision = precision[precision > -1]
            ap = np.mean(precision) if precision.size else float("nan")
            per_class_maps.append(float(ap * 100))

        num_cols = min(columns, len(self.class_names) * len(headers))
        flatten_results = []
        for name, ap50, mAP in zip(self.class_names, per_class_ap50s, per_class_maps):
            flatten_results += [name, ap50, mAP]

        row_pair = itertools.zip_longest(
            *[flatten_results[i::num_cols] for i in range(num_cols)]
        )
        table_headers = headers * (num_cols // len(headers))
        table = tabulate(
            row_pair,
            tablefmt="pipe",
            floatfmt=".1f",
            headers=table_headers,
            numalign="left",
        )
        logger.info("\n" + table)

        aps = coco_eval.stats[:6]
        eval_results = {}
        for k, v in zip(self.metric_names, aps):
            eval_results[k] = v
        return eval_results
Example #31
            image = Image.open(image_path)

            box_thre, class_thre, class_ids, masks_arg, masks_sigmoid = yolact.get_map_out(
                image)
            if box_thre is None:
                continue
            prep_metrics(box_thre, class_thre, class_ids, masks_sigmoid, id,
                         make_json)
        make_json.dump()
        print(
            f'\nJson files dumped, saved in: \'eval_results/\', start evaluating.'
        )

    if map_mode == 0 or map_mode == 2:
        bbox_dets = test_coco.loadRes(
            osp.join(map_out_path, "bbox_detections.json"))
        mask_dets = test_coco.loadRes(
            osp.join(map_out_path, "mask_detections.json"))

        print('\nEvaluating BBoxes:')
        bbox_eval = COCOeval(test_coco, bbox_dets, 'bbox')
        bbox_eval.evaluate()
        bbox_eval.accumulate()
        bbox_eval.summarize()

        print('\nEvaluating Masks:')
        mask_eval = COCOeval(test_coco, mask_dets, 'segm')
        mask_eval.evaluate()
        mask_eval.accumulate()
        mask_eval.summarize()
Example #32
    def evaluate(self, log_level=tf.compat.v1.logging.INFO):
        """Evaluates with detections from all images with COCO API.

    Args:
      log_level: Logging level to print logs.
    Returns:
      coco_metric: float numpy array with shape [12] representing the
        coco-style evaluation metrics.
    Raises:
      ImportError: if the pip package `pycocotools` is not installed.
    """
        if COCO is None or COCOeval is None:
            message = (
                'You must install pycocotools (`pip install pycocotools`) '
                '(see github repo at https://github.com/cocodataset/cocoapi) '
                'for efficientdet/coco_metric to work.')
            raise ImportError(message)

        original_stdout = sys.stdout
        block_print(log_level)
        if self.filename:
            coco_gt = COCO(self.filename)
        else:
            coco_gt = COCO()
            coco_gt.dataset = self.dataset
            coco_gt.createIndex()
        enable_print(original_stdout)

        if self.testdev_dir:
            # Run on test-dev dataset.
            box_result_list = []
            for det in self.detections:
                box_result_list.append({
                    'image_id':
                    int(det[0]),
                    'category_id':
                    int(det[6]),
                    'bbox':
                    np.around(det[1:5].astype(np.float64),
                              decimals=2).tolist(),
                    'score':
                    float(np.around(det[5], decimals=3)),
                })
            json.encoder.FLOAT_REPR = lambda o: format(o, '.3f')
            # Must be in the format of 'detections_test-dev2017_xxx_results'.
            fname = 'detections_test-dev2017_test_results'
            output_path = os.path.join(self.testdev_dir, fname + '.json')
            logging.info('Writing output json file to: %s', output_path)
            with tf.io.gfile.GFile(output_path, 'w') as fid:
                json.dump(box_result_list, fid)
            return np.array([-1.], dtype=np.float32)
        else:
            # Run on validation dataset.
            block_print(log_level)
            detections = np.array(self.detections)
            image_ids = list(set(detections[:, 0]))
            coco_dt = coco_gt.loadRes(detections)
            coco_eval = COCOeval(coco_gt, coco_dt, iouType='bbox')
            coco_eval.params.imgIds = image_ids
            coco_eval.evaluate()
            coco_eval.accumulate()
            coco_eval.summarize()
            enable_print(original_stdout)
            coco_metrics = coco_eval.stats

            if self.label_map:
                # Get per_class AP, see pycocotools/cocoeval.py:334
                # TxRxKxAxM: iouThrs x recThrs x catIds x areaRng x maxDets
                # Use areaRng_id=0 ('all') and maxDets_id=-1 (200) in default
                precision = coco_eval.eval['precision'][:, :, :, 0, -1]
                # Ideally, label_map should match the eval set, but it is possible that
                # some classes have no data in the eval set.
                ap_perclass = [0] * max(precision.shape[-1], len(
                    self.label_map))
                for c in range(
                        precision.shape[-1]):  # iterate over all classes
                    precision_c = precision[:, :, c]
                    # Only consider values if > -1.
                    precision_c = precision_c[precision_c > -1]
                    ap_c = np.mean(precision_c) if precision_c.size else -1.
                    ap_perclass[c] = ap_c
                coco_metrics = np.concatenate((coco_metrics, ap_perclass))

            # Return the concat normal and per-class AP.
            return np.array(coco_metrics, dtype=np.float32)
Example #33
def validate(args):
    # might as well try to validate something
    args.pretrained = args.pretrained or not args.checkpoint
    args.prefetcher = not args.no_prefetcher
    args.redundant_bias = not args.no_redundant_bias

    # create model
    config = get_efficientdet_config(args.model)
    config.redundant_bias = args.redundant_bias
    model = EfficientDet(config)
    if args.checkpoint:
        load_checkpoint(model, args.checkpoint)

    param_count = sum([m.numel() for m in model.parameters()])
    print('Model %s created, param count: %d' % (args.model, param_count))

    bench = DetBenchEval(model, config)
    bench = bench.cuda()
    if has_amp:
        print('Using AMP mixed precision.')
        bench = amp.initialize(bench, opt_level='O1')
    else:
        print('AMP not installed, running network in FP32.')

    if args.num_gpu > 1:
        bench = torch.nn.DataParallel(bench,
                                      device_ids=list(range(args.num_gpu)))

    if 'test' in args.anno:
        annotation_path = os.path.join(args.data, 'annotations',
                                       f'image_info_{args.anno}.json')
        image_dir = 'test2017'
    else:
        annotation_path = os.path.join(args.data, 'annotations',
                                       f'instances_{args.anno}.json')
        image_dir = args.anno
    dataset = CocoDetection(os.path.join(args.data, image_dir),
                            annotation_path)

    loader = create_loader(dataset,
                           input_size=config.image_size,
                           batch_size=args.batch_size,
                           use_prefetcher=args.prefetcher,
                           interpolation=args.interpolation,
                           fill_color=args.fill_color,
                           num_workers=args.workers,
                           pin_mem=args.pin_mem)

    img_ids = []
    results = []
    model.eval()
    batch_time = AverageMeter()
    end = time.time()
    with torch.no_grad():
        for i, (input, target) in enumerate(loader):
            output = bench(input, target['scale'])
            output = output.cpu()
            sample_ids = target['img_id'].cpu()
            for index, sample in enumerate(output):
                image_id = int(sample_ids[index])
                for det in sample:
                    score = float(det[4])
                    if score < .001:  # stop when below this threshold, scores in descending order
                        break
                    coco_det = dict(image_id=image_id,
                                    bbox=det[0:4].tolist(),
                                    score=score,
                                    category_id=int(det[5]))
                    img_ids.append(image_id)
                    results.append(coco_det)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.log_freq == 0:
                print(
                    'Test: [{0:>4d}/{1}]  '
                    'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s)  '
                    .format(
                        i,
                        len(loader),
                        batch_time=batch_time,
                        rate_avg=input.size(0) / batch_time.avg,
                    ))

    json.dump(results, open(args.results, 'w'), indent=4)
    if 'test' not in args.anno:
        coco_results = dataset.coco.loadRes(args.results)
        coco_eval = COCOeval(dataset.coco, coco_results, 'bbox')
        coco_eval.params.imgIds = img_ids  # score only ids we've used
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()

    return results
from fast_rcnn.nms_wrapper import nms
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import numpy as np
import skimage.io as io
import pylab


if __name__ == '__main__':


	pylab.rcParams['figure.figsize'] = (10.0, 8.0)

	annType = 'bbox'

	ground_truth = '/mnt/d/BigData/COCO/instances_train-val2014/annotations/instances_val2014.json' 
	generated_result = '/mnt/c/Users/Lavenger/git/py-faster-rcnn/tools/result.json'

	cocoGt = COCO(ground_truth)

	cocoDt = cocoGt.loadRes(generated_result)

	imgIds = sorted(cocoGt.getImgIds())

	cocoEval = COCOeval(cocoGt,cocoDt)
	cocoEval.params.imgIds = imgIds
	cocoEval.params.useSegm = False
	cocoEval.evaluate()
	cocoEval.accumulate()
	cocoEval.summarize()


Example #35
0
    def evaluate(self, model):
        """
        COCO average precision (AP) evaluation. Runs inference over the test
        dataset and evaluates the results with the COCO API.
        Args:
            model : model object
        Returns:
            ap50_95 (float) : calculated COCO AP for IoU=50:95
            ap50 (float) : calculated COCO AP for IoU=50
        """
        model.eval()
        cuda = torch.cuda.is_available()
        Tensor = torch.cuda.FloatTensor if cuda else torch.FloatTensor
        ids = []
        data_dict = []
        dataiterator = iter(self.dataloader)
        while True:  # all the data in val2017
            try:
                img, _, info_img, id_ = next(dataiterator)  # load a batch
            except StopIteration:
                break
            info_img = [float(info) for info in info_img]
            id_ = int(id_)
            ids.append(id_)
            with torch.no_grad():
                img = Variable(img.type(Tensor))
                _, outputs = model(img)
                outputs = outputs.unsqueeze(0)
                outputs = postprocess(outputs, 80, self.confthre, self.nmsthre)
                if outputs[0] is None:
                    continue
                outputs = outputs[0].cpu().data

            for output in outputs:
                x1 = float(output[0])
                y1 = float(output[1])
                x2 = float(output[2])
                y2 = float(output[3])
                label = self.dataset.class_ids[int(output[6])]
                box = yolobox2label((y1, x1, y2, x2), info_img)
                bbox = [box[1], box[0], box[3] - box[1], box[2] - box[0]]
                score = float(
                    output[4].data.item() * output[5].data.item()
                )  # object score * class score
                A = {
                    "image_id": id_,
                    "category_id": label,
                    "bbox": bbox,
                    "score": score,
                    "segmentation": [],
                }  # COCO json format
                data_dict.append(A)

        annType = ["segm", "bbox", "keypoints"]

        # Evaluate the Dt (detection) json comparing with the ground truth
        if len(data_dict) > 0:
            cocoGt = self.dataset.coco
            # workaround: temporarily write data to json file because pycocotools can't process dict in py36.
            _, tmp = tempfile.mkstemp()
            json.dump(data_dict, open(tmp, "w"))
            cocoDt = cocoGt.loadRes(tmp)
            cocoEval = COCOeval(self.dataset.coco, cocoDt, annType[1])
            cocoEval.params.imgIds = ids
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()
            return cocoEval.stats[0], cocoEval.stats[1]
        else:
            return 0, 0
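The per-detection dicts appended above follow the COCO results format, where bbox is [x, y, width, height] in absolute pixels. A minimal conversion sketch from corner coordinates (all names here are hypothetical, not from the snippet above):

def to_coco_result(image_id, category_id, xyxy, score):
    # COCO bbox format is [x, y, w, h]; xyxy is (x1, y1, x2, y2)
    x1, y1, x2, y2 = xyxy
    return {
        "image_id": int(image_id),
        "category_id": int(category_id),
        "bbox": [float(x1), float(y1), float(x2 - x1), float(y2 - y1)],
        "score": float(score),
    }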
Example #36
0
def print_evaluation_scores(json_file):
    ret = {}
    assert config.BASEDIR and os.path.isdir(config.BASEDIR)
    annofile = os.path.join(
        config.BASEDIR, 'annotations',
        'instances_{}.json'.format(config.VAL_DATASET))
    coco = COCO(annofile)
    cocoDt = coco.loadRes(json_file)
    cocoEval = COCOeval(coco, cocoDt, 'bbox')
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
    ret['mAP(bbox)'] = cocoEval.stats[0]

    if config.MODE_MASK:
        cocoEval = COCOeval(coco, cocoDt, 'segm')
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        ret['mAP(segm)'] = cocoEval.stats[0]
    return ret
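For reference, cocoEval.stats for bbox/segm evaluation is always a 12-vector, which is why stats[0] above is the headline mAP:

# cocoEval.stats layout (bbox/segm):
# [0] AP@IoU=0.5:0.95   [1] AP@IoU=0.5     [2] AP@IoU=0.75
# [3] AP (small)        [4] AP (medium)    [5] AP (large)
# [6] AR@maxDets=1      [7] AR@maxDets=10  [8] AR@maxDets=100
# [9] AR (small)        [10] AR (medium)   [11] AR (large)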
Example #37
0
def print_evaluation_scores(json_file):
    ret = {}
    assert cfg.DATA.BASEDIR and os.path.isdir(cfg.DATA.BASEDIR)
    annofile = os.path.join(
        cfg.DATA.BASEDIR, 'annotations',
        'instances_{}.json'.format(cfg.DATA.VAL))
    coco = COCO(annofile)
    cocoDt = coco.loadRes(json_file)
    cocoEval = COCOeval(coco, cocoDt, 'bbox')
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
    fields = ['IoU=0.5:0.95', 'IoU=0.5', 'IoU=0.75', 'small', 'medium', 'large']
    for k in range(6):
        ret['mAP(bbox)/' + fields[k]] = cocoEval.stats[k]

    if cfg.MODE_MASK:
        cocoEval = COCOeval(coco, cocoDt, 'segm')
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        for k in range(6):
            ret['mAP(segm)/' + fields[k]] = cocoEval.stats[k]
    return ret
Example #38
0
    arg("-g", "--gt_path", type=str, help="Path to the json file with predictions.", required=True)

    args = parser.parse_args()

    coco = COCO(args.gt_path)

    pred_coco = coco.loadRes(args.pred_path)

    categories = coco.cats

    print("-------------------------------------------------------------------------------")
    print("CATEGORIES:")
    print(categories)

    print("-------------------------------------------------------------------------------")

    coco_eval = COCOeval(cocoGt=coco, cocoDt=pred_coco, iouType="bbox")

    print("ALL CLASSES :")

    print_results(coco_eval)

    for value in categories.values():
        category_id = value["id"]
        class_name = value["name"]
        print("-------------------------------------------------------------------------------")
        print("CLASS_NAME = ", class_name)

        coco_eval.params.catIds = category_id
        print_results(coco_eval)
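print_results is defined outside this excerpt; a plausible minimal sketch (an assumption, not the original helper) just re-runs the three evaluation steps with whatever params are currently set, so that mutating coco_eval.params.catIds in the loop above takes effect on each call:

def print_results(coco_eval):
    # hypothetical helper: evaluate with the current coco_eval.params
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()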
Example #39
0
def evaluate_coco(generator, model, threshold=0.05):
    """ Use the pycocotools to evaluate a COCO model on a dataset.

    Args
        generator : The generator for generating the evaluation data.
        model     : The model to evaluate.
        threshold : The score threshold to use.
    """
    # start collecting results
    results = []
    image_ids = []
    for index in progressbar.progressbar(range(generator.size()), prefix='COCO evaluation: '):
        image = generator.load_image(index)
        image = generator.preprocess_image(image)
        image, scale = generator.resize_image(image)

        if keras.backend.image_data_format() == 'channels_first':
            image = image.transpose((2, 0, 1))

        # run network
        boxes, scores, labels = model.predict_on_batch(np.expand_dims(image, axis=0))

        # correct boxes for image scale
        boxes /= scale

        # change to (x, y, w, h) (MS COCO standard)
        boxes[:, :, 2] -= boxes[:, :, 0]
        boxes[:, :, 3] -= boxes[:, :, 1]

        # compute predicted labels and scores
        for box, score, label in zip(boxes[0], scores[0], labels[0]):
            # scores are sorted, so we can break
            if score < threshold:
                break

            # append detection for each positively labeled class
            image_result = {
                'image_id'    : generator.image_ids[index],
                'category_id' : generator.label_to_coco_label(label),
                'score'       : float(score),
                'bbox'        : box.tolist(),
            }

            # append detection to results
            results.append(image_result)

        # append image to list of processed images
        image_ids.append(generator.image_ids[index])

    if not len(results):
        return

    # write output
    json.dump(results, open('{}_bbox_results.json'.format(generator.set_name), 'w'), indent=4)
    json.dump(image_ids, open('{}_processed_image_ids.json'.format(generator.set_name), 'w'), indent=4)

    # load results in COCO evaluation tool
    coco_true = generator.coco
    coco_pred = coco_true.loadRes('{}_bbox_results.json'.format(generator.set_name))

    # run COCO evaluation
    coco_eval = COCOeval(coco_true, coco_pred, 'bbox')
    coco_eval.params.imgIds = image_ids
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    return coco_eval.stats
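A hedged usage sketch for the function above (generator and model follow the keras-retinanet conventions it assumes; note it returns None when nothing scores above the threshold):

stats = evaluate_coco(generator, model, threshold=0.05)
if stats is not None:
    print('AP@0.5:0.95 = %.3f' % stats[0])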
Example #40
0
    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix=None,
                 classwise=False,
                 proposal_nums=(100, 300, 1000),
                 iou_thrs=None,
                 metric_items=None):
        """Evaluation in COCO protocol.

        Args:
            results (list[list | tuple]): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated. Options are
                'bbox', 'segm', 'proposal', 'proposal_fast'.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            classwise (bool): Whether to evaluate the AP for each class.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thrs (Sequence[float], optional): IoU threshold used for
                evaluating recalls/mAPs. If set to a list, the average of all
                IoUs will also be computed. If not specified, [0.50, 0.55,
                0.60, 0.65, 0.70, 0.75, 0.80, 0.85, 0.90, 0.95] will be used.
                Default: None.
            metric_items (list[str] | str, optional): Metric items that will
                be returned. If not specified, ``['AR@100', 'AR@300',
                'AR@1000', 'AR_s@1000', 'AR_m@1000', 'AR_l@1000' ]`` will be
                used when ``metric=='proposal'``, ``['mAP', 'mAP_50', 'mAP_75',
                'mAP_s', 'mAP_m', 'mAP_l']`` will be used when
                ``metric=='bbox' or metric=='segm'``.

        Returns:
            dict[str, float]: COCO style evaluation metric.
        """

        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = [
            'bbox', 'segm', 'proposal', 'proposal_fast', 'pixel'
        ]
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError(f'metric {metric} is not supported')
        if iou_thrs is None:
            iou_thrs = np.linspace(.5,
                                   0.95,
                                   int(np.round((0.95 - .5) / .05)) + 1,
                                   endpoint=True)
        if metric_items is not None:
            if not isinstance(metric_items, list):
                metric_items = [metric_items]

        result_files, tmp_dir = self.format_results(results, jsonfile_prefix)

        eval_results = OrderedDict()
        cocoGt = self.coco
        for metric in metrics:
            msg = f'Evaluating {metric}...'
            if logger is None:
                msg = '\n' + msg
            print_log(msg, logger=logger)

            if metric == 'proposal_fast':
                ar = self.fast_eval_recall(results,
                                           proposal_nums,
                                           iou_thrs,
                                           logger='silent')
                log_msg = []
                for i, num in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
                    log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
                log_msg = ''.join(log_msg)
                print_log(log_msg, logger=logger)
                continue

            if metric not in result_files:
                raise KeyError(f'{metric} is not in results')
            try:
                cocoDt = cocoGt.loadRes(result_files[metric])
            except IndexError:
                print_log('The testing results of the whole dataset are empty.',
                          logger=logger,
                          level=logging.ERROR)
                break

            if metric == 'pixel':
                iou_type = 'segm'
            else:
                iou_type = 'bbox' if metric == 'proposal' else metric
            cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
            cocoEval.params.catIds = self.cat_ids
            cocoEval.params.imgIds = self.img_ids
            cocoEval.params.maxDets = list(proposal_nums)
            cocoEval.params.iouThrs = iou_thrs
            # mapping of cocoEval.stats
            coco_metric_names = {
                'mAP': 0,
                'mAP_50': 1,
                'mAP_75': 2,
                'mAP_s': 3,
                'mAP_m': 4,
                'mAP_l': 5,
                'AR@100': 6,
                'AR@300': 7,
                'AR@1000': 8,
                'AR_s@1000': 9,
                'AR_m@1000': 10,
                'AR_l@1000': 11
            }
            if metric_items is not None:
                for metric_item in metric_items:
                    if metric_item not in coco_metric_names:
                        raise KeyError(
                            f'metric item {metric_item} is not supported')

            if metric == 'proposal':
                cocoEval.params.useCats = 0
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                if metric_items is None:
                    metric_items = [
                        'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000',
                        'AR_m@1000', 'AR_l@1000'
                    ]

                for item in metric_items:
                    val = float(
                        f'{cocoEval.stats[coco_metric_names[item]]:.3f}')
                    eval_results[item] = val
            elif metric == 'pixel':
                cocoEval._prepare()
                full_gt_mask = []
                full_dt_mask = []
                for img_index in cocoGt.imgToAnns.keys():
                    gt_img_anns = cocoGt.imgToAnns[img_index]
                    gt_img_info = cocoGt.imgs[img_index]
                    dt_img_anns = cocoDt.imgToAnns[img_index]
                    dt_img_info = cocoDt.imgs[img_index]

                    gt_mask = anns_to_mask(gt_img_anns, gt_img_info['height'],
                                           gt_img_info['width'])
                    dt_mask = anns_to_mask(dt_img_anns, dt_img_info['height'],
                                           dt_img_info['width'])

                    full_gt_mask.append(gt_mask)
                    full_dt_mask.append(dt_mask)

                max_size = np.max([mask.shape[:2] for mask in full_gt_mask],
                                  axis=0)

                for i, _ in enumerate(full_gt_mask):
                    full_gt_mask[i] = pad_img_to_size(full_gt_mask[i],
                                                      max_size)
                    full_dt_mask[i] = pad_img_to_size(full_dt_mask[i],
                                                      max_size)

                full_gt_mask = np.stack(full_gt_mask)
                full_dt_mask = np.stack(full_dt_mask)

                pixel_metrics = calculate_metrics(full_dt_mask, full_gt_mask)

                table_data = [['Pixel Metric', 'Value']]
                # add the pixel-wise metrics to the output
                for name, value in pixel_metrics._asdict().items():
                    eval_results[f'{metric}_{name}'] = value
                    table_data.append([f'{metric}_{name}', '%.4f' % value])

                table = AsciiTable(table_data)
                print_log('\n' + table.table, logger=logger)
            else:
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                if classwise:  # Compute per-category AP
                    # Compute per-category AP
                    # from https://github.com/facebookresearch/detectron2/
                    precisions = cocoEval.eval['precision']
                    # precision: (iou, recall, cls, area range, max dets)
                    assert len(self.cat_ids) == precisions.shape[2]

                    results_per_category = []
                    for idx, catId in enumerate(self.cat_ids):
                        # area range index 0: all area ranges
                        # max dets index -1: typically 100 per image
                        nm = self.coco.loadCats(catId)[0]
                        precision = precisions[:, :, idx, 0, -1]
                        precision = precision[precision > -1]
                        if precision.size:
                            ap = np.mean(precision)
                        else:
                            ap = float('nan')
                        results_per_category.append(
                            (f'{nm["name"]}', f'{float(ap):0.3f}'))

                    num_columns = min(6, len(results_per_category) * 2)
                    results_flatten = list(
                        itertools.chain(*results_per_category))
                    headers = ['category', 'AP'] * (num_columns // 2)
                    results_2d = itertools.zip_longest(*[
                        results_flatten[i::num_columns]
                        for i in range(num_columns)
                    ])
                    table_data = [headers]
                    table_data += [result for result in results_2d]
                    table = AsciiTable(table_data)
                    print_log('\n' + table.table, logger=logger)

                if metric_items is None:
                    metric_items = [
                        'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
                    ]

                for metric_item in metric_items:
                    key = f'{metric}_{metric_item}'
                    val = float(
                        f'{cocoEval.stats[coco_metric_names[metric_item]]:.3f}'
                    )
                    eval_results[key] = val
                ap = cocoEval.stats[:6]
                eval_results[f'{metric}_mAP_copypaste'] = (
                    f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
                    f'{ap[4]:.3f} {ap[5]:.3f}')
        if tmp_dir is not None:
            tmp_dir.cleanup()
        return eval_results
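A hedged usage sketch for the method above (assumes dataset is an instance of this dataset class and results holds the detector outputs in the expected per-image list format):

eval_results = dataset.evaluate(results, metric='bbox', classwise=True)
print(eval_results['bbox_mAP'], eval_results['bbox_mAP_50'])
print(eval_results['bbox_mAP_copypaste'])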
Example #41
0
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
import numpy as np
import skimage.io as io
import json
import os

annType = ['segm','bbox','keypoints']
annType = annType[2]      #specify type here
prefix = 'person_keypoints' if annType=='keypoints' else 'instances'
print('Running demo for *%s* results.'%(annType))

PATH_PREFIX = "./txts/scale2"

annFile = os.path.join(PATH_PREFIX, "result-gt-scale2-100-json.txt")
cocoGt=COCO(annFile)

resFile = os.path.join(PATH_PREFIX, "result-pred-scale2-100-json.txt")

cocoDt=cocoGt.loadRes(resFile)
imgIds=sorted(cocoGt.getImgIds())

cocoEval = COCOeval(cocoGt,cocoDt,annType)
cocoEval.params.imgIds = imgIds
cocoEval.evaluate()
cocoEval.accumulate()
cocoEval.summarize()
Example #42
0
def main():
    # pylint: disable=import-outside-toplevel,too-many-branches,too-many-statements
    from pycocotools.coco import COCO
    from pycocotools.cocoeval import COCOeval

    parser = make_parser()
    args = parser.parse_args()

    current_network = import_from_file(args.file)
    cfg = current_network.Cfg()

    if args.weight_file:
        args.start_epoch = args.end_epoch = -1
    else:
        if args.start_epoch == -1:
            args.start_epoch = cfg.max_epoch - 1
        if args.end_epoch == -1:
            args.end_epoch = args.start_epoch
        assert 0 <= args.start_epoch <= args.end_epoch < cfg.max_epoch

    for epoch_num in range(args.start_epoch, args.end_epoch + 1):
        if args.weight_file:
            weight_file = args.weight_file
        else:
            weight_file = "log-of-{}/epoch_{}.pkl".format(
                os.path.basename(args.file).split(".")[0], epoch_num)

        if args.ngpus > 1:
            master_ip = "localhost"
            port = dist.get_free_ports(1)[0]
            dist.Server(port)

            result_list = []
            result_queue = Queue(2000)
            procs = []
            for i in range(args.ngpus):
                proc = Process(
                    target=worker,
                    args=(
                        current_network,
                        weight_file,
                        args.dataset_dir,
                        master_ip,
                        port,
                        args.ngpus,
                        i,
                        result_queue,
                    ),
                )
                proc.start()
                procs.append(proc)

            num_imgs = dict(coco=5000, objects365=30000)

            for _ in tqdm(range(num_imgs[cfg.test_dataset["name"]])):
                result_list.append(result_queue.get())
            for p in procs:
                p.join()
        else:
            result_list = []

            worker(current_network, weight_file, args.dataset_dir, None, None,
                   1, 0, result_list)

        all_results = DetEvaluator.format(result_list, cfg)
        json_path = "log-of-{}/epoch_{}.json".format(
            os.path.basename(args.file).split(".")[0], epoch_num)
        all_results = json.dumps(all_results)

        with open(json_path, "w") as fo:
            fo.write(all_results)
        logger.info("Save to %s finished, start evaluation!", json_path)

        eval_gt = COCO(
            os.path.join(args.dataset_dir, cfg.test_dataset["name"],
                         cfg.test_dataset["ann_file"]))
        eval_dt = eval_gt.loadRes(json_path)
        cocoEval = COCOeval(eval_gt, eval_dt, iouType="bbox")
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        metrics = [
            "AP",
            "[email protected]",
            "[email protected]",
            "APs",
            "APm",
            "APl",
            "AR@1",
            "AR@10",
            "AR@100",
            "ARs",
            "ARm",
            "ARl",
        ]
        logger.info("mmAP".center(32, "-"))
        for i, m in enumerate(metrics):
            logger.info("|\t%s\t|\t%.03f\t|", m, cocoEval.stats[i])
        logger.info("-" * 32)
Example #43
0
def evaluate_coco(dataset, model, threshold=0.05):

    model.eval()

    with torch.no_grad():

        # start collecting results
        results = []
        image_ids = []

        for index in range(len(dataset)):
            data = dataset[index]
            scale = data['scale']

            # run network
            if torch.cuda.is_available():
                scores, labels, boxes = model(data['img'].permute(
                    2, 0, 1).cuda().float().unsqueeze(dim=0))
            else:
                scores, labels, boxes = model(data['img'].permute(
                    2, 0, 1).float().unsqueeze(dim=0))
            scores = scores.cpu()
            labels = labels.cpu()
            boxes = boxes.cpu()

            # correct boxes for image scale
            boxes /= scale

            if boxes.shape[0] > 0:
                # change to (x, y, w, h) (MS COCO standard)
                boxes[:, 2] -= boxes[:, 0]
                boxes[:, 3] -= boxes[:, 1]

                # compute predicted labels and scores
                #for box, score, label in zip(boxes[0], scores[0], labels[0]):
                for box_id in range(boxes.shape[0]):
                    score = float(scores[box_id])
                    label = int(labels[box_id])
                    box = boxes[box_id, :]

                    # scores are sorted, so we can break
                    if score < threshold:
                        break

                    # append detection for each positively labeled class
                    image_result = {
                        'image_id': dataset.image_ids[index],
                        'category_id': dataset.label_to_coco_label(label),
                        'score': float(score),
                        'bbox': box.tolist(),
                    }

                    # append detection to results
                    results.append(image_result)

            # append image to list of processed images
            image_ids.append(dataset.image_ids[index])

            # print progress
            print('{}/{}'.format(index, len(dataset)), end='\r')

        if not len(results):
            return

        # write output
        json.dump(results,
                  open('{}_bbox_results.json'.format(dataset.set_name), 'w'))

        # load results in COCO evaluation tool
        coco_true = dataset.coco
        coco_pred = coco_true.loadRes('{}_bbox_results.json'.format(
            dataset.set_name))

        # run COCO evaluation
        coco_eval = COCOeval(coco_true, coco_pred, 'bbox')
        coco_eval.params.imgIds = image_ids
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()

        model.train()

        return
Example #44
0
def test(data,
         weights=None,
         batch_size=16,
         imgsz=640,
         conf_thres=0.001,
         iou_thres=0.6,  # for NMS
         save_json=False,
         single_cls=False,
         augment=False,
         half=False,  # FP16
         model=None,
         dataloader=None,
         fast=False,
         verbose=False):
    # Initialize/load model and set device
    if model is None:
        device = torch_utils.select_device(opt.device, batch_size=batch_size)
        half &= device.type != 'cpu'  # half precision only supported on CUDA

        # Remove previous
        for f in glob.glob('test_batch*.jpg'):
            os.remove(f)

        # Load model
        google_utils.attempt_download(weights)
        model = torch.load(weights, map_location=device)['model']
        torch_utils.model_info(model)
        # model.fuse()
        model.to(device)
        if half:
            model.half()  # to FP16

        if device.type != 'cpu' and torch.cuda.device_count() > 1:
            model = nn.DataParallel(model)

        training = False
    else:  # called by train.py
        device = next(model.parameters()).device  # get model device
        training = True

    # Configure run
    with open(data) as f:
        data = yaml.load(f, Loader=yaml.FullLoader)  # model dict
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for mAP@0.5:0.95
    # iouv = iouv[0].view(1)  # comment for mAP@0.5:0.95
    niou = iouv.numel()

    # Dataloader
    if dataloader is None:
        fast |= conf_thres > 0.001  # enable fast mode
        path = data['test'] if opt.task == 'test' else data['val']  # path to val/test images
        dataset = LoadImagesAndLabels(path,
                                      imgsz,
                                      batch_size,
                                      rect=True,  # rectangular inference
                                      single_cls=opt.single_cls,  # single class mode
                                      pad=0.0 if fast else 0.5)  # padding
        batch_size = min(batch_size, len(dataset))
        nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])  # number of workers
        dataloader = DataLoader(dataset,
                                batch_size=batch_size,
                                num_workers=nw,
                                pin_memory=True,
                                collate_fn=dataset.collate_fn)

    seen = 0
    model.eval()
    img = torch.zeros((1, 3, imgsz, imgsz), device=device)  # init img
    _ = model(img.half() if half else img) if device.type != 'cpu' else None  # run once
    names = model.names if hasattr(model, 'names') else model.module.names
    coco91class = coco80_to_coco91_class()
    s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@0.5', 'mAP@0.5:0.95')
    p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class = [], [], [], []
    for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)):
        img = img.to(device)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        targets = targets.to(device)
        nb, _, height, width = img.shape  # batch size, channels, height, width
        whwh = torch.Tensor([width, height, width, height]).to(device)

        # Disable gradients
        with torch.no_grad():
            # Run model
            t = torch_utils.time_synchronized()
            inf_out, train_out = model(img, augment=augment)  # inference and training outputs
            t0 += torch_utils.time_synchronized() - t

            # Compute loss
            if training:  # if model has loss hyperparameters
                loss += compute_loss(train_out, targets, model)[1][:3]  # GIoU, obj, cls

            # Run NMS
            t = torch_utils.time_synchronized()
            output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, fast=fast)
            t1 += torch_utils.time_synchronized() - t

        # Statistics per image
        for si, pred in enumerate(output):
            labels = targets[targets[:, 0] == si, 1:]
            nl = len(labels)
            tcls = labels[:, 0].tolist() if nl else []  # target class
            seen += 1

            if pred is None:
                if nl:
                    stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls))
                continue

            # Append to text file
            # with open('test.txt', 'a') as file:
            #    [file.write('%11.5g' * 7 % tuple(x) + '\n') for x in pred]

            # Clip boxes to image bounds
            clip_coords(pred, (height, width))

            # Append to pycocotools JSON dictionary
            if save_json:
                # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
                image_id = int(Path(paths[si]).stem.split('_')[-1])
                box = pred[:, :4].clone()  # xyxy
                scale_coords(img[si].shape[1:], box, shapes[si][0], shapes[si][1])  # to original shape
                box = xyxy2xywh(box)  # xywh
                box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
                for p, b in zip(pred.tolist(), box.tolist()):
                    jdict.append({'image_id': image_id,
                                  'category_id': coco91class[int(p[5])],
                                  'bbox': [round(x, 3) for x in b],
                                  'score': round(p[4], 5)})

            # Assign all predictions as incorrect
            correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device)
            if nl:
                detected = []  # target indices
                tcls_tensor = labels[:, 0]

                # target boxes
                tbox = xywh2xyxy(labels[:, 1:5]) * whwh

                # Per target class
                for cls in torch.unique(tcls_tensor):
                    ti = (cls == tcls_tensor).nonzero().view(-1)  # target indices
                    pi = (cls == pred[:, 5]).nonzero().view(-1)  # prediction indices

                    # Search for detections
                    if pi.shape[0]:
                        # Prediction to target ious
                        ious, i = box_iou(pred[pi, :4], tbox[ti]).max(1)  # best ious, indices

                        # Append detections
                        for j in (ious > iouv[0]).nonzero():
                            d = ti[i[j]]  # detected target
                            if d not in detected:
                                detected.append(d)
                                correct[pi[j]] = ious[j] > iouv  # iou_thres is 1xn
                                if len(detected) == nl:  # all targets already located in image
                                    break

            # Append statistics (correct, conf, pcls, tcls)
            stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))

        # Plot images
        if batch_i < 1:
            f = 'test_batch%g_gt.jpg' % batch_i  # filename
            plot_images(img, targets, paths, f, names)  # ground truth
            f = 'test_batch%g_pred.jpg' % batch_i
            plot_images(img, output_to_target(output, width, height), paths, f, names)  # predictions

    # Compute statistics
    stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
    if len(stats):
        p, r, ap, f1, ap_class = ap_per_class(*stats)
        p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(1)  # [P, R, mAP@0.5, mAP@0.5:0.95]
        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
        nt = np.bincount(stats[3].astype(np.int64), minlength=nc)  # number of targets per class
    else:
        nt = torch.zeros(1)

    # Print results
    pf = '%20s' + '%12.3g' * 6  # print format
    print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))

    # Print results per class
    if verbose and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))

    # Print speeds
    t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size)  # tuple
    if not training:
        print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t)

    # Save JSON
    if save_json and map50 and len(jdict):
        imgIds = [int(Path(x).stem.split('_')[-1]) for x in dataloader.dataset.img_files]
        f = 'detections_val2017_%s_results.json' % \
            (weights.split(os.sep)[-1].replace('.pt', '') if weights else '')  # filename
        print('\nCOCO mAP with pycocotools... saving %s...' % f)
        with open(f, 'w') as file:
            json.dump(jdict, file)

        try:
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval

            # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            cocoGt = COCO(glob.glob('../coco/annotations/instances_val*.json')[0])  # initialize COCO ground truth api
            cocoDt = cocoGt.loadRes(f)  # initialize COCO pred api

            cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
            cocoEval.params.imgIds = imgIds  # [:32]  # only evaluate these images
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()
            map, map50 = cocoEval.stats[:2]  # update to pycocotools results (mAP@0.5:0.95, mAP@0.5)
        except Exception as e:
            print('WARNING: pycocotools must be installed with numpy==1.17 to run correctly. '
                  'See https://github.com/cocodataset/cocoapi/issues/356 (error: %s)' % e)

    # Return results
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
Example #45
0
    def print_coco_metrics(self, results):
        """
        Args:
            results(list[dict]): results in coco format
        Returns:
            dict: the evaluation metrics
        """
        from pycocotools.cocoeval import COCOeval
        ret = {}
        has_mask = "segmentation" in results[
            0]  # results will be modified by loadRes

        cocoDt = self.coco.loadRes(results)
        cocoEval = COCOeval(self.coco, cocoDt, 'bbox')
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        fields = [
            'IoU=0.5:0.95', 'IoU=0.5', 'IoU=0.75', 'small', 'medium', 'large'
        ]
        for k in range(6):
            ret['mAP(bbox)/' + fields[k]] = cocoEval.stats[k]

        if len(results) > 0 and has_mask:
            cocoEval = COCOeval(self.coco, cocoDt, 'segm')
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()
            for k in range(6):
                ret['mAP(segm)/' + fields[k]] = cocoEval.stats[k]
        return ret
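As the comment above notes, loadRes mutates the result dicts it is given (it adds fields such as 'id' and 'area'), so pass a copy when the caller still needs the originals. A minimal sketch, with evaluator hypothetical:

import copy

metrics = evaluator.print_coco_metrics(copy.deepcopy(results))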
Example #46
0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import pycocotools.coco as coco
from pycocotools.cocoeval import COCOeval
import sys
import cv2
import numpy as np
import pickle
import os

this_dir = os.path.dirname(__file__)
ANN_PATH = os.path.join(this_dir, '../../data/coco/annotations/person_keypoints_val2017.json')
print(ANN_PATH)
if __name__ == '__main__':
    pred_path = sys.argv[1]
    coco = coco.COCO(ANN_PATH)
    dets = coco.loadRes(pred_path)
    img_ids = coco.getImgIds()
    num_images = len(img_ids)
    coco_eval = COCOeval(coco, dets, "keypoints")
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    coco_eval = COCOeval(coco, dets, "bbox")
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
Example #47
0
    def run_eval(self, results, save_dir=None):
        segments = self.convert_eval_format(results)

        if save_dir is not None:
            result_json = os.path.join(save_dir, "coco_segm_results.json")
            json.dump(segments, open(result_json, "w"))

        coco_segms = self.coco.loadRes(segments)
        coco_eval_seg = COCOeval(self.coco, coco_segms, "segm")
        coco_eval_seg.evaluate()
        coco_eval_seg.accumulate()
        coco_eval_seg.summarize()

        coco_eval = COCOeval(self.coco, coco_segms, "bbox")
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()

        return coco_eval_seg.stats
Example #48
0
def test(test_model, logger):
    eval_gt = COCO(cfg.gt_path)
    import json
    with open(cfg.det_path, 'r') as f:
        dets = json.load(f)

    test_subset = False
    if test_subset:
        eval_gt.imgs = dict(list(eval_gt.imgs.items())[:100])
        anns = dict()
        for i in eval_gt.imgs:
            for j in eval_gt.getAnnIds(i):
                anns[j] = eval_gt.anns[j]
        eval_gt.anns = anns
    dets = [i for i in dets if i['image_id'] in eval_gt.imgs]

    dets = [i for i in dets if i['category_id'] == 1]
    dets.sort(key=lambda x: (x['image_id'], x['score']), reverse=True)
    for i in dets:
        i['imgpath'] = 'val2014/COCO_val2014_000000%06d.jpg' % i['image_id']
    img_num = len(np.unique([i['image_id'] for i in dets]))

    use_gtboxes = False
    if use_gtboxes:
        d = COCOJoints()
        coco_train_data, coco_test_data = d.load_data()
        coco_test_data.sort(key=lambda x: x['imgid'])
        for i in coco_test_data:
            i['image_id'] = i['imgid']
            i['score'] = 1.
        dets = coco_test_data

    from tfflat.mp_utils import MultiProc
    img_start = 0
    ranges = [0]
    images_per_gpu = int(img_num / len(args.gpu_ids.split(','))) + 1
    for run_img in range(img_num):
        img_end = img_start + 1
        while img_end < len(dets) and dets[img_end]['image_id'] == dets[
                img_start]['image_id']:
            img_end += 1
        if (run_img + 1) % images_per_gpu == 0 or (run_img + 1) == img_num:
            ranges.append(img_end)
        img_start = img_end

    def func(id):
        cfg.set_args(args.gpu_ids.split(',')[id])
        tester = Tester(Network(), cfg)
        tester.load_weights(test_model)
        range = [ranges[id], ranges[id + 1]]
        return test_net(tester, logger, dets, range)

    MultiGPUFunc = MultiProc(len(args.gpu_ids.split(',')), func)
    all_res, dump_results = MultiGPUFunc.work()

    # evaluation
    result_path = osp.join(cfg.output_dir, 'results.json')
    with open(result_path, 'w') as f:
        json.dump(dump_results, f)

    eval_dt = eval_gt.loadRes(result_path)
    cocoEval = COCOeval(eval_gt, eval_dt, iouType='keypoints')

    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
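For iouType='keypoints', cocoEval.stats is a 10-vector based on OKS (object keypoint similarity) rather than box IoU:

# cocoEval.stats layout (keypoints):
# [0] AP@OKS=0.5:0.95  [1] AP@OKS=0.5  [2] AP@OKS=0.75
# [3] AP (medium)      [4] AP (large)
# [5] AR@OKS=0.5:0.95  [6] AR@OKS=0.5  [7] AR@OKS=0.75
# [8] AR (medium)      [9] AR (large)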
Example #49
0
def test(cfg,
         data,
         weights=None,
         batch_size=16,
         img_size=416,
         iou_thres=0.5,
         conf_thres=0.001,
         nms_thres=0.5,
         save_json=False,
         model=None):
    # Initialize/load model and set device
    if model is None:
        device = torch_utils.select_device()
        verbose = True

        # Initialize model
        model = Darknet(cfg, img_size).to(device)

        # Load weights
        if weights.endswith('.pt'):  # pytorch format
            model.load_state_dict(
                torch.load(weights, map_location=device)['model'])
        else:  # darknet format
            _ = load_darknet_weights(model, weights)

        if torch.cuda.device_count() > 1:
            model = nn.DataParallel(model)
    else:
        device = next(model.parameters()).device  # get model device
        verbose = False

    # Configure run
    data = parse_data_cfg(data)
    nc = int(data['classes'])  # number of classes
    test_path = data['valid']  # path to test images
    names = load_classes(data['names'])  # class names

    # Dataloader
    dataset = LoadImagesAndLabels(test_path, img_size, batch_size)
    dataloader = DataLoader(dataset,
                            batch_size=batch_size,
                            num_workers=os.cpu_count(),
                            pin_memory=True,
                            collate_fn=dataset.collate_fn)

    seen = 0
    model.eval()
    coco91class = coco80_to_coco91_class()
    s = ('%30s' + '%10s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP',
                                 'F1')
    p, r, f1, mp, mr, map, mf1 = 0., 0., 0., 0., 0., 0., 0.
    loss = torch.zeros(3)
    jdict, stats, ap, ap_class = [], [], [], []
    for batch_i, (imgs, targets, paths,
                  shapes) in enumerate(tqdm(dataloader, desc=s)):
        targets = targets.to(device)
        imgs = imgs.to(device)
        _, _, height, width = imgs.shape  # batch size, channels, height, width

        # Plot images with bounding boxes
        if batch_i == 0 and not os.path.exists('test_batch0.jpg'):
            plot_images(imgs=imgs,
                        targets=targets,
                        paths=paths,
                        fname='test_batch0.jpg')

        # Run model
        inf_out, train_out = model(imgs)  # inference and training outputs

        # Compute loss
        if hasattr(model, 'hyp'):  # if model has loss hyperparameters
            loss += compute_loss(train_out, targets,
                                 model)[1][[0, 2, 3]].cpu()  # GIoU, obj, cls

        # Run NMS
        output = non_max_suppression(inf_out,
                                     conf_thres=conf_thres,
                                     nms_thres=nms_thres)

        # Statistics per image
        for si, pred in enumerate(output):
            labels = targets[targets[:, 0] == si, 1:]
            nl = len(labels)
            tcls = labels[:, 0].tolist() if nl else []  # target class
            seen += 1

            if pred is None:
                if nl:
                    stats.append(([], torch.Tensor(), torch.Tensor(), tcls))
                continue

            # Append to text file
            # with open('test.txt', 'a') as file:
            #    [file.write('%11.5g' * 7 % tuple(x) + '\n') for x in pred]

            # Append to pycocotools JSON dictionary
            if save_json:
                # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
                image_id = int(Path(paths[si]).stem.split('_')[-1])
                box = pred[:, :4].clone()  # xyxy
                scale_coords(imgs[si].shape[1:], box,
                             shapes[si])  # to original shape
                box = xyxy2xywh(box)  # xywh
                box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
                for di, d in enumerate(pred):
                    jdict.append({
                        'image_id': image_id,
                        'category_id': coco91class[int(d[6])],
                        'bbox': [floatn(x, 3) for x in box[di]],
                        'score': floatn(d[4], 5)
                    })

            # Clip boxes to image bounds
            clip_coords(pred, (height, width))

            # Assign all predictions as incorrect
            correct = [0] * len(pred)
            if nl:
                detected = []
                tcls_tensor = labels[:, 0]

                # target boxes
                tbox = xywh2xyxy(labels[:, 1:5])
                tbox[:, [0, 2]] *= width
                tbox[:, [1, 3]] *= height

                # Search for correct predictions
                for i, (*pbox, pconf, pcls_conf, pcls) in enumerate(pred):

                    # Break if all targets already located in image
                    if len(detected) == nl:
                        break

                    # Continue if predicted class not among image classes
                    if pcls.item() not in tcls:
                        continue

                    # Best iou, index between pred and targets
                    m = (pcls == tcls_tensor).nonzero().view(-1)
                    iou, bi = bbox_iou(pbox, tbox[m]).max(0)

                    # If iou > threshold and class is correct mark as correct
                    if iou > iou_thres and m[
                            bi] not in detected:  # and pcls == tcls[bi]:
                        correct[i] = 1
                        detected.append(m[bi])

            # Append statistics (correct, conf, pcls, tcls)
            stats.append((correct, pred[:, 4].cpu(), pred[:, 6].cpu(), tcls))

    # Compute statistics
    stats = [np.concatenate(x, 0) for x in list(zip(*stats))]  # to numpy
    if len(stats):
        p, r, ap, f1, ap_class = ap_per_class(*stats)
        mp, mr, map, mf1 = p.mean(), r.mean(), ap.mean(), f1.mean()
        nt = np.bincount(stats[3].astype(np.int64),
                         minlength=nc)  # number of targets per class
    else:
        nt = torch.zeros(1)

    # Print results
    pf = '%30s' + '%10.3g' * 6  # print format
    print(pf % ('all', seen, nt.sum(), mp, mr, map, mf1))

    # Print results per class
    if verbose and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            print(pf % (names[c], seen, nt[c], p[i], r[i], ap[i], f1[i]))

    # Save JSON
    if save_json and map and len(jdict):
        imgIds = [int(Path(x).stem.split('_')[-1]) for x in dataset.img_files]
        with open('results.json', 'w') as file:
            json.dump(jdict, file)

        from pycocotools.coco import COCO
        from pycocotools.cocoeval import COCOeval

        # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
        cocoGt = COCO(
            'H:/Other_DataSets/coco/annotations/instances_val2014.json'
        )  # initialize COCO ground truth api
        cocoDt = cocoGt.loadRes('results.json')  # initialize COCO pred api

        cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
        cocoEval.params.imgIds = imgIds  # [:32]  # only evaluate these images
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        map = cocoEval.stats[1]  # update mAP to pycocotools mAP@0.5 (stats[1])

    # Return results
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map, mf1, *(loss / len(dataloader)).tolist()), maps
Example #50
0
def print_evaluation_scores(json_file):
    ret = {}
    assert config.BASEDIR and os.path.isdir(config.BASEDIR)
    annofile = os.path.join(
        config.BASEDIR, 'annotations',
        'instances_{}.json'.format(config.VAL_DATASET))
    coco = COCO(annofile)
    cocoDt = coco.loadRes(json_file)
    cocoEval = COCOeval(coco, cocoDt, 'bbox')
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
    fields = ['IoU=0.5:0.95', 'IoU=0.5', 'IoU=0.75', 'small', 'medium', 'large']
    for k in range(6):
        ret['mAP(bbox)/' + fields[k]] = cocoEval.stats[k]

    if config.MODE_MASK:
        cocoEval = COCOeval(coco, cocoDt, 'segm')
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
        for k in range(6):
            ret['mAP(segm)/' + fields[k]] = cocoEval.stats[k]
    return ret
Example #51
0
    def evaluate(self,
                 results,
                 metric='bbox',
                 logger=None,
                 jsonfile_prefix=None,
                 classwise=False,
                 proposal_nums=(100, 300, 1000),
                 iou_thrs=np.arange(0.5, 0.96, 0.05)):
        """Evaluation in COCO protocol.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
            logger (logging.Logger | str | None): Logger used for printing
                related information during evaluation. Default: None.
            jsonfile_prefix (str | None): The prefix of json files. It includes
                the file path and the prefix of filename, e.g., "a/b/prefix".
                If not specified, a temp file will be created. Default: None.
            classwise (bool): Whether to evaluate the AP for each class.
            proposal_nums (Sequence[int]): Proposal number used for evaluating
                recalls, such as recall@100, recall@1000.
                Default: (100, 300, 1000).
            iou_thrs (Sequence[float]): IoU threshold used for evaluating
                recalls. If set to a list, the average recall of all IoUs will
                also be computed. Default: np.arange(0.5, 0.96, 0.05).

        Returns:
            dict[str: float]
        """

        metrics = metric if isinstance(metric, list) else [metric]
        allowed_metrics = ['bbox', 'segm', 'proposal', 'proposal_fast']
        for metric in metrics:
            if metric not in allowed_metrics:
                raise KeyError(f'metric {metric} is not supported')

        result_files, tmp_dir = self.format_results(results, jsonfile_prefix)

        eval_results = {}
        cocoGt = self.coco
        for metric in metrics:
            msg = f'Evaluating {metric}...'
            if logger is None:
                msg = '\n' + msg
            print_log(msg, logger=logger)

            if metric == 'proposal_fast':
                ar = self.fast_eval_recall(results,
                                           proposal_nums,
                                           iou_thrs,
                                           logger='silent')
                log_msg = []
                for i, num in enumerate(proposal_nums):
                    eval_results[f'AR@{num}'] = ar[i]
                    log_msg.append(f'\nAR@{num}\t{ar[i]:.4f}')
                log_msg = ''.join(log_msg)
                print_log(log_msg, logger=logger)
                continue

            if metric not in result_files:
                raise KeyError(f'{metric} is not in results')
            try:
                cocoDt = cocoGt.loadRes(result_files[metric])
            except IndexError:
                print_log('The testing results of the whole dataset are empty.',
                          logger=logger,
                          level=logging.ERROR)
                break

            iou_type = 'bbox' if metric == 'proposal' else metric
            cocoEval = COCOeval(cocoGt, cocoDt, iou_type)
            cocoEval.params.catIds = self.cat_ids
            cocoEval.params.imgIds = self.img_ids
            if metric == 'proposal':
                cocoEval.params.useCats = 0
                cocoEval.params.maxDets = list(proposal_nums)
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                metric_items = [
                    'AR@100', 'AR@300', 'AR@1000', 'AR_s@1000', 'AR_m@1000',
                    'AR_l@1000'
                ]
                for i, item in enumerate(metric_items):
                    val = float(f'{cocoEval.stats[i + 6]:.3f}')
                    eval_results[item] = val
            else:
                cocoEval.evaluate()
                cocoEval.accumulate()
                cocoEval.summarize()
                if classwise:  # Compute per-category AP
                    # Compute per-category AP
                    # from https://github.com/facebookresearch/detectron2/
                    precisions = cocoEval.eval['precision']
                    # precision: (iou, recall, cls, area range, max dets)
                    assert len(self.cat_ids) == precisions.shape[2]

                    results_per_category = []
                    for idx, catId in enumerate(self.cat_ids):
                        # area range index 0: all area ranges
                        # max dets index -1: typically 100 per image
                        nm = self.coco.loadCats(catId)[0]
                        precision = precisions[:, :, idx, 0, -1]
                        precision = precision[precision > -1]
                        if precision.size:
                            ap = np.mean(precision)
                        else:
                            ap = float('nan')
                        results_per_category.append(
                            (f'{nm["name"]}', f'{float(ap):0.3f}'))

                    num_columns = min(6, len(results_per_category) * 2)
                    results_flatten = list(
                        itertools.chain(*results_per_category))
                    headers = ['category', 'AP'] * (num_columns // 2)
                    results_2d = itertools.zip_longest(*[
                        results_flatten[i::num_columns]
                        for i in range(num_columns)
                    ])
                    table_data = [headers]
                    table_data += [result for result in results_2d]
                    table = AsciiTable(table_data)
                    print_log('\n' + table.table, logger=logger)

                metric_items = [
                    'mAP', 'mAP_50', 'mAP_75', 'mAP_s', 'mAP_m', 'mAP_l'
                ]
                for i in range(len(metric_items)):
                    key = f'{metric}_{metric_items[i]}'
                    val = float(f'{cocoEval.stats[i]:.3f}')
                    eval_results[key] = val
                ap = cocoEval.stats[:6]
                eval_results[f'{metric}_mAP_copypaste'] = (
                    f'{ap[0]:.3f} {ap[1]:.3f} {ap[2]:.3f} {ap[3]:.3f} '
                    f'{ap[4]:.3f} {ap[5]:.3f}')
        if tmp_dir is not None:
            tmp_dir.cleanup()
        return eval_results
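For reference, the classwise lookup above reduces to a small standalone helper. This is a minimal sketch, assuming `coco_eval` has already run `evaluate()` and `accumulate()` and that `cat_ids` matches `coco_eval.params.catIds`:

import numpy as np

def per_category_ap(coco_eval, cat_ids):
    # COCOeval precision has shape (iou, recall, cls, area range, max dets)
    precisions = coco_eval.eval['precision']
    assert len(cat_ids) == precisions.shape[2]
    aps = {}
    for idx, cat_id in enumerate(cat_ids):
        # area range 0 = 'all'; max dets -1 = the largest setting (usually 100)
        p = precisions[:, :, idx, 0, -1]
        p = p[p > -1]  # entries of -1 mark absent (category, recall) cells
        aps[cat_id] = float(np.mean(p)) if p.size else float('nan')
    return aps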
Example #52
class InstanceEvaluator(object):
	def __init__(self, dataset_json, preds_json):
		# load dataset ground truths
		self.dataset = COCO(dataset_json)
		category_ids = self.dataset.getCatIds()
		categories = [x['name'] for x in self.dataset.loadCats(category_ids)]
		self.category_to_id_map = dict(zip(categories, category_ids))
		self.classes = ['__background__'] + categories
		self.num_classes = len(self.classes)

		# load predictions
		self.preds = self.dataset.loadRes(preds_json)
		self.coco_eval = COCOeval(self.dataset, self.preds, 'segm')
		self.coco_eval.params.maxDets = [1, 50, 255]

	def evaluate(self):
		self.coco_eval.evaluate()
		self.coco_eval.accumulate()

	def _summarize(self, ap=1, iouThr=None, areaRng='all', maxDets=255):
		p = self.coco_eval.params
		iStr = ' {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}'
		titleStr = 'Average Precision' if ap == 1 else 'Average Recall'
		typeStr = '(AP)' if ap==1 else '(AR)'
		iouStr = '{:0.2f}:{:0.2f}'.format(p.iouThrs[0], p.iouThrs[-1]) \
		if iouThr is None else '{:0.2f}'.format(iouThr)

		aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
		mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
		if ap == 1:
			# dimension of precision: [TxRxKxAxM]
			s = self.coco_eval.eval['precision']
			# IoU
			if iouThr is not None:
				t = np.where(iouThr == p.iouThrs)[0]
				s = s[t]
			s = s[:,:,:,aind,mind]
		else:
			# dimension of recall: [TxKxAxM]
			s = self.coco_eval.eval['recall']
			if iouThr is not None:
				t = np.where(iouThr == p.iouThrs)[0]
				s = s[t]
			s = s[:,:,aind,mind]
		if len(s[s > -1]) == 0:
			mean_s = -1
		else:
			mean_s = np.mean(s[s > -1])
		print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))
		return mean_s

	def summarize(self, IoU_lo_thres=0.5, IoU_hi_thres=0.95):
		def _get_thr_ind(thr):
			ind = np.where((self.coco_eval.params.iouThrs > thr - 1e-5) &
						   (self.coco_eval.params.iouThrs < thr + 1e-5))[0][0]
			iou_thr = self.coco_eval.params.iouThrs[ind]
			assert np.isclose(iou_thr, thr)
			return ind

		ind_lo = _get_thr_ind(IoU_lo_thres)
		ind_hi = _get_thr_ind(IoU_hi_thres)

		# (iou, recall, cls, area, max_dets)
		precision = self.coco_eval.eval['precision'][ind_lo:(ind_hi + 1), :, :, 0, 2]
		ap_mean = np.mean(precision[precision > -1])
		print('* MeanAP: {}'.format(ap_mean))

		print('* Performance by class:')
		ap_by_class = []
		for cls_ind, cls_name in enumerate(self.classes):
			if cls_name == '__background__':
				continue
			cls_precision = self.coco_eval.eval['precision'][ind_lo: (ind_hi + 1), :, cls_ind - 1, 0, 2]
			cls_ap = np.mean(cls_precision[cls_precision > -1])
			ap_by_class.append(cls_ap)
			print('{}, AP: {}'.format(cls_name, cls_ap))
		ap_by_class = np.asarray(ap_by_class)

		print('* Performance at different thresholds:')
		ap_by_thres = np.zeros((12,))
		ap_by_thres[0] = self._summarize(1)
		ap_by_thres[1] = self._summarize(1, iouThr=.5, maxDets=self.coco_eval.params.maxDets[2])
		ap_by_thres[2] = self._summarize(1, iouThr=.75, maxDets=self.coco_eval.params.maxDets[2])
		ap_by_thres[3] = self._summarize(1, areaRng='small', maxDets=self.coco_eval.params.maxDets[2])
		ap_by_thres[4] = self._summarize(1, areaRng='medium', maxDets=self.coco_eval.params.maxDets[2])
		ap_by_thres[5] = self._summarize(1, areaRng='large', maxDets=self.coco_eval.params.maxDets[2])
		ap_by_thres[6] = self._summarize(0, maxDets=self.coco_eval.params.maxDets[0])
		ap_by_thres[7] = self._summarize(0, maxDets=self.coco_eval.params.maxDets[1])
		ap_by_thres[8] = self._summarize(0, maxDets=self.coco_eval.params.maxDets[2])
		ap_by_thres[9] = self._summarize(0, areaRng='small', maxDets=self.coco_eval.params.maxDets[2])
		ap_by_thres[10] = self._summarize(0, areaRng='medium', maxDets=self.coco_eval.params.maxDets[2])
		ap_by_thres[11] = self._summarize(0, areaRng='large', maxDets=self.coco_eval.params.maxDets[2])
		return ap_mean, ap_by_class, ap_by_thres
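A hypothetical driver for the class above; both JSON paths are placeholders for a COCO-style annotation file and a matching loadRes-compatible segmentation predictions file:

# paths are illustrative; both files must follow the COCO JSON conventions
evaluator = InstanceEvaluator('annotations/instances_val.json',
                              'predictions_segm.json')
evaluator.evaluate()
ap_mean, ap_by_class, ap_by_thres = evaluator.summarize()
print('mean AP over IoU 0.5:0.95 = {:.3f}'.format(ap_mean))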
def validate(args):
    # might as well try to validate something
    args.pretrained = args.pretrained or not args.checkpoint
    args.prefetcher = not args.no_prefetcher

    # create model
    config = get_efficientdet_config(args.model)
    model = EfficientDet(config)
    if args.checkpoint:
        load_checkpoint(model, args.checkpoint)

    param_count = sum([m.numel() for m in model.parameters()])
    logging.info('Model %s created, param count: %d' %
                 (args.model, param_count))

    bench = DetBenchEval(model, config)

    bench.model = bench.model.cuda()
    if has_amp:
        bench.model = amp.initialize(bench.model, opt_level='O1')

    if args.num_gpu > 1:
        bench.model = torch.nn.DataParallel(bench.model,
                                            device_ids=list(range(
                                                args.num_gpu)))

    if 'test' in args.anno:
        annotation_path = os.path.join(args.data, 'annotations',
                                       f'image_info_{args.anno}.json')
        image_dir = 'test2017'
    else:
        annotation_path = os.path.join(args.data, 'annotations',
                                       f'instances_{args.anno}.json')
        image_dir = args.anno
    dataset = CocoDetection(os.path.join(args.data, image_dir),
                            annotation_path)

    loader = create_loader(dataset,
                           input_size=config.image_size,
                           batch_size=args.batch_size,
                           use_prefetcher=args.prefetcher,
                           interpolation=args.interpolation,
                           num_workers=args.workers)

    img_ids = []
    results = []
    model.eval()
    batch_time = AverageMeter()
    end = time.time()
    with torch.no_grad():
        for i, (input, target) in enumerate(loader):
            output = bench(input, target['img_id'], target['scale'])
            for batch_out in output:
                for det in batch_out:
                    image_id = int(det[0])
                    score = float(det[5])
                    coco_det = {
                        'image_id': image_id,
                        'bbox': det[1:5].tolist(),
                        'score': score,
                        'category_id': int(det[6]),
                    }
                    img_ids.append(image_id)
                    results.append(coco_det)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.log_freq == 0:
                print(
                    'Test: [{0:>4d}/{1}]  '
                    'Time: {batch_time.val:.3f}s ({batch_time.avg:.3f}s, {rate_avg:>7.2f}/s)  '
                    .format(
                        i,
                        len(loader),
                        batch_time=batch_time,
                        rate_avg=input.size(0) / batch_time.avg,
                    ))

    with open(args.results, 'w') as f:
        json.dump(results, f, indent=4)
    if 'test' not in args.anno:
        coco_results = dataset.coco.loadRes(args.results)
        coco_eval = COCOeval(dataset.coco, coco_results, 'bbox')
        coco_eval.params.imgIds = img_ids  # score only ids we've used
        coco_eval.evaluate()
        coco_eval.accumulate()
        coco_eval.summarize()

    return results
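validate() accumulates plain dicts in exactly the format COCO.loadRes accepts. A minimal sketch of that round trip, reusing the example detection from the pycocotools-format comments later on this page; the annotation filename is a placeholder and image id 42 is assumed to exist in it:

import json
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval

# one detection: bbox is [x, y, width, height] in pixels, top-left origin
results = [{'image_id': 42, 'category_id': 18,
            'bbox': [258.15, 41.29, 348.26, 243.78], 'score': 0.236}]
with open('results.json', 'w') as f:
    json.dump(results, f)

coco_gt = COCO('instances_val2017.json')  # placeholder annotation file
coco_dt = coco_gt.loadRes('results.json')
coco_eval = COCOeval(coco_gt, coco_dt, 'bbox')
coco_eval.evaluate()
coco_eval.accumulate()
coco_eval.summarize()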
def eval_mscoco_with_segm(cocoGT, cocoPred):
    # run the official segmentation evaluation ('segm' iouType, matching the function name)
    cocoEval = COCOeval(cocoGT, cocoPred, "segm")
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
Example #55
def test(
        data,
        weights=None,
        batch_size=32,
        imgsz=640,
        conf_thres=0.001,
        iou_thres=0.6,  # for NMS
        save_json=False,
        single_cls=False,
        augment=False,
        verbose=False,
        model=None,
        dataloader=None,
        save_dir=Path(''),  # for saving images
        save_txt=False,  # for auto-labelling
        save_hybrid=False,  # for hybrid auto-labelling
        save_conf=False,  # save auto-label confidences
        plots=True,
        log_imgs=0,  # number of logged images
        compute_loss=None):
    # Initialize/load model and set device
    training = model is not None
    if training:  # called by train.py
        device = next(model.parameters()).device  # get model device

    else:  # called directly
        set_logging()
        device = select_device(opt.device, batch_size=batch_size)

        # Directories
        save_dir = Path(
            increment_path(Path(opt.project) / opt.name,
                           exist_ok=opt.exist_ok))  # increment run
        (save_dir / 'labels' if save_txt else save_dir).mkdir(
            parents=True, exist_ok=True)  # make dir

        # Load model
        model = attempt_load(weights, map_location=device)  # load FP32 model
        imgsz = check_img_size(imgsz, s=model.stride.max())  # check img_size

        # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99
        # if device.type != 'cpu' and torch.cuda.device_count() > 1:
        #     model = nn.DataParallel(model)

    # Half
    half = device.type != 'cpu'  # half precision only supported on CUDA
    if half:
        model.half()

    # Configure
    model.eval()
    is_coco = data.endswith('coco.yaml')  # is COCO dataset
    with open(data) as f:
        data = yaml.load(f, Loader=yaml.SafeLoader)  # model dict
    check_dataset(data)  # check
    nc = 1 if single_cls else int(data['nc'])  # number of classes
    iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for mAP@0.5:0.95
    niou = iouv.numel()

    # Logging
    log_imgs, wandb = min(log_imgs, 100), None  # cap W&B image logging at 100
    try:
        import wandb  # Weights & Biases
    except ImportError:
        log_imgs = 0

    # Dataloader
    if not training:
        if device.type != 'cpu':
            model(
                torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(
                    next(model.parameters())))  # run once
        path = data['test'] if opt.task == 'test' else data[
            'val']  # path to val/test images
        dataloader = create_dataloader(
            path,
            imgsz,
            batch_size,
            model.stride.max(),
            opt,
            pad=0.5,
            rect=True,
            prefix=colorstr('test: ' if opt.task == 'test' else 'val: '))[0]

    seen = 0
    confusion_matrix = ConfusionMatrix(nc=nc)
    names = {
        k: v
        for k, v in enumerate(
            model.names if hasattr(model, 'names') else model.module.names)
    }
    coco91class = coco80_to_coco91_class()
    s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R',
                                 'mAP@.5', 'mAP@.5:.95')
    p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class, wandb_images = [], [], [], [], []
    for batch_i, (img, targets, paths,
                  shapes) in enumerate(tqdm(dataloader, desc=s)):
        img = img.to(device, non_blocking=True)
        img = img.half() if half else img.float()  # uint8 to fp16/32
        img /= 255.0  # 0 - 255 to 0.0 - 1.0
        targets = targets.to(device)
        nb, _, height, width = img.shape  # batch size, channels, height, width

        with torch.no_grad():
            # Run model
            t = time_synchronized()
            inf_out, train_out = model(
                img, augment=augment)  # inference and training outputs
            t0 += time_synchronized() - t

            # Compute loss
            if compute_loss:
                loss += compute_loss([x.float() for x in train_out],
                                     targets)[1][:3]  # box, obj, cls

            # Run NMS
            targets[:, 2:] *= torch.Tensor([width, height, width,
                                            height]).to(device)  # to pixels
            lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)
                  ] if save_hybrid else []  # for autolabelling
            t = time_synchronized()
            output = non_max_suppression(inf_out,
                                         conf_thres=conf_thres,
                                         iou_thres=iou_thres,
                                         labels=lb)
            t1 += time_synchronized() - t

        # Statistics per image
        for si, pred in enumerate(output):
            labels = targets[targets[:, 0] == si, 1:]
            nl = len(labels)
            tcls = labels[:, 0].tolist() if nl else []  # target class
            path = Path(paths[si])
            seen += 1

            if len(pred) == 0:
                if nl:
                    stats.append((torch.zeros(0, niou, dtype=torch.bool),
                                  torch.Tensor(), torch.Tensor(), tcls))
                continue

            # Predictions
            predn = pred.clone()
            scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0],
                         shapes[si][1])  # native-space pred

            # Append to text file
            if save_txt:
                gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0
                                                  ]]  # normalization gain whwh
                for *xyxy, conf, cls in predn.tolist():
                    xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) /
                            gn).view(-1).tolist()  # normalized xywh
                    line = (cls, *xywh,
                            conf) if save_conf else (cls,
                                                     *xywh)  # label format
                    with open(save_dir / 'labels' / (path.stem + '.txt'),
                              'a') as f:
                        f.write(('%g ' * len(line)).rstrip() % line + '\n')

            # W&B logging
            if plots and len(wandb_images) < log_imgs:
                box_data = [{
                    "position": {
                        "minX": xyxy[0],
                        "minY": xyxy[1],
                        "maxX": xyxy[2],
                        "maxY": xyxy[3]
                    },
                    "class_id": int(cls),
                    "box_caption": "%s %.3f" % (names[cls], conf),
                    "scores": {
                        "class_score": conf
                    },
                    "domain": "pixel"
                } for *xyxy, conf, cls in pred.tolist()]
                boxes = {
                    "predictions": {
                        "box_data": box_data,
                        "class_labels": names
                    }
                }  # inference-space
                wandb_images.append(
                    wandb.Image(img[si], boxes=boxes, caption=path.name))

            # Append to pycocotools JSON dictionary
            if save_json:
                # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
                image_id = int(
                    path.stem) if path.stem.isnumeric() else path.stem
                box = xyxy2xywh(predn[:, :4])  # xywh
                box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
                for p, b in zip(pred.tolist(), box.tolist()):
                    jdict.append({
                        'image_id':
                        image_id,
                        'category_id':
                        coco91class[int(p[5])] if is_coco else int(p[5]),
                        'bbox': [round(x, 3) for x in b],
                        'score':
                        round(p[4], 5)
                    })

            # Assign all predictions as incorrect
            correct = torch.zeros(pred.shape[0],
                                  niou,
                                  dtype=torch.bool,
                                  device=device)
            if nl:
                detected = []  # target indices
                tcls_tensor = labels[:, 0]

                # target boxes
                tbox = xywh2xyxy(labels[:, 1:5])
                scale_coords(img[si].shape[1:], tbox, shapes[si][0],
                             shapes[si][1])  # native-space labels
                if plots:
                    confusion_matrix.process_batch(
                        predn, torch.cat((labels[:, 0:1], tbox), 1))

                # Per target class
                for cls in torch.unique(tcls_tensor):
                    ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(
                        -1)  # prediction indices
                    pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(
                        -1)  # target indices

                    # Search for detections
                    if pi.shape[0]:
                        # Prediction to target ious
                        ious, i = box_iou(predn[pi, :4], tbox[ti]).max(
                            1)  # best ious, indices

                        # Append detections
                        detected_set = set()
                        for j in (ious > iouv[0]).nonzero(as_tuple=False):
                            d = ti[i[j]]  # detected target
                            if d.item() not in detected_set:
                                detected_set.add(d.item())
                                detected.append(d)
                                correct[
                                    pi[j]] = ious[j] > iouv  # iou_thres is 1xn
                                if len(
                                        detected
                                ) == nl:  # all targets already located in image
                                    break

            # Append statistics (correct, conf, pcls, tcls)
            stats.append(
                (correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))

        # Plot images
        if plots and batch_i < 3:
            f = save_dir / f'test_batch{batch_i}_labels.jpg'  # labels
            Thread(target=plot_images,
                   args=(img, targets, paths, f, names),
                   daemon=True).start()
            f = save_dir / f'test_batch{batch_i}_pred.jpg'  # predictions
            Thread(target=plot_images,
                   args=(img, output_to_target(output), paths, f, names),
                   daemon=True).start()

    # Compute statistics
    stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
    if len(stats) and stats[0].any():
        p, r, ap, f1, ap_class = ap_per_class(*stats,
                                              plot=plots,
                                              save_dir=save_dir,
                                              names=names)
        ap50, ap = ap[:, 0], ap.mean(1)  # AP@0.5, AP@0.5:0.95
        mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean()
        nt = np.bincount(stats[3].astype(np.int64),
                         minlength=nc)  # number of targets per class
    else:
        nt = torch.zeros(1)

    # Print results
    pf = '%20s' + '%12.3g' * 6  # print format
    print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))

    # Print results per class
    if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))

    # Print speeds
    t = tuple(x / seen * 1E3
              for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size)  # tuple
    if not training:
        print(
            'Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g'
            % t)

    # Plots
    if plots:
        confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
        if wandb and wandb.run:
            val_batches = [
                wandb.Image(str(f), caption=f.name)
                for f in sorted(save_dir.glob('test*.jpg'))
            ]
            wandb.log({
                "Images": wandb_images,
                "Validation": val_batches
            },
                      commit=False)

    # Save JSON
    if save_json and len(jdict):
        w = Path(weights[0] if isinstance(weights, list) else weights
                 ).stem if weights is not None else ''  # weights
        anno_json = '../coco/annotations/instances_val2017.json'  # annotations json
        pred_json = str(save_dir / f"{w}_predictions.json")  # predictions json
        print('\nEvaluating pycocotools mAP... saving %s...' % pred_json)
        with open(pred_json, 'w') as f:
            json.dump(jdict, f)

        try:  # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval

            anno = COCO(anno_json)  # init annotations api
            pred = anno.loadRes(pred_json)  # init predictions api
            eval = COCOeval(anno, pred, 'bbox')
            if is_coco:
                eval.params.imgIds = [
                    int(Path(x).stem) for x in dataloader.dataset.img_files
                ]  # image IDs to evaluate
            eval.evaluate()
            eval.accumulate()
            eval.summarize()
            map, map50 = eval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)
        except Exception as e:
            print(f'pycocotools unable to run: {e}')

    # Return results
    if not training:
        s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
        print(f"Results saved to {save_dir}{s}")
    model.float()  # for training
    maps = np.zeros(nc) + map
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map50, map,
            *(loss.cpu() / len(dataloader)).tolist()), maps, t
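The save_json branch above converts predictions from corner format to COCO's top-left xywh via xyxy2xywh plus a half-size shift. A minimal equivalent sketch, assuming absolute-pixel (x1, y1, x2, y2) tensors:

import torch

def xyxy_to_coco_xywh(boxes):
    # boxes: (N, 4) tensor of (x1, y1, x2, y2) corners in pixels
    xywh = boxes.clone()
    xywh[:, 2:] = boxes[:, 2:] - boxes[:, :2]  # width, height
    # columns 0:2 already hold the top-left corner (x1, y1)
    return xywh

print(xyxy_to_coco_xywh(torch.tensor([[10., 20., 110., 220.]])))
# tensor([[ 10.,  20., 100., 200.]])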
Example #56
def test(
        cfg,
        data,
        weights=None,
        batch_size=16,
        img_size=416,
        conf_thres=0.001,
        iou_thres=0.6,  # for nms
        save_json=False,
        single_cls=False,
        augment=False,
        model=None,
        dataloader=None):
    # Initialize/load model and set device
    if model is None:
        device = torch_utils.select_device(opt.device, batch_size=batch_size)
        verbose = opt.task == 'test'

        # Remove previous
        for f in glob.glob('test_batch*.jpg'):
            os.remove(f)

        # Initialize model
        model = Darknet(cfg, img_size)

        # Load weights
        attempt_download(weights)
        if weights.endswith('.pt'):  # pytorch format
            model.load_state_dict(
                torch.load(weights, map_location=device)['model'], False)
        else:  # darknet format
            load_darknet_weights(model, weights)

        # Fuse
        model.fuse()
        model.to(device)
        model.half()

        if device.type != 'cpu' and torch.cuda.device_count() > 1:
            model = nn.DataParallel(model)
    else:  # called by train.py
        device = next(model.parameters()).device  # get model device
        verbose = False

    # Configure run
    data = parse_data_cfg(data)
    nc = 1 if single_cls else int(data['classes'])  # number of classes
    path = data['valid']  # path to test images
    names = load_classes(data['names'])  # class names
    iouv = torch.linspace(0.5, 0.95, 10).to(device)  # iou vector for mAP@0.5:0.95
    iouv = iouv[0].view(1)  # comment out this line to evaluate mAP@0.5:0.95
    niou = iouv.numel()

    # Dataloader
    if dataloader is None:
        dataset = LoadImagesAndLabels(path,
                                      img_size,
                                      batch_size,
                                      rect=True,
                                      single_cls=opt.single_cls)
        batch_size = min(batch_size, len(dataset))
        dataloader = DataLoader(dataset,
                                batch_size=batch_size,
                                num_workers=min([
                                    os.cpu_count(),
                                    batch_size if batch_size > 1 else 0, 8
                                ]),
                                pin_memory=True,
                                collate_fn=dataset.collate_fn)

    seen = 0
    model.eval()
    _ = model(torch.zeros(
        (1, 3, img_size, img_size),
        device=device).half()) if device.type != 'cpu' else None  # run once
    coco91class = coco80_to_coco91_class()
    s = ('%20s' + '%10s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R',
                                 'mAP@0.5', 'F1')
    p, r, f1, mp, mr, map, mf1, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
    loss = torch.zeros(3, device=device)
    jdict, stats, ap, ap_class = [], [], [], []
    for batch_i, (imgs, targets, paths,
                  shapes) in enumerate(tqdm(dataloader, desc=s)):
        imgs = imgs.to(
            device).half() / 255.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0
        targets = targets.to(device)
        nb, _, height, width = imgs.shape  # batch size, channels, height, width
        whwh = torch.Tensor([width, height, width, height]).to(device)

        # Plot images with bounding boxes
        f = 'test_batch%g.jpg' % batch_i  # filename
        #if batch_i < 1 and not os.path.exists(f):
        #    plot_images(imgs=imgs, targets=targets, paths=paths, fname=f)

        # Disable gradients
        with torch.no_grad():
            # Run model
            t = torch_utils.time_synchronized()
            inf_out, train_out = model(
                imgs, augment=augment)  # inference and training outputs
            t0 += torch_utils.time_synchronized() - t

            # Compute loss
            if hasattr(model, 'hyp'):  # if model has loss hyperparameters
                loss += compute_loss(train_out, targets,
                                     model)[1][:3]  # GIoU, obj, cls

            # Run NMS
            t = torch_utils.time_synchronized()
            output = non_max_suppression(inf_out,
                                         conf_thres=conf_thres,
                                         iou_thres=iou_thres)  # nms
            t1 += torch_utils.time_synchronized() - t

        # Statistics per image
        for si, pred in enumerate(output):
            labels = targets[targets[:, 0] == si, 1:]
            nl = len(labels)
            tcls = labels[:, 0].tolist() if nl else []  # target class
            seen += 1

            if pred is None:
                if nl:
                    stats.append((torch.zeros(0, niou, dtype=torch.bool),
                                  torch.Tensor(), torch.Tensor(), tcls))
                continue

            # Append to text file
            # with open('test.txt', 'a') as file:
            #    [file.write('%11.5g' * 7 % tuple(x) + '\n') for x in pred]

            # Clip boxes to image bounds
            clip_coords(pred, (height, width))

            # Append to pycocotools JSON dictionary
            if save_json:
                # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
                image_id = int(Path(paths[si]).stem.split('_')[-1])
                box = pred[:, :4].clone()  # xyxy
                scale_coords(imgs[si].shape[1:], box, shapes[si][0],
                             shapes[si][1])  # to original shape
                box = xyxy2xywh(box)  # xywh
                box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
                for p, b in zip(pred.tolist(), box.tolist()):
                    jdict.append({
                        'image_id': image_id,
                        'category_id': coco91class[int(p[5])],
                        'bbox': [round(x, 3) for x in b],
                        'score': round(p[4], 5)
                    })

            # Assign all predictions as incorrect
            correct = torch.zeros(pred.shape[0],
                                  niou,
                                  dtype=torch.bool,
                                  device=device)
            if nl:
                detected = []  # target indices
                tcls_tensor = labels[:, 0]

                # target boxes
                tbox = xywh2xyxy(labels[:, 1:5]) * whwh

                # Per target class
                for cls in torch.unique(tcls_tensor):
                    ti = (cls == tcls_tensor).nonzero().view(
                        -1)  # prediction indices
                    pi = (cls == pred[:,
                                      5]).nonzero().view(-1)  # target indices

                    # Search for detections
                    if pi.shape[0]:
                        # Prediction to target ious
                        ious, i = box_iou(pred[pi, :4], tbox[ti]).max(
                            1)  # best ious, indices

                        # Append detections
                        for j in (ious > iouv[0]).nonzero():
                            d = ti[i[j]]  # detected target
                            if d not in detected:
                                detected.append(d)
                                correct[
                                    pi[j]] = ious[j] > iouv  # iou_thres is 1xn
                                if len(
                                        detected
                                ) == nl:  # all targets already located in image
                                    break

            # Append statistics (correct, conf, pcls, tcls)
            stats.append(
                (correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls))

    # Compute statistics
    stats = [np.concatenate(x, 0) for x in zip(*stats)]  # to numpy
    if len(stats):
        p, r, ap, f1, ap_class = ap_per_class(*stats)
        if niou > 1:
            p, r, ap, f1 = p[:, 0], r[:, 0], ap.mean(1), ap[:, 0]  # [P, R, mAP@0.5:0.95, mAP@0.5]
        mp, mr, map, mf1 = p.mean(), r.mean(), ap.mean(), f1.mean()
        nt = np.bincount(stats[3].astype(np.int64),
                         minlength=nc)  # number of targets per class
    else:
        nt = torch.zeros(1)

    # Print results
    pf = '%20s' + '%10.3g' * 6  # print format
    print(pf % ('all', seen, nt.sum(), mp, mr, map, mf1))

    # Print results per class
    if verbose and nc > 1 and len(stats):
        for i, c in enumerate(ap_class):
            print(pf % (names[c], seen, nt[c], p[i], r[i], ap[i], f1[i]))

    # Print speeds
    if verbose or save_json:
        t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (
            img_size, img_size, batch_size)  # tuple
        print(
            'Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g'
            % t)

    maps = np.zeros(nc) + map
    # Save JSON
    if save_json and map and len(jdict):
        print('\nCOCO mAP with pycocotools...')
        imgIds = [
            int(Path(x).stem.split('_')[-1])
            for x in dataloader.dataset.img_files
        ]
        with open('results.json', 'w') as file:
            json.dump(jdict, file)

        try:
            from pycocotools.coco import COCO
            from pycocotools.cocoeval import COCOeval
        except ImportError:
            print(
                'WARNING: missing pycocotools package, cannot compute official COCO mAP. See requirements.txt.'
            )
        else:
            # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
            cocoGt = COCO(glob.glob('../coco/annotations/instances_val*.json')
                          [0])  # initialize COCO ground truth api
            cocoDt = cocoGt.loadRes('results.json')  # initialize COCO pred api

            cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
            cocoEval.params.imgIds = imgIds  # [:32]  # only evaluate these images
            cocoEval.evaluate()
            cocoEval.accumulate()
            cocoEval.summarize()
            map, map50 = cocoEval.stats[:2]  # update results (mAP@0.5:0.95, mAP@0.5)
            return (mp, mr, map50, map,
                    *(loss.cpu() / len(dataloader)).tolist()), maps, t

    # Return results
    for i, c in enumerate(ap_class):
        maps[c] = ap[i]
    return (mp, mr, map, mf1, *(loss.cpu() / len(dataloader)).tolist()), maps
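Both test() variants count a prediction as correct by greedily matching it to the best-overlapping unclaimed target of the same class. A compact sketch of that rule for a single class and a single IoU threshold, assuming predictions arrive confidence-sorted (as NMS output is) and boxes are absolute-pixel xyxy tensors:

import torch

def box_iou(a, b):
    # a: (N, 4), b: (M, 4) xyxy boxes; returns an (N, M) IoU matrix
    area_a = (a[:, 2] - a[:, 0]) * (a[:, 3] - a[:, 1])
    area_b = (b[:, 2] - b[:, 0]) * (b[:, 3] - b[:, 1])
    lt = torch.max(a[:, None, :2], b[None, :, :2])  # intersection top-left
    rb = torch.min(a[:, None, 2:], b[None, :, 2:])  # intersection bottom-right
    wh = (rb - lt).clamp(min=0)
    inter = wh[..., 0] * wh[..., 1]
    return inter / (area_a[:, None] + area_b[None, :] - inter)

def greedy_match(pred_boxes, target_boxes, iou_thres=0.5):
    # mark prediction i correct if its best-IoU target clears the threshold
    # and was not already claimed by an earlier (higher-confidence) prediction
    correct = torch.zeros(len(pred_boxes), dtype=torch.bool)
    ious, best = box_iou(pred_boxes, target_boxes).max(1)
    claimed = set()
    for i in range(len(pred_boxes)):
        t = int(best[i])
        if ious[i] > iou_thres and t not in claimed:
            claimed.add(t)
            correct[i] = True
    return correct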
Example #57
def train(args):
    """Trains a detection model based on a configuration

    :param args: argparse arguments specifying input configuration
        and output folder
    """

    # create data paths dict
    data_paths = dict()
    data_paths['train_images'] = args.train_images
    data_paths['train_labels'] = args.train_labels
    data_paths['test_images'] = args.test_images
    data_paths['test_labels'] = args.test_labels

    batch_size = args.batch_size
    minibatch_size = args.minibatch_size
    sampler = args.sampler
    lr = args.lr
    epochs = args.epochs
    no_classes = args.classes
    class_weights = args.weights
    input_size = args.input_size
    anchors = args.anchors
    output = args.output
    transform_norm_parameters = args.transform_norm_parameters

    logger = pl_loggers.TensorBoardLogger(
        '{}/custom_ssd_ckpt/logs'.format(output))

    # add background class
    no_classes = no_classes + 1

    # initialize model
    model, bbox_encoder = create_detection_model(anchors,
                                                 input_size=input_size,
                                                 no_classes=no_classes)

    loss = ODLoss(minibatch_size, sampler, class_weights)

    # initialize detection model
    train_model = DetectionModel(model, input_size, loss, lr, epochs)

    # create checkpoint-creation callback
    checkpoint_callback = ModelCheckpoint(monitor='train_loss',
                                          save_top_k=3,
                                          save_last=True,
                                          mode='min')

    # initialize trainer
    trainer = pl.Trainer(logger=logger,
                         max_epochs=epochs,
                         gpus=1,
                         num_sanity_val_steps=0,
                         weights_save_path='{}/custom_ssd_ckpt'.format(output),
                         weights_summary='full',
                         callbacks=[checkpoint_callback])

    # initialize data module
    data_module = DataModule(batch_size, input_size, data_paths, no_classes,
                             bbox_encoder, transform_norm_parameters)

    # train model
    trainer.fit(train_model, data_module)

    # test model
    trainer.test(test_dataloaders=data_module.test_dataloader())

    version_path = '{}/custom_ssd_ckpt/default/version_{}'.format(
        output, trainer.logger.version)

    print('Version is: {}'.format(version_path))

    # save all metrics in json
    preds = train_model.get_test_preds()

    io_ops.save_dict(preds, os.path.join(version_path, 'test_preds.json'))

    # COCO ground-truth annotations
    cocoGt = COCO(data_paths['test_labels'])
    # COCO-format detection results
    cocoDt = cocoGt.loadRes(os.path.join(version_path, 'test_preds.json'))

    annType = 'bbox'

    imgIds = sorted(cocoGt.getImgIds())

    # coco evaluator
    cocoEval = COCOeval(cocoGt, cocoDt, annType)
    cocoEval.params.imgIds = imgIds
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()

    train_model.log('mAP IoU=0.50:0.95', round(cocoEval.stats[0], 2))
    train_model.log('mAP IoU=0.50', round(cocoEval.stats[1], 2))