def test(test_model, logger):
    """Run multi-GPU COCO keypoint evaluation for a trained model.

    Loads person detections from ``cfg.det_path``, splits them into
    contiguous per-image ranges (one range per GPU in ``args.gpu_ids``),
    runs ``test_net`` on each range via ``MultiProc``, dumps the merged
    results to ``<cfg.output_dir>/results.json`` and evaluates them with
    pycocotools' COCOeval (keypoints metric).

    Args:
        test_model: path to the model weights passed to ``Tester.load_weights``.
        logger: logger forwarded to ``test_net``.
    """
    eval_gt = COCO(cfg.gt_path)
    import json
    with open(cfg.det_path, 'r') as f:
        dets = json.load(f)

    # Debug switch: restrict evaluation to the first 100 images of the GT set.
    test_subset = False
    if test_subset:
        eval_gt.imgs = dict(list(eval_gt.imgs.items())[:100])
        anns = dict()
        for i in eval_gt.imgs:
            for j in eval_gt.getAnnIds(i):
                anns[j] = eval_gt.anns[j]
        eval_gt.anns = anns
        dets = [i for i in dets if i['image_id'] in eval_gt.imgs]

    # Keep only person detections (COCO category 1), grouped by image with
    # highest-scoring detections first within each image.
    dets = [i for i in dets if i['category_id'] == 1]
    dets.sort(key=lambda x: (x['image_id'], x['score']), reverse=True)
    # NOTE(review): hard-coded local dataset path -- should come from cfg.
    for i in dets:
        i['imgpath'] = '/home/dx/data/coco/val2014/COCO_val2014_000000%06d.jpg' % i['image_id']
    img_num = len(np.unique([i['image_id'] for i in dets]))

    # Debug switch: evaluate on ground-truth boxes instead of detections.
    use_gtboxes = False
    if use_gtboxes:
        d = COCOJoints()
        coco_train_data, coco_test_data = d.load_data()
        coco_test_data.sort(key=lambda x: x['imgid'])
        for i in coco_test_data:
            i['image_id'] = i['imgid']
            i['score'] = 1.
        dets = coco_test_data

    from tfflat.mp_utils import MultiProc

    # Build detection-index boundaries so each GPU gets a contiguous run of
    # whole images (never splitting one image's detections across GPUs).
    img_start = 0
    ranges = [0]
    num_gpus = len(args.gpu_ids.split(','))
    images_per_gpu = img_num // num_gpus + 1  # ceiling-ish split across GPUs
    for run_img in range(img_num):
        img_end = img_start + 1
        # Advance img_end past all detections belonging to the same image.
        while img_end < len(dets) and dets[img_end]['image_id'] == dets[img_start]['image_id']:
            img_end += 1
        if (run_img + 1) % images_per_gpu == 0 or (run_img + 1) == img_num:
            ranges.append(img_end)
        img_start = img_end

    def func(worker_id):
        """Per-GPU worker: run test_net on this worker's detection range."""
        cfg.set_args(args.gpu_ids.split(',')[worker_id])
        tester = Tester(Network(), cfg)
        tester.load_weights(test_model)
        det_range = [ranges[worker_id], ranges[worker_id + 1]]
        return test_net(tester, logger, dets, det_range)

    MultiGPUFunc = MultiProc(num_gpus, func)
    all_res, dump_results = MultiGPUFunc.work()

    # Evaluation: dump merged results and score them with COCOeval.
    result_path = osp.join(cfg.output_dir, 'results.json')
    with open(result_path, 'w') as f:
        json.dump(dump_results, f)

    eval_dt = eval_gt.loadRes(result_path)
    cocoEval = COCOeval(eval_gt, eval_dt, iouType='keypoints')
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()
def _init_coco(self):
    """Build an in-memory pycocotools COCO object from Visual Genome annotations.

    Populates ``coco.anns``, ``coco.imgs``, ``coco.imgToAnns``,
    ``coco.catToImgs`` and ``coco.cats`` directly from
    ``self.vg_annotations``, mirrors them into the raw ``coco.dataset``
    dict, then stores the result in ``self.coco`` and caches the image-id
    list in ``self.ids``.
    """
    coco = COCO()
    # Generate each COCO index member (anns, cats, imgs, dataset,
    # imgToAnns, catToImgs) ourselves instead of calling createIndex().
    det_anno_list = OrderedDict()   # annotation id -> annotation dict
    images = OrderedDict()          # image id -> image record
    img2anns = OrderedDict()        # image id -> list of its annotations
    cat2imgs = defaultdict(list)    # category id -> image ids (one per annotation)
    for each_anno in self.vg_annotations:
        img_id = each_anno['id']
        images[img_id] = {
            'file_name': each_anno['path'],
            'height': each_anno['height'],
            'width': each_anno['width'],
            'id': img_id
        }
        # Each image contains many annotations: build them with a
        # placeholder id first, then fold into the global running id below.
        obj_list = []
        for each_object in each_anno['objects']:
            xywh = xyxy2xywh(each_object['box'])
            obj_list.append({
                'category_id': self.obj_cls_ind_q[each_object['class']],
                'bbox': xywh,
                'image_id': img_id,
                'area': xywh[2] * xywh[3],
                'iscrowd': 0,
                'segmentation': [],
                'id': -1  # placeholder; replaced with the global id below
            })
        # Accumulate the per-image index into an overall annotation id.
        last_seg_idx = len(det_anno_list)
        for indx, each in enumerate(obj_list):
            anno_id = last_seg_idx + indx
            each['id'] = anno_id
            det_anno_list[anno_id] = each
            cat2imgs[each['category_id']].append(img_id)
        img2anns[img_id] = obj_list
    coco.anns = det_anno_list
    coco.imgs = images
    coco.imgToAnns = img2anns
    coco.catToImgs = cat2imgs
    # Categories: index 0 is skipped.
    # NOTE(review): assumes obj_cls_list[0] is the background class -- confirm.
    cats = OrderedDict()
    for id_, each_cat in enumerate(self.obj_cls_list):
        if id_ == 0:
            continue
        cats[id_] = {
            'id': id_,
            'name': each_cat,
        }
    coco.cats = cats
    # Mirror the index structures into the raw dataset dict so COCO API
    # methods that read coco.dataset keep working.
    coco.dataset['licenses'] = ''
    coco.dataset['annotations'] = list(coco.anns.values())
    coco.dataset['info'] = {}
    coco.dataset['images'] = list(coco.imgs.values())
    coco.dataset['categories'] = list(coco.cats.values())
    self.coco = coco
    self.ids = list(self.coco.imgs.keys())