def __init__(self, mode):
    print('Creating: {}'.format(cfg.dataset))
    self.name = cfg.data_dir
    self.mode = mode
    data_path = DatasetPath(mode, self.name)
    data_dir = data_path.get_data_dir()
    file_list = data_path.get_file_list()
    self.image_dir = data_dir
    self.gt_dir = file_list
def __init__(self, mode):
    print('Creating: {}'.format(cfg.dataset))
    self.name = cfg.dataset
    self.is_train = mode == 'train'
    data_path = DatasetPath(mode)
    data_dir = data_path.get_data_dir()
    file_list = data_path.get_file_list()
    self.image_directory = data_dir
    self.COCO = COCO(file_list)
    # Set up dataset classes
    category_ids = self.COCO.getCatIds()
    categories = [c['name'] for c in self.COCO.loadCats(category_ids)]
    self.category_to_id_map = dict(zip(categories, category_ids))
    self.classes = ['__background__'] + categories
    self.num_classes = len(self.classes)
    self.json_category_id_to_contiguous_id = {
        v: i + 1
        for i, v in enumerate(self.COCO.getCatIds())
    }
    self.contiguous_category_id_to_json_id = {
        v: k
        for k, v in self.json_category_id_to_contiguous_id.items()
    }
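# Illustrative note (not part of the original module): COCO category ids are
# sparse (1..90 with gaps), so the reader above remaps them to contiguous
# indices 1..80 and reserves 0 for '__background__'. A minimal standalone
# sketch of the same mapping, using a hypothetical toy id list:
#
#   json_ids = [1, 2, 3, 5, 7]                       # sparse "json" ids
#   to_contiguous = {v: i + 1 for i, v in enumerate(json_ids)}
#   to_json = {v: k for k, v in to_contiguous.items()}
#   assert to_contiguous[5] == 4 and to_json[4] == 5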
def infer():

    try:
        from pycocotools.coco import COCO
        from pycocotools.cocoeval import COCOeval, Params

        data_path = DatasetPath('val')
        test_list = data_path.get_file_list()
        coco_api = COCO(test_list)
        cid = coco_api.getCatIds()
        cat_id_to_num_id_map = {
            v: i + 1
            for i, v in enumerate(coco_api.getCatIds())
        }
        category_ids = coco_api.getCatIds()
        labels_map = {
            cat_id_to_num_id_map[item['id']]: item['name']
            for item in coco_api.loadCats(category_ids)
        }
        labels_map[0] = 'background'
    except Exception:
        print("The COCO dataset or the COCO API does not exist; using the "
              "default mapping of class index to category name for COCO17.")
        assert cfg.dataset == 'coco2017'
        labels_map = coco17_labels()

    image_shape = [3, cfg.TEST.max_size, cfg.TEST.max_size]
    class_nums = cfg.class_num

    model = model_builder.RCNN(
        add_conv_body_func=resnet.add_ResNet50_conv4_body,
        add_roi_box_head_func=resnet.add_ResNet_roi_conv5_head,
        use_pyreader=False,
        mode='infer')
    model.build_model(image_shape)
    pred_boxes = model.eval_bbox_out()
    if cfg.MASK_ON:
        masks = model.eval_mask_out()
    place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    # yapf: disable
    if not os.path.exists(cfg.pretrained_model):
        raise ValueError("Model path [%s] does not exist." % (cfg.pretrained_model))

    def if_exist(var):
        return os.path.exists(os.path.join(cfg.pretrained_model, var.name))
    fluid.io.load_vars(exe, cfg.pretrained_model, predicate=if_exist)
    # yapf: enable
    infer_reader = reader.infer(cfg.image_path)
    feeder = fluid.DataFeeder(place=place, feed_list=model.feeds())

    dts_res = []
    segms_res = []
    if cfg.MASK_ON:
        fetch_list = [pred_boxes, masks]
    else:
        fetch_list = [pred_boxes]
    data = next(infer_reader())
    im_info = [data[0][1]]
    result = exe.run(fetch_list=[v.name for v in fetch_list],
                     feed=feeder.feed(data),
                     return_numpy=False)
    pred_boxes_v = result[0]
    if cfg.MASK_ON:
        masks_v = result[1]
    new_lod = pred_boxes_v.lod()
    nmsed_out = pred_boxes_v
    image = None
    if cfg.MASK_ON:
        segms_out = segm_results(nmsed_out, masks_v, im_info)
        image = draw_mask_on_image(cfg.image_path, segms_out,
                                   cfg.draw_threshold)

    draw_bounding_box_on_image(cfg.image_path, nmsed_out, cfg.draw_threshold,
                               labels_map, image)
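# Illustrative sketch only: draw_bounding_box_on_image is defined elsewhere in
# the repository. The hypothetical helper below is NOT the original
# implementation; it is a minimal Pillow-based re-implementation, assuming each
# row of nmsed_out follows the fluid multiclass_nms layout
# [label, score, xmin, ymin, xmax, ymax].
def _draw_boxes_sketch(image_path, nmsed_out, draw_threshold, labels_map):
    from PIL import Image, ImageDraw

    image = Image.open(image_path).convert('RGB')
    draw = ImageDraw.Draw(image)
    for det in np.array(nmsed_out):
        label, score, xmin, ymin, xmax, ymax = det.tolist()
        if score < draw_threshold:
            continue  # skip low-confidence detections
        draw.rectangle([xmin, ymin, xmax, ymax], outline='red')
        draw.text((xmin, ymin),
                  '{} {:.2f}'.format(labels_map[int(label)], score),
                  fill='red')
    image.save('result_sketch.jpg')
    return image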
def eval():

    data_path = DatasetPath('val')
    test_list = data_path.get_file_list()

    image_shape = [3, cfg.TEST.max_size, cfg.TEST.max_size]
    class_nums = cfg.class_num
    devices = os.getenv("CUDA_VISIBLE_DEVICES") or ""
    devices_num = len(devices.split(","))
    total_batch_size = devices_num * cfg.TRAIN.im_per_batch
    cocoGt = COCO(test_list)
    num_id_to_cat_id_map = {i + 1: v for i, v in enumerate(cocoGt.getCatIds())}
    category_ids = cocoGt.getCatIds()
    label_list = {
        item['id']: item['name']
        for item in cocoGt.loadCats(category_ids)
    }
    label_list[0] = 'background'

    model = model_builder.RCNN(
        add_conv_body_func=resnet.add_ResNet50_conv4_body,
        add_roi_box_head_func=resnet.add_ResNet_roi_conv5_head,
        use_pyreader=False,
        mode='val')
    model.build_model(image_shape)
    pred_boxes = model.eval_bbox_out()
    if cfg.MASK_ON:
        masks = model.eval_mask_out()
    place = fluid.CUDAPlace(0) if cfg.use_gpu else fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())
    # yapf: disable
    if cfg.pretrained_model:
        def if_exist(var):
            return os.path.exists(os.path.join(cfg.pretrained_model, var.name))
        fluid.io.load_vars(exe, cfg.pretrained_model, predicate=if_exist)
    # yapf: enable
    test_reader = reader.test(total_batch_size)
    feeder = fluid.DataFeeder(place=place, feed_list=model.feeds())

    dts_res = []
    segms_res = []
    if cfg.MASK_ON:
        fetch_list = [pred_boxes, masks]
    else:
        fetch_list = [pred_boxes]
    eval_start = time.time()
    for batch_id, batch_data in enumerate(test_reader()):
        start = time.time()
        im_info = []
        for data in batch_data:
            im_info.append(data[1])
        results = exe.run(fetch_list=[v.name for v in fetch_list],
                          feed=feeder.feed(batch_data),
                          return_numpy=False)

        pred_boxes_v = results[0]
        if cfg.MASK_ON:
            masks_v = results[1]

        new_lod = pred_boxes_v.lod()
        nmsed_out = pred_boxes_v

        dts_res += get_dt_res(total_batch_size, new_lod[0], nmsed_out,
                              batch_data, num_id_to_cat_id_map)

        if cfg.MASK_ON and np.array(masks_v).shape != (1, 1):
            segms_out = segm_results(nmsed_out, masks_v, im_info)
            segms_res += get_segms_res(total_batch_size, new_lod[0], segms_out,
                                       batch_data, num_id_to_cat_id_map)
        end = time.time()
        print('batch id: {}, time: {}'.format(batch_id, end - start))
    eval_end = time.time()
    total_time = eval_end - eval_start
    print('average time of eval is: {}'.format(total_time / (batch_id + 1)))
    assert len(dts_res) > 0, "The number of valid bboxes detected is zero.\n \
        Please use a reasonable model and check the input data."
    if cfg.MASK_ON:
        assert len(
            segms_res) > 0, "The number of valid masks detected is zero.\n \
            Please use a reasonable model and check the input data."

    with io.open("detection_bbox_result.json", 'w') as outfile:
        encode_func = unicode if six.PY2 else str
        outfile.write(encode_func(json.dumps(dts_res)))
    print("start evaluate bbox using coco api")
    cocoDt = cocoGt.loadRes("detection_bbox_result.json")
    cocoEval = COCOeval(cocoGt, cocoDt, 'bbox')
    cocoEval.evaluate()
    cocoEval.accumulate()
    cocoEval.summarize()

    if cfg.MASK_ON:
        with io.open("detection_segms_result.json", 'w') as outfile:
            encode_func = unicode if six.PY2 else str
            outfile.write(encode_func(json.dumps(segms_res)))
        print("start evaluate mask using coco api")
        cocoDt = cocoGt.loadRes("detection_segms_result.json")
        cocoEval = COCOeval(cocoGt, cocoDt, 'segm')
        cocoEval.evaluate()
        cocoEval.accumulate()
        cocoEval.summarize()
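# Illustrative note (not part of the original file): detection_bbox_result.json
# must follow the pycocotools result format consumed by cocoGt.loadRes, i.e. a
# flat list of dicts with absolute [x, y, width, height] boxes; get_dt_res is
# assumed to produce entries shaped like the hypothetical example below.
#
#   example_dt = {
#       "image_id": 139,                  # id taken from the val annotation file
#       "category_id": 1,                 # original (sparse) COCO category id
#       "bbox": [258.1, 41.3, 348.3, 243.6],
#       "score": 0.97,
#   }
#
# Mask results use the same layout with an extra "segmentation" field holding an
# RLE-encoded mask, which is what COCOeval(cocoGt, cocoDt, 'segm') evaluates.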
def eval():

    devices_num = 1
    total_batch_size = 1  # devices_num * cfg.TRAIN.im_per_batch

    data_path = DatasetPath('val')
    test_list = data_path.get_file_list()
    cocoGt = COCO(test_list)
    num_id_to_cat_id_map = {i + 1: v for i, v in enumerate(cocoGt.getCatIds())}

    use_random = True
    if cfg.enable_ce:
        use_random = False

    if cfg.parallel:
        strategy = fluid.dygraph.parallel.prepare_context()
        print("Execute Parallel Mode!!!")

    # Model
    model = RCNN("faster_rcnn", cfg=cfg, mode='eval', use_random=use_random)

    if cfg.parallel:
        model = fluid.dygraph.parallel.DataParallel(model, strategy)

    if False:  # cfg.pretrained_model:
        model_state = model.state_dict()
        ckpt_file = open(cfg.pretrained_model, 'rb')  # pickle needs binary mode
        w_dict = pickle.load(ckpt_file)
        for k, v in w_dict.items():
            for wk in model_state.keys():
                res = re.search(k, wk)
                if res is not None:
                    print("load: ", k, v.shape, np.mean(np.abs(v)), " --> ",
                          wk, model_state[wk].shape)
                    model_state[wk] = v
                    break
        model.set_dict(model_state)
    elif cfg.resume_model:
        para_state_dict, opti_state_dict = fluid.load_dygraph("model_final")
        #print(para_state_dict.keys())
        #ckpt_file = open("dyg_mask_rcnn.pkl", "w")
        new_dict = {}
        for k, v in para_state_dict.items():
            if "conv2d" in k:
                new_k = k.split('.')[1]
            elif 'linear' in k:
                new_k = k.split('.')[1]
            elif 'conv2dtranspose' in k:
                new_k = k.split('.')[1]
            else:
                new_k = k
            print("save weight from %s to %s" % (k, new_k))
            new_dict[new_k] = v.numpy()
        #print(new_dict.keys())
        #pickle.dump(new_dict, ckpt_file)
        np.savez("dyg_mask_rcnn.npz", **new_dict)
        model.set_dict(para_state_dict)

    test_reader = reader.test(batch_size=total_batch_size)
    if cfg.parallel:
        test_reader = fluid.contrib.reader.distributed_batch_reader(test_reader)

    eval_start = time.time()
    dts_res = []
    segms_res = []
    for iter_id, data in enumerate(test_reader()):
        start = time.time()

        image_data = np.array([x[0] for x in data]).astype('float32')
        image_info_data = np.array([x[1] for x in data]).astype('float32')
        image_id_data = np.array([x[2] for x in data]).astype('int32')

        if cfg.enable_ce:
            print("image_data: ", np.abs(image_data).mean(), image_data.shape)
            print("im_info_data: ", np.abs(image_info_data).mean(),
                  image_info_data.shape, image_info_data)
            print("img_id: ", image_id_data, image_id_data.shape)

        # forward
        outputs = model(image_data, image_info_data, image_id_data)

        pred_boxes_v = outputs[1].numpy()
        if cfg.MASK_ON:
            masks_v = outputs[2].numpy()

        new_lod = list(outputs[0].numpy())
        #new_lod = [[0, pred_boxes_v.shape[0]]]  # pred_boxes_v.lod()
        nmsed_out = pred_boxes_v

        dts_res += get_dt_res(total_batch_size, new_lod, nmsed_out, data,
                              num_id_to_cat_id_map)

        if cfg.MASK_ON and np.array(masks_v).shape != (1, 1):
            segms_out = segm_results(nmsed_out, masks_v, image_info_data)
            segms_res += get_segms_res(total_batch_size, new_lod, segms_out,
                                       data, num_id_to_cat_id_map)

        end = time.time()
        print('batch id: {}, time: {}'.format(iter_id, end - start))
    eval_end = time.time()
    total_time = eval_end - eval_start
    print('average time of eval is: {}'.format(total_time / (iter_id + 1)))

    assert len(dts_res) > 0, "The number of valid bboxes detected is zero.\n \
        Please use a reasonable model and check the input data."
    if cfg.MASK_ON:
        assert len(
            segms_res) > 0, "The number of valid masks detected is zero.\n \
            Please use a reasonable model and check the input data."
with io.open("detection_bbox_result.json", 'w') as outfile: encode_func = unicode if six.PY2 else str outfile.write(encode_func(json.dumps(dts_res))) print("start evaluate bbox using coco api") cocoDt = cocoGt.loadRes("detection_bbox_result.json") cocoEval = COCOeval(cocoGt, cocoDt, 'bbox') cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() if cfg.MASK_ON: with io.open("detection_segms_result.json", 'w') as outfile: encode_func = unicode if six.PY2 else str outfile.write(encode_func(json.dumps(segms_res))) print("start evaluate mask using coco api") cocoDt = cocoGt.loadRes("detection_segms_result.json") cocoEval = COCOeval(cocoGt, cocoDt, 'segm') cocoEval.evaluate() cocoEval.accumulate()