def worker(current_network, weight_file, dataset_dir):
    """Run detection inference over a dataset and return per-image results.

    Builds the network described by ``current_network``, loads weights from
    ``weight_file``, evaluates every image produced by the dataloader, and
    returns a list of ``{"pred_boxes": ..., "image_id": ...}`` dicts.
    """
    cfg = current_network.Cfg()
    cfg.backbone_pretrained = False
    model = current_network.Net(cfg)
    model.eval()

    checkpoint = mge.load(weight_file)
    # Checkpoints may wrap the weights under a "state_dict" key.
    if "state_dict" in checkpoint:
        checkpoint = checkpoint["state_dict"]
    model.load_state_dict(checkpoint)

    evaluator = DetEvaluator(model)
    test_loader = build_dataloader(dataset_dir, model.cfg)
    if dist.get_rank() == 0:
        # Only rank 0 renders a progress bar to avoid duplicated output.
        test_loader = tqdm(test_loader)

    results = []
    for batch in test_loader:
        image, im_info = DetEvaluator.process_inputs(
            batch[0][0],
            model.cfg.test_image_short_size,
            model.cfg.test_image_max_size,
        )
        predictions = evaluator.predict(
            image=mge.tensor(image), im_info=mge.tensor(im_info)
        )
        results.append({
            "pred_boxes": predictions,
            # Image id is parsed from the trailing digits of the file name.
            "image_id": int(batch[1][2][0].split(".")[0].split("_")[-1]),
        })
    return results
def main():
    """Run detection on a single image and write the visualization to results.jpg."""
    args = make_parser().parse_args()

    current_network = import_from_file(args.file)
    cfg = current_network.Cfg()
    cfg.backbone_pretrained = False
    model = current_network.Net(cfg)
    model.eval()

    weights = mge.load(args.weight_file)
    # Unwrap checkpoints saved as {"state_dict": ...}.
    if "state_dict" in weights:
        weights = weights["state_dict"]
    model.load_state_dict(weights)

    evaluator = DetEvaluator(model)
    ori_img = cv2.imread(args.image)
    image, im_info = DetEvaluator.process_inputs(
        ori_img.copy(),
        model.cfg.test_image_short_size,
        model.cfg.test_image_max_size,
    )
    pred_res = evaluator.predict(
        image=mge.tensor(image), im_info=mge.tensor(im_info)
    )
    res_img = DetEvaluator.vis_det(
        ori_img,
        pred_res,
        is_show_label=True,
        classes=data_mapper[cfg.test_dataset["name"]].class_names,
    )
    cv2.imwrite("results.jpg", res_img)
def worker(
    current_network,
    model_file,
    data_dir,
    worker_id,
    total_worker,
    result_queue,
):
    """Evaluate one shard of the dataset on a dedicated GPU and push results.

    :param current_network: module providing ``Cfg`` and ``Net``
    :param model_file: file of dump weights
    :param data_dir: the dataset directory
    :param worker_id: the index of the worker (also selects the GPU and shard)
    :param total_worker: number of gpu for evaluation
    :param result_queue: processing queue receiving per-image result dicts
    """
    # Pin this worker process to its own GPU.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(worker_id)

    # NOTE: `model` is assigned only below; the closure resolves it lazily at
    # call time, so the trace runs against the fully-built model.
    @jit.trace(symbolic=True)
    def val_func():
        pred = model(model.inputs)
        return pred

    cfg = current_network.Cfg()
    cfg.backbone_pretrained = False
    model = current_network.Net(cfg, batch_size=1)
    model.eval()
    evaluator = DetEvaluator(model)

    state_dict = mge.load(model_file)
    # Unwrap checkpoints saved as {"state_dict": ...}.
    if "state_dict" in state_dict:
        state_dict = state_dict["state_dict"]
    model.load_state_dict(state_dict)

    loader = build_dataloader(worker_id, total_worker, data_dir, model.cfg)
    for data_dict in loader:
        data, im_info = DetEvaluator.process_inputs(
            data_dict[0][0],
            model.cfg.test_image_short_size,
            model.cfg.test_image_max_size,
        )
        # Feed inputs through the model's static input placeholders, as
        # required by the traced `val_func`.
        model.inputs["im_info"].set_value(im_info)
        model.inputs["image"].set_value(data.astype(np.float32))
        pred_res = evaluator.predict(val_func)
        result_queue.put_nowait({
            "det_res": pred_res,
            # Image id parsed from the trailing digits of the file name
            # — presumably COCO-style naming; verify against the dataset.
            "image_id": int(data_dict[1][2][0].split(".")[0].split("_")[-1]),
        })
def worker(current_network, weight_file, dataset_dir, result_list, master_ip=None, port=None, world_size=1, rank=0):
    """Evaluate the model on one rank's shard of the dataset.

    In multi-GPU mode (``world_size > 1``) the process joins a distributed
    group and ``result_list`` is a queue; in single-process mode it is a
    plain list and a progress bar is shown.
    """
    if world_size > 1:
        # Each rank owns the device matching its rank index.
        dist.init_process_group(
            master_ip=master_ip,
            port=port,
            world_size=world_size,
            rank=rank,
            device=rank,
        )

    cfg = current_network.Cfg()
    cfg.backbone_pretrained = False
    model = current_network.Net(cfg)
    model.eval()

    weights = mge.load(weight_file)
    # Unwrap checkpoints saved as {"state_dict": ...}.
    if "state_dict" in weights:
        weights = weights["state_dict"]
    model.load_state_dict(weights)

    evaluator = DetEvaluator(model)
    test_loader = build_dataloader(dataset_dir, model.cfg)
    single_process = dist.get_world_size() == 1
    if single_process:
        test_loader = tqdm(test_loader)

    for batch in test_loader:
        image, im_info = DetEvaluator.process_inputs(
            batch[0][0],
            model.cfg.test_image_short_size,
            model.cfg.test_image_max_size,
        )
        pred_res = evaluator.predict(
            image=mge.tensor(image), im_info=mge.tensor(im_info)
        )
        record = {
            "det_res": pred_res,
            "image_id": int(batch[1][2][0].split(".")[0].split("_")[-1]),
        }
        if single_process:
            result_list.append(record)
        else:
            # Distributed mode: result_list is a multiprocessing queue.
            result_list.put_nowait(record)
def main():
    """Run single-image inference with a traced model and save the visualization.

    Loads the network module from ``args.file``, restores weights from
    ``args.weight_file``, runs detection on ``args.image``, and writes the
    annotated image to ``results.jpg``.
    """
    parser = make_parser()
    args = parser.parse_args()

    # NOTE: `model` is assigned only below; the closure resolves it lazily
    # at call time, after the model has been built.
    @jit.trace(symbolic=True)
    def val_func():
        pred = model(model.inputs)
        return pred

    # Import the network description module from its file path.
    sys.path.insert(0, os.path.dirname(args.file))
    current_network = importlib.import_module(
        os.path.basename(args.file).split(".")[0]
    )
    cfg = current_network.Cfg()
    cfg.backbone_pretrained = False
    model = current_network.Net(cfg, batch_size=1)
    model.eval()

    state_dict = mge.load(args.weight_file)
    # Unwrap checkpoints saved as {"state_dict": ...}.
    if "state_dict" in state_dict:
        state_dict = state_dict["state_dict"]
    model.load_state_dict(state_dict)
    # Fix: log completion AFTER the weights are actually loaded — the
    # original logged "completed" before any loading happened.
    logger.info("Load Model : %s completed", args.weight_file)

    evaluator = DetEvaluator(model)
    ori_img = cv2.imread(args.image)
    data, im_info = DetEvaluator.process_inputs(
        ori_img.copy(),
        model.cfg.test_image_short_size,
        model.cfg.test_image_max_size,
    )
    # Feed inputs through the model's static placeholders for the traced func.
    model.inputs["im_info"].set_value(im_info)
    model.inputs["image"].set_value(data.astype(np.float32))
    pred_res = evaluator.predict(val_func)
    res_img = DetEvaluator.vis_det(
        ori_img,
        pred_res,
        is_show_label=True,
        classes=COCO.class_names,
    )
    cv2.imwrite("results.jpg", res_img)
def detect_persons(self, image):
    """Run the detector on `image` and return bounding boxes of persons.

    Returns a list of ``(x1, y1, x2, y2)`` boxes for every detection whose
    class id maps to 1 ("person").
    """
    data, im_info = DetEvaluator.process_inputs(
        image.copy(),
        self.detector.cfg.test_image_short_size,
        self.detector.cfg.test_image_max_size,
    )
    # Feed inputs through the detector's static input placeholders.
    self.detector.inputs["im_info"].set_value(im_info)
    self.detector.inputs["image"].set_value(data.astype(np.float32))

    det_res = DetEvaluator(self.detector).predict(self.det_func)

    # Keep only detections of the "person" class (predicted label + 1 == 1).
    return [det[:4] for det in det_res if int(det[5] + 1) == 1]
def detect_persons(self, image):
    """Detect persons in `image`; return an (N, 5) array of NMS-kept boxes.

    Each row is ``(x1, y1, x2, y2, score)`` for a detection classified as
    "person" (class id 1), after non-maximum suppression.
    """
    data, im_info = DetEvaluator.process_inputs(
        image.copy(),
        self.detector.cfg.test_image_short_size,
        self.detector.cfg.test_image_max_size,
    )
    evaluator = DetEvaluator(self.detector)
    det_res = evaluator.predict(
        image=mge.tensor(data), im_info=mge.tensor(im_info)
    )

    # Collect (x1, y1, x2, y2, score) rows for the "person" class (id 1);
    # reshape handles the zero-detection case.
    persons = np.array(
        [det[:5] for det in det_res if int(det[5] + 1) == 1]
    ).reshape(-1, 5)

    # NOTE(review): `cfg` here is a bare name, not `self.detector.cfg` —
    # presumably a module-level config import; confirm it exists where this
    # method is defined.
    keep = py_cpu_nms(persons, cfg.nms_thr)
    return persons[keep]