def evaluate(model, data_loader, device, mAP_list=None):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = MetricLogger(delimiter="  ")
    header = "Test: "

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    for image, targets in metric_logger.log_every(data_loader, 100, header):
        image = list(img.to(device) for img in image)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        # skip GPU-related calls when running on the CPU
        if device != torch.device("cpu"):
            torch.cuda.synchronize(device)

        model_time = time.time()
        outputs = model(image)
        outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {target["image_id"].item(): output
               for target, output in zip(targets, outputs)}

        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)

    print_txt = coco_evaluator.coco_eval[iou_types[0]].stats
    coco_mAP = print_txt[0]  # AP @ IoU=0.50:0.95
    voc_mAP = print_txt[1]   # AP @ IoU=0.50
    if isinstance(mAP_list, list):
        mAP_list.append(voc_mAP)

    return coco_evaluator

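# A minimal sketch of how the mAP_list hook of the evaluate() variant above can
# be used to track mAP@0.5 across training epochs. `model`, `val_loader`,
# `num_epochs`, and `device` are placeholder names, not from the original code.
voc_mAP_history = []
for epoch in range(num_epochs):
    # ... train one epoch here ...
    evaluate(model, val_loader, device, mAP_list=voc_mAP_history)

best_epoch = voc_mAP_history.index(max(voc_mAP_history))
print("best mAP@0.5 = {:.3f} at epoch {}".format(voc_mAP_history[best_epoch], best_epoch))
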
def evaluate(model, data_loader, device):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    for images, targets in metric_logger.log_every(data_loader, 100, header):
        images = list(img.to(device) for img in images)

        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(images)
        outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {target["image_id"].item(): output
               for target, output in zip(targets, outputs)}

        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator

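# A one-shot usage sketch for the torchvision-style evaluate() above, assuming
# a pure detection model so that iou_types is ["bbox"]. `model` and
# `val_loader` are placeholder names; the no_grad wrapper is added here because
# this variant does not disable gradients itself.
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
model.to(device)
with torch.no_grad():
    coco_evaluator = evaluate(model, val_loader, device=device)

stats = coco_evaluator.coco_eval["bbox"].stats
print("mAP@[.50:.95] = {:.3f}, mAP@0.5 = {:.3f}".format(stats[0], stats[1]))
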
def evaluate(model, data_loader, device, data_set=None, mAP_list=None):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = "Test: "

    if data_set is None:
        data_set = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(data_set, iou_types)

    for images, targets in metric_logger.log_every(data_loader, 100, header):
        images = torch.stack(images, dim=0).to(device)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        # skip GPU-related calls when running on the CPU
        if device != torch.device("cpu"):
            torch.cuda.synchronize(device)

        model_time = time.time()
        # list((bboxes_out, labels_out, scores_out), ...)
        results = model(images, targets)

        outputs = []
        for index, (bboxes_out, labels_out, scores_out) in enumerate(results):
            # convert the boxes' relative coordinates (0-1) to absolute
            # coordinates (xmin, ymin, xmax, ymax)
            height_width = targets[index]["height_width"]  # e.g. [300, 300]
            bboxes_out[:, [0, 2]] = bboxes_out[:, [0, 2]] * height_width[1]
            bboxes_out[:, [1, 3]] = bboxes_out[:, [1, 3]] * height_width[0]

            info = {"boxes": bboxes_out.to(cpu_device),
                    "labels": labels_out.to(cpu_device),
                    "scores": scores_out.to(cpu_device)}
            outputs.append(info)
        model_time = time.time() - model_time

        res = {targets[index]["image_id"].item(): outputs[index]
               for index in range(len(outputs))}

        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)

    print_txt = coco_evaluator.coco_eval[iou_types[0]].stats
    coco_mAP = print_txt[0]  # AP @ IoU=0.50:0.95
    voc_mAP = print_txt[1]   # AP @ IoU=0.50
    if isinstance(mAP_list, list):
        mAP_list.append(voc_mAP)

def main(parser_data):
    device = torch.device(parser_data.device if torch.cuda.is_available() else "cpu")
    print("Using {} device training.".format(device.type))

    data_transform = {"val": transforms.Compose([transforms.ToTensor()])}

    # read class_indict
    label_json_path = './pascal_voc_classes.json'
    assert os.path.exists(label_json_path), "json file {} does not exist.".format(label_json_path)
    with open(label_json_path, 'r') as json_file:
        class_dict = json.load(json_file)
    category_index = {v: k for k, v in class_dict.items()}

    VOC_root = parser_data.data_path
    # check voc root
    if os.path.exists(os.path.join(VOC_root, "VOCdevkit")) is False:
        raise FileNotFoundError("VOCdevkit does not exist in path: '{}'.".format(VOC_root))

    # note: collate_fn is custom here because each sample contains both an image
    # and its targets, which the default collate cannot batch together
    batch_size = parser_data.batch_size
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])  # number of workers
    print('Using %g dataloader workers' % nw)

    # load validation data set
    val_data_set = VOC2007DataSet(VOC_root, data_transform["val"], "val.txt")
    val_data_set_loader = torch.utils.data.DataLoader(val_data_set,
                                                      batch_size=batch_size,
                                                      shuffle=False,
                                                      num_workers=nw,
                                                      collate_fn=val_data_set.collate_fn)

    # create model: num_classes equals background + 20 classes
    backbone = resnet50_fpn_backbone()
    model = FasterRCNN(backbone=backbone, num_classes=parser_data.num_classes + 1)

    # load your own trained model weights
    weights_path = parser_data.weights
    assert os.path.exists(weights_path), "not found {} file.".format(weights_path)
    weights_dict = torch.load(weights_path, map_location=device)
    model.load_state_dict(weights_dict['model'])
    # print(model)
    model.to(device)

    # evaluate on the test dataset
    coco = get_coco_api_from_dataset(val_data_set)
    iou_types = ["bbox"]
    coco_evaluator = CocoEvaluator(coco, iou_types)
    cpu_device = torch.device("cpu")

    model.eval()
    with torch.no_grad():
        for image, targets in tqdm(val_data_set_loader, desc="validation..."):
            # move the images to the specified device
            image = list(img.to(device) for img in image)

            # inference
            outputs = model(image)
            outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
            res = {target["image_id"].item(): output
                   for target, output in zip(targets, outputs)}
            coco_evaluator.update(res)

    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()

    coco_eval = coco_evaluator.coco_eval["bbox"]
    # calculate COCO info for all classes
    coco_stats, print_coco = summarize(coco_eval)

    # calculate voc info for every class (IoU=0.5)
    voc_map_info_list = []
    for i in range(len(category_index)):
        stats, _ = summarize(coco_eval, catId=i)
        voc_map_info_list.append(" {:15}: {}".format(category_index[i + 1], stats[1]))

    print_voc = "\n".join(voc_map_info_list)
    print(print_voc)

    # save the validation results to a txt file
    with open("record_mAP.txt", "w") as f:
        record_lines = ["COCO results:",
                        print_coco,
                        "",
                        "mAP(IoU=0.5) for each category:",
                        print_voc]
        f.write("\n".join(record_lines))

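# A hedged sketch of the argparse setup that main() above expects. The flag
# names mirror the parser_data attributes the function reads (device,
# data_path, batch_size, num_classes, weights); the defaults are illustrative
# assumptions, not values from the original repository.
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Validate Faster R-CNN on Pascal VOC")
    parser.add_argument('--device', default='cuda', help='device to use, e.g. cuda or cpu')
    parser.add_argument('--data-path', default='./', help='root directory containing VOCdevkit')
    parser.add_argument('--weights', default='./save_weights/model.pth', help='path to the trained checkpoint')
    parser.add_argument('--batch-size', type=int, default=1, help='validation batch size')
    parser.add_argument('--num-classes', type=int, default=20, help='number of foreground classes (background excluded)')
    args = parser.parse_args()  # dashes in flag names become underscores: args.data_path etc.
    main(args)
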
def evaluate(model, data_loader, coco=None, device=None):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = "Test: "

    if coco is None:
        coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    for imgs, targets, paths, _, img_index in metric_logger.log_every(data_loader, 100, header):
        imgs = imgs.to(device).float() / 255.0  # uint8 to float32, 0-255 to 0.0-1.0

        # skip GPU-related calls when running on the CPU
        if device != torch.device("cpu"):
            torch.cuda.synchronize(device)

        model_time = time.time()
        pred = model(imgs)[0]  # only get inference result
        pred = non_max_suppression(pred, conf_thres=0.001, iou_thres=0.6, multi_label=False)

        outputs = []
        for index, p in enumerate(pred):
            if p is None:
                p = torch.empty((0, 6), device=cpu_device)
                boxes = torch.empty((0, 4), device=cpu_device)
            else:
                # xmin, ymin, xmax, ymax
                boxes = p[:, :4]

            # note: the boxes passed here must be in (xmin, ymin, xmax, ymax)
            # format and in absolute coordinates
            info = {"boxes": boxes.to(cpu_device),
                    "labels": p[:, 5].to(device=cpu_device, dtype=torch.int64),
                    "scores": p[:, 4].to(cpu_device)}
            outputs.append(info)
        model_time = time.time() - model_time

        res = {img_id: output for img_id, output in zip(img_index, outputs)}

        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)

    result_info = coco_evaluator.coco_eval[iou_types[0]].stats
    return result_info

def evaluate(self, data_loader, coco=None, device=None):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    if not device:
        device = torch.device("cpu")
    self.model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = "Test: "

    if coco is None:
        coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(self.model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    log_every = metric_logger.log_every(data_loader, 100, header)
    for images, targets, paths, shapes, img_index in log_every:
        images = images.to(device).float() / 255.0  # uint8 to float32, 0-255 to 0.0-1.0

        # skip GPU-related calls when running on the CPU
        if device != torch.device("cpu"):
            torch.cuda.synchronize(device)

        model_time = time.time()
        pred = self.model(images)[0]  # only get inference result
        pred = non_max_suppression(pred, conf_thres=0.001, iou_thres=0.6, multi_label=False)

        outputs = []
        for index, pred_i in enumerate(pred):
            if pred_i is None:
                pred_i = torch.empty((0, 6), device=device)
                boxes = torch.empty((0, 4), device=device)
            else:
                boxes = pred_i[:, :4]  # l, t, r, b
                # shapes: (h0, w0), ((h / h0, w / w0), pad)
                # rescale the boxes back to the original image scale,
                # otherwise the computed mAP would be inaccurate
                boxes = scale_coordinates(boxes, images[index].shape[1:], shapes[index]).round()
                image = images[index]
                self.img_show(image, boxes)

            # note: the boxes passed here must be (l, t, r, b)
            # absolute coordinates
            info = {"boxes": boxes.to(device),
                    "labels": pred_i[:, 5].to(device=device, dtype=torch.int64),
                    "scores": pred_i[:, 4].to(device)}
            outputs.append(info)
        model_time = time.time() - model_time

        res = {img_id: output for img_id, output in zip(img_index, outputs)}

        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)

    result_info = coco_evaluator.coco_eval[iou_types[0]].stats.tolist()  # numpy to list
    return result_info

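# For reference, a small helper that labels the 12-element COCO stats vector
# returned as result_info by the two YOLO-style evaluate() variants above. The
# index meanings are fixed by pycocotools' COCOeval.summarize(); the helper
# name itself is illustrative.
COCO_STAT_NAMES = [
    "AP@[.50:.95]", "AP@.50", "AP@.75",
    "AP small", "AP medium", "AP large",
    "AR maxDets=1", "AR maxDets=10", "AR maxDets=100",
    "AR small", "AR medium", "AR large",
]

def print_coco_stats(result_info):
    # result_info: sequence of 12 floats from coco_eval.stats
    for name, value in zip(COCO_STAT_NAMES, result_info):
        print("{:15s}: {:.4f}".format(name, value))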