def val_part(my_model, images_val, targets_val, this_dataset):
    my_model.eval()
    images_val_2 = [img.to(device) for img in images_val]
    targets_val_2 = [{k: v.to(device) for k, v in t.items()} for t in targets_val]
    targets_val_3 = [{k: v.tolist() for k, v in t.items()} for t in targets_val]
    # Replace each numeric image_id with the corresponding image path.
    for i in range(len(targets_val_3)):
        targets_val_3[i].update(
            {'image_id': [this_dataset.img_path_list[targets_val_3[i]['image_id'][0]]['image_path']]})
    val_dict = my_model(images_val_2, targets_val_2)
    # 1. Convert the model output into the format returned by predict_all()  [done]
    # 2. Call eval_one_batch                                                 [done]
    # 3. Print the evaluation results and save the best mAP
    preds = []
    for i in range(len(val_dict)):
        for j in range(len(val_dict[i]['scores'])):
            preds.append([
                targets_val_3[i]['image_id'][0],
                val_dict[i]['scores'][j],
                val_dict[i]['boxes'][j][0].cpu().detach().item(),
                val_dict[i]['boxes'][j][1].cpu().detach().item(),
                val_dict[i]['boxes'][j][2].cpu().detach().item(),
                val_dict[i]['boxes'][j][3].cpu().detach().item(),
            ])
            # pred['labels'][j] - 1 aligns predictions with the labels:
            # 0 = not wearing, 1 = wearing
    # eval_one_batch(eval_labels)
    # Compute the final score
    sum_ap = 0
    all_labels = list(range(2))  # all target classes
    for label in all_labels:  # compute AP class by class
        if len(val_dict) != 0:  # only compute AP when there are predicted boxes
            rec, prec, ap = voc_eval(targets_val_3, preds, label)
        else:
            ap = 0
        sum_ap += ap
    mean_ap = sum_ap / len(all_labels)  # renamed to avoid shadowing the built-in map()
    print('mAP is %.2f' % mean_ap)
    return mean_ap
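# A minimal sketch (not from the original source) of how val_part might be
# driven from a validation DataLoader and used to checkpoint the best mAP.
# `device`, `voc_eval`, and `val_dataset.img_path_list` are assumed to exist
# in the surrounding script; the collate_fn mirrors the tuple-of-lists batch
# format that torchvision-style detection models expect.
val_loader = torch.utils.data.DataLoader(
    val_dataset, batch_size=4, shuffle=False,
    collate_fn=lambda batch: tuple(zip(*batch)))

best_map = 0.0
with torch.no_grad():
    for images_val, targets_val in val_loader:
        batch_map = val_part(model, images_val, targets_val, val_dataset)
        if batch_map > best_map:
            best_map = batch_map
            torch.save(model.state_dict(), 'best_map.pth')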
def do_python_eval(use_07=True):
    devkit_path = VOCroot + 'VOC2007'
    cachedir = os.path.join(devkit_path, 'annotations_cache')
    aps = []
    # The PASCAL VOC metric changed in 2010.
    use_07_metric = use_07
    if not os.path.isdir(output_dir):
        os.mkdir(output_dir)
    for i, cls in enumerate(labelmap):
        filename = 'tmp/det_test_%s.txt' % cls
        rec, prec, ap = voc_eval(
            filename, annopath, imgsetpath.format('test'), cls, cachedir,
            ovthresh=0.5, use_07_metric=use_07_metric)
        aps += [ap]
        # print('AP for {} = {:.4f}'.format(cls, ap))
        # with open(os.path.join(output_dir, cls + '_pr.pkl'), 'wb') as f:
        #     pickle.dump({'rec': rec, 'prec': prec, 'ap': ap}, f)
    return np.mean(aps)
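# do_python_eval delegates the AP computation to voc_eval via use_07_metric.
# For reference, a sketch of the standard voc_ap helper in the style of
# py-faster-rcnn evaluation code (which this snippet follows): the 2007
# metric averages precision sampled at 11 recall points, while the later
# metric integrates the area under the interpolated precision/recall curve.
import numpy as np

def voc_ap(rec, prec, use_07_metric=True):
    if use_07_metric:
        # 11-point interpolation (PASCAL VOC 2007)
        ap = 0.
        for t in np.arange(0., 1.1, 0.1):
            p = 0 if np.sum(rec >= t) == 0 else np.max(prec[rec >= t])
            ap += p / 11.
    else:
        # Exact area under the monotonically decreasing PR envelope
        mrec = np.concatenate(([0.], rec, [1.]))
        mpre = np.concatenate(([0.], prec, [0.]))
        for i in range(mpre.size - 1, 0, -1):
            mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
        idx = np.where(mrec[1:] != mrec[:-1])[0]
        ap = np.sum((mrec[idx + 1] - mrec[idx]) * mpre[idx + 1])
    return ap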
def valmodel(model, path, dboxes, trans, encoder):
    od = torch.load(path)
    model.load_state_dict(od["model"])
    model.eval()
    model.cuda()
    lm = od["label_map"]

    img_folder = "../../VOCdevkit/VOC2007/JPEGImages"
    ann_folder = "../../VOCdevkit/VOC2007/Annotations"
    tgt_folder = "../../VOCdevkit/VOC2007/ImageSets/Main/test.txt"
    vd = VOCDetection(img_folder, ann_folder, tgt_folder, label_map=lm,
                      transform=trans)

    # Start from a clean directory for the per-class prediction files.
    if os.path.exists("pr_data"):
        shutil.rmtree("pr_data")
    os.mkdir("pr_data")

    img_info = [[] for _ in range(21)]
    end = time.time()
    for idx, fname in enumerate(vd.images):
        print("Parse image: {}/{}".format(idx + 1, len(vd)), end="\r")
        img, (h, w), bbox, label = vd[idx]
        with torch.no_grad():
            ploc, plabel = model(img.unsqueeze(0).cuda())
            try:
                result = encoder.decode_batch(ploc, plabel, 0.50, 200)[0]
            except Exception:
                print("No object detected in idx: {}".format(idx), end="\r")
                continue
            loc, label, prob = [r.cpu().numpy() for r in result]
            for loc_, label_, prob_ in zip(loc, label, prob):
                img_info[label_].append((fname[0].split(".")[0], prob_,
                                         loc_[0] * w, loc_[1] * h,
                                         loc_[2] * w, loc_[3] * h))
    print("")
    print("Test: total time elapsed: {:.3f}".format(time.time() - end))

    for i in range(1, 21):
        fn = "pr_data/pred_" + vd.label_map[i] + ".txt"
        with open(fn, "w") as fout:
            for rec in img_info[i]:
                fout.write("{} {:.4f} {:.1f} {:.1f} {:.1f} {:.1f}\n".format(*rec))

    from eval import voc_eval
    import glob
    s = 0
    files = glob.glob("./pr_data/pred_*.txt")
    files.sort()
    for f in files:
        name = (f.split("_")[-1]).split(".")[0]
        r = voc_eval(f, annopath=os.path.join(ann_folder, "%s.xml"),
                     imagesetfile=vd.file_filter, classname=name,
                     cachedir="./cache", ovthresh=0.45)
        s += r[-1]
    s /= 20
    print('mAP {:.3f}'.format(s))
    return s
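# Hypothetical invocation of valmodel, modeled on SSD300-style reference
# code; dboxes300_coco, Encoder, SSDTransformer, and SSD300 are assumptions
# about the surrounding repository, not names confirmed by this snippet.
dboxes = dboxes300_coco()
encoder = Encoder(dboxes)
trans = SSDTransformer(dboxes, (300, 300), val=True)
ssd300 = SSD300(21)  # 20 VOC classes + background
mAP = valmodel(ssd300, "models/checkpoint.pt", dboxes, trans, encoder)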
dets = all_boxes[0 + 1][0]
if len(dets) != 0:
    # the VOCdevkit expects 1-based indices
    for k in range(dets.shape[0]):
        f.write('{:s} {:.3f} {:.1f} {:.1f} {:.1f} {:.1f}\n'.format(
            index[1], dets[k, -1],
            dets[k, 0] + 1, dets[k, 1] + 1,
            dets[k, 2] + 1, dets[k, 3] + 1))

anno_path = 'vis/Annotations/%s.xml'
image_set_file = 'vis/Annotations/images'
classname = 'dog'
cachedir = 'vis/Annotations/'
_, _, ap = voc_eval(det_text, anno_path, image_set_file.format('test'),
                    classname, cachedir)
if iteration % 10 == 0:
    print('AP: ' + str(ap))
# NOTE: voc_eval returns a plain float, so AP is not differentiable;
# the original ap.backward()/optimizer.step() calls would fail and are
# left disabled here.
# ap.backward()
# optimizer.step()

# backprop
optimizer.zero_grad()
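# Inside voc_eval, each detection is matched against the ground-truth boxes
# by IoU and counted as a true positive only above ovthresh. A sketch of
# that overlap computation (standard VOC-devkit logic, including the +1
# pixel convention that matches the 1-based indices above; names here are
# illustrative):
import numpy as np

def voc_iou(det_box, gt_boxes):
    # det_box: [xmin, ymin, xmax, ymax]; gt_boxes: (N, 4) array
    ixmin = np.maximum(gt_boxes[:, 0], det_box[0])
    iymin = np.maximum(gt_boxes[:, 1], det_box[1])
    ixmax = np.minimum(gt_boxes[:, 2], det_box[2])
    iymax = np.minimum(gt_boxes[:, 3], det_box[3])
    iw = np.maximum(ixmax - ixmin + 1., 0.)
    ih = np.maximum(iymax - iymin + 1., 0.)
    inters = iw * ih
    union = ((det_box[2] - det_box[0] + 1.) * (det_box[3] - det_box[1] + 1.) +
             (gt_boxes[:, 2] - gt_boxes[:, 0] + 1.) *
             (gt_boxes[:, 3] - gt_boxes[:, 1] + 1.) - inters)
    return inters / union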
def test(cfg, weights, batch_size=16, img_size=416, iou_thres=0.5,
         conf_thres=0.001, nms_thres=0.5, model=None):
    if model is None:
        device = torch_utils.select_device()
        # Initialize model
        model = Darknet(cfg, img_size).to(device)
        # Load weights
        if weights.endswith('.pt'):  # pytorch format
            model.load_state_dict(torch.load(weights, map_location=device)['model'], strict=False)
        else:  # darknet format
            _ = load_darknet_weights(model, weights)
    else:
        device = next(model.parameters()).device  # get model device

    # Get dataloader
    vocset = VOCDetection(root=os.path.expanduser(os.path.join('~', 'data', 'VOCdevkit')),
                          splits=((2007, 'test'),), img_size=img_size, mode='test')
    dataloader = torch.utils.data.DataLoader(vocset, batch_size=batch_size,
                                             num_workers=8, collate_fn=vocset.collate_fn)
    nC = vocset.num_class  # number of classes
    classes = vocset.classes
    det_results_path = os.path.join('eval', 'results', 'VOC2007', 'Main')
    if os.path.exists(det_results_path):
        shutil.rmtree(det_results_path)
    os.makedirs(det_results_path)

    model.eval()
    seen = 0
    pbar = tqdm(total=len(dataloader) * batch_size, desc='Computing mAP')
    for batch_i, (imgs, targets, shapes, img_paths) in enumerate(dataloader):
        output, _ = model(imgs.to(device))
        # nms
        output = nms(output, conf_thres, nms_thres, method='nms')
        for si, detections in enumerate(output):
            seen += 1
            if len(detections) == 0:
                continue
            # Rescale boxes from img_size to true image size (in place)
            scale_coords(img_size, detections[:, :4], shapes[si]).round()
            image_ind = os.path.split(img_paths[si])[-1][:-4]
            for bbox in detections:
                coor = bbox[:4].tolist()  # plain floats, not 0-dim tensors
                score = '%.4f' % bbox[4]
                class_ind = int(bbox[5])
                class_name = classes[class_ind]
                xmin, ymin, xmax, ymax = map(str, coor)
                bbox_mess = ' '.join([image_ind, score, xmin, ymin, xmax, ymax]) + '\n'
                with open(os.path.join(det_results_path,
                                       'comp3_det_test_' + class_name + '.txt'), 'a') as f:
                    f.write(bbox_mess)
        pbar.update(batch_size)
    pbar.close()

    filename = os.path.join('eval', 'results', 'VOC2007', 'Main', 'comp3_det_test_{:s}.txt')
    cachedir = os.path.join('eval', 'cache')
    annopath = os.path.join(vocset._root, 'VOC2007', 'Annotations', '{:s}.xml')
    imagesetfile = os.path.join(vocset._root, 'VOC2007', 'ImageSets', 'Main', 'test.txt')
    APs = {}
    for i, cls in enumerate(classes):
        rec, prec, ap = voc_eval(filename, annopath, imagesetfile, cls,
                                 cachedir, iou_thres, False)
        APs[cls] = ap
    # if os.path.exists(cachedir):
    #     shutil.rmtree(cachedir)
    mAP = np.mean([APs[cls] for cls in APs])
    return APs, mAP
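# Hypothetical driver for test(); the cfg/weights paths are placeholders,
# not files confirmed by this snippet.
if __name__ == '__main__':
    APs, mAP = test('cfg/yolov3-voc.cfg', 'weights/best.pt')
    for cls_name, ap in APs.items():
        print('{:<20s} {:.4f}'.format(cls_name, ap))
    print('mAP: {:.4f}'.format(mAP))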
                val_dict[i]['boxes'][j][3].cpu().detach().item(),
            ])
            # pred['labels'][j] - 1 aligns predictions with the labels:
            # 0 = not wearing, 1 = wearing
    # eval_one_batch(eval_labels)
    # Compute the final score
    sum_ap = 0
    all_labels = list(range(2))  # all target classes
    for label in all_labels:  # compute AP class by class
        # prediction1 = []
        # # When computing AP, filter the predictions by their final
        # # predicted class:
        # for pred in eval_labels:
        #     if pred[3] == label:
        #         prediction1.append([pred[0], pred[1], pred[2][0], pred[2][1], pred[2][2], pred[2][3]])
        if len(val_dict) != 0:  # only compute AP when there are predicted boxes
            rec, prec, ap = voc_eval(targets_val_3, preds, label)
        else:
            ap = 0
        sum_ap += ap
    mean_ap = sum_ap / len(all_labels)  # renamed to avoid shadowing the built-in map()

    result = dict()
    result['score'] = round(mean_ap * 100, 2)
    result['label'] = "The score is mAP."
    result['info'] = ""
    print(json.dumps(result))

    # etc.
    # losses = sum(loss for loss in loss_dict.values())
    # optimizer.zero_grad()