def val(valloader, yolo, save_img=True):
    """Validation wrapper

    @Args
      valloader: (Dataloader) validation data loader
      yolo: (nn.Module) YOLOv3 model
      save_img: (bool) whether to save images during validation
    """
    mAPs = []
    tbar = tqdm(valloader, ncols=80)
    for batch_idx, (names, inputs, targets) in enumerate(tbar):
        inputs = inputs.cuda()
        detections = yolo(inputs)
        mAP_batch = mAP(detections, targets, args.reso)
        mAPs += mAP_batch
        tbar.set_description("mAP=%.2f" % (np.mean(mAPs) * 100))

        if save_img and batch_idx % 4 == 0:
            img_path = opj(config.datasets[args.dataset]['val_imgs'], names[0])
            img_name = img_path.split('/')[-1]
            try:
                # keep only detections belonging to the first image in the batch
                detection = detections[detections[:, 0] == 0]
            except Exception:
                # indexing failed (no detections): fall back to the raw image
                img = Image.open(img_path)
            else:
                img = draw_detection(img_path, detection, yolo.reso, type='pred')
            img.save(opj(config.evaluate['result_dir'], img_name))

    return mAPs

def _calc_map(self, x, y, pred):
    self.anchor_boxes = self.anchor_boxes.to(self.device)
    # objectness mask: 1 where a box is predicted, 0 elsewhere
    exist_mask = torch.round(torch.sigmoid(pred[..., 4:5]))
    cell_idx = torch.arange(13, device=self.device)
    # decode raw outputs into box centre/size on the 13x13 grid:
    # bx = sigmoid(tx) + cx, by = sigmoid(ty) + cy, bw = pw * exp(tw), bh = ph * exp(th)
    bx = (exist_mask * torch.sigmoid(pred[..., 0:1])
          + exist_mask * cell_idx.view([1, 1, -1, 1, 1]))
    by = (exist_mask * torch.sigmoid(pred[..., 1:2])
          + exist_mask * cell_idx.view([1, -1, 1, 1, 1]))
    bw = (exist_mask * self.anchor_boxes[:, 2].view([1, 1, 1, -1, 1])
          * exist_mask * torch.exp(pred[..., 2:3]))
    bh = (exist_mask * self.anchor_boxes[:, 3].view([1, 1, 1, -1, 1])
          * exist_mask * torch.exp(pred[..., 3:4]))
    pred[..., :4] = torch.cat([bx, by, bw, bh], dim=-1)

    pred_boxes, target_boxes = get_bboxes(
        x=x,
        y=y,
        predictions=pred,
        iou_threshold=0.45,
        threshold=0.005,
        S=self.S,
        B=self.B,
        device=self.device,
    )
    mean_avg_prec = mAP(pred_boxes, target_boxes, iou_threshold=0.5)
    return mean_avg_prec

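# Worked example of the decode used in _calc_map above. All numbers here are
# made up for illustration; only the formulas come from the code.
import math

tx, ty, tw, th = 0.2, -0.3, 0.5, 0.1   # raw network outputs for one box (assumed)
cx, cy = 5, 7                          # grid cell indices (assumed)
pw, ph = 3.625, 2.8125                 # anchor width/height in grid cells (assumed)

bx = 1 / (1 + math.exp(-tx)) + cx      # ~5.55: centre x in grid units
by = 1 / (1 + math.exp(-ty)) + cy      # ~7.43: centre y in grid units
bw = pw * math.exp(tw)                 # ~5.98: width in grid units
bh = ph * math.exp(th)                 # ~3.11: height in grid units
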
def _calc_map(self, y, pred):
    # decode each detection scale separately; anchors are given in pixels on a
    # 416x416 input, so divide by the stride to convert them to grid units
    small_preprocessed_pred = self._preprocess(
        pred[0], self.anchor_boxes[6:9] / (416 / 13), S=13)
    medium_preprocessed_pred = self._preprocess(
        pred[1], self.anchor_boxes[3:6] / (416 / 26), S=26)
    large_preprocessed_pred = self._preprocess(
        pred[2], self.anchor_boxes[:3] / (416 / 52), S=52)

    pred_boxes, target_boxes = get_bboxes(
        y=y,
        predictions=(
            small_preprocessed_pred,
            medium_preprocessed_pred,
            large_preprocessed_pred,
        ),
        iou_threshold=0.5,
        threshold=0.5,
        S=[13, 26, 52],
        B=3,
        device=self.device,
    )
    mean_avg_prec = mAP(pred_boxes, target_boxes, iou_threshold=0.5)
    return mean_avg_prec

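# Illustration of the anchor rescaling done in _calc_map above: pixel anchors
# on a 416x416 input are divided by the stride of each grid. The anchor value
# below is an assumption, not taken from this repo.
anchor_px = (116.0, 90.0)                 # a typical coarse-scale anchor, in pixels (assumed)
stride = 416 / 13                         # 32 pixels per cell on the 13x13 grid
anchor_cells = (anchor_px[0] / stride, anchor_px[1] / stride)
print(anchor_cells)                       # (3.625, 2.8125): width/height in grid cells
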
def main():
    model = Yolov3(num_classes=config.NUM_CLASSES).to(config.DEVICE)
    optimizer = optim.Adam(model.parameters(),
                           lr=config.LEARNING_RATE,
                           weight_decay=config.WEIGHT_DECAY)
    loss_fn = YoloLoss()
    scaler = torch.cuda.amp.GradScaler()

    train_loader, test_loader, train_eval_loader = get_loaders(
        train_csv_path=config.DATASET + "/train.csv",
        test_csv_path=config.DATASET + "/test.csv")

    if config.LOAD_MODEL:
        load_checkpoint(config.CHECKPOINT_FILE, model, optimizer,
                        config.LEARNING_RATE)

    # rescale the normalized anchors to each grid size in config.S
    scaled_anchors = (torch.tensor(config.ANCHORS) * torch.tensor(
        config.S).unsqueeze(1).unsqueeze(1).repeat(1, 3, 2)).to(config.DEVICE)

    for epoch in range(config.NUM_EPOCHS):
        # plot_couple_examples(model, test_loader, 0.6, 0.5, scaled_anchors)
        train_fn(train_loader, model, optimizer, loss_fn, scaler, scaled_anchors)

        if config.SAVE_MODEL:
            save_checkpoint(model, optimizer, filename="checkpoint.pth.tar")

        # print(f"Currently epoch {epoch}")
        # print("On Train Eval loader:")
        # check_class_accuracy(model, train_eval_loader, threshold=config.CONF_THRESHOLD)
        # print("On Train loader:")
        # check_class_accuracy(model, train_loader, threshold=config.CONF_THRESHOLD)

        if epoch % 10 == 0 and epoch > 0:
            print("On Test loader:")
            check_class_accuracy(model, test_loader,
                                 threshold=config.CONF_THRESHOLD)
            pred_boxes, true_boxes = get_evaluation_bboxes(
                test_loader,
                model,
                iou_threshold=config.NMS_IOU_THRESH,
                anchors=config.ANCHORS,
                threshold=config.CONF_THRESHOLD,
            )
            mapval = mAP(
                pred_boxes,
                true_boxes,
                iou_threshold=config.MAP_IOU_THRESH,
                box_format="midpoint",
                num_classes=config.NUM_CLASSES,
            )
            print(f"MAP: {mapval.item()}")

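# A minimal sketch of the config values main() appears to assume and of the
# scaled_anchors broadcast it performs. The anchor priors and grid sizes below
# are illustrative assumptions, not values taken from this repo.
import torch

ANCHORS = [  # three (w, h) priors per scale, normalized to [0, 1]
    [(0.28, 0.22), (0.38, 0.48), (0.90, 0.78)],   # coarse 13x13 scale
    [(0.07, 0.15), (0.15, 0.11), (0.14, 0.29)],   # medium 26x26 scale
    [(0.02, 0.03), (0.04, 0.07), (0.08, 0.06)],   # fine 52x52 scale
]
S = [13, 26, 52]

# (3, 3, 2) anchors * (3, 1, 1) grid sizes repeated to (3, 3, 2) -> anchors in grid units
scaled_anchors = torch.tensor(ANCHORS) * torch.tensor(S).unsqueeze(1).unsqueeze(1).repeat(1, 3, 2)
print(scaled_anchors.shape)  # torch.Size([3, 3, 2])
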
def _calc_map(self, x, y, pred):
    pred_boxes, target_boxes = get_bboxes(
        x=x,
        y=y,
        predictions=pred,
        iou_threshold=0.5,
        threshold=0.4,
        S=self.S,
        device=self.device,
    )
    mean_avg_prec = mAP(pred_boxes, target_boxes, iou_threshold=0.5)
    return mean_avg_prec

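# Every _calc_map variant above delegates to an external
# mAP(pred_boxes, target_boxes, iou_threshold=...) helper. Below is a minimal
# sketch of the IoU computation such a helper typically relies on, written for
# "midpoint" boxes (cx, cy, w, h); this is an assumption about its internals,
# not the repo's actual implementation.
import torch

def iou_midpoint(boxes1, boxes2):
    """boxes1, boxes2: [..., 4] tensors in (cx, cy, w, h) format."""
    # convert centre/size to corner coordinates
    b1_x1 = boxes1[..., 0:1] - boxes1[..., 2:3] / 2
    b1_y1 = boxes1[..., 1:2] - boxes1[..., 3:4] / 2
    b1_x2 = boxes1[..., 0:1] + boxes1[..., 2:3] / 2
    b1_y2 = boxes1[..., 1:2] + boxes1[..., 3:4] / 2
    b2_x1 = boxes2[..., 0:1] - boxes2[..., 2:3] / 2
    b2_y1 = boxes2[..., 1:2] - boxes2[..., 3:4] / 2
    b2_x2 = boxes2[..., 0:1] + boxes2[..., 2:3] / 2
    b2_y2 = boxes2[..., 1:2] + boxes2[..., 3:4] / 2

    # intersection rectangle, clamped to zero when the boxes do not overlap
    inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \
            (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0)
    area1 = (b1_x2 - b1_x1) * (b1_y2 - b1_y1)
    area2 = (b2_x2 - b2_x1) * (b2_y2 - b2_y1)
    return inter / (area1 + area2 - inter + 1e-6)
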
def val(valloader, yolo):
    """Validation wrapper

    @Args
      valloader: (Dataloader) validation data loader
      yolo: (nn.Module) YOLOv3 model
    """
    yolo.eval()
    mAPs = []
    tbar = tqdm(valloader, ncols=80)
    tbar.set_description('validation')
    for batch_idx, (names, inputs, targets) in enumerate(tbar):
        inputs = inputs.cuda()
        start_time = time.time()
        detections = yolo(inputs)
        forward_time = time.time()
        loss, cache = yolo.loss(targets)
        mAP_batch = mAP(detections, targets, args.reso)
        mAPs += mAP_batch
        tbar.set_description("mAP=%.2f" % (np.mean(mAPs) * 100))

    return loss['total'], np.mean(mAPs)

def test(self):
    mAP_predicts = []
    mAP_ground_truths = []
    self.trainer.use_pretrain(opt.saved_model_path)
    torch_normalize = transforms.Compose(
        [transforms.ToTensor(),
         transforms.Normalize(opt.mean, opt.std)])

    with torch.no_grad():
        for i, data in tqdm(enumerate(self.VocTestDataLoader)):
            ori_img, resized_img, true_boxes, true_labels = data
            img = torch_normalize(resized_img[0].numpy())
            img = img.unsqueeze(dim=0).to(opt.device)

            # boxes shape: [1, (13*13+26*26+52*52)*3, 4]
            # confs shape: [1, (13*13+26*26+52*52)*3, 1]
            # probs shape: [1, (13*13+26*26+52*52)*3, 80]
            boxes, confs, probs = self.trainer.predict(img)
            boxes = boxes.squeeze(0)    # [(13*13+26*26+52*52)*3, 4]
            confs = confs.squeeze(0)    # [(13*13+26*26+52*52)*3, 1]
            probs = probs.squeeze(0)    # [(13*13+26*26+52*52)*3, 80]
            scores = confs * probs      # [(13*13+26*26+52*52)*3, 80]

            # per-class NMS
            box_output, score_output, label_output = each_class_nms(
                boxes, scores, opt.score_threshold, opt.iou_threshold,
                opt.max_boxes_num, self.img_size)

            _, ratio, dh, dw = letter_resize(ori_img.numpy()[0], [416, 416])
            if len(box_output) != 0:
                plot_dict = {
                    'img': ori_img.cpu().numpy()[0],
                    'ratio': ratio,
                    'dh': dh,
                    'dw': dw,
                    'pred_box': box_output.cpu().numpy(),
                    'pred_score': score_output.cpu().numpy(),
                    'pred_label': label_output.cpu().numpy(),
                    'gt_box': None,
                    'gt_label': None,
                    'img_name': f'{i}.jpg',
                    'save_path': '/home/dk/Desktop/model_best/'
                }
                plot_one(plot_dict)

            if box_output.size(0) != 0:
                # [X, 5]: box coordinates plus score
                mAP_predict_in = torch.cat(
                    [box_output,
                     score_output.reshape(score_output.numel(), 1)],
                    dim=-1)
            else:
                mAP_predict_in = torch.zeros(1, 5)

            # shape: [N, 4]; format: [xmin, ymin, xmax, ymax]; dtype: np.ndarray
            mAP_ground_truth_in = true_boxes.numpy().reshape(-1, 4)[:, ::-1]

            # undo the letterbox padding and rescale boxes back to the original image
            mAP_predict_in = mAP_predict_in.detach().cpu().numpy()
            mAP_predict_in[:, [1, 3]] = mAP_predict_in[:, [1, 3]] - dh
            mAP_predict_in[:, [0, 2]] = mAP_predict_in[:, [0, 2]] - dw
            mAP_predict_in[:, :4] = mAP_predict_in[:, :4] / ratio  # rescale coordinates only, not the score

            mAP_predicts.append(mAP_predict_in)
            mAP_ground_truths.append(mAP_ground_truth_in)

    MAP = mAP(mAP_predicts, mAP_ground_truths, 0.5)
    self.logger.info('AP: %.2f %%' % (MAP.elevenPointAP * 100))

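# A rough sketch of what a per-class NMS helper such as each_class_nms could
# look like, built on torchvision.ops.nms. The function name, argument order,
# and filtering rules of the real helper are assumptions for illustration only.
import torch
from torchvision.ops import nms

def per_class_nms(boxes, scores, score_threshold, iou_threshold, max_boxes):
    """boxes: [N, 4] in xyxy format; scores: [N, C] per-class scores."""
    out_boxes, out_scores, out_labels = [], [], []
    for cls in range(scores.shape[1]):
        cls_scores = scores[:, cls]
        keep = cls_scores > score_threshold      # drop low-confidence boxes first
        if keep.sum() == 0:
            continue
        cls_boxes, cls_scores = boxes[keep], cls_scores[keep]
        idx = nms(cls_boxes, cls_scores, iou_threshold)[:max_boxes]
        out_boxes.append(cls_boxes[idx])
        out_scores.append(cls_scores[idx])
        out_labels.append(torch.full((len(idx),), cls, dtype=torch.long))
    if not out_boxes:
        return torch.zeros(0, 4), torch.zeros(0), torch.zeros(0, dtype=torch.long)
    return torch.cat(out_boxes), torch.cat(out_scores), torch.cat(out_labels)
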
def evaluate(config):
    # checkpoint_paths = {'58': r'\\192.168.25.58\Team-CV\checkpoints\torch_yolov3'}
    checkpoint_paths = {'39': r'C:\Users\Administrator\Desktop\checkpoint/'}
    # checkpoint_paths = {'68': r'E:\github\YOLOv3_PyTorch\evaluate\weights'}
    post_weights = {k: 0 for k in checkpoint_paths.keys()}
    weight_index = {k: 0 for k in checkpoint_paths.keys()}
    time_inter = 10

    # DataLoader
    dataloader = torch.utils.data.DataLoader(
        COCODataset(config["train_path"],
                    (config["img_w"], config["img_h"]),
                    is_training=False,
                    is_scene=True),
        batch_size=config["batch_size"],
        shuffle=False,
        num_workers=0,
        pin_memory=False,
        drop_last=True)
    net, yolo_losses = build_yolov3(config)

    while 1:
        for key, checkpoint_path in checkpoint_paths.items():
            os.makedirs(checkpoint_path + '/result', exist_ok=True)
            checkpoint_weights = os.listdir(checkpoint_path)
            checkpoint_result = os.listdir(checkpoint_path + '/result')
            checkpoint_result = [
                cweight.split("_")[2][:-4] for cweight in checkpoint_result
                if cweight.endswith('ini')
            ]
            checkpoint_weights = [
                cweight for cweight in checkpoint_weights
                if cweight.endswith('weights')
            ]

            if weight_index[key] >= len(checkpoint_weights):
                print('weight_index[key]', weight_index[key], len(checkpoint_weights))
                time.sleep(time_inter)
                continue
            if post_weights[key] == checkpoint_weights[weight_index[key]]:
                print('post_weights[key]', post_weights[key])
                time.sleep(time_inter)
                continue
            post_weights[key] = checkpoint_weights[weight_index[key]]

            if post_weights[key].endswith("_.weights"):  # check whether the weight file has finished saving
                print("post_weights[key].split('_')", post_weights[key].split('_'))
                time.sleep(time_inter)
                continue
            if checkpoint_weights[weight_index[key]].split("_")[1][:-8] in checkpoint_result:
                print('weight_index[key] +', weight_index[key])
                weight_index[key] += 1
                time.sleep(time_inter // 20)
                continue
            weight_index[key] += 1

            try:
                if config["pretrain_snapshot"]:
                    # Restore pretrain model
                    state_dict = torch.load(config["pretrain_snapshot"])
                    logging.info("loading model from %s" % config["pretrain_snapshot"])
                    net.load_state_dict(state_dict)
                else:
                    state_dict = torch.load(os.path.join(checkpoint_path, post_weights[key]))
                    logging.info("loading model from %s" %
                                 os.path.join(checkpoint_path, post_weights[key]))
                    net.load_state_dict(state_dict)
            except Exception as E:
                print(E)
                time.sleep(time_inter)
                continue

            logging.info("Start eval.")
            # Start the eval loop
            n_gt = 0
            correct = 0
            imagepath_list = []
            for step, samples in enumerate(dataloader):
                images, labels, image_paths = samples["image"], samples["label"], samples["img_path"]
                labels = labels.cuda()
                with torch.no_grad():
                    output = net(images)
                time1 = datetime.datetime.now()
                map_value = mAP(output, labels, 352)
                # output = non_max_suppression(output, 1, conf_thres=0.5)
                # output = soft_nms_n(output, score_threshold=0.5)
                if (datetime.datetime.now() - time1).seconds > 10:
                    logging.info('Batch %d time is too long' % step)
                    n_gt = 1
                    break
                print("map time",
                      (datetime.datetime.now() - time1).seconds * 1000 +
                      (datetime.datetime.now() - time1).microseconds // 1000)

                # calculate
                # for sample_i in range(labels.size(0)):
                #     # Get labels for sample where width is not zero (dummies)
                #     target_sample = labels[sample_i, labels[sample_i, :, 3] != 0]
                #     for obj_cls, tx, ty, tw, th in target_sample:
                #         # Get rescaled gt coordinates
                #         tx1, tx2 = config["img_w"] * (tx - tw / 2), config["img_w"] * (tx + tw / 2)
                #         ty1, ty2 = config["img_h"] * (ty - th / 2), config["img_h"] * (ty + th / 2)
                #         n_gt += 1
                #         box_gt = torch.cat([coord.unsqueeze(0) for coord in [tx1, ty1, tx2, ty2]]).view(1, -1)
                #         sample_pred = output[sample_i]
                #         last_current = correct
                #         if sample_pred is not None:
                #             # Iterate through predictions where the class predicted is same as gt
                #             for x1, y1, x2, y2, conf, obj_conf, obj_pred in sample_pred[sample_pred[:, 6] == obj_cls.cuda()]:
                #                 box_pred = torch.cat([coord.unsqueeze(0) for coord in [x1, y1, x2, y2]]).view(1, -1)
                #                 iou = bbox_iou(box_pred, box_gt)
                #                 if iou >= config["iou_thres"]:
                #                     correct += 1
                #                     break
                #         if last_current == correct and image_paths[sample_i] not in imagepath_list:
                #             imagepath_list.append(image_paths[sample_i])
                # print("get result time", time.time() - start)
                logging.info('Mean Average Precision: %.5f' % map_value)

            # if n_gt:
            #     logging.info('Batch [%d/%d] err_count:%d mAP: %.5f' % (step, len(dataloader), len(imagepath_list), float(correct / n_gt)))
            #     logging.info('Mean Average Precision: %.5f' % float(correct / n_gt))
            logging.info('Mean Average Precision: %.5f' % map_value)
            # Mean_Average = float(correct / n_gt)
            # ini_name = os.path.join(checkpoint_path + '/result/', '%.4f_%s.ini' % ((float(post_weights[key].split("_")[0]) + float(correct / n_gt)) / 2, post_weights[key].replace(".weights", "")))
            # write_ini(ini_name, Mean_Average, imagepath_list)
            break