def train(model, optimizer, epoch, dataloader):
    num_iters = len(dataloader)  # iters per epoch
    max_iter = parser.max_epoch * num_iters
    model.train()
    model.to(device)
    for i, samples in enumerate(dataloader):
        images, labels, _ = samples
        cur_iter = (epoch - 1) * num_iters + i + 1
        adjust_learning_rate(optimizer=optimizer, cur_iter=cur_iter,
                             ini_lr=parser.learning_rate, step_size=parser.step_size,
                             max_iter=max_iter, mode='poly')
        images = images.to(device)
        labels = labels.long().to(device)
        output = model(images)
        loss = compute_loss(output, labels, name='ce', ignore_index=parser.ignore_label)
        optimizer.zero_grad()
        loss.backward()
        if parser.clip_gradient:
            torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=parser.max_norm)
        optimizer.step()
        print("epoch number: {}/{}, iteration: {}/{}, loss value: {}"
              .format(epoch, parser.max_epoch, i + 1, num_iters, loss.item()))
        if parser.tensorboard:
            saved_scalar = {'loss': loss.item()}
            add_summary(saved_scalar, cur_iter, parser.log_dir)
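# adjust_learning_rate() called above is defined elsewhere in this repo. A minimal
# sketch of what the 'poly' mode typically computes -- the signature mirrors the call
# above, but the body (and the power/step fallbacks) are assumptions, not the
# original implementation:
def adjust_learning_rate(optimizer, cur_iter, ini_lr, step_size, max_iter, mode='poly', power=0.9):
    if mode == 'poly':
        # polynomial decay: lr = ini_lr * (1 - cur_iter / max_iter) ** power
        lr = ini_lr * (1.0 - float(cur_iter) / max_iter) ** power
    elif mode == 'step':
        # hypothetical step decay: divide by 10 every `step_size` iterations
        lr = ini_lr * (0.1 ** (cur_iter // step_size))
    else:
        lr = ini_lr
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
    return lr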
def train_vgg_cifar_0001(config_data, tab_level=0, verbose=250):
    print('train_vgg_cifar_0001()')
    from pipeline.data.load_data_cifar import load_cifar_0001
    from utils.loss import compute_loss

    trainloader = load_cifar_0001(config_data, batch_size=None, shuffle=True, verbose=verbose)
    state_tracker = setup_state_tracker(config_data, verbose=verbose, tab_level=tab_level)
    net = new_or_load_model(state_tracker, verbose=verbose, tab_level=tab_level)
    criterion, optimizer = setup_training_tools_0001(net, config_data, verbose=verbose, tab_level=tab_level + 1)
    pm.printv('Start training...', tab_level=tab_level)

    total_iter_in_this_run = 0
    l_epoch = 1 + state_tracker.get_latest_saved_epoch()
    for n_epoch in range(l_epoch, l_epoch + config_data['general']['epoch']):
        state_tracker.setup_for_this_epoch(n_epoch)
        for i, data in enumerate(trainloader, 0):
            optimizer.zero_grad()
            x, y0 = data
            if DEBUG_train_loop_0001(DEBUG_train_vgg_cifar_LOOP_SIGNAL, net, x, y0,
                                     tab_level=tab_level, verbose=verbose):
                return
            y = net(x.to(this_device))
            loss = compute_loss(criterion, y.squeeze(3).squeeze(2).cpu(), y0)
            loss.backward()
            optimizer.step()

            # FOR LOGGING
            total_iter_in_this_run += 1
            state_tracker.store_loss_by_epoch(loss.item(), n_epoch)
            stop_iter, stop_epoch = DEBUG_train_loop_0002(DEBUG_train_vgg_cifar_LOOP2_SIGNAL, i,
                                                          n_epoch - l_epoch,
                                                          tab_level=tab_level + 1, verbose=verbose)
            if stop_iter:
                break
        state_tracker.update_epoch()
        if stop_epoch:
            break

    state_tracker.update_state(total_iter_in_this_run, config_data)
    save_model_by_n_th_run(net, state_tracker, tab_level=tab_level, verbose=verbose)
    state_tracker.display_end_state(tab_level=tab_level + 1, verbose=verbose)
def test_epoch(net, dataloader, logger, config):
    net.eval()
    total_mask_loss = 0.0
    dataprocess = tqdm(dataloader)
    confusion_matrix = np.zeros((config.NUM_CLASS, config.NUM_CLASS))
    logger.info("Testing : ")
    with torch.no_grad():
        for batch_item in dataprocess:
            image, mask = batch_item['image'], batch_item['mask']
            if torch.cuda.is_available():
                image, mask = image.cuda(), mask.cuda()
            out = net(image)
            weights = torch.tensor(
                [0.75, 1.25, 1.25, 1.25, 1.25, 1.25, 1.25, 1.25]).cuda()
            mask_loss = compute_loss(out, mask, weights=weights, device_id=0,
                                     num_class=config.NUM_CLASS)
            total_mask_loss += mask_loss.detach().item()
            confusion_matrix += get_confusion_matrix(mask, out, mask.size(),
                                                     config.NUM_CLASS)
            dataprocess.set_description_str('Test')
            dataprocess.set_postfix_str('mask loss is {:.4f}'.format(mask_loss.item()))
    logger.info("\taverage loss is {:.4f}".format(total_mask_loss / len(dataloader)))
    pos = confusion_matrix.sum(0)
    res = confusion_matrix.sum(1)
    tp = np.diag(confusion_matrix)
    IoU_array = (tp / np.maximum(1.0, pos + res - tp))
    for i in range(8):
        print('{} IoU is : {}'.format(i, IoU_array[i]))
        logger.info('\t{} Iou is : {}'.format(i, IoU_array[i]))
    miou = IoU_array[1:].mean()
    logger.info('Test miou is : {:.4f}'.format(miou))
    print('Test: miou is {}'.format(miou))
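# get_confusion_matrix() used above is imported from elsewhere. A minimal sketch of
# the idea under assumed shapes (out: NxCxHxW logits, mask: NxHxW integer labels);
# it accumulates an NUM_CLASS x NUM_CLASS matrix whose (i, j) entry counts pixels of
# ground-truth class i predicted as class j, which is exactly what the IoU
# computation above consumes:
import numpy as np
import torch

def get_confusion_matrix(mask, output, size, num_class):
    pred = torch.argmax(output, dim=1).view(-1).cpu().numpy()  # predicted class per pixel
    gt = mask.view(-1).cpu().numpy()                           # ground-truth class per pixel
    valid = (gt >= 0) & (gt < num_class)                       # drop ignore labels, if any
    index = gt[valid].astype(np.int64) * num_class + pred[valid]
    counts = np.bincount(index, minlength=num_class * num_class)
    return counts.reshape(num_class, num_class)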
def test(cfg, data, weights=None, batch_size=16, img_size=608, iou_thres=0.5, conf_thres=0.001, nms_thres=0.5, save_json=True, hyp=None, model=None, single_cls=False): """test the metrics of the trained model :param str cfg: model cfg file :param str data: data dict :param str weights: weights path :param int batch_size: batch size :param int img_size: image size :param float iou_thres: iou threshold :param float conf_thres: confidence threshold :param float nms_thres: nms threshold :param bool save_json: Whether to save the model :param str hyp: hyperparameter :param str model: yolov4 model :param bool single_cls: only one class :return: results """ if model is None: device = select_device(opt.device) verbose = False # Initialize model model = Model(cfg, img_size).to(device) # Load weights if weights.endswith('.pt'): checkpoint = torch.load(weights, map_location=device) state_dict = intersect_dicts(checkpoint['model'], model.state_dict()) model.load_state_dict(state_dict, strict=False) elif len(weights) > 0: load_darknet_weights(model, weights) print(f'Loaded weights from {weights}!') if torch.cuda.device_count() > 1: model = nn.DataParallel(model) else: device = next(model.parameters()).device verbose = False test_path = data['valid'] num_classes, names = (1, ['item']) if single_cls else (int( data['num_classes']), data['names']) # Dataloader dataset = LoadImagesAndLabels(test_path, img_size, batch_size, hyp=hyp) dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, num_workers=8, pin_memory=True, collate_fn=dataset.collate_fn) seen = 0 model.eval() coco91class = coco80_to_coco91_class() output_format = ('%20s' + '%10s' * 6) % ('Class', 'Images', 'Targets', 'Pre', 'Rec', 'mAP', 'F1') precision, recall, f_1, mean_pre, mean_rec, mean_ap, mf1 = 0., 0., 0., 0., 0., 0., 0. loss = torch.zeros(3) json_dict, stats, aver_pre, ap_class = [], [], [], [] for batch_i, (imgs, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=output_format)): targets = targets.to(device) imgs = imgs.to(device) / 255.0 _, _, height, width = imgs.shape # batch size, channels, height, width # Plot images with bounding boxes if batch_i == 0 and not os.path.exists('test_batch0.jpg'): plot_images(imgs=imgs, targets=targets, paths=paths, fname='test_batch0.jpg') with torch.no_grad(): inference_output, train_output = model(imgs) if hasattr(model, 'hyp'): # if model has loss hyperparameters loss += compute_loss(train_output, targets, model)[1][:3].cpu() # GIoU, obj, cls output = non_max_suppression(inference_output, conf_thres=conf_thres, nms_thres=nms_thres) # Statistics per image for i, pred in enumerate(output): labels = targets[targets[:, 0] == i, 1:] num_labels = len(labels) target_class = labels[:, 0].tolist() if num_labels else [] seen += 1 if pred is None: if num_labels: stats.append( ([], torch.Tensor(), torch.Tensor(), target_class)) continue # Append to pycocotools JSON dictionary if save_json: # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... 
image_id = int(Path(paths[i]).stem.split('_')[-1]) box = pred[:, :4].clone() # xyxy scale_coords(imgs[i].shape[1:], box, shapes[i][0]) # to original shape box = xyxy2xywh(box) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for det_i, det in enumerate(pred): json_dict.append({ 'image_id': image_id, 'category_id': coco91class[int(det[6])], 'bbox': [float(format(x, '.%gf' % 3)) for x in box[det_i]], 'score': float(format(det[4], '.%gf' % 5)) }) # Clip boxes to image bounds clip_coords(pred, (height, width)) # Assign all predictions as incorrect correct = [0] * len(pred) if num_labels: detected = [] tcls_tensor = labels[:, 0] # target boxes tbox = xywh2xyxy(labels[:, 1:5]) tbox[:, [0, 2]] *= width tbox[:, [1, 3]] *= height # Search for correct predictions for j, (*pbox, _, _, pcls) in enumerate(pred): # Break if all targets already located in image if len(detected) == num_labels: break # Continue if predicted class not among image classes if pcls.item() not in target_class: continue # Best iou, index between pred and targets mask = (pcls == tcls_tensor).nonzero( as_tuple=False).view(-1) iou, best_iou = bbox_iou(pbox, tbox[mask]).max(0) # If iou > threshold and class is correct mark as correct if iou > iou_thres and mask[ best_iou] not in detected: # and pcls == target_class[bi]: correct[j] = 1 detected.append(mask[best_iou]) # Append statistics (correct, conf, pcls, target_class) stats.append( (correct, pred[:, 4].cpu(), pred[:, 6].cpu(), target_class)) # Compute statistics stats = [np.concatenate(x, 0) for x in list(zip(*stats))] if len(stats): precision, recall, aver_pre, f_1, ap_class = ap_per_class(*stats) mean_pre, mean_rec, mean_ap, mf1 = precision.mean(), recall.mean( ), aver_pre.mean(), f_1.mean() num_targets = np.bincount( stats[3].astype(np.int64), minlength=num_classes) # number of targets per class else: num_targets = torch.zeros(1) # Print results print_format = '%20s' + '%10.3g' * 6 print(print_format % ('all', seen, num_targets.sum(), mean_pre, mean_rec, mean_ap, mf1)) # Print results per class if verbose and num_classes > 1 and stats: for i, class_ in enumerate(ap_class): print(print_format % (names[class_], seen, num_targets[class_], precision[i], recall[i], aver_pre[i], f_1[i])) # Save JSON if save_json and mean_ap and json_dict: try: img_ids = [ int(Path(x).stem.split('_')[-1]) for x in dataset.img_files ] with open('results.json', 'w') as file: json.dump(json_dict, file) # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb cocogt = COCO('data/coco/annotations/instances_val2017.json' ) # initialize COCO ground truth api cocodt = cocogt.loadRes('results.json') # initialize COCO pred api cocoeval = COCOeval(cocogt, cocodt, 'bbox') cocoeval.params.imgIds = img_ids # [:32] # only evaluate these images cocoeval.evaluate() cocoeval.accumulate() cocoeval.summarize() mean_ap = cocoeval.stats[1] # update mAP to pycocotools mAP except ImportError: print( 'WARNING: missing dependency pycocotools from requirements.txt. Can not compute official COCO mAP.' ) # Return results maps = np.zeros(num_classes) + mean_ap for i, class_ in enumerate(ap_class): maps[class_] = aver_pre[i] return (mean_pre, mean_rec, mean_ap, mf1, *(loss / len(dataloader)).tolist()), maps
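# xyxy2xywh() / xywh2xyxy() used above follow the usual YOLO box conventions. A
# minimal sketch of both for torch tensors of shape (n, 4) (the real utilities may
# also accept numpy arrays):
def xyxy2xywh(x):
    # [x1, y1, x2, y2] -> [x_center, y_center, width, height]
    y = x.clone()
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2
    y[:, 2] = x[:, 2] - x[:, 0]
    y[:, 3] = x[:, 3] - x[:, 1]
    return y

def xywh2xyxy(x):
    # [x_center, y_center, width, height] -> [x1, y1, x2, y2]
    y = x.clone()
    y[:, 0] = x[:, 0] - x[:, 2] / 2
    y[:, 1] = x[:, 1] - x[:, 3] / 2
    y[:, 2] = x[:, 0] + x[:, 2] / 2
    y[:, 3] = x[:, 1] + x[:, 3] / 2
    return y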
def train(model, discriminator, optimizer, source_data_iter, target_data_iter, val_data, start_iter, last_mIoU): # record the validation result best_mIoU = last_mIoU best_iter = start_iter D1 = discriminator['D1'] D2 = discriminator['D2'] model.train() model.to(device) D1.train() D1.to(device) D2.train() D2.to(device) optimizer_G = optimizer['G'] optimizer_D1 = optimizer['D1'] optimizer_D2 = optimizer['D2'] for i in range(start_iter, parser.max_iter + 1): loss_seg1_value = 0 loss_seg2_value = 0 loss_D1_value = 0 loss_D2_value = 0 loss_adv1_target_value = 0 loss_adv2_target_value = 0 optimizer_G.zero_grad() optimizer_D1.zero_grad() optimizer_D2.zero_grad() adjust_learning_rate(optimizer=optimizer_G, cur_iter=i, ini_lr=parser.learning_rate, step_size=parser.step_size, max_iter=parser.max_iter, mode='poly') adjust_learning_rate(optimizer=optimizer_D1, cur_iter=i, ini_lr=LEARNING_RATE_D, step_size=parser.step_size, max_iter=parser.max_iter, mode='poly') adjust_learning_rate(optimizer=optimizer_D2, cur_iter=i, ini_lr=LEARNING_RATE_D, step_size=parser.step_size, max_iter=parser.max_iter, mode='poly') # train G # froze gradient in discriminator for params in D1.parameters(): params.requires_grad = False for params in D2.parameters(): params.requires_grad = False _, batch = source_data_iter.__next__() images, labels = batch images = images.to(device) labels = labels.long().to(device) # train with source data output1_source, output2_source = model(images) loss_seg1 = compute_loss(output1_source, labels, name='ce', ignore_index=parser.ignore_label) loss_seg2 = compute_loss(output2_source, labels, name='ce', ignore_index=parser.ignore_label) loss_seg = loss_seg2 + LAMBDA_SEG * loss_seg1 loss_seg.backward() loss_seg1_value += loss_seg1.item() loss_seg2_value += loss_seg2.item() # train with target data _, batch = target_data_iter.__next__() images, _, _ = batch images = images.to(device) output1_target, output2_target = model(images) output_map1 = D1(F.softmax(output1_target)) output_map2 = D2(F.softmax(output2_target)) label_map1 = torch.FloatTensor( output_map1.data.size()).fill_(SOURCE_LABEL).to(device) label_map2 = torch.FloatTensor( output_map2.data.size()).fill_(SOURCE_LABEL).to(device) loss_adv1_target = compute_loss(output_map1, label_map1, name='mse') loss_adv1_target_value += loss_adv1_target.item() loss_adv2_target = compute_loss(output_map2, label_map2, name='mse') loss_adv2_target_value += loss_adv2_target.item() loss_adv_target = LAMBDA_ADV_TARGET_1 * loss_adv1_target + LAMBDA_ADV_TARGET_2 * loss_adv2_target loss_adv_target.backward() # train D # bring back gradient for params in D1.parameters(): params.requires_grad = True for params in D2.parameters(): params.requires_grad = True # train with source output1_source = output1_source.detach() output2_source = output2_source.detach() output_map1 = D1(F.softmax(output1_source)) output_map2 = D2(F.softmax(output2_source)) label_map1 = torch.FloatTensor( output_map1.data.size()).fill_(SOURCE_LABEL).to(device) label_map2 = torch.FloatTensor( output_map2.data.size()).fill_(SOURCE_LABEL).to(device) loss_D1 = compute_loss(output_map1, label_map1, name='mse') loss_D1 = loss_D1 / 2 loss_D1.backward() loss_D1_value += loss_D1.item() loss_D2 = compute_loss(output_map2, label_map2, name='mse') loss_D2 = loss_D2 / 2 loss_D2.backward() loss_D2_value += loss_D2.item() # train with target output1_target = output1_target.detach() output2_target = output2_target.detach() output_map1 = D1(F.softmax(output1_target)) output_map2 = D2(F.softmax(output2_target)) 
label_map1 = torch.FloatTensor( output_map1.data.size()).fill_(TARGET_LABEL).to(device) label_map2 = torch.FloatTensor( output_map2.data.size()).fill_(TARGET_LABEL).to(device) loss_D1 = compute_loss(output_map1, label_map1, name='mse') loss_D1 = loss_D1 / 2 loss_D1.backward() loss_D1_value += loss_D1.item() loss_D2 = compute_loss(output_map2, label_map2, name='mse') loss_D2 = loss_D2 / 2 loss_D2.backward() loss_D2_value += loss_D2.item() # clip gradient if parser.clip_gradient: torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=parser.max_norm) torch.nn.utils.clip_grad_norm_(D1.parameters(), max_norm=parser.max_norm) torch.nn.utils.clip_grad_norm_(D2.parameters(), max_norm=parser.max_norm) optimizer_G.step() optimizer_D1.step() optimizer_D2.step() print( "iteration: {}/{}, loss_seg1_value: {}, loss_seg2_value: {}, loss_D1_value: {},loss_D2_value: {}, " "loss_adv1_target_value: {},loss_adv2_target_value: {}".format( i + 1, parser.max_iter, loss_seg1_value, loss_seg2_value, loss_D1_value, loss_D2_value, loss_adv1_target_value, loss_adv2_target_value)) if parser.tensorboard: saved_scalar = { 'loss_seg1': loss_seg1_value, 'loss_seg2': loss_seg2_value, 'loss_D1': loss_D1_value, 'loss_D2': loss_D2_value, 'loss_adv1_target': loss_adv1_target_value, 'loss_adv2_target': loss_adv2_target_value } add_summary(saved_scalar, i, parser.log_dir) if i % parser.save_iter == 0: cur_mIoU = validation(model, val_data) if cur_mIoU > best_mIoU: optimizer = { 'G': optimizer_G.state_dict(), 'D1': optimizer_D1.state_dict(), 'D2': optimizer_D2.state_dict() } best_iter = i best_mIoU = cur_mIoU state_dict = { 'iter': best_iter, 'model': model.state_dict(), 'optimizer': optimizer, 'best_mIoU': best_mIoU } prefix = "Cross_Domain_Segmentation_" + mode sava_checkpoint(state_dict, parser.ckpt_dir, prefix=prefix) return best_mIoU, best_iter
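# compute_loss(..., name='ce'/'mse') used throughout the two training loops above is
# imported from elsewhere. A minimal sketch of a dispatcher with that signature (an
# assumption about the helper, not its actual implementation): cross-entropy for the
# segmentation outputs, least-squares (MSE) loss for the discriminator outputs.
import torch.nn.functional as F

def compute_loss(output, target, name='ce', ignore_index=255):
    if name == 'ce':
        return F.cross_entropy(output, target, ignore_index=ignore_index)
    if name == 'mse':
        return F.mse_loss(output, target)
    raise ValueError('unsupported loss name: {}'.format(name))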
prev_mAP = 0.0
current_mAP = 0.0
for epoch in range(args.epochs):
    print("\n---- Training Model ----")
    model.train()  # Set model to training mode
    for batch_i, (_, imgs, targets) in enumerate(
            tqdm.tqdm(dataloader, desc=f"Training Epoch {epoch}")):
        batches_done = len(dataloader) * epoch + batch_i
        imgs = imgs.to(device, non_blocking=True)
        targets = targets.to(device)

        yolo_B_old_class, yolo_A_old_class, yolo_B_new_class = model(imgs)
        loss, loss_components = compute_loss(yolo_B_new_class, targets, model)
        loss_distill = distillation_loss(yolo_A_old_class, yolo_B_old_class)
        loss += loss_distill
        loss.backward()

        ###############
        # Run optimizer
        ###############
        if batches_done % model.hyperparams['subdivisions'] == 0:
            # Adapt learning rate
            # Get learning rate defined in cfg
            lr = model.hyperparams['learning_rate']
            if batches_done < model.hyperparams['burn_in']:
def train(hyp, opt, device, tb_writer=None, wandb=None): logger.info( colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) print(f'Hyperparameters {hyp}') """ 训练日志包括:权重、tensorboard文件、超参数hyp、设置的训练参数opt(也就是epochs,batch_size等),result.txt result.txt包括: 占GPU内存、训练集的GIOU loss, objectness loss, classification loss, 总loss, targets的数量, 输入图片分辨率, 准确率TP/(TP+FP),召回率TP/P ; 测试集的mAP50, [email protected]:0.95, GIOU loss, objectness loss, classification loss. 还会保存batch<3的ground truth """ # 获取保存路径、总轮次、批次、总批次(涉及到分布式训练)、权重、进程序号(主要用于分布式训练) save_dir, epochs, batch_size, total_batch_size, weights, rank = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank # Directories wdir = save_dir / 'weights' wdir.mkdir(parents=True, exist_ok=True) # make dir last = wdir / 'last.pt' best = wdir / 'best.pt' results_file = save_dir / 'results.txt' # Save run settings # 保存hyp和opt with open(save_dir / 'hyp.yaml', 'w') as f: yaml.dump(hyp, f, sort_keys=False) with open(save_dir / 'opt.yaml', 'w') as f: yaml.dump(vars(opt), f, sort_keys=False) # Configure plots = not opt.evolve # create plots cuda = device.type != 'cpu' init_seeds(2 + rank) with open(opt.data) as f: data_dict = yaml.load(f, Loader=yaml.FullLoader) # data dict # torch_distributed_zero_first同步所有进程 # check_dataset检查数据集,如果没找到数据集则下载数据集(仅适用于项目中自带的yaml文件数据集) with torch_distributed_zero_first(rank): check_dataset(data_dict) # check train_path = data_dict['train'] test_path = data_dict['val'] nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes names = ['item'] if opt.single_cls and len( data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, '%g names found for nc=%g dataset in %s' % ( len(names), nc, opt.data) # check # Model pretrained = weights.endswith('.pt') if pretrained: # 加载模型,从google云盘中自动下载模型 # 但通常会下载失败,建议提前下载下来放进weights目录 with torch_distributed_zero_first(rank): attempt_download(weights) # download if not found locally # 加载检查点 ckpt = torch.load(weights, map_location=device) # load checkpoint if hyp.get('anchors'): ckpt['model'].yaml['anchors'] = round( hyp['anchors']) # force autoanchor """ 这里模型创建,可通过opt.cfg,也可通过ckpt['model'].yaml 这里的区别在于是否是resume,resume时会将opt.cfg设为空,则按照ckpt['model'].yaml创建模型 这也影响着下面是否除去anchor的key(也就是不加载anchor),如果resume则不加载anchor 主要是因为保存的模型会保存anchors,有时候用户自定义了anchor之后,再resume,则原来基于coco数据集的anchor就会覆盖自己设定的anchor, 参考https://github.com/ultralytics/yolov5/issues/459 所以下面设置了intersect_dicts,该函数就是忽略掉exclude """ model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc).to(device) # create exclude = ['anchor'] if opt.cfg or hyp.get('anchors') else [ ] # exclude keys state_dict = ckpt['model'].float().state_dict() # to FP32 state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect model.load_state_dict(state_dict, strict=False) # load # 显示加载预训练权重的的键值对和创建模型的键值对 # 如果设置了resume,则会少加载两个键值对(anchors,anchor_grid) logger.info( 'Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report else: model = Model(opt.cfg, ch=3, nc=nc).to(device) # create # Freeze """ 冻结模型层,设置冻结层名字即可 具体可以查看https://github.com/ultralytics/yolov5/issues/679 但作者不鼓励冻结层,因为他的实验当中显示冻结层不能获得更好的性能,参照:https://github.com/ultralytics/yolov5/pull/707 并且作者为了使得优化参数分组可以正常进行,在下面将所有参数的requires_grad设为了True 其实这里只是给一个freeze的示例 """ freeze = [] # parameter names to freeze (full or partial) for k, v in model.named_parameters(): v.requires_grad = True # train all layers if any(x in k for x in freeze): print('freezing %s' % k) 
v.requires_grad = False # Optimizer """ nbs为模拟的batch_size; 就比如默认的话上面设置的opt.batch_size为16,这个nbs就为64, 也就是模型梯度累积了64/16=4(accumulate)次之后 再更新一次模型,变相的扩大了batch_size """ nbs = 64 # nominal batch size accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing # 根据accumulate设置权重衰减系数 hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay logger.info(f"Scaled weight_decay = {hyp['weight_decay']}") # 将模型分成三组(weight、bn, bias, 其他所有参数)优化 pg0, pg1, pg2 = [], [], [] # optimizer parameter groups for k, v in model.named_modules(): if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): pg2.append(v.bias) # biases if isinstance(v, nn.BatchNorm2d): pg0.append(v.weight) # no decay elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): pg1.append(v.weight) # apply decay # 选用优化器,并设置pg0组的优化方式 if opt.adam: optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum else: optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) # 设置weight、bn的优化方式 optimizer.add_param_group({ 'params': pg1, 'weight_decay': hyp['weight_decay'] }) # add pg1 with weight_decay # 设置biases的优化方式 optimizer.add_param_group({'params': pg2}) # add pg2 (biases) # 打印优化信息 logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0))) del pg0, pg1, pg2 # 设置cosine调度器,定义学习率衰减学习率衰减,这里为余弦退火方式进行衰减 # 就是根据以下公式lf,epoch和超参数hyp['lrf']进行衰减 # Scheduler https://arxiv.org/pdf/1812.01187.pdf # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR # lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - hyp['lrf']) + hyp['lrf'] # cosine lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) # Logging if rank in [-1, 0] and wandb and wandb.run is None: opt.hyp = hyp # add hyperparameters wandb_run = wandb.init( config=opt, resume="allow", project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, name=save_dir.stem, id=ckpt.get('wandb_id') if 'ckpt' in locals() else None) loggers = {'wandb': wandb} # loggers dict # EMA # 在深度学习中,经常会使用EMA(指数移动平均)这个方法对模型的参数做滑动平均,以求提高测试指标并增加模型鲁棒,如果GPU进程数大于1,则不创建 # Exponential moving average ema = ModelEMA(model) if rank in [-1, 0] else None # Resume # best_fitness是以[0.0, 0.0, 0.1, 0.9]为系数并乘以[精确度, 召回率, [email protected], [email protected]:0.95]再求和所得 # 根据best_fitness来保存best.pt start_epoch, best_fitness = 0, 0.0 if pretrained: # Optimizer # 加载优化器与best_fitness if ckpt['optimizer'] is not None: optimizer.load_state_dict(ckpt['optimizer']) best_fitness = ckpt['best_fitness'] # EMA if ema and ckpt.get('ema'): ema.ema.load_state_dict(ckpt['ema'].float().state_dict()) ema.updates = ckpt['updates'] # Results # 加载训练结果result.txt if ckpt.get('training_results') is not None: with open(results_file, 'w') as file: file.write(ckpt['training_results']) # write results.txt # Epochs 加载训练的轮次 start_epoch = ckpt['epoch'] + 1 """ 如果resume,则备份权重 尽管目前resume能够近似100%成功的起作用了,参照:https://github.com/ultralytics/yolov5/pull/756 但为了防止resume时出现其他问题,把之前的权重覆盖了,所以这里进行备份,参照:https://github.com/ultralytics/yolov5/pull/765 """ if opt.resume: assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % ( weights, epochs) """ 如果新设置epochs小于加载的epoch, 则视新设置的epochs为需要再训练的轮次数而不再是总的轮次数 """ if epochs < start_epoch: logger.info( '%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' 
% (weights, ckpt['epoch'], epochs)) epochs += ckpt['epoch'] # finetune additional epochs del ckpt, state_dict # 获取模型最大步长和模型输入图片分辨率 gs = int(model.stride.max()) # grid size (max stride) nl = model.model[ -1].nl # number of detection layers (used for scaling hyp['obj']) # 检查训练和测试图片分辨率确保能够整除总步长gs imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size ] # verify imgsz are gs-multiples # DP mode # 分布式训练,参照:https://github.com/ultralytics/yolov5/issues/475 # DataParallel模式,仅支持单机多卡 # rank为进程编号, 这里应该设置为rank=-1则使用DataParallel模式 # rank=-1且gpu数量=1时,不会进行分布式 if cuda and rank == -1 and torch.cuda.device_count() > 1: model = torch.nn.DataParallel(model) # SyncBatchNorm # 使用跨卡同步BN if opt.sync_bn and cuda and rank != -1: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) logger.info('Using SyncBatchNorm()') # DDP mode # 如果rank不等于-1,则使用DistributedDataParallel模式 # local_rank为gpu编号,rank为进程,例如rank=3,local_rank=0 表示第 3 个进程内的第 1 块 GPU。 if cuda and rank != -1: model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank) # Trainloader dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt, hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank, world_size=opt.world_size, workers=opt.workers, image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: ')) """ 获取标签中最大的类别值,并于类别数作比较 如果小于类别数则表示有问题 """ mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class nb = len(dataloader) # number of batches assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % ( mlc, nc, opt.data, nc - 1) # Process 0 if rank in [-1, 0]: # 更新ema模型的updates参数,保持ema的平滑性 ema.updates = start_epoch * nb // accumulate # set EMA updates testloader = create_dataloader( test_path, imgsz_test, total_batch_size, gs, opt, # testloader hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1, world_size=opt.world_size, workers=opt.workers, pad=0.5, prefix=colorstr('val: '))[0] if not opt.resume: # 将所有样本的标签拼接到一起shape为(total, 1),统计后做可视化 labels = np.concatenate(dataset.labels, 0) # 获得所有样本的类别 c = torch.tensor(labels[:, 0]) # classes # cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency # model._initialize_biases(cf.to(device)) if plots: # 根据上面的统计对所有样本的类别,中心点xy位置,长宽wh做可视化 plot_labels(labels, save_dir, loggers) if tb_writer: tb_writer.add_histogram('classes', c, 0) # Check anchors """ 计算默认锚点anchor与数据集标签框的长宽比值 标签的长h宽w与anchor的长h_a宽w_a的比值, 即h/h_a, w/w_a都要在(1/hyp['anchor_t'], hyp['anchor_t'])是可以接受的 如果标签框满足上面条件的数量小于总数的99%,则根据k-mean算法聚类新的锚点anchor """ if not opt.noautoanchor: check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # Model parameters # 根据自己数据集的类别数设置分类损失的系数 hyp['box'] *= 3. / nl # scale to layers hyp['cls'] *= nc / 80. * 3. / nl # scale to classes and layers hyp['obj'] *= (imgsz / 640)**2 * 3. 
/ nl # scale to image size and layers model.nc = nc # attach number of classes to model model.hyp = hyp # attach hyperparameters to model model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou) # 根据labels初始化图片采样权重 model.class_weights = labels_to_class_weights( dataset.labels, nc).to(device) * nc # attach class weights model.names = names """ 设置giou的值在objectness loss中做标签的系数, 使用代码如下 tobj[b, a, gj, gi] = (1.0 - model.gr) + model.gr * giou.detach().clamp(0).type(tobj.dtype) 这里model.gr=1,也就是说完全使用标签框与预测框的giou值来作为该预测框的objectness标签 """ # Start training t0 = time.time() # 获取热身训练的迭代次数 nw = max(round(hyp['warmup_epochs'] * nb), 1000) # number of warmup iterations, max(3 epochs, 1k iterations) # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training maps = np.zeros(nc) # mAP per class results = (0, 0, 0, 0, 0, 0, 0 ) # P, R, [email protected], [email protected], val_loss(box, obj, cls) """ 设置学习率衰减所进行到的轮次, 目的是打断训练后,--resume接着训练也能正常的衔接之前的训练进行学习率衰减 """ scheduler.last_epoch = start_epoch - 1 # do not move # 通过torch1.6自带的api设置混合精度训练 scaler = amp.GradScaler(enabled=cuda) """ 打印训练和测试输入图片分辨率 加载图片时调用的cpu进程数 从哪个epoch开始训练 """ logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\n' f'Using {dataloader.num_workers} dataloader workers\n' f'Logging results to {save_dir}\n' f'Starting training for {epochs} epochs...') # 加载图片权重(可选),定义进度条,设置偏差Burn-in,使用多尺度,前向传播,损失函数,反向传播,优化器,打印进度条,保存训练参数至tensorboard,计算mAP,保存结果到results.txt,保存模型(最好和最后) for epoch in range( start_epoch, epochs ): # epoch ------------------------------------------------------------------ model.train() # Update image weights (optional) if opt.image_weights: # Generate indices """ 如果设置进行图片采样策略, 则根据前面初始化的图片采样权重model.class_weights以及maps配合每张图片包含的类别数 通过random.choices生成图片索引indices从而进行采样 """ if rank in [-1, 0]: cw = model.class_weights.cpu().numpy() * ( 1 - maps)**2 / nc # class weights iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights # 类平衡采样 dataset.indices = random.choices( range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx # Broadcast if DDP # 如果是DDP模式,则广播采样策略 if rank != -1: indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int() dist.broadcast(indices, 0) if rank != 0: dataset.indices = indices.cpu().numpy() # Update mosaic border # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) # dataset.mosaic_border = [b - imgsz, -b] # height, width borders # 初始化训练时打印的平均损失信息 mloss = torch.zeros(4, device=device) # mean losses if rank != -1: # DDP模式下打乱数据, ddp.sampler的随机采样数据是基于epoch+seed作为随机种子, # 每次epoch不同,随机种子就不同 dataloader.sampler.set_epoch(epoch) pbar = enumerate(dataloader) logger.info( ('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'targets', 'img_size')) if rank in [-1, 0]: pbar = tqdm(pbar, total=nb) # progress bar tqdm 创建进度条,方便训练时 信息的展示 optimizer.zero_grad() for i, ( imgs, targets, paths, _ ) in pbar: # batch ------------------------------------------------------------- ni = i + nb * epoch # number integrated batches (since train start) imgs = imgs.to(device, non_blocking=True).float( ) / 255.0 # uint8 to float32, 0-255 to 0.0-1.0 # Warmup """ 热身训练(前nw次迭代) 在前nw次迭代中,根据以下方式选取accumulate和学习率 """ if ni <= nw: xi = [0, nw] # x interp # model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) accumulate = max( 1, np.interp(ni, xi, [1, nbs / total_batch_size]).round()) for j, x in enumerate(optimizer.param_groups): # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 """ 
bias的学习率从0.1下降到基准学习率lr*lf(epoch),其他的参数学习率从0增加到lr*lf(epoch) lf为上面设置的余弦退火的衰减函数 """ x['lr'] = np.interp(ni, xi, [ hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch) ]) if 'momentum' in x: x['momentum'] = np.interp( ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) # Multi-scale # 设置多尺度训练,从imgsz * 0.5, imgsz * 1.5 + gs随机选取尺寸 if opt.multi_scale: sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size sf = sz / max(imgs.shape[2:]) # scale factor if sf != 1: ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:] ] # new shape (stretched to gs-multiple) imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) # 混合精度 with amp.autocast(enabled=cuda): pred = model(imgs) # forward # 计算损失,包括分类损失,objectness损失,框的回归损失 # loss为总损失值,loss_items为一个元组,包含分类损失,objectness损失,框的回归损失和总损失 if (IS_Debug()): #loss, loss_items = compute_loss(pred, targets.to(device), model, imgs) # loss scaled by batch_size loss, loss_items = compute_loss( pred, targets.to(device), model) # loss scaled by batch_size else: loss, loss_items = compute_loss( pred, targets.to(device), model) # loss scaled by batch_size if rank != -1: # 平均不同gpu之间的梯度 loss *= opt.world_size # gradient averaged between devices in DDP mode if opt.quad: loss *= 4. # Backward scaler.scale(loss).backward() # Optimize # 模型反向传播accumulate次之后再根据累积的梯度更新一次参数 if ni % accumulate == 0: scaler.step(optimizer) # optimizer.step scaler.update() optimizer.zero_grad() if ema is not None: ema.update(model) # Print if rank in [-1, 0]: # 打印显存,进行的轮次,损失,target的数量和图片的size等信息 mloss = (mloss * i + loss_items) / (i + 1 ) # update mean losses mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB) s = ('%10s' * 2 + '%10.4g' * 6) % ('%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1]) pbar.set_description(s) # Plot # 将前三次迭代batch的标签框在图片上画出来并保存 if plots and ni < 3: f = save_dir / f'train_batch{ni}.jpg' # filename Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() # if tb_writer: # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch) # tb_writer.add_graph(model, imgs) # add model to tensorboard elif plots and ni == 3 and wandb: wandb.log({ "Mosaics": [ wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg') ] }) # end batch ------------------------------------------------------------------------------------------------ # end epoch ---------------------------------------------------------------------------------------------------- # Scheduler # 进行学习率衰减 lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard scheduler.step() # DDP process 0 or single-GPU if rank in [-1, 0]: # mAP # 更新EMA的属性 # 添加include的属性 if ema: ema.update_attr(model, include=[ 'yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights' ]) # 判断该epoch是否为最后一轮 final_epoch = epoch + 1 == epochs # 对测试集进行测试,计算mAP等指标 # 测试时使用的是EMA模型 if not opt.notest or final_epoch: # Calculate mAP results, maps, times = test.test( opt.data, batch_size=total_batch_size, imgsz=imgsz_test, model=ema.ema, single_cls=opt.single_cls, dataloader=testloader, save_dir=save_dir, plots=plots and final_epoch, log_imgs=opt.log_imgs if wandb else 0) # Write with open(results_file, 'a') as f: f.write( s + '%10.4g' * 7 % results + '\n') # P, R, [email protected], [email protected], val_loss(box, obj, cls) if len(opt.name) and opt.bucket: os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name)) # Log tags = [ 'train/box_loss', 'train/obj_loss', 
'train/cls_loss', # train loss 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss 'x/lr0', 'x/lr1', 'x/lr2' ] # params for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags): if tb_writer: tb_writer.add_scalar(tag, x, epoch) # tensorboard if wandb: wandb.log({tag: x}) # W&B # Update best mAP fi = fitness(np.array(results).reshape( 1, -1)) # weighted combination of [P, R, [email protected], [email protected]] if fi > best_fitness: best_fitness = fi # Save model """ 保存模型,还保存了epoch,results,optimizer等信息, optimizer将不会在最后一轮完成后保存 model保存的是EMA的模型 """ save = (not opt.nosave) or (final_epoch and not opt.evolve) if save: with open(results_file, 'r') as f: # create checkpoint ckpt = { 'epoch': epoch, 'best_fitness': best_fitness, 'training_results': f.read(), 'model': ema.ema, 'optimizer': None if final_epoch else optimizer.state_dict(), 'wandb_id': wandb_run.id if wandb else None } # Save last, best and delete torch.save(ckpt, last) if best_fitness == fi: torch.save(ckpt, best) del ckpt # end epoch ---------------------------------------------------------------------------------------------------- # end training if rank in [-1, 0]: # Strip optimizers """ 模型训练完后,strip_optimizer函数将optimizer从ckpt中去除; 并且对模型进行model.half(), 将Float32的模型->Float16, 可以减少模型大小,提高inference速度 """ final = best if best.exists() else last # final model for f in [last, best]: if f.exists(): strip_optimizer(f) # strip optimizers if opt.bucket: os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload # Plots if plots: # 可视化results.txt文件 plot_results(save_dir=save_dir) # save as results.png if wandb: files = [ 'results.png', 'precision_recall_curve.png', 'confusion_matrix.png' ] wandb.log({ "Results": [ wandb.Image(str(save_dir / f), caption=f) for f in files if (save_dir / f).exists() ] }) if opt.log_artifacts: wandb.log_artifact(artifact_or_path=str(final), type='model', name=save_dir.stem) # Test best.pt logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600)) if opt.data.endswith('coco.yaml') and nc == 80: # if COCO for conf, iou, save_json in ([0.25, 0.45, False], [0.001, 0.65, True]): # speed, mAP tests results, _, _ = test.test(opt.data, batch_size=total_batch_size, imgsz=imgsz_test, conf_thres=conf, iou_thres=iou, model=attempt_load(final, device).half(), single_cls=opt.single_cls, dataloader=testloader, save_dir=save_dir, save_json=save_json, plots=False) else: dist.destroy_process_group() # 释放显存 wandb.run.finish() if wandb and wandb.run else None torch.cuda.empty_cache() return results
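# fitness() above reduces the results tuple to one scalar that decides whether
# best.pt is updated. A minimal sketch matching the weighting described in the
# comments (0.1 * mAP@0.5 + 0.9 * mAP@0.5:0.95; precision and recall get zero
# weight) -- treat the exact weights as an assumption:
import numpy as np

def fitness(x):
    # x: array of shape (1, 7) = P, R, mAP@0.5, mAP@0.5:0.95, val box/obj/cls losses
    w = np.array([0.0, 0.0, 0.1, 0.9])  # weights for [P, R, mAP@0.5, mAP@0.5:0.95]
    return (x[:, :4] * w).sum(1)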
def main(): """ Train and test :param opt: args :param writer: tensorboard :return: """ global opt opt = parse() arc = opt.arc cfg = opt.cfg teacher_cfg = opt.teacher_cfg img_size = opt.img_size epochs = opt.epochs batch_size = opt.batch_size accumulate = opt.accumulate # effective bs = batch_size * accumulate = 16 * 4 = 64 weights = opt.weights teacher_weights = opt.teacher_weights multi_scale = opt.multi_scale sparsity_training = opt.st opt.weights = last if opt.resume else opt.weights # Initial logging logging.basicConfig( format="%(message)s", level=logging.INFO if opt.local_rank in [-1, 0] else logging.WARN) # Train logger.info(opt) if opt.local_rank in [-1, 0]: logger.info('Start Tensorboard with "tensorboard --logdir=runs", view at http://localhost:6006/') writer = SummaryWriter() # Hyperparameters with open(opt.hyp) as f_hyp: hyp = yaml.safe_load(f_hyp) # data dict with open(opt.data) as f_data: data = yaml.safe_load(f_data) # Distributed training initialize device = select_device(opt.device) if opt.local_rank != -1: dist.init_process_group(init_method="env://", backend='nccl') torch.cuda.set_device(opt.local_rank) device = torch.device(f"cuda:{opt.local_rank}") # world_size = torch.distributed.get_world_size() init_seeds() cuda = device.type != 'cpu' torch.backends.cudnn.benchmark = True if multi_scale: img_size_min = round(img_size / 32 / 1.5) + 1 img_size_max = round(img_size / 32 * 1.5) - 1 img_size = img_size_max * 32 # initiate with maximum multi_scale size logger.info(f'Using multi-scale {img_size_min * 32} - {img_size}') train_path = data['train'] num_classes = int(data['num_classes']) # number of classes # Load dataset dataset = LoadImagesAndLabels(train_path, img_size, batch_size, augment=True, hyp=hyp, rect=opt.rect) train_sampler = torch.utils.data.distributed.DistributedSampler(dataset) if opt.local_rank != -1 else None num_worker = os.cpu_count() // torch.cuda.device_count() dataloader = torch.utils.data.DataLoader(dataset, batch_size=batch_size, num_workers=min([num_worker, batch_size, 8]), shuffle=not (opt.rect or train_sampler), sampler=train_sampler, pin_memory=True, collate_fn=dataset.collate_fn) # Load model model = Model(cfg, img_size, arc=arc).to(device) # Load teacher model if teacher_cfg: teacher_model = Model(teacher_cfg, img_size, arc).to(device) # optimizer parameter groups param_group0, param_group1 = [], [] for key, value in model.named_parameters(): if 'Conv2d.weight' in key: param_group1.append(value) else: param_group0.append(value) if opt.adam: optimizer = optim.Adam(param_group0, lr=hyp['lr0']) else: optimizer = optim.SGD(param_group0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) # add param_group1 with weight_decay optimizer.add_param_group({'params': param_group1, 'weight_decay': hyp['weight_decay']}) logger.info(f'Optimizer groups: {len(param_group1)} conv.weight, {len(param_group0)} other') del param_group0, param_group1 start_epoch = 0 best_fitness = 0. 
if weights.endswith('.pt'): checkpoint = torch.load(weights, map_location=device) state_dict = intersect_dicts(checkpoint['model'], model.state_dict()) model.load_state_dict(state_dict, strict=False) print('loaded weights from', weights, '\n') # load optimizer if checkpoint['optimizer'] is not None: optimizer.load_state_dict(checkpoint['optimizer']) best_fitness = checkpoint['best_fitness'] # load results if checkpoint.get('training_results') is not None: with open(results_file, 'w') as file: file.write(checkpoint['training_results']) # resume if opt.resume: start_epoch = checkpoint['epoch'] + 1 del checkpoint elif len(weights) > 0: # weights are 'yolov4.weights', 'darknet53.conv.74' etc. load_darknet_weights(model, weights) logger.info(f'loaded weights from {weights}\n') # Load teacher weights if teacher_cfg: if teacher_weights.endswith('.pt'): teacher_model.load_state_dict(torch.load(teacher_weights, map_location=device)['model']) elif teacher_weights.endswith('.weights'): load_darknet_weights(teacher_model, teacher_weights) else: raise Exception('pls provide proper teacher weights for knowledge distillation') if not mixed_precision: teacher_model.eval() logger.info('<......................using knowledge distillation....................>') logger.info(f'teacher model: {teacher_weights}\n') # Sparsity training if opt.prune == 0: _, _, prune_index = parse_module_index(model.module_dicts) if sparsity_training: logger.info('normal sparse training') if mixed_precision: if teacher_cfg: [model, teacher_model], optimizer = amp.initialize([model, teacher_model], optimizer, opt_level='O1', verbosity=1) else: model, optimizer = amp.initialize(model, optimizer, opt_level='O1', verbosity=1) # SyncBatchNorm and distributed training if cuda and opt.local_rank != -1: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) model = model.to(device) model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[opt.local_rank]) model.module_list = model.module.module_list model.yolo_layers = model.module.yolo_layers for index in prune_index: bn_weights = gather_bn_weights(model.module_list, [index]) if opt.local_rank == 0: writer.add_histogram('before_train_per_layer_bn_weights/hist', bn_weights.numpy(), index, bins='doane') # Start training model.num_classes = num_classes model.arc = opt.arc model.hyp = hyp num_batch_size = len(dataloader) # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification' results = (0, 0, 0, 0, 0, 0, 0) start_train_time = time.time() logger.info('Image sizes %d \n Starting training for %d epochs...', img_size, epochs) for epoch in range(start_epoch, epochs): # epoch ------------------------------------------------------------------ model.train() mean_losses = torch.zeros(4).to(device) mean_soft_target = torch.zeros(1).to(device) pbar = enumerate(dataloader) logger.info(('\n %10s %10s %10s %10s %10s %10s %10s %10s'), 'Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'targets', 'img_size') if opt.local_rank in [-1, 0]: pbar = tqdm(pbar, total=num_batch_size) optimizer.zero_grad() for i, (imgs, targets, _, _) in pbar: # batch ------------------------------------------------------------- num_integrated_batches = i + num_batch_size * epoch # Adjust the learning rate learning_rate = adjust_learning_rate(optimizer, num_integrated_batches, num_batch_size, hyp, epoch, epochs) if i == 0 and opt.local_rank in [-1, 0]: logger.info(f'learning rate: {learning_rate}') imgs = imgs.to(device) / 255.0 targets = targets.to(device) # Multi-Scale training if 
multi_scale: if num_integrated_batches / accumulate % 10 == 0: img_size = random.randrange(img_size_min, img_size_max + 1) * 32 scale_factor = img_size / max(imgs.shape[2:]) if scale_factor != 1: new_shape = [math.ceil(x * scale_factor / 32.) * 32 for x in imgs.shape[2:]] imgs = F.interpolate(imgs, size=new_shape, mode='bilinear', align_corners=False) pred = model(imgs) # Compute loss loss, loss_items = compute_loss(pred, targets, model) # knowledge distillation soft_target = 0 if teacher_cfg: if mixed_precision: with torch.no_grad(): output_teacher = teacher_model(imgs) else: _, output_teacher = teacher_model(imgs) soft_target = distillation_loss(pred, output_teacher, model.num_classes, imgs.size(0)) loss += soft_target # Scale loss by nominal batch_size of 64 loss *= batch_size / 64 # Compute gradient if mixed_precision: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() # Sparse the BN layer that needs pruning if sparsity_training: # bn_l1_regularization(model.module_list, opt.penalty_factor, cba_index, epoch, epochs) bn_l1_regularization(model.module_list, opt.penalty_factor, prune_index, epoch, epochs) # Accumulate gradient for x batches before optimizing if num_integrated_batches % accumulate == 0: optimizer.step() optimizer.zero_grad() if opt.local_rank in [-1, 0]: mean_losses = (mean_losses * i + loss_items) / (i + 1) mean_soft_target = (mean_soft_target * i + soft_target) / (i + 1) memory = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0 # (GB) description = ('%10s' * 2 + '%10.3g' * 6) % ( '%g/%g' % (epoch, epochs - 1), '%.3gG' % memory, *mean_losses, mean_soft_target, img_size) pbar.set_description(description) # end batch ------------------------------------------------------------------------------------------------ # Update scheduler # scheduler.step() if opt.local_rank in [-1, 0]: final_epoch = epoch + 1 == epochs # Calculate mAP if not (opt.notest or opt.nosave) or final_epoch: with torch.no_grad(): results, _ = test(cfg, data, batch_size=batch_size, img_size=opt.img_size, model=model, conf_thres=0.001 if final_epoch and epoch > 0 else 0.1, # 0.1 for speed save_json=final_epoch and epoch > 0) # Write epoch results with open(results_file, 'a') as file: # P, R, mAP, F1, test_losses=(GIoU, obj, cls) file.write(description + '%10.3g' * 7 % results + '\n') # Write Tensorboard results if writer: outputs = list(mean_losses) + list(results) titles = ['GIoU', 'Objectness', 'Classification', 'Train loss', 'Precision', 'Recall', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'] for output, title in zip(outputs, titles): writer.add_scalar(title, output, epoch) bn_weights = gather_bn_weights(model.module_list, prune_index) writer.add_histogram('bn_weights/hist', bn_weights.numpy(), epoch, bins='doane') # Update best mAP fitness = results[2] if fitness > best_fitness: best_fitness = fitness # Save training results save = (not opt.nosave) or (final_epoch and not opt.evolve) if save and opt.local_rank == 0: with open(results_file, 'r') as file: # Create checkpoint checkpoint = {'epoch': epoch, 'best_fitness': best_fitness, 'training_results': file.read(), 'model': model.module.state_dict() if isinstance( model, nn.parallel.DistributedDataParallel) else model.state_dict(), 'optimizer': None if final_epoch else optimizer.state_dict()} # Save last checkpoint torch.save(checkpoint, last) # Save best checkpoint if best_fitness == fitness: torch.save(checkpoint, best) # Delete checkpoint del checkpoint # end 
epoch ----------------------------------------------------------------------------------------------- # end training if opt.local_rank in [-1, 0]: if len(opt.name): os.rename('results.txt', 'results_%s.txt' % opt.name) plot_results() # save as results.png print(f'{epoch - start_epoch + 1} epochs completed in {(time.time() - start_train_time) / 3600:.3f} hours.\n') if torch.cuda.device_count() > 1: dist.destroy_process_group() torch.cuda.empty_cache() return results
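# distillation_loss() used for knowledge distillation in the loop above is defined
# elsewhere in the repo (and appears with a different signature in an earlier
# snippet). A minimal sketch of the general idea only -- the flattening helper and
# the plain MSE penalty are assumptions, not the original code: the student is
# penalised for diverging from the (detached) teacher predictions, on top of the
# normal detection loss.
import torch
import torch.nn.functional as F

def distillation_loss(student_out, teacher_out, num_classes=None, batch_size=None):
    def _flatten(out):
        # model() may return a single tensor or a list/tuple of per-head tensors
        if isinstance(out, (list, tuple)):
            return torch.cat([o.reshape(o.shape[0], -1) for o in out], dim=1)
        return out.reshape(out.shape[0], -1)

    student = _flatten(student_out)
    teacher = _flatten(teacher_out).detach()
    # plain MSE between student and teacher outputs; real implementations often use a
    # temperature-scaled KL divergence over the class scores instead
    return F.mse_loss(student, teacher)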
def train(hyp, opt, device, tb_writer=None, wandb=None): logger.info(f'Hyperparameters {hyp}') save_dir, epochs, batch_size, total_batch_size, weights, rank = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank # Directories wdir = save_dir / 'weights' wdir.mkdir(parents=True, exist_ok=True) # make dir last = wdir / 'last.pt' best = wdir / 'best.pt' results_file = save_dir / 'results.txt' # Save run settings with open(save_dir / 'hyp.yaml', 'w') as f: yaml.dump(hyp, f, sort_keys=False) with open(save_dir / 'opt.yaml', 'w') as f: yaml.dump(vars(opt), f, sort_keys=False) # Configure cuda = device.type != 'cpu' init_seeds(2 + rank) with open(opt.data) as f: data_dict = yaml.load(f, Loader=yaml.FullLoader) # data dict with torch_distributed_zero_first(rank): check_dataset(data_dict) # check train_path = data_dict['train'] test_path = data_dict['val'] nc, names = (1, ['item']) if opt.single_cls else (int( data_dict['nc']), data_dict['names']) # number classes, names assert len(names) == nc, '%g names found for nc=%g dataset in %s' % ( len(names), nc, opt.data) # check # Model pretrained = weights.endswith('.pt') if pretrained: with torch_distributed_zero_first(rank): attempt_download(weights) # download if not found locally ckpt = torch.load(weights, map_location=device) # load checkpoint if hyp.get('anchors'): ckpt['model'].yaml['anchors'] = round( hyp['anchors']) # force autoanchor model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc).to(device) # create exclude = ['anchor'] if opt.cfg or hyp.get('anchors') else [ ] # exclude keys state_dict = ckpt['model'].float().state_dict() # to FP32 state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect model.load_state_dict(state_dict, strict=False) # load logger.info( 'Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report else: model = Model(opt.cfg, ch=3, nc=nc).to(device) # create # Freeze freeze = [] # parameter names to freeze (full or partial) for k, v in model.named_parameters(): v.requires_grad = True # train all layers if any(x in k for x in freeze): print('freezing %s' % k) v.requires_grad = False # Optimizer nbs = 64 # nominal batch size accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay pg0, pg1, pg2 = [], [], [] # optimizer parameter groups for k, v in model.named_modules(): if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): pg2.append(v.bias) # biases if isinstance(v, nn.BatchNorm2d): pg0.append(v.weight) # no decay elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): pg1.append(v.weight) # apply decay if opt.adam: optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum else: optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) optimizer.add_param_group({ 'params': pg1, 'weight_decay': hyp['weight_decay'] }) # add pg1 with weight_decay optimizer.add_param_group({'params': pg2}) # add pg2 (biases) logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0))) del pg0, pg1, pg2 # Scheduler https://arxiv.org/pdf/1812.01187.pdf # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - hyp[ 'lrf']) + hyp['lrf'] # cosine scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # 
plot_lr_scheduler(optimizer, scheduler, epochs) # Logging if wandb and wandb.run is None: wandb_run = wandb.init( config=opt, resume="allow", project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, name=save_dir.stem, id=ckpt.get('wandb_id') if 'ckpt' in locals() else None) # Resume start_epoch, best_fitness = 0, 0.0 if pretrained: # Optimizer if ckpt['optimizer'] is not None: optimizer.load_state_dict(ckpt['optimizer']) best_fitness = ckpt['best_fitness'] # Results if ckpt.get('training_results') is not None: with open(results_file, 'w') as file: file.write(ckpt['training_results']) # write results.txt # Epochs start_epoch = ckpt['epoch'] + 1 if opt.resume: assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % ( weights, epochs) if epochs < start_epoch: logger.info( '%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' % (weights, ckpt['epoch'], epochs)) epochs += ckpt['epoch'] # finetune additional epochs del ckpt, state_dict # Image sizes gs = int(max(model.stride)) # grid size (max stride) imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size ] # verify imgsz are gs-multiples # DP mode if cuda and rank == -1 and torch.cuda.device_count() > 1: model = torch.nn.DataParallel(model) # SyncBatchNorm if opt.sync_bn and cuda and rank != -1: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) logger.info('Using SyncBatchNorm()') # Exponential moving average ema = ModelEMA(model) if rank in [-1, 0] else None # DDP mode if cuda and rank != -1: model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank) # Trainloader dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt, hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank, world_size=opt.world_size, workers=opt.workers) mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class nb = len(dataloader) # number of batches assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % ( mlc, nc, opt.data, nc - 1) # Process 0 if rank in [-1, 0]: ema.updates = start_epoch * nb // accumulate # set EMA updates testloader = create_dataloader(test_path, imgsz_test, total_batch_size, gs, opt, hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1, world_size=opt.world_size, workers=opt.workers)[0] # testloader if not opt.resume: labels = np.concatenate(dataset.labels, 0) c = torch.tensor(labels[:, 0]) # classes # cf = torch.bincount(c.long(), minlength=nc) + 1. # frequency # model._initialize_biases(cf.to(device)) plot_labels(labels, save_dir=save_dir) if tb_writer: # tb_writer.add_hparams(hyp, {}) # causes duplicate https://github.com/ultralytics/yolov5/pull/384 tb_writer.add_histogram('classes', c, 0) # Anchors if not opt.noautoanchor: check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # Model parameters hyp['cls'] *= nc / 80. 
# scale coco-tuned hyp['cls'] to current dataset model.nc = nc # attach number of classes to model model.hyp = hyp # attach hyperparameters to model model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou) model.class_weights = labels_to_class_weights(dataset.labels, nc).to( device) # attach class weights model.names = names # Start training t0 = time.time() nw = max(round(hyp['warmup_epochs'] * nb), 1000) # number of warmup iterations, max(3 epochs, 1k iterations) # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training maps = np.zeros(nc) # mAP per class results = (0, 0, 0, 0, 0, 0, 0 ) # P, R, [email protected], [email protected], val_loss(box, obj, cls) scheduler.last_epoch = start_epoch - 1 # do not move scaler = amp.GradScaler(enabled=cuda) logger.info('Image sizes %g train, %g test\n' 'Using %g dataloader workers\nLogging results to %s\n' 'Starting training for %g epochs...' % (imgsz, imgsz_test, dataloader.num_workers, save_dir, epochs)) for epoch in range( start_epoch, epochs ): # epoch ------------------------------------------------------------------ model.train() # Update image weights (optional) if opt.image_weights: # Generate indices if rank in [-1, 0]: cw = model.class_weights.cpu().numpy() * ( 1 - maps)**2 # class weights iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights dataset.indices = random.choices( range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx # Broadcast if DDP if rank != -1: indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int() dist.broadcast(indices, 0) if rank != 0: dataset.indices = indices.cpu().numpy() # Update mosaic border # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) # dataset.mosaic_border = [b - imgsz, -b] # height, width borders mloss = torch.zeros(4, device=device) # mean losses if rank != -1: dataloader.sampler.set_epoch(epoch) pbar = enumerate(dataloader) logger.info( ('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'targets', 'img_size')) if rank in [-1, 0]: pbar = tqdm(pbar, total=nb) # progress bar optimizer.zero_grad() for i, ( imgs, targets, paths, _ ) in pbar: # batch ------------------------------------------------------------- ni = i + nb * epoch # number integrated batches (since train start) imgs = imgs.to(device, non_blocking=True).float( ) / 255.0 # uint8 to float32, 0-255 to 0.0-1.0 # Warmup if ni <= nw: xi = [0, nw] # x interp # model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) accumulate = max( 1, np.interp(ni, xi, [1, nbs / total_batch_size]).round()) for j, x in enumerate(optimizer.param_groups): # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 x['lr'] = np.interp(ni, xi, [ hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch) ]) if 'momentum' in x: x['momentum'] = np.interp( ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) # Multi-scale if opt.multi_scale: sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size sf = sz / max(imgs.shape[2:]) # scale factor if sf != 1: ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:] ] # new shape (stretched to gs-multiple) imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) # Forward with amp.autocast(enabled=cuda): pred = model(imgs) # forward loss, loss_items = compute_loss( pred, targets.to(device), model) # loss scaled by batch_size if rank != -1: loss *= opt.world_size # gradient averaged between devices in DDP mode # 
Backward scaler.scale(loss).backward() # Optimize if ni % accumulate == 0: scaler.step(optimizer) # optimizer.step scaler.update() optimizer.zero_grad() if ema: ema.update(model) # Print if rank in [-1, 0]: mloss = (mloss * i + loss_items) / (i + 1 ) # update mean losses mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB) s = ('%10s' * 2 + '%10.4g' * 6) % ('%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1]) pbar.set_description(s) # Plot if ni < 3: f = str(save_dir / f'train_batch{ni}.jpg') # filename result = plot_images(images=imgs, targets=targets, paths=paths, fname=f) # if tb_writer and result is not None: # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch) # tb_writer.add_graph(model, imgs) # add model to tensorboard # end batch ------------------------------------------------------------------------------------------------ # Scheduler lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard scheduler.step() # DDP process 0 or single-GPU if rank in [-1, 0]: # mAP if ema: ema.update_attr( model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride']) final_epoch = epoch + 1 == epochs if not opt.notest or final_epoch: # Calculate mAP results, maps, times = test.test( opt.data, batch_size=total_batch_size, imgsz=imgsz_test, model=ema.ema, single_cls=opt.single_cls, dataloader=testloader, save_dir=save_dir, plots=epoch == 0 or final_epoch, # plot first and last log_imgs=opt.log_imgs if wandb else 0) # Write with open(results_file, 'a') as f: f.write( s + '%10.4g' * 7 % results + '\n') # P, R, [email protected], [email protected], val_loss(box, obj, cls) if len(opt.name) and opt.bucket: os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name)) # Log tags = [ 'train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss 'x/lr0', 'x/lr1', 'x/lr2' ] # params for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags): if tb_writer: tb_writer.add_scalar(tag, x, epoch) # tensorboard if wandb: wandb.log({tag: x}) # W&B # Update best mAP fi = fitness(np.array(results).reshape( 1, -1)) # weighted combination of [P, R, [email protected], [email protected]] if fi > best_fitness: best_fitness = fi # Save model save = (not opt.nosave) or (final_epoch and not opt.evolve) if save: with open(results_file, 'r') as f: # create checkpoint ckpt = { 'epoch': epoch, 'best_fitness': best_fitness, 'training_results': f.read(), 'model': ema.ema, 'optimizer': None if final_epoch else optimizer.state_dict(), 'wandb_id': wandb_run.id if wandb else None } # Save last, best and delete torch.save(ckpt, last) if best_fitness == fi: torch.save(ckpt, best) del ckpt # end epoch ---------------------------------------------------------------------------------------------------- # end training if rank in [-1, 0]: # Strip optimizers n = opt.name if opt.name.isnumeric() else '' fresults, flast, fbest = save_dir / f'results{n}.txt', wdir / f'last{n}.pt', wdir / f'best{n}.pt' for f1, f2 in zip([wdir / 'last.pt', wdir / 'best.pt', results_file], [flast, fbest, fresults]): if f1.exists(): os.rename(f1, f2) # rename if str(f2).endswith('.pt'): # is *.pt strip_optimizer(f2) # strip optimizer os.system( 'gsutil cp %s gs://%s/weights' % (f2, opt.bucket)) if opt.bucket else None # upload # Finish if not opt.evolve: plot_results(save_dir=save_dir) # save as 
results.png logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600)) dist.destroy_process_group() if rank not in [-1, 0] else None torch.cuda.empty_cache() return results
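# --- Illustrative sketch (added for clarity, not part of the original scripts) ---
# The fragment above pairs torch.cuda.amp mixed precision with gradient accumulation:
# scaled gradients are accumulated for `accumulate` batches, then a single optimizer
# step is taken. The model and the `batches` iterable below are hypothetical placeholders.
import torch
import torch.nn as nn
from torch.cuda import amp

def amp_accumulation_demo(model: nn.Module, batches, accumulate: int = 4, lr: float = 0.01):
    device = next(model.parameters()).device
    use_amp = device.type == 'cuda'
    optimizer = torch.optim.SGD(model.parameters(), lr=lr)
    scaler = amp.GradScaler(enabled=use_amp)
    optimizer.zero_grad()
    for ni, (imgs, targets) in enumerate(batches):
        with amp.autocast(enabled=use_amp):
            loss = nn.functional.mse_loss(model(imgs.to(device)), targets.to(device))
        scaler.scale(loss).backward()       # accumulate scaled gradients
        if (ni + 1) % accumulate == 0:      # update weights once every `accumulate` batches
            scaler.step(optimizer)          # unscale gradients, then optimizer.step()
            scaler.update()
            optimizer.zero_grad()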
def train(hyp, opt, device, tb_writer=None, wandb=None): logger.info(f'Hyperparameters {hyp}') save_dir, epochs, batch_size, total_batch_size, weights, rank = \ Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank # Directories wdir = save_dir / 'weights' wdir.mkdir(parents=True, exist_ok=True) # make dir last = wdir / 'last.pt' best = wdir / 'best.pt' results_file = save_dir / 'results.txt' # Save run settings (hyperparameters and training options) with open(save_dir / 'hyp.yaml', 'w') as f: yaml.dump(hyp, f, sort_keys=False) with open(save_dir / 'opt.yaml', 'w') as f: yaml.dump(vars(opt), f, sort_keys=False) # Configure plots = not opt.evolve # create plots cuda = device.type != 'cpu' init_seeds(2 + rank) with open(opt.data) as f: data_dict = yaml.load(f, Loader=yaml.FullLoader) # data dict with torch_distributed_zero_first( rank): # torch_distributed_zero_first synchronizes all processes check_dataset(data_dict) # check train_path = data_dict['train'] test_path = data_dict['val'] nc, names = (1, ['item']) if opt.single_cls else (int( data_dict['nc']), data_dict['names']) # number classes, names assert len(names) == nc, '%g names found for nc=%g dataset in %s' % ( len(names), nc, opt.data) # check # Model # When training from pretrained weights, the anchors stored in the checkpoint are discarded and the user-defined ones are used; # when resuming, the anchors are kept and loaded together with the weights so training continues unchanged. pretrained = weights.endswith('.pt') if pretrained: with torch_distributed_zero_first(rank): attempt_download(weights) # download if not found locally ckpt = torch.load(weights, map_location=device) # load checkpoint if hyp.get('anchors'): ckpt['model'].yaml['anchors'] = round( hyp['anchors']) # force autoanchor model = Model(opt.cfg or ckpt['model'].yaml, ch=3, nc=nc).to(device) # create (opt.cfg is set to empty when resuming) exclude = ['anchor'] if opt.cfg or hyp.get('anchors') else [ ] # exclude keys; when resuming, the anchors saved in the checkpoint are loaded so training continues with them state_dict = ckpt['model'].float().state_dict() # to FP32 state_dict = intersect_dicts(state_dict, model.state_dict(), exclude=exclude) # intersect model.load_state_dict(state_dict, strict=False) # load logger.info('Transferred %g/%g items from %s' % (len(state_dict), len(model.state_dict()), weights)) # report how many checkpoint entries match the freshly created model # when resuming, two fewer entries are loaded (anchors, anchor_grid) else: model = Model(opt.cfg, ch=3, nc=nc).to(device) # create # Freeze freeze = [] # parameter names to freeze (full or partial) for k, v in model.named_parameters(): v.requires_grad = True # train all layers if any(x in k for x in freeze): print('freezing %s' % k) v.requires_grad = False # Optimizer nbs = 64 # nominal batch size accumulate = max(round(nbs / total_batch_size), 1) # accumulate loss before optimizing hyp['weight_decay'] *= total_batch_size * accumulate / nbs # scale weight_decay # split the parameters into three optimizer groups (BatchNorm weights, other weights, biases) pg0, pg1, pg2 = [], [], [] # optimizer parameter groups for k, v in model.named_modules(): if hasattr(v, 'bias') and isinstance(v.bias, nn.Parameter): pg2.append(v.bias) # biases if isinstance(v, nn.BatchNorm2d): pg0.append(v.weight) # no decay elif hasattr(v, 'weight') and isinstance(v.weight, nn.Parameter): pg1.append(v.weight) # apply decay if opt.adam: optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum else: optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) optimizer.add_param_group({ 'params': pg1, 'weight_decay': hyp['weight_decay'] }) # add pg1 with weight_decay optimizer.add_param_group({'params': pg2}) # add pg2 (biases) logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' %
(len(pg2), len(pg1), len(pg0))) del pg0, pg1, pg2 # Scheduler https://arxiv.org/pdf/1812.01187.pdf # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR lf = lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - hyp[ 'lrf']) + hyp['lrf'] # cosine scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) # Logging if wandb and wandb.run is None: opt.hyp = hyp # add hyperparameters wandb_run = wandb.init( config=opt, resume="allow", project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, name=save_dir.stem, id=ckpt.get('wandb_id') if 'ckpt' in locals() else None) loggers = {'wandb': wandb} # loggers dict # Resume start_epoch, best_fitness = 0, 0.0 if pretrained: # Optimizer if ckpt['optimizer'] is not None: optimizer.load_state_dict(ckpt['optimizer']) best_fitness = ckpt['best_fitness'] # Results if ckpt.get('training_results') is not None: with open(results_file, 'w') as file: file.write(ckpt['training_results']) # write results.txt # Epochs start_epoch = ckpt['epoch'] + 1 if opt.resume: assert start_epoch > 0, '%s training to %g epochs is finished, nothing to resume.' % ( weights, epochs) if epochs < start_epoch: logger.info( '%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' % (weights, ckpt['epoch'], epochs)) epochs += ckpt['epoch'] # finetune additional epochs del ckpt, state_dict # Image sizes gs = int(max(model.stride)) # grid size (max stride) imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size ] # verify imgsz are gs-multiples # DP mode (DataParallel, single-machine multi-GPU only) if cuda and rank == -1 and torch.cuda.device_count() > 1: model = torch.nn.DataParallel(model) # SyncBatchNorm if opt.sync_bn and cuda and rank != -1: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) logger.info('Using SyncBatchNorm()') # Exponential moving average: keep an EMA copy of the model; only created on the master process (rank -1 or 0) ema = ModelEMA(model) if rank in [-1, 0] else None # DDP mode # if rank != -1, DistributedDataParallel is used # local_rank is the GPU index within a node, rank is the process index; e.g. rank=3, local_rank=0 is the first GPU of process 3 if cuda and rank != -1: model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank) # Trainloader dataloader, dataset = create_dataloader(train_path, imgsz, batch_size, gs, opt, hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank, world_size=opt.world_size, workers=opt.workers, image_weights=opt.image_weights) mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class nb = len(dataloader) # number of batches assert mlc < nc, 'Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g' % ( mlc, nc, opt.data, nc - 1) """ Check the largest class index found in the labels against nc: it must be smaller than nc, otherwise the dataset labels are inconsistent with the data config. """ # Process 0 if rank in [-1, 0]: ema.updates = start_epoch * nb // accumulate # set EMA updates testloader = create_dataloader(test_path, imgsz_test, total_batch_size, gs, opt, hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1, world_size=opt.world_size, workers=opt.workers, pad=0.5)[0] if not opt.resume: labels = np.concatenate(dataset.labels, 0) c = torch.tensor(labels[:, 0]) # classes # cf = torch.bincount(c.long(), minlength=nc) + 1.
# frequency # model._initialize_biases(cf.to(device)) # 根据上面的统计对所有样本的类别,中心点xy位置,长宽wh做可视化 if plots: Thread(target=plot_labels, args=(labels, save_dir, loggers), daemon=True).start() if tb_writer: tb_writer.add_histogram('classes', c, 0) # Anchors if not opt.noautoanchor: check_anchors(dataset, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # Model parameters hyp['cls'] *= nc / 80. # scale coco-tuned hyp['cls'] to current dataset model.nc = nc # attach number of classes to model model.hyp = hyp # attach hyperparameters to model model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou) model.class_weights = labels_to_class_weights(dataset.labels, nc).to( device) # attach class weights model.names = names # Start training t0 = time.time() nw = max(round(hyp['warmup_epochs'] * nb), 1000) # number of warmup iterations, max(3 epochs, 1k iterations) # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training maps = np.zeros(nc) # mAP per class results = (0, 0, 0, 0, 0, 0, 0 ) # P, R, [email protected], [email protected], val_loss(box, obj, cls) scheduler.last_epoch = start_epoch - 1 # do not move scaler = amp.GradScaler(enabled=cuda) # 通过torch1.6自带的api设置混合精度训练 logger.info('Image sizes %g train, %g test\n' 'Using %g dataloader workers\nLogging results to %s\n' 'Starting training for %g epochs...' % (imgsz, imgsz_test, dataloader.num_workers, save_dir, epochs)) for epoch in range( start_epoch, epochs ): # epoch ------------------------------------------------------------------ model.train() # Update image weights (optional) if opt.image_weights: # Generate indices if rank in [-1, 0]: cw = model.class_weights.cpu().numpy() * ( 1 - maps)**2 # class weights iw = labels_to_image_weights(dataset.labels, nc=nc, class_weights=cw) # image weights dataset.indices = random.choices( range(dataset.n), weights=iw, k=dataset.n) # rand weighted idx # Broadcast if DDP if rank != -1: indices = (torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n)).int() dist.broadcast(indices, 0) # 广播索引到其他group if rank != 0: dataset.indices = indices.cpu().numpy() # Update mosaic border # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) # dataset.mosaic_border = [b - imgsz, -b] # height, width borders mloss = torch.zeros(4, device=device) # mean losses if rank != -1: dataloader.sampler.set_epoch(epoch) # DDP模式下打乱数据, ddp.sampler的随机采样数据是基于epoch+seed作为随机种子, # 每次epoch不同,随机种子就不同 pbar = enumerate(dataloader) logger.info( ('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'box', 'obj', 'cls', 'total', 'targets', 'img_size')) if rank in [-1, 0]: pbar = tqdm(pbar, total=nb) # progress bar optimizer.zero_grad() for i, ( imgs, targets, paths, _ ) in pbar: # batch ------------------------------------------------------------- ni = i + nb * epoch # number integrated batches (since train start) imgs = imgs.to(device, non_blocking=True).float( ) / 255.0 # uint8 to float32, 0-255 to 0.0-1.0 # Warmup if ni <= nw: xi = [0, nw] # x interp # model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) accumulate = max( 1, np.interp(ni, xi, [1, nbs / total_batch_size]).round()) for j, x in enumerate(optimizer.param_groups): # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 """ bias的学习率从0.1下降到基准学习率lr*lf(epoch), 其他的参数学习率从0增加到lr*lf(epoch). 
lf为上面设置的余弦退火的衰减函数 """ x['lr'] = np.interp(ni, xi, [ hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch) ]) if 'momentum' in x: x['momentum'] = np.interp( ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) # Multi-scale if opt.multi_scale: sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size sf = sz / max(imgs.shape[2:]) # scale factor if sf != 1: ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:] ] # new shape (stretched to gs-multiple) imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) # loss为总损失值,loss_items为一个元组,包含分类损失,objectness损失,框的回归损失和总损失 with amp.autocast(enabled=cuda): pred = model(imgs) # forward loss, loss_items = compute_loss( pred, targets.to(device), model) # loss scaled by batch_size if rank != -1: loss *= opt.world_size # gradient averaged between devices in DDP mode # Backward scaler.scale(loss).backward() # 模型反向传播accumulate次之后再根据累积的梯度更新一次参数 if ni % accumulate == 0: scaler.step(optimizer) # optimizer.step scaler.update() optimizer.zero_grad() if ema: ema.update(model) # Print if rank in [-1, 0]: mloss = (mloss * i + loss_items) / (i + 1 ) # update mean losses mem = '%.3gG' % (torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0) # (GB) s = ('%10s' * 2 + '%10.4g' * 6) % ('%g/%g' % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1]) pbar.set_description(s) # Plot if plots and ni < 3: f = save_dir / f'train_batch{ni}.jpg' # filename Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() # if tb_writer: # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch) # tb_writer.add_graph(model, imgs) # add model to tensorboard elif plots and ni == 3 and wandb: wandb.log({ "Mosaics": [ wandb.Image(str(x), caption=x.name) for x in save_dir.glob('train*.jpg') ] }) # end batch ------------------------------------------------------------------------------------------------ # end epoch ---------------------------------------------------------------------------------------------------- # Scheduler lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard scheduler.step() # DDP process 0 or single-GPU if rank in [-1, 0]: # mAP if ema: ema.update_attr( model, include=['yaml', 'nc', 'hyp', 'gr', 'names', 'stride']) final_epoch = epoch + 1 == epochs if not opt.notest or final_epoch: # Calculate mAP results, maps, times = test.test( opt.data, batch_size=total_batch_size, imgsz=imgsz_test, model=ema.ema, single_cls=opt.single_cls, dataloader=testloader, save_dir=save_dir, plots=plots and final_epoch, log_imgs=opt.log_imgs if wandb else 0) # Write with open(results_file, 'a') as f: f.write( s + '%10.4g' * 7 % results + '\n') # P, R, [email protected], [email protected], val_loss(box, obj, cls) if len(opt.name) and opt.bucket: os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name)) # Log tags = [ 'train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss 'x/lr0', 'x/lr1', 'x/lr2' ] # params for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags): if tb_writer: tb_writer.add_scalar(tag, x, epoch) # tensorboard if wandb: wandb.log({tag: x}) # W&B # Update best mAP fi = fitness(np.array(results).reshape( 1, -1)) # weighted combination of [P, R, [email protected], [email protected]] if fi > best_fitness: best_fitness = fi # Save model save = (not opt.nosave) or 
(final_epoch and not opt.evolve) if save: with open(results_file, 'r') as f: # create checkpoint ckpt = { 'epoch': epoch, 'best_fitness': best_fitness, 'training_results': f.read(), 'model': ema.ema, 'optimizer': None if final_epoch else optimizer.state_dict(), 'wandb_id': wandb_run.id if wandb else None } # Save last, best and delete torch.save(ckpt, last) if best_fitness == fi: torch.save(ckpt, best) del ckpt # end epoch ---------------------------------------------------------------------------------------------------- # end training # Once training is finished, strip_optimizer removes the optimizer state from each checkpoint # and converts the model with model.half() from Float32 to Float16 to shrink the saved file if rank in [-1, 0]: # Strip optimizers for f in [last, best]: if f.exists(): # is *.pt strip_optimizer(f) # strip optimizer os.system('gsutil cp %s gs://%s/weights' % (f, opt.bucket)) if opt.bucket else None # upload # Plots if plots: plot_results(save_dir=save_dir) # save as results.png if wandb: files = [ 'results.png', 'precision_recall_curve.png', 'confusion_matrix.png' ] wandb.log({ "Results": [ wandb.Image(str(save_dir / f), caption=f) for f in files if (save_dir / f).exists() ] }) logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600)) # Test best.pt if opt.data.endswith('coco.yaml') and nc == 80: # if COCO results, _, _ = test.test( opt.data, batch_size=total_batch_size, imgsz=imgsz_test, model=attempt_load(best if best.exists() else last, device).half(), single_cls=opt.single_cls, dataloader=testloader, save_dir=save_dir, save_json=True, # use pycocotools plots=False) else: dist.destroy_process_group() wandb.run.finish() if wandb and wandb.run else None torch.cuda.empty_cache() return results
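# --- Illustrative sketch (added for clarity, not part of the original scripts) ---
# The LambdaLR scheduler above multiplies lr0 by a cosine factor that decays from
# 1.0 at epoch 0 to hyp['lrf'] at the final epoch. A standalone version of that lambda:
import math

def cosine_lr_multiplier(epoch: int, epochs: int, lrf: float) -> float:
    return ((1 + math.cos(epoch * math.pi / epochs)) / 2) * (1 - lrf) + lrf

# With the assumed values epochs=300, lrf=0.2:
# epoch 0 -> 1.0, epoch 150 -> 0.6, epoch 300 -> 0.2 (so lr decays from lr0 down to 0.2 * lr0)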
def train(hyp): cfg = opt.cfg # data = opt.data epochs = opt.epochs # 500200 batches at bs 64, 117263 images = 273 epochs batch_size = opt.batch_size accumulate = max(round(64 / batch_size), 1) # accumulate n times before optimizer update (bs 64) weights = opt.weights # initial training weights # Image Sizes gs = 32 # (pixels) grid size max stride # Configure run init_seeds() nc = 1 if opt.single_cls else int(len(open( opt.names_classes).readlines())) # number of classes hyp['cls'] *= nc / 80 # update coco-tuned hyp['cls'] to current dataset # Remove previous results for f in glob.glob('*_batch*.jpg') + glob.glob(results_file): os.remove(f) # Initialize model # model = Darknet(opt.cfg, opt.input_size, opt.algorithm_type).to(device) from utils.model_prune import Darknet_sss model = Darknet_sss(opt.cfg, opt.input_size, opt.algorithm_type).to(device) CBL_idx, _, prune_idx, ignore_idx = parse_module_defs(model.module_defs) # Optimizer pg0, pg1, pg2 = [], [], [] # optimizer parameter groups for k, v in dict(model.named_parameters()).items(): if '.bias' in k: pg2 += [v] # biases elif 'Conv2d.weight' in k: pg1 += [v] # apply weight_decay else: pg0 += [v] # all else if opt.adam: # hyp['lr0'] *= 0.1 # reduce lr (i.e. SGD=5E-3, Adam=5E-4) optimizer = optim.Adam(pg0, lr=hyp['lr0']) # optimizer = AdaBound(pg0, lr=hyp['lr0'], final_lr=0.1) else: optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) if opt.sss: optimizer2 = APGNAG([{ 'params': model.lambda_block }], lr=hyp['lr0'], momentum=opt.momentum, gamma=opt.gamma_data) optimizer.add_param_group({ 'params': pg1, 'weight_decay': hyp['weight_decay'] }) # add pg1 with weight_decay optimizer.add_param_group({'params': pg2}) # add pg2 (biases) print('Optimizer groups: %g .bias, %g Conv2d.weight, %g other' % (len(pg2), len(pg1), len(pg0))) del pg0, pg1, pg2 start_epoch = 0 best_fitness = 0.0 # attempt_download(weights) if weights.endswith('.pt'): # pytorch format # possible weights are '*.pt', 'yolov3-spp.pt', 'yolov3-tiny.pt' etc. ckpt = torch.load(weights, map_location=device) # load model try: # ckpt['model'] = {k: v for k, v in ckpt['model'].state_dict().items() if model.state_dict()[k].numel() == v.numel()} # model.load_state_dict(ckpt['model'], strict=False) model.load_state_dict( torch.load(opt.weights, map_location=device)['model']) except KeyError as e: s = "%s is not compatible with %s. Specify --weights '' or specify a --cfg compatible with %s. " \ "See https://github.com/ultralytics/yolov3/issues/657" % (opt.weights, opt.cfg, opt.weights) raise KeyError(s) from e # load optimizer # if ckpt['optimizer'] is not None: # optimizer.load_state_dict(ckpt['optimizer']) # best_fitness = ckpt['best_fitness'] best_fitness = 1e-5 # load results if ckpt.get('training_results') is not None: with open(results_file, 'w') as file: file.write(ckpt['training_results']) # write results.txt # epochs start_epoch = ckpt['epoch'] + 1 if epochs < start_epoch: print( '%s has been trained for %g epochs. Fine-tuning for %g additional epochs.' % (opt.weights, ckpt['epoch'], epochs)) epochs += ckpt['epoch'] # finetune additional epochs del ckpt elif len(weights) > 0: # darknet format # possible weights are '*.weights', 'yolov3-tiny.conv.15', 'darknet53.conv.74' etc. 
load_darknet_weights(model, weights) if opt.freeze_layers: output_layer_indices = [ idx - 1 for idx, module in enumerate(model.module_list) if isinstance(module, YOLOLayer) ] freeze_layer_indices = [ x for x in range(len(model.module_list)) if (x not in output_layer_indices) and ( x - 1 not in output_layer_indices) ] for idx in freeze_layer_indices: for parameter in model.module_list[idx].parameters(): parameter.requires_grad_(False) # Mixed precision training https://github.com/NVIDIA/apex if mixed_precision: model, optimizer = amp.initialize(model, optimizer, opt_level='O1', verbosity=0) # Scheduler https://arxiv.org/pdf/1812.01187.pdf lf = lambda x: (( (1 + math.cos(x * math.pi / epochs)) / 2)**1.0) * 0.95 + 0.05 # cosine scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) scheduler.last_epoch = start_epoch - 1 # see link below if opt.sss: scheduler2 = lr_scheduler.LambdaLR(optimizer2, lr_lambda=lf) scheduler2.last_epoch = start_epoch - 1 # see link below # https://discuss.pytorch.org/t/a-problem-occured-when-resuming-an-optimizer/28822 # Plot lr schedule # y = [] # for _ in range(epochs): # scheduler.step() # y.append(optimizer.param_groups[0]['lr']) # plt.plot(y, '.-', label='LambdaLR') # plt.xlabel('epoch') # plt.ylabel('LR') # plt.tight_layout() # plt.savefig('LR.png', dpi=300) model = torch.nn.DataParallel(model).to(device) model.yolo_layers = model.module.yolo_layers # move yolo layer indices to top level dataloader, dataset = create_dataloader( opt.train_path, opt.input_size, batch_size, gs, hyp=hyp, augment=True, cache=False, rect=False, local_rank=-1, # Model parameters world_size=1) testloader = create_dataloader(opt.val_path, opt.input_size, 4, gs, hyp=hyp, augment=False, cache=False, rect=True, local_rank=-1, world_size=1)[0] nw = 8 model.nc = nc # attach number of classes to model model.hyp = hyp # attach hyperparameters to model model.gr = 1.0 # giou loss ratio (obj_loss = 1.0 or giou) model.class_weights = labels_to_class_weights(dataset.labels, nc).to( device) # attach class weights model.module_list = model.module.module_list # Model EMA ema = torch_utils.ModelEMA(model) # Start training nb = len(dataloader) # number of batches n_burn = max(3 * nb, 500) # burn-in iterations, max(3 epochs, 500 iterations) maps = np.zeros(nc) # mAP per class # torch.autograd.set_detect_anomaly(True) results = ( 0, 0, 0, 0, 0, 0, 0 ) # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification' t0 = time.time() # print('Image sizes %g - %g train, %g test' % (imgsz_min, imgsz_max, imgsz_test)) print('Using %g dataloader workers' % nw) print('Starting training for %g epochs...' 
% epochs) start_epoch = 0 for epoch in range( start_epoch, epochs ): # epoch ------------------------------------------------------------------ model.train() # Update image weights (optional) if dataset.image_weights: w = model.class_weights.cpu().numpy() * (1 - maps)**2 # class weights image_weights = labels_to_image_weights(dataset.labels, nc=nc, class_weights=w) dataset.indices = random.choices(range(dataset.n), weights=image_weights, k=dataset.n) # rand weighted idx mloss = torch.zeros(4).to(device) # mean losses print(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'total', 'targets', 'img_size')) pbar = tqdm(enumerate(dataloader), total=nb) # progress bar for i, ( imgs, targets, paths, _ ) in pbar: # batch ------------------------------------------------------------- # for i, (imgs, targets, paths, _) in enumerate(dataloader): # batch ------------------------------------------------------------- ni = i + nb * epoch # number integrated batches (since train start) imgs = imgs.to(device).float( ) / 255.0 # uint8 to float32, 0 - 255 to 0.0 - 1.0 if opt.multi_scale: sz = random.randrange( opt.input_size * 0.5, opt.input_size * 1.5 + gs) // gs * gs # size sf = sz / max(imgs.shape[2:]) # scale factor if sf != 1: ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:] ] # new shape (stretched to gs-multiple) imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) targets = targets.to(device) # Burn-in if ni <= n_burn: xi = [0, n_burn] # x interp model.gr = np.interp( ni, xi, [0.0, 1.0]) # giou loss ratio (obj_loss = 1.0 or giou) accumulate = max( 1, np.interp(ni, xi, [1, 64 / batch_size]).round()) for j, x in enumerate(optimizer.param_groups): # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 x['lr'] = np.interp( ni, xi, [0.1 if j == 2 else 0.0, x['initial_lr'] * lf(epoch)]) x['weight_decay'] = np.interp( ni, xi, [0.0, hyp['weight_decay'] if j == 1 else 0.0]) if 'momentum' in x: x['momentum'] = np.interp(ni, xi, [0.9, hyp['momentum']]) # Forward # if opt.sss: pred = model(imgs) # else: # pred = model(imgs) # Loss loss, loss_items = compute_loss(pred, targets, model) if not torch.isfinite(loss): print('WARNING: non-finite loss, ending training ', loss_items) return results # Backward loss *= batch_size / 64 # scale loss if mixed_precision: with amp.scale_loss(loss, optimizer) as scaled_loss: scaled_loss.backward() else: loss.backward() # Optimize idx2mask = None # if opt.sr and opt.prune==1 and epoch > opt.epochs * 0.5: # idx2mask = get_mask2(model, prune_idx, 0.85) # BNOptimizer.updateBN(opt.sr_flag, model.module_list, opt.gamma, prune_idx, idx2mask) if ni % accumulate == 0: optimizer.step() optimizer.zero_grad() if opt.sss: optimizer2.step() optimizer2.zero_grad() ema.update(model) # Print mloss = (mloss * i + loss_items) / (i + 1) # update mean losses mem = '%.3gG' % (torch.cuda.memory_cached() / 1E9 if torch.cuda.is_available() else 0) # (GB) s = ('%10s' * 2 + '%10.3g' * 6) % ( '%g/%g' % (epoch, epochs - 1), mem, *mloss, len(targets), imgs.shape[-1]) pbar.set_description(s) # Plot if ni < 1: f = 'train_batch%g.jpg' % i # filename res = plot_images(images=imgs, targets=targets, paths=paths, fname=f) if tb_writer: tb_writer.add_image(f, res, dataformats='HWC', global_step=epoch) # tb_writer.add_graph(model, imgs) # add model to tensorboard # end batch ------------------------------------------------------------------------------------------------ # Update scheduler scheduler.step() # Process epoch results ema.update_attr(model) 
final_epoch = epoch + 1 == epochs if not opt.notest or final_epoch: # Calculate mAP results, maps, times = test.test( cfg=opt.cfg, names_file=opt.names_classes, batch_size=8, img_size=opt.input_size, conf_thres=0.01, save_json=False, # model=ema.ema.module if hasattr(ema.ema, 'module') else ema.ema, model=ema.ema, single_cls=False, dataloader=testloader, save_dir=wdir) if opt.sss: print("lambda '{}'\n".format(model.module.lambda_block if hasattr( model, 'module') else model.lambda_block)) if epoch % opt.interval_prune == 0: #------------------------------ begin soft prune ---------------------------------------------# obtain_num_parameters = lambda model: sum( [param.nelement() for param in model.parameters()]) origin_nparameters = obtain_num_parameters(model) from utils.prune_utils import parse_module_defs2, get_global_norm_thr, obtain_filters_mask_norm, merge_mask, prune_soft_model_code, get_layer_norm_thr, obtain_filters_mask_norm_per_layer CBL_idx, Conv_idx, prune_idx, _, _ = parse_module_defs2( model.module.module_defs if hasattr(model, 'module' ) else model.module_defs) # norm_thr = get_global_norm_thr(model, prune_idx, opt.global_percent) norm_thr_list, norm_prune_index = get_layer_norm_thr( model, prune_idx, opt.global_percent) print("norm_thr_list is", norm_thr_list) # print("norm index is", norm_prune_index) num_filters_l2, filters_mask_l2 = obtain_filters_mask_norm_per_layer( model, norm_thr_list, CBL_idx, prune_idx, layer_keep=opt.layer_keep) CBLidx2mask = { idx: mask for idx, mask in zip(CBL_idx, filters_mask_l2) } CBLidx2filters = { idx: filters for idx, filters in zip(CBL_idx, num_filters_l2) } for i in model.module.module_defs if hasattr( model, 'module') else model.module_defs: if i['type'] == 'shortcut': i['is_access'] = False # print('merge the mask of layers connected to shortcut!') merge_mask(model.module if hasattr(model, 'module') else model, CBLidx2mask, CBLidx2filters) prune_soft_model_code( model.module if hasattr(model, 'module') else model, CBL_idx, CBLidx2mask) print("after soft prune, map test ") results_sfp, maps_sfp, times_sfp = test.test( cfg=opt.cfg, names_file=opt.names_classes, batch_size=8, img_size=opt.input_size, conf_thres=0.01, save_json=False, model=model.module if hasattr(model, 'module') else model, single_cls=False, dataloader=testloader, save_dir=wdir) # Write with open(results_file, 'a') as f: f.write(s + '%10.3g' * 7 % results + '\n') # P, R, mAP, F1, test_losses=(GIoU, obj, cls) if len(opt.name) and opt.bucket: os.system('gsutil cp results.txt gs://%s/results/results%s.txt' % (opt.bucket, opt.name)) # Tensorboard if tb_writer: tags = [ 'train/giou_loss', 'train/obj_loss', 'train/cls_loss', 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/F1', 'val/giou_loss', 'val/obj_loss', 'val/cls_loss' ] for x, tag in zip(list(mloss[:-1]) + list(results), tags): tb_writer.add_scalar(tag, x, epoch) bn_weights = gather_bn_weights(model.module_list, prune_idx) tb_writer.add_histogram('bn_weights/hist', bn_weights.numpy(), epoch, bins='doane') # Update best mAP fi = fitness(np.array(results).reshape( 1, -1)) # fitness_i = weighted combination of [P, R, mAP, F1] # fisfp = fitness(np.array(results_sfp).reshape(1, -1)) # fitness_i = weighted combination of [P, R, mAP, F1] # if fi-fisfp < 0.05: # with open(results_file, 'r') as f: # create checkpoint # ckpt = {'epoch': epoch, # 'best_fitness': str(fisfp), # 'training_results': f.read(), # # 'model': ema.ema.module.state_dict() if hasattr(model, 'module') else ema.ema.state_dict(), # 'model': 
ema.ema.module if hasattr(ema, 'module') else ema.ema, # 'optimizer': None if final_epoch else optimizer.state_dict()} # torch.save(ckpt, last) # if (best_fitness == fi) and not final_epoch: # torch.save(ckpt, best) # del ckpt if fi > best_fitness: best_fitness = fi # Save model save = (not opt.nosave) or (final_epoch and not opt.evolve) if save: with open(results_file, 'r') as f: # create checkpoint ckpt = { 'epoch': epoch, 'best_fitness': best_fitness, 'training_results': f.read(), # 'model': ema.ema.module.state_dict() if hasattr(model, 'module') else ema.ema.state_dict(), 'model': ema.ema.module if hasattr(ema, 'module') else ema.ema, 'optimizer': None if final_epoch else optimizer.state_dict() } # Save last, best and delete torch.save(ckpt, last) if (best_fitness == fi) and not final_epoch: torch.save(ckpt, best) del ckpt # end epoch ---------------------------------------------------------------------------------------------------- # end training n = opt.name if len(n): n = '_' + n if not n.isnumeric() else n fresults, flast, fbest = 'results%s.txt' % n, wdir + 'last%s.pt' % n, wdir + 'best%s.pt' % n for f1, f2 in zip([wdir + 'last.pt', wdir + 'best.pt', 'results.txt'], [flast, fbest, fresults]): if os.path.exists(f1): os.rename(f1, f2) # rename ispt = f2.endswith('.pt') # is *.pt strip_optimizer(f2) if ispt else None # strip optimizer os.system('gsutil cp %s gs://%s/weights' % ( f2, opt.bucket)) if opt.bucket and ispt else None # upload if not opt.evolve: plot_results() # save as results.png print('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600)) dist.destroy_process_group() if torch.cuda.device_count() > 1 else None torch.cuda.empty_cache() return results
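# --- Illustrative sketch (added for clarity, not part of the original scripts) ---
# During the burn-in phase above, each parameter group's lr is linearly interpolated
# with np.interp between a start value (0.1 for biases, 0.0 otherwise) and
# initial_lr * lf(epoch). A minimal standalone version of that warm-up:
import numpy as np

def warmup_lr(ni: int, n_burn: int, start_lr: float, target_lr: float) -> float:
    # np.interp clamps outside [0, n_burn], so after burn-in the target lr is returned unchanged
    return float(np.interp(ni, [0, n_burn], [start_lr, target_lr]))

# e.g. bias group with n_burn=1000 and target_lr=0.01:
# warmup_lr(0, 1000, 0.1, 0.01) == 0.1, warmup_lr(500, 1000, 0.1, 0.01) == 0.055, warmup_lr(1000, 1000, 0.1, 0.01) == 0.01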
def run(): print_environment_info() parser = argparse.ArgumentParser(description="Trains the YOLO model.") parser.add_argument("-m", "--model", type=str, default="config/yolov3.cfg", help="Path to model definition file (.cfg)") parser.add_argument("-d", "--data", type=str, default="config/coco.data", help="Path to data config file (.data)") parser.add_argument("-e", "--epochs", type=int, default=300, help="Number of epochs") parser.add_argument("-v", "--verbose", action='store_true', help="Makes the training more verbose") parser.add_argument("--n_cpu", type=int, default=8, help="Number of cpu threads to use during batch generation") parser.add_argument("--pretrained_weights", type=str, help="Path to checkpoint file (.weights or .pth). Starts training from checkpoint model") parser.add_argument("--checkpoint_interval", type=int, default=1, help="Interval of epochs between saving model weights") parser.add_argument("--evaluation_interval", type=int, default=1, help="Interval of epochs between evaluations on validation set") parser.add_argument("--multiscale_training", action="store_false", help="Allow for multi-scale training") parser.add_argument("--iou_thres", type=float, default=0.5, help="Evaluation: IOU threshold required to qualify as detected") parser.add_argument("--conf_thres", type=float, default=0.1, help="Evaluation: Object confidence threshold") parser.add_argument("--nms_thres", type=float, default=0.5, help="Evaluation: IOU threshold for non-maximum suppression") parser.add_argument("--logdir", type=str, default="logs", help="Directory for training log files (e.g. for TensorBoard)") args = parser.parse_args() print("Command line arguments: {}".format(args)) logger = Logger(args.logdir) # Tensorboard logger # Create output directories if missing os.makedirs("output", exist_ok=True) os.makedirs("checkpoints", exist_ok=True) # Get data configuration data_config = parse_data_config(args.data) train_path = data_config["train"] valid_path = data_config["valid"] class_names = load_classes(data_config["names"]) device = torch.device("cuda" if torch.cuda.is_available() else "cpu") # ############ # Create model # ############ model = load_model(args.model, args.pretrained_weights) # Print model if args.verbose: summary(model, input_size=(3, model.hyperparams['height'], model.hyperparams['height'])) mini_batch_size = model.hyperparams['batch'] // model.hyperparams['subdivisions'] # ################# # Create Dataloader # ################# # Load training dataloader dataloader = _create_data_loader( train_path, mini_batch_size, model.hyperparams['height'], args.n_cpu, args.multiscale_training) # Load validation dataloader validation_dataloader = _create_validation_data_loader( valid_path, mini_batch_size, model.hyperparams['height'], args.n_cpu) # ################ # Create optimizer # ################ params = [p for p in model.parameters() if p.requires_grad] if (model.hyperparams['optimizer'] in [None, "adam"]): optimizer = optim.Adam( params, lr=model.hyperparams['learning_rate'], weight_decay=model.hyperparams['decay'], ) elif (model.hyperparams['optimizer'] == "sgd"): optimizer = optim.SGD( params, lr=model.hyperparams['learning_rate'], weight_decay=model.hyperparams['decay'], momentum=model.hyperparams['momentum']) else: print("Unknown optimizer. 
Please choose between (adam, sgd).") for epoch in range(args.epochs): print("\n---- Training Model ----") model.train() # Set model to training mode for batch_i, (_, imgs, targets) in enumerate(tqdm.tqdm(dataloader, desc="Training Epoch {}".format(epoch))): batches_done = len(dataloader) * epoch + batch_i imgs = imgs.to(device, non_blocking=True) targets = targets.to(device) outputs = model(imgs) loss, loss_components = compute_loss(outputs, targets, model) loss.backward() ############### # Run optimizer ############### if batches_done % model.hyperparams['subdivisions'] == 0: # Adapt learning rate # Get learning rate defined in cfg lr = model.hyperparams['learning_rate'] if batches_done < model.hyperparams['burn_in']: # Burn in lr *= (batches_done / model.hyperparams['burn_in']) else: # Set and parse the learning rate to the steps defined in the cfg for threshold, value in model.hyperparams['lr_steps']: if batches_done > threshold: lr *= value # Log the learning rate logger.scalar_summary("train/learning_rate", lr, batches_done) # Set learning rate for g in optimizer.param_groups: g['lr'] = lr # Run optimizer optimizer.step() # Reset gradients optimizer.zero_grad() # ############ # Log progress # ############ if args.verbose: print(AsciiTable( [ ["Type", "Value"], ["IoU loss", float(loss_components[0])], ["Object loss", float(loss_components[1])], ["Class loss", float(loss_components[2])], ["Loss", float(loss_components[3])], ["Batch loss", to_cpu(loss).item()], ]).table) # Tensorboard logging tensorboard_log = [ ("train/iou_loss", float(loss_components[0])), ("train/obj_loss", float(loss_components[1])), ("train/class_loss", float(loss_components[2])), ("train/loss", to_cpu(loss).item())] logger.list_of_scalars_summary(tensorboard_log, batches_done) model.seen += imgs.size(0) # ############# # Save progress # ############# # Save model to checkpoint file if epoch % args.checkpoint_interval == 0: checkpoint_path = "checkpoints/yolov3_ckpt_{}.pth".format(epoch) print("---- Saving checkpoint to: '{}' ----".format(checkpoint_path)) torch.save(model.state_dict(), checkpoint_path) # ######## # Evaluate # ######## if epoch % args.evaluation_interval == 0: print("\n---- Evaluating Model ----") # Evaluate the model on the validation set metrics_output = _evaluate( model, validation_dataloader, class_names, img_size=model.hyperparams['height'], iou_thres=args.iou_thres, conf_thres=args.conf_thres, nms_thres=args.nms_thres, verbose=args.verbose ) if metrics_output is not None: precision, recall, AP, f1, ap_class = metrics_output evaluation_metrics = [ ("validation/precision", precision.mean()), ("validation/recall", recall.mean()), ("validation/mAP", AP.mean()), ("validation/f1", f1.mean())] logger.list_of_scalars_summary(evaluation_metrics, epoch)
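# --- Illustrative sketch (added for clarity, not part of the original scripts) ---
# The Darknet-style schedule used above ramps the lr linearly during burn_in, then
# applies the step decays parsed from the .cfg into hyperparams['lr_steps']
# (a list of (threshold, scale) pairs). Standalone version of that logic:
def darknet_lr(batches_done: int, base_lr: float, burn_in: int, lr_steps) -> float:
    lr = base_lr
    if batches_done < burn_in:
        lr *= batches_done / burn_in            # linear warm-up
    else:
        for threshold, value in lr_steps:
            if batches_done > threshold:
                lr *= value                     # step decay once past each threshold
    return lr

# e.g. darknet_lr(500, 1e-3, 1000, [(400000, 0.1), (450000, 0.1)]) == 5e-4 (still in burn-in)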
print("\n---- Training Model ----") model.train() # train 시작 start_time = time.time() for batch_i, (_, imgs, targets) in enumerate( tqdm.tqdm(dataloader, desc=f"Training Epoch {epoch}")): # 미니배치만큼 돌린다 batches_done = len( dataloader ) * epoch + batch_i #len dataloader=minibatch 갯수 , batches_done은 epch의 마지막까지 학습에 이용된 minibatch의 갯수-1이 저장된다. imgs = imgs.to(device, non_blocking=True) #cpu에 있는image를 gpu로 올린다. targets = targets.to(device) #cpu에 있는 label을 gpu로 올린다. outputs = model(imgs) # loss, loss_components = compute_loss( outputs, targets, model) # 예측값과 실제값, 모델을 넣어서 loss 값을 계산한다. loss.backward() # loss 값을 backpropagation을 ############### # Run optimizer ############### if batches_done % model.hyperparams[ 'subdivisions'] == 0: # batches_done이 subdivision의 값으로 나누어 떨어지면 아래 조건문 실행-> gpu에 minibatch가 다 안올라가는 경우 # Adapt learning rate # Get learning rate defined in cfg lr = model.hyperparams[ 'learning_rate'] # lr에 learning_rate값 저장 if batches_done < model.hyperparams[ 'burn_in']: # default burn_in=1000, 만약 bathes_done이 burn in 보다 작을 경우 learning rate를 점점 작게 하여 학습속도 감소시킨다.
else: print("Unknown optimizer. Please choose between (adam, sgd).") for epoch in range(start_epoch, epochs): print("\n---- Training Model ----") for batch_i, (_, imgs, targets) in enumerate(train_loader): model.train() batches_done = len(train_loader) * epoch + batch_i imgs = imgs.to(device, non_blocking=True) targets = targets.to(device) outputs = model(imgs) loss, loss_components = compute_loss(outputs, targets, model) loss.backward() ############### # Run optimizer ############### if batches_done % model.hyperparams['subdivisions'] == 0: # Adapt learning rate # Get learning rate defined in cfg lr = model.hyperparams['learning_rate'] if batches_done < model.hyperparams['burn_in']: # Burn in lr *= (batches_done / model.hyperparams['burn_in']) else: # Set and parse the learning rate to the steps defined in the cfg for threshold, value in model.hyperparams['lr_steps']:
def test( data, weights=None, batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, # for NMS save_json=False, single_cls=False, augment=False, verbose=False, model=None, dataloader=None, save_dir=Path(''), # for saving images save_txt=False, # for auto-labelling save_hybrid=False, # for hybrid auto-labelling save_conf=False, # save auto-label confidences plots=True, log_imgs=0): # number of logged images # Initialize/load model and set device # determine whether test() was called from train.py; if so, reuse the training device training = model is not None if training: # called by train.py device = next(model.parameters()).device # get model device else: # called directly set_logging() device = select_device(opt.device, batch_size=batch_size) # Directories save_dir = Path( increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir( parents=True, exist_ok=True) # make dir # Load model model = attempt_load(weights, map_location=device) # load FP32 model # check that the input resolution is divisible by the model's maximum stride (default 32) imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99 # if device.type != 'cpu' and torch.cuda.device_count() > 1: # model = nn.DataParallel(model) # Half # on GPU, convert the model from Float32 to Float16 to speed up the forward pass half = device.type != 'cpu' # half precision only supported on CUDA if half: model.half() # to FP16 # Configure # load the dataset configuration model.eval() is_coco = data.endswith('coco.yaml') # is COCO dataset with open(data) as f: data = yaml.load(f, Loader=yaml.FullLoader) # model dict check_dataset(data) # check nc = 1 if single_cls else int(data['nc']) # number of classes # IoU thresholds from 0.5 to 0.95 in steps of 0.05 iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95 # number of IoU thresholds niou = iouv.numel() # Logging log_imgs, wandb = min(log_imgs, 100), None # ceil try: import wandb # Weights & Biases except ImportError: log_imgs = 0 # Dataloader if not training: # run an all-zeros dummy image through the model once to check the forward pass img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img _ = model(img.half() if half else img ) if device.type != 'cpu' else None # run once # image paths path = data['test'] if opt.task == 'test' else data[ 'val'] # path to val/test images # create the dataloader # note rect=True: YOLOv5 evaluation uses rectangular inference dataloader = create_dataloader( path, imgsz, batch_size, model.stride.max(), opt, pad=0.5, rect=True, prefix=colorstr('test: ' if opt.task == 'test' else 'val: '))[0] seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) # class names names = { k: v for k, v in enumerate( model.names if hasattr(model, 'names') else model.module.names) } """ COCO has 80 classes (indices 0-79), but its annotation category ids range over 0-90; coco80_to_coco91_class() maps the contiguous 80-class indices back to those original COCO category ids. """ coco91class = coco80_to_coco91_class() # header string for the tqdm progress bar s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', 'mAP@.5', 'mAP@.5:.95') # initialize metrics and timers p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0.
# 初始化测试集的损失 loss = torch.zeros(3, device=device) # 初始化json文件的字典,统计信息,ap jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): img = img.to(device, non_blocking=True) # 图片也由Float32->Float16 img = img.half() if half else img.float() # uint8 to fp16/32 img /= 255.0 # 0 - 255 to 0.0 - 1.0 targets = targets.to(device) nb, _, height, width = img.shape # batch size, channels, height, width with torch.no_grad(): # Run model """ time_synchronized()函数里面进行了torch.cuda.synchronize(),再返回的time.time() torch.cuda.synchronize()等待gpu上完成所有的工作 总的来说就是这样测试时间会更准确 """ t = time_synchronized() # inf_out为预测结果, train_out训练结果 inf_out, train_out = model( img, augment=augment) # inference and training outputs t0 += time_synchronized() - t # Compute loss # 如果是在训练时进行的test,则通过训练结果计算并返回测试集的GIoU, obj, cls损失 if training: loss += compute_loss([x.float() for x in train_out], targets, model)[1][:3] # box, obj, cls # Run NMS targets[:, 2:] *= torch.Tensor([width, height, width, height]).to(device) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb) ] if save_hybrid else [] # for autolabelling t = time_synchronized() """ non_max_suppression进行非极大值抑制; conf_thres为置信度阈值,iou_thres为iou阈值 merge为是否合并框 """ output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb) t1 += time_synchronized() - t # Statistics per image # 为每一张图片做统计, 写入预测信息到txt文件, 生成json文件字典, 统计tp等 for si, pred in enumerate(output): # 获取第si张图片的标签信息, 包括class,x,y,w,h # targets[:, 0]为标签属于哪一张图片的编号 labels = targets[targets[:, 0] == si, 1:] nl = len(labels) # 获取标签类别 tcls = labels[:, 0].tolist() if nl else [] # target class path = Path(paths[si]) # 统计测试图片数量 seen += 1 # 如果预测为空,则添加空的信息到stats里 if len(pred) == 0: if nl: stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) continue # Predictions predn = pred.clone() scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred # Append to text file # 保存预测结果为txt文件 if save_txt: # 获得对应图片的长和宽 gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0 ]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): # xyxy格式->xywh, 并对坐标进行归一化处理 xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') # W&B logging if plots and len(wandb_images) < log_imgs: box_data = [{ "position": { "minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3] }, "class_id": int(cls), "box_caption": "%s %.3f" % (names[cls], conf), "scores": { "class_score": conf }, "domain": "pixel" } for *xyxy, conf, cls in pred.tolist()] boxes = { "predictions": { "box_data": box_data, "class_labels": names } } # inference-space wandb_images.append( wandb.Image(img[si], boxes=boxes, caption=path.name)) # Append to pycocotools JSON dictionary if save_json: # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... 
image_id = int( path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for p, b in zip(pred.tolist(), box.tolist()): jdict.append({ 'image_id': image_id, 'category_id': coco91class[int(p[5])] if is_coco else int(p[5]), 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5) }) # Assign all predictions as incorrect # 初始化预测评定,niou为iou阈值的个数 correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device) if nl: # detected用来存放已检测到的目标 detected = [] # target indices tcls_tensor = labels[:, 0] # target boxes # 获得xyxy格式的框并乘以wh tbox = xywh2xyxy(labels[:, 1:5]) # 将预测框的坐标调整到基于其原本长宽的坐标 scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels if plots: confusion_matrix.process_batch( pred, torch.cat((labels[:, 0:1], tbox), 1)) # Per target class # 对图片中的每个类单独处理 for cls in torch.unique(tcls_tensor): # 标签框该类别的索引 ti = (cls == tcls_tensor).nonzero(as_tuple=False).view( -1) # prediction indices # 预测框该类别的索引 pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view( -1) # target indices # Search for detections if pi.shape[0]: """ Prediction to target ious box_iou计算预测框与标签框的iou值,max(1)选出最大的ious值,i为对应的索引 pred shape[N, 4] tbox shape[M, 4] box_iou shape[N, M] ious shape[N, 1] i shape[N, 1], i里的值属于0~M """ ious, i = box_iou(predn[pi, :4], tbox[ti]).max( 1) # best ious, indices # Append detections detected_set = set() for j in (ious > iouv[0]).nonzero(as_tuple=False): # 获得检测到的目标 d = ti[i[j]] # detected target if d.item() not in detected_set: detected_set.add(d.item()) detected.append(d) # iouv为以0.05为步长 0.5到0.95的序列 # 获得不同iou阈值下的true positive correct[ pi[j]] = ious[j] > iouv # iou_thres is 1xn if len( detected ) == nl: # all targets already located in image break # Append statistics (correct, conf, pcls, tcls) # 每张图片的结果统计到stats里 stats.append( (correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # Plot images # 画出第1个batch的图片的ground truth和预测框并保存 if plots and batch_i < 3: f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start() f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions Thread(target=plot_images, args=(img, output_to_target(output), paths, f, names), daemon=True).start() # Compute statistics # 将stats列表的信息拼接到一起 stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy if len(stats) and stats[0].any(): # 根据上面得到的tp等信息计算指标 # 精准度TP/TP+FP,召回率TP/P,map,f1分数,类别 p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names) p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean( 1) # [P, R, [email protected], [email protected]:0.95] mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean() # nt是一个列表,测试集每个类别有多少个标签框 nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class else: nt = torch.zeros(1) # Print results pf = '%20s' + '%12.3g' * 6 # print format print(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) # Print results per class if (verbose or (nc <= 20 and not training)) and nc > 1 and len(stats): for i, c in enumerate(ap_class): print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) # Print speeds t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple if not training: print( 'Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t) # Plots if plots: confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) if wandb and wandb.run: 
wandb.log({"Images": wandb_images}) wandb.log({ "Validation": [ wandb.Image(str(f), caption=f.name) for f in sorted(save_dir.glob('test*.jpg')) ] }) # Save JSON # 采用之前保存的json格式预测结果,通过cocoapi评估指标 # 需要注意的是 测试集的标签也需要转成coco的json格式 if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights ).stem if weights is not None else '' # weights anno_json = '../coco/annotations/instances_val2017.json' # annotations json pred_json = str(save_dir / f"{w}_predictions.json") # predictions json print('\nEvaluating pycocotools mAP... saving %s...' % pred_json) with open(pred_json, 'w') as f: json.dump(jdict, f) try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval anno = COCO(anno_json) # init annotations api pred = anno.loadRes(pred_json) # init predictions api eval = COCOeval(anno, pred, 'bbox') if is_coco: eval.params.imgIds = [ int(Path(x).stem) for x in dataloader.dataset.img_files ] # image IDs to evaluate eval.evaluate() eval.accumulate() eval.summarize() map, map50 = eval.stats[: 2] # update results ([email protected]:0.95, [email protected]) except Exception as e: print(f'pycocotools unable to run: {e}') # Return results if not training: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' print(f"Results saved to {save_dir}{s}") model.float() # for training maps = np.zeros(nc) + map for i, c in enumerate(ap_class): maps[c] = ap[i] return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
def train(hyp, opt, tb_writer=None): logger.info( colorstr('hyperparameters: ') + ', '.join(f'{k}={v}' for k, v in hyp.items())) save_dir, epochs, batch_size, weights = Path( opt.save_dir), opt.epochs, opt.batch_size, opt.weights # Directories wdir = save_dir / 'weights' wdir.mkdir(parents=True, exist_ok=True) # make dir last = wdir / 'last.pkl' best = wdir / 'best.pkl' results_file = save_dir / 'results.txt' # Save run settings with open(save_dir / 'hyp.yaml', 'w') as f: yaml.dump(hyp, f, sort_keys=False) with open(save_dir / 'opt.yaml', 'w') as f: yaml.dump(vars(opt), f, sort_keys=False) # Configure plots = not opt.evolve # create plots cuda = not opt.no_cuda if cuda: jt.flags.use_cuda = 1 init_seeds(1) with open(opt.data) as f: data_dict = yaml.load(f, Loader=yaml.SafeLoader) # data dict check_dataset(data_dict) # check train_path = data_dict['train'] test_path = data_dict['val'] nc = 1 if opt.single_cls else int(data_dict['nc']) # number of classes names = ['item'] if opt.single_cls and len( data_dict['names']) != 1 else data_dict['names'] # class names assert len(names) == nc, '%g names found for nc=%g dataset in %s' % ( len(names), nc, opt.data) # check # Model model = Model(opt.cfg, ch=3, nc=nc) # create pretrained = weights.endswith('.pkl') if pretrained: model.load(weights) # load # Optimizer nbs = 64 # nominal batch size accumulate = max(round(nbs / batch_size), 1) # accumulate loss before optimizing hyp['weight_decay'] *= batch_size * accumulate / nbs # scale weight_decay logger.info(f"Scaled weight_decay = {hyp['weight_decay']}") pg0, pg1, pg2 = [], [], [] # optimizer parameter groups for k, v in model.named_modules(): if hasattr(v, 'bias') and isinstance(v.bias, jt.Var): pg2.append(v.bias) # biases if isinstance(v, nn.BatchNorm): pg0.append(v.weight) # no decay elif hasattr(v, 'weight') and isinstance(v.weight, jt.Var): pg1.append(v.weight) # apply decay if opt.adam: optimizer = optim.Adam(pg0, lr=hyp['lr0'], betas=(hyp['momentum'], 0.999)) # adjust beta1 to momentum else: optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True) optimizer.add_param_group({ 'params': pg1, 'weight_decay': hyp['weight_decay'] }) # add pg1 with weight_decay optimizer.add_param_group({'params': pg2}) # add pg2 (biases) logger.info('Optimizer groups: %g .bias, %g conv.weight, %g other' % (len(pg2), len(pg1), len(pg0))) del pg0, pg1, pg2 # Scheduler https://arxiv.org/pdf/1812.01187.pdf # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR lf = one_cycle(1, hyp['lrf'], epochs) # cosine 1->hyp['lrf'] scheduler = optim.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) loggers = {} # loggers dict start_epoch, best_fitness = 0, 0.0 # Image sizes gs = int(model.stride.max()) # grid size (max stride) nl = model.model[ -1].nl # number of detection layers (used for scaling hyp['obj']) imgsz, imgsz_test = [check_img_size(x, gs) for x in opt.img_size ] # verify imgsz are gs-multiples # EMA ema = ModelEMA(model) # Trainloader dataloader = create_dataloader(train_path, imgsz, batch_size, gs, opt, hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, workers=opt.workers, image_weights=opt.image_weights, quad=opt.quad, prefix=colorstr('train: ')) mlc = np.concatenate(dataloader.labels, 0)[:, 0].max() # max label class nb = len(dataloader) # number of batches assert mlc < nc, 'Label class %g exceeds nc=%g in %s. 
Possible class labels are 0-%g' % ( mlc, nc, opt.data, nc - 1) ema.updates = start_epoch * nb // accumulate # set EMA updates testloader = create_dataloader( test_path, imgsz_test, batch_size, gs, opt, # testloader hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, workers=opt.workers, pad=0.5, prefix=colorstr('val: ')) labels = np.concatenate(dataloader.labels, 0) c = jt.array(labels[:, 0]) # classes # cf = torch.bincount(c.int(), minlength=nc) + 1. # frequency # model._initialize_biases(cf) if plots: plot_labels(labels, save_dir, loggers) if tb_writer: tb_writer.add_histogram('classes', c.numpy(), 0) # Anchors if not opt.noautoanchor: check_anchors(dataloader, model=model, thr=hyp['anchor_t'], imgsz=imgsz) # Model parameters hyp['box'] *= 3. / nl # scale to layers hyp['cls'] *= nc / 80. * 3. / nl # scale to classes and layers hyp['obj'] *= (imgsz / 640)**2 * 3. / nl # scale to image size and layers model.nc = nc # attach number of classes to model model.hyp = hyp # attach hyperparameters to model model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou) model.class_weights = labels_to_class_weights( dataloader.labels, nc) * nc # attach class weights model.names = names # Start training t0 = time.time() nw = max(round(hyp['warmup_epochs'] * nb), 1000) # number of warmup iterations, max(3 epochs, 1k iterations) # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training maps = np.zeros(nc) # mAP per class results = (0, 0, 0, 0, 0, 0, 0 ) # P, R, [email protected], [email protected], val_loss(box, obj, cls) scheduler.last_epoch = start_epoch - 1 # do not move logger.info(f'Image sizes {imgsz} train, {imgsz_test} test\n' f'Using {dataloader.num_workers} dataloader workers\n' f'Logging results to {save_dir}\n' f'Starting training for {epochs} epochs...') for epoch in range( start_epoch, epochs ): # epoch ------------------------------------------------------------------ model.train() # Update image weights (optional) if opt.image_weights: # Generate indices cw = model.class_weights.numpy() * (1 - maps)**2 / nc # class weights iw = labels_to_image_weights(dataloader.labels, nc=nc, class_weights=cw) # image weights dataloader.indices = random.choices( range(dataloader.n), weights=iw, k=dataloader.n) # rand weighted idx # Update mosaic border # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) # dataset.mosaic_border = [b - imgsz, -b] # height, width borders mloss = jt.zeros((4, )) # mean losses pbar = enumerate(dataloader) logger.info( ('\n' + '%10s' * 7) % ('Epoch', 'box', 'obj', 'cls', 'total', 'targets', 'img_size')) pbar = tqdm(pbar, total=nb) # progress bar for i, ( imgs, targets, paths, _ ) in pbar: # batch ------------------------------------------------------------- ni = i + nb * epoch # number integrated batches (since train start) imgs = imgs.float() / 255.0 # uint8 to float32, 0-255 to 0.0-1.0 # Warmup if ni <= nw: xi = [0, nw] # x interp # model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) # accumulate = max(1, np.interp(ni, xi, [1, nbs / batch_size]).round()) for j, x in enumerate(optimizer.param_groups): # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 x['lr'] = np.interp(ni, xi, [ hyp['warmup_bias_lr'] if j == 2 else 0.0, x['initial_lr'] * lf(epoch) ]) if 'momentum' in x: x['momentum'] = np.interp( ni, xi, [hyp['warmup_momentum'], hyp['momentum']]) # Multi-scale if opt.multi_scale: sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size sf = sz / 
max(imgs.shape[2:]) # scale factor if sf != 1: ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:] ] # new shape (stretched to gs-multiple) imgs = nn.interpolate(imgs, size=ns, mode='bilinear', align_corners=False) # Forward pred = model(imgs) # forward loss, loss_items = compute_loss(pred, targets, model) # loss scaled by batch_size if opt.quad: loss *= 4. # Optimize optimizer.step(loss) if ema: ema.update(model) # Print mloss = (mloss * i + loss_items) / (i + 1) # update mean losses s = ('%10s' + '%10.4g' * 6) % ('%g/%g' % (epoch, epochs - 1), *mloss, targets.shape[0], imgs.shape[-1]) pbar.set_description(s) # Plot if plots and ni < 3: f = save_dir / f'train_batch{ni}.jpg' # filename Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() # if tb_writer: # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch) # tb_writer.add_graph(model, imgs) # add model to tensorboard # end batch ------------------------------------------------------------------------------------------------ # end epoch ---------------------------------------------------------------------------------------------------- # Scheduler lr = [x['lr'] for x in optimizer.param_groups] # for tensorboard scheduler.step() # mAP if ema: ema.update_attr(model, include=[ 'yaml', 'nc', 'hyp', 'gr', 'names', 'stride', 'class_weights' ]) final_epoch = epoch + 1 == epochs if not opt.notest or final_epoch: # Calculate mAP results, maps, times = test.test(data=opt.data, batch_size=batch_size, imgsz=imgsz_test, model=ema.ema, single_cls=opt.single_cls, dataloader=testloader, save_dir=save_dir, plots=plots and final_epoch) # Write with open(results_file, 'a') as f: f.write(s + '%10.4g' * 7 % results + '\n') # P, R, [email protected], [email protected], val_loss(box, obj, cls) if len(opt.name) and opt.bucket: os.system('gsutil cp %s gs://%s/results/results%s.txt' % (results_file, opt.bucket, opt.name)) # Log tags = [ 'train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5-0.95', 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss 'x/lr0', 'x/lr1', 'x/lr2' ] # params for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags): if tb_writer: if hasattr(x, "numpy"): x = x.numpy() tb_writer.add_scalar(tag, x, epoch) # tensorboard # Update best mAP fi = fitness(np.array(results).reshape( 1, -1)) # weighted combination of [P, R, [email protected], [email protected]] if fi > best_fitness: best_fitness = fi # Save model save = (not opt.nosave) or (final_epoch and not opt.evolve) if save: # Save last, best and delete jt.save(ema.ema.state_dict(), last) if best_fitness == fi: jt.save(ema.ema.state_dict(), best) # end epoch ---------------------------------------------------------------------------------------------------- # end training # Strip optimizers final = best if best.exists() else last # final model if opt.bucket: os.system(f'gsutil cp {final} gs://{opt.bucket}/weights') # upload # Plots if plots: plot_results(save_dir=save_dir) # save as results.png # Test best.pkl logger.info('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600)) best_model = Model(opt.cfg) best_model.load(str(final)) best_model = best_model.fuse() if opt.data.endswith('coco.yaml') and nc == 80: # if COCO for conf, iou, save_json in ([0.25, 0.45, False], [0.001, 0.65, True]): # speed, mAP tests results, _, _ = test.test(opt.data, batch_size=total_batch_size, imgsz=imgsz_test, conf_thres=conf, 
iou_thres=iou, model=best_model, single_cls=opt.single_cls, dataloader=testloader, save_dir=save_dir, save_json=save_json, plots=False) return results
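# The warmup logic in the training loop above ramps each parameter-group learning rate with np.interp
# between integrated batch 0 and nw, while the cosine lambda lf sets the post-warmup target per epoch.
# Below is a minimal, self-contained sketch of that schedule; the hyp values and the convention that
# group index 2 holds the biases are assumptions taken from the snippet, not a definitive implementation.
import math
import numpy as np

hyp = {'lr0': 0.01, 'lrf': 0.2, 'warmup_epochs': 3.0,      # hypothetical hyperparameter values
       'warmup_bias_lr': 0.1, 'warmup_momentum': 0.8, 'momentum': 0.937}
epochs, nb = 100, 500                                       # epochs, batches per epoch (toy numbers)
nw = max(round(hyp['warmup_epochs'] * nb), 1000)            # number of warmup iterations

def lf(x):
    """Cosine decay from 1.0 down to hyp['lrf'] over `epochs` (same form as the lambda above)."""
    return ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - hyp['lrf']) + hyp['lrf']

def warmup_lr(ni, epoch, j, initial_lr):
    """LR for param group j at integrated batch ni; the bias group (j == 2) starts high and falls."""
    target = initial_lr * lf(epoch)
    if ni > nw:
        return target
    start = hyp['warmup_bias_lr'] if j == 2 else 0.0
    return float(np.interp(ni, [0, nw], [start, target]))

# example: LR of the conv-weight group (j = 1) halfway through warmup
print(warmup_lr(ni=nw // 2, epoch=0, j=1, initial_lr=hyp['lr0']))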
def train(hyp, opt, device, tb_writer=None, wandb=None): logger.info(f"Hyperparameters {hyp}") save_dir, epochs, batch_size, total_batch_size, weights, rank = ( Path(opt.save_dir), opt.epochs, opt.batch_size, opt.total_batch_size, opt.weights, opt.global_rank, ) # Directories wdir = save_dir / "weights" wdir.mkdir(parents=True, exist_ok=True) # make dir last = wdir / "last.pt" best = wdir / "best.pt" results_file = save_dir / "results.txt" # Save run settings with open(save_dir / "hyp.yaml", "w") as f: yaml.dump(hyp, f, sort_keys=False) with open(save_dir / "opt.yaml", "w") as f: yaml.dump(vars(opt), f, sort_keys=False) # Configure plots = not opt.evolve # create plots cuda = device.type != "cpu" init_seeds(2 + rank) with open(opt.data) as f: data_dict = yaml.load(f, Loader=yaml.FullLoader) # data dict with torch_distributed_zero_first(rank): check_dataset(data_dict) # check train_path = data_dict["train"] test_path = data_dict["val"] nc, names = ( (1, ["item"]) if opt.single_cls else (int(data_dict["nc"]), data_dict["names"]) ) # number classes, names assert len(names) == nc, "%g names found for nc=%g dataset in %s" % ( len(names), nc, opt.data, ) # check # Model pretrained = weights.endswith(".pt") if pretrained: with torch_distributed_zero_first(rank): attempt_download(weights) # download if not found locally ckpt = torch.load(weights, map_location=device) # load checkpoint if hyp.get("anchors"): ckpt["model"].yaml["anchors"] = round(hyp["anchors"]) # force autoanchor model = Model(opt.cfg or ckpt["model"].yaml, ch=3, nc=nc).to(device) # create exclude = ["anchor"] if opt.cfg or hyp.get("anchors") else [] # exclude keys state_dict = ckpt["model"].float().state_dict() # to FP32 state_dict = intersect_dicts( state_dict, model.state_dict(), exclude=exclude ) # intersect model.load_state_dict(state_dict, strict=False) # load logger.info( "Transferred %g/%g items from %s" % (len(state_dict), len(model.state_dict()), weights) ) # report else: model = Model(opt.cfg, ch=3, nc=nc).to(device) # create # Freeze freeze = [] # parameter names to freeze (full or partial) for k, v in model.named_parameters(): v.requires_grad = True # train all layers if any(x in k for x in freeze): print("freezing %s" % k) v.requires_grad = False # Optimizer nbs = 64 # nominal batch size accumulate = max( round(nbs / total_batch_size), 1 ) # accumulate loss before optimizing hyp["weight_decay"] *= total_batch_size * accumulate / nbs # scale weight_decay pg0, pg1, pg2 = [], [], [] # optimizer parameter groups for k, v in model.named_modules(): if hasattr(v, "bias") and isinstance(v.bias, nn.Parameter): pg2.append(v.bias) # biases if isinstance(v, nn.BatchNorm2d): pg0.append(v.weight) # no decay elif hasattr(v, "weight") and isinstance(v.weight, nn.Parameter): pg1.append(v.weight) # apply decay if opt.adam: optimizer = optim.Adam( pg0, lr=hyp["lr0"], betas=(hyp["momentum"], 0.999) ) # adjust beta1 to momentum else: optimizer = optim.SGD( pg0, lr=hyp["lr0"], momentum=hyp["momentum"], nesterov=True ) optimizer.add_param_group( {"params": pg1, "weight_decay": hyp["weight_decay"]} ) # add pg1 with weight_decay optimizer.add_param_group({"params": pg2}) # add pg2 (biases) logger.info( "Optimizer groups: %g .bias, %g conv.weight, %g other" % (len(pg2), len(pg1), len(pg0)) ) del pg0, pg1, pg2 # Scheduler https://arxiv.org/pdf/1812.01187.pdf # https://pytorch.org/docs/stable/_modules/torch/optim/lr_scheduler.html#OneCycleLR lf = ( lambda x: ((1 + math.cos(x * math.pi / epochs)) / 2) * (1 - hyp["lrf"]) + hyp["lrf"] ) # cosine 
scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf) # plot_lr_scheduler(optimizer, scheduler, epochs) # Logging if wandb and wandb.run is None: opt.hyp = hyp # add hyperparameters wandb_run = wandb.init( config=opt, resume="allow", project="YOLOv5" if opt.project == "runs/train" else Path(opt.project).stem, name=save_dir.stem, id=ckpt.get("wandb_id") if "ckpt" in locals() else None, ) # Resume start_epoch, best_fitness = 0, 0.0 if pretrained: # Optimizer if ckpt["optimizer"] is not None: optimizer.load_state_dict(ckpt["optimizer"]) best_fitness = ckpt["best_fitness"] # Results if ckpt.get("training_results") is not None: with open(results_file, "w") as file: file.write(ckpt["training_results"]) # write results.txt # Epochs start_epoch = ckpt["epoch"] + 1 if opt.resume: assert ( start_epoch > 0 ), "%s training to %g epochs is finished, nothing to resume." % ( weights, epochs, ) if epochs < start_epoch: logger.info( "%s has been trained for %g epochs. Fine-tuning for %g additional epochs." % (weights, ckpt["epoch"], epochs) ) epochs += ckpt["epoch"] # finetune additional epochs del ckpt, state_dict # Image sizes gs = int(max(model.stride)) # grid size (max stride) imgsz, imgsz_test = [ check_img_size(x, gs) for x in opt.img_size ] # verify imgsz are gs-multiples # DP mode if cuda and rank == -1 and torch.cuda.device_count() > 1: model = torch.nn.DataParallel(model) # SyncBatchNorm if opt.sync_bn and cuda and rank != -1: model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model).to(device) logger.info("Using SyncBatchNorm()") # EMA ema = ModelEMA(model) if rank in [-1, 0] else None # DDP mode if cuda and rank != -1: model = DDP(model, device_ids=[opt.local_rank], output_device=opt.local_rank) # Trainloader dataloader, dataset = create_dataloader( train_path, imgsz, batch_size, gs, opt, hyp=hyp, augment=True, cache=opt.cache_images, rect=opt.rect, rank=rank, world_size=opt.world_size, workers=opt.workers, ) mlc = np.concatenate(dataset.labels, 0)[:, 0].max() # max label class nb = len(dataloader) # number of batches assert ( mlc < nc ), "Label class %g exceeds nc=%g in %s. Possible class labels are 0-%g" % ( mlc, nc, opt.data, nc - 1, ) # Process 0 if rank in [-1, 0]: ema.updates = start_epoch * nb // accumulate # set EMA updates testloader = create_dataloader( test_path, imgsz_test, total_batch_size, gs, opt, hyp=hyp, cache=opt.cache_images and not opt.notest, rect=True, rank=-1, world_size=opt.world_size, workers=opt.workers, )[ 0 ] # testloader if not opt.resume: labels = np.concatenate(dataset.labels, 0) c = torch.tensor(labels[:, 0]) # classes # cf = torch.bincount(c.long(), minlength=nc) + 1. 
# frequency # model._initialize_biases(cf.to(device)) if plots: plot_labels(labels, save_dir=save_dir) if tb_writer: tb_writer.add_histogram("classes", c, 0) if wandb: wandb.log( { "Labels": [ wandb.Image(str(x), caption=x.name) for x in save_dir.glob("*labels*.png") ] } ) # Anchors if not opt.noautoanchor: check_anchors(dataset, model=model, thr=hyp["anchor_t"], imgsz=imgsz) # Model parameters hyp["cls"] *= nc / 80.0 # scale coco-tuned hyp['cls'] to current dataset model.nc = nc # attach number of classes to model model.hyp = hyp # attach hyperparameters to model model.gr = 1.0 # iou loss ratio (obj_loss = 1.0 or iou) model.class_weights = labels_to_class_weights(dataset.labels, nc).to( device ) # attach class weights model.names = names # Start training t0 = time.time() nw = max( round(hyp["warmup_epochs"] * nb), 1000 ) # number of warmup iterations, max(3 epochs, 1k iterations) # nw = min(nw, (epochs - start_epoch) / 2 * nb) # limit warmup to < 1/2 of training maps = np.zeros(nc) # mAP per class results = (0, 0, 0, 0, 0, 0, 0) # P, R, [email protected], [email protected], val_loss(box, obj, cls) scheduler.last_epoch = start_epoch - 1 # do not move scaler = amp.GradScaler(enabled=cuda) logger.info( "Image sizes %g train, %g test\n" "Using %g dataloader workers\nLogging results to %s\n" "Starting training for %g epochs..." % (imgsz, imgsz_test, dataloader.num_workers, save_dir, epochs) ) for epoch in range( start_epoch, epochs ): # epoch ------------------------------------------------------------------ model.train() # Update image weights (optional) if opt.image_weights: # Generate indices if rank in [-1, 0]: cw = ( model.class_weights.cpu().numpy() * (1 - maps) ** 2 ) # class weights iw = labels_to_image_weights( dataset.labels, nc=nc, class_weights=cw ) # image weights dataset.indices = random.choices( range(dataset.n), weights=iw, k=dataset.n ) # rand weighted idx # Broadcast if DDP if rank != -1: indices = ( torch.tensor(dataset.indices) if rank == 0 else torch.zeros(dataset.n) ).int() dist.broadcast(indices, 0) if rank != 0: dataset.indices = indices.cpu().numpy() # Update mosaic border # b = int(random.uniform(0.25 * imgsz, 0.75 * imgsz + gs) // gs * gs) # dataset.mosaic_border = [b - imgsz, -b] # height, width borders mloss = torch.zeros(4, device=device) # mean losses if rank != -1: dataloader.sampler.set_epoch(epoch) pbar = enumerate(dataloader) logger.info( ("\n" + "%10s" * 8) % ("Epoch", "gpu_mem", "box", "obj", "cls", "total", "targets", "img_size") ) if rank in [-1, 0]: pbar = tqdm(pbar, total=nb) # progress bar optimizer.zero_grad() for i, ( imgs, targets, paths, _, ) in ( pbar ): # batch ------------------------------------------------------------- ni = i + nb * epoch # number integrated batches (since train start) imgs = ( imgs.to(device, non_blocking=True).float() / 255.0 ) # uint8 to float32, 0-255 to 0.0-1.0 # Warmup if ni <= nw: xi = [0, nw] # x interp # model.gr = np.interp(ni, xi, [0.0, 1.0]) # iou loss ratio (obj_loss = 1.0 or iou) accumulate = max( 1, np.interp(ni, xi, [1, nbs / total_batch_size]).round() ) for j, x in enumerate(optimizer.param_groups): # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0 x["lr"] = np.interp( ni, xi, [ hyp["warmup_bias_lr"] if j == 2 else 0.0, x["initial_lr"] * lf(epoch), ], ) if "momentum" in x: x["momentum"] = np.interp( ni, xi, [hyp["warmup_momentum"], hyp["momentum"]] ) # Multi-scale if opt.multi_scale: sz = random.randrange(imgsz * 0.5, imgsz * 1.5 + gs) // gs * gs # size sf = sz / max(imgs.shape[2:]) # scale 
factor if sf != 1: ns = [ math.ceil(x * sf / gs) * gs for x in imgs.shape[2:] ] # new shape (stretched to gs-multiple) imgs = F.interpolate( imgs, size=ns, mode="bilinear", align_corners=False ) # Forward with amp.autocast(enabled=cuda): pred = model(imgs) # forward loss, loss_items = compute_loss( pred, targets.to(device), model ) # loss scaled by batch_size if rank != -1: loss *= ( opt.world_size ) # gradient averaged between devices in DDP mode # Backward scaler.scale(loss).backward() # Optimize if ni % accumulate == 0: scaler.step(optimizer) # optimizer.step scaler.update() optimizer.zero_grad() if ema: ema.update(model) # Print if rank in [-1, 0]: mloss = (mloss * i + loss_items) / (i + 1) # update mean losses mem = "%.3gG" % ( torch.cuda.memory_reserved() / 1e9 if torch.cuda.is_available() else 0 ) # (GB) s = ("%10s" * 2 + "%10.4g" * 6) % ( "%g/%g" % (epoch, epochs - 1), mem, *mloss, targets.shape[0], imgs.shape[-1], ) pbar.set_description(s) # Plot if plots and ni < 3: f = save_dir / f"train_batch{ni}.jpg" # filename plot_images(images=imgs, targets=targets, paths=paths, fname=f) # if tb_writer: # tb_writer.add_image(f, result, dataformats='HWC', global_step=epoch) # tb_writer.add_graph(model, imgs) # add model to tensorboard elif plots and ni == 3 and wandb: wandb.log( { "Mosaics": [ wandb.Image(str(x), caption=x.name) for x in save_dir.glob("train*.jpg") ] } ) # end batch ------------------------------------------------------------------------------------------------ # end epoch ---------------------------------------------------------------------------------------------------- # Scheduler lr = [x["lr"] for x in optimizer.param_groups] # for tensorboard scheduler.step() # DDP process 0 or single-GPU if rank in [-1, 0]: # mAP if ema: ema.update_attr( model, include=["yaml", "nc", "hyp", "gr", "names", "stride"] ) final_epoch = epoch + 1 == epochs if not opt.notest or final_epoch: # Calculate mAP results, maps, times = test.test( opt.data, batch_size=total_batch_size, imgsz=imgsz_test, model=ema.ema, single_cls=opt.single_cls, dataloader=testloader, save_dir=save_dir, plots=plots and final_epoch, log_imgs=opt.log_imgs if wandb else 0, ) # Write with open(results_file, "a") as f: f.write( s + "%10.4g" * 7 % results + "\n" ) # P, R, [email protected], [email protected], val_loss(box, obj, cls) if len(opt.name) and opt.bucket: os.system( "gsutil cp %s gs://%s/results/results%s.txt" % (results_file, opt.bucket, opt.name) ) # Log tags = [ "train/box_loss", "train/obj_loss", "train/cls_loss", # train loss "metrics/precision", "metrics/recall", "metrics/mAP_0.5", "metrics/mAP_0.5:0.95", "val/box_loss", "val/obj_loss", "val/cls_loss", # val loss "x/lr0", "x/lr1", "x/lr2", ] # params for x, tag in zip(list(mloss[:-1]) + list(results) + lr, tags): if tb_writer: tb_writer.add_scalar(tag, x, epoch) # tensorboard if wandb: wandb.log({tag: x}) # W&B # Update best mAP fi = fitness( np.array(results).reshape(1, -1) ) # weighted combination of [P, R, [email protected], [email protected]] if fi > best_fitness: best_fitness = fi # Save model save = (not opt.nosave) or (final_epoch and not opt.evolve) if save: with open(results_file, "r") as f: # create checkpoint ckpt = { "epoch": epoch, "best_fitness": best_fitness, "training_results": f.read(), "model": ema.ema, "optimizer": None if final_epoch else optimizer.state_dict(), "wandb_id": wandb_run.id if wandb else None, } # Save last, best and delete torch.save(ckpt, last) if best_fitness == fi: torch.save(ckpt, best) del ckpt # end epoch 
---------------------------------------------------------------------------------------------------- # end training if rank in [-1, 0]: # Strip optimizers n = opt.name if opt.name.isnumeric() else "" fresults, flast, fbest = ( save_dir / f"results{n}.txt", wdir / f"last{n}.pt", wdir / f"best{n}.pt", ) for f1, f2 in zip( [wdir / "last.pt", wdir / "best.pt", results_file], [flast, fbest, fresults] ): if f1.exists(): os.rename(f1, f2) # rename if str(f2).endswith(".pt"): # is *.pt strip_optimizer(f2) # strip optimizer os.system( "gsutil cp %s gs://%s/weights" % (f2, opt.bucket) ) if opt.bucket else None # upload # Finish if plots: plot_results(save_dir=save_dir) # save as results.png if wandb: files = [ "results.png", "precision_recall_curve.png", "confusion_matrix.png", ] wandb.log( { "Results": [ wandb.Image(str(save_dir / f), caption=f) for f in files if (save_dir / f).exists() ] } ) logger.info( "%g epochs completed in %.3f hours.\n" % (epoch - start_epoch + 1, (time.time() - t0) / 3600) ) else: dist.destroy_process_group() wandb.run.finish() if wandb and wandb.run else None torch.cuda.empty_cache() return results
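# The inner loop of the train() function above combines mixed precision (amp.autocast / GradScaler)
# with gradient accumulation: scaled gradients from `accumulate` consecutive batches are summed before
# a single optimizer step. The stripped-down sketch below shows only that pattern, with a toy linear
# model and synthetic data standing in for the detector and its dataloader.
import torch
import torch.nn as nn
from torch.cuda import amp

device = 'cuda' if torch.cuda.is_available() else 'cpu'
cuda = device == 'cuda'

model = nn.Linear(10, 1).to(device)                      # toy stand-in for the detection model
optimizer = torch.optim.SGD(model.parameters(), lr=0.01, momentum=0.937)
scaler = amp.GradScaler(enabled=cuda)                    # no-op when running on CPU

nbs, batch_size = 64, 16                                 # nominal vs. actual batch size
accumulate = max(round(nbs / batch_size), 1)             # optimizer step every `accumulate` batches

optimizer.zero_grad()
for ni in range(100):                                    # ni = integrated batch counter
    x = torch.randn(batch_size, 10, device=device)
    y = torch.randn(batch_size, 1, device=device)
    with amp.autocast(enabled=cuda):
        loss = nn.functional.mse_loss(model(x), y)       # forward in mixed precision
    scaler.scale(loss).backward()                        # accumulate scaled gradients
    if ni % accumulate == 0:                             # same condition as the loop above
        scaler.step(optimizer)                           # unscale gradients + optimizer.step()
        scaler.update()
        optimizer.zero_grad()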
def drivethru_0001_alexnet_cifar(config_data, tab_level=0, verbose=250): print('drivethru_0001_alexnet_mnist()') PROGRESS_TRACK_EVERY_N_PERCENT = 5. tracker_name = 'drivethru_0001_alexnet_cifar' config_data = set_debug_config(IS_DEBUG, config_data) pm.print_recursive_dict(config_data, tab_level=tab_level + 2, verbose=verbose, verbose_threshold=None) data_loader = load_cifar_0001(config_data, train=True, shuffle=True, verbose=verbose) n_batch_train = len(data_loader) # print('n_batch_train:%s'%(str(n_batch_train))) #12500 data for batch size= 4 autoreloader = AutoReloaderTestCIFAR(config_data, shuffle=True) eval_every_n_iter = get_eval_every_n_iter( config_data, n_batch_train, config_data['general']['epoch'], NO_OF_EVALUATION_DESIRED=config_data['drivethru'] ['no_of_evals_per_run'], manual_specification=False, DEBUG_N_ITER_MAX_PER_EPOCH=DEBUG_N_ITER_MAX_PER_EPOCH) state_tracker = setup_state_tracker(config_data, tracker_name, for_training=True, verbose=250, tab_level=0) net = new_or_load_model(state_tracker, config_data, verbose=verbose, tab_level=tab_level) criterion, optimizer = setup_training_tools_0001(net, config_data, verbose=verbose, tab_level=tab_level + 1) save_data_by_iter_details = {} # split save data by runs total_iter_in_this_run = 0 total_global_iter = state_tracker.save_data_by_nth_run[ state_tracker.current_run]['total_iteration'] last_saved_epoch = 1 + state_tracker.get_latest_saved_epoch() progress_tracker = int(n_batch_train / (100 / PROGRESS_TRACK_EVERY_N_PERCENT)) pm.printv('Start drive through...' % (), tab_level=tab_level) for n_epoch in range(last_saved_epoch, last_saved_epoch + config_data['general']['epoch']): state_tracker.setup_for_this_epoch(n_epoch, tab_level=tab_level + 1, verbose=verbose) for i, data in enumerate(data_loader, 0): print_progress_percentage(i, progress_tracker, n_batch_train, verbose=250, tab_level=tab_level + 1) # if emergency_drivethru_loop(EMERGENCY_DRIVETHRU_LOOP_SIGNAL, total_iter_in_this_run, eval_every_n_iter): total_iter_in_this_run += 1; continue optimizer.zero_grad() x, y0 = data x = x.to(this_device) net.train() y = net(x) loss = compute_loss(criterion, y.squeeze(3).squeeze(2).cpu(), y0) loss.backward() optimizer.step() # Drive through LRP and evaluation if (total_iter_in_this_run + 1) % eval_every_n_iter == 0: save_data_by_iter_details, autoreloader = drive_thru_evaluation( net, autoreloader, x, y, y0, save_data_by_iter_details, i, total_global_iter + total_iter_in_this_run, n_epoch, n_of_test_data_per_eval=config_data['drivethru'] ['n_of_test_data_per_eval'], n_of_test_data_per_LRP_eval=config_data['drivethru'] ['n_of_test_data_per_LRP_eval'], DEBUG_DRIVE_TRHU_LOOP=DEBUG_DRIVE_TRHU_LOOP, tab_level=tab_level + 1, verbose=verbose) # just_in_time_display(save_data_by_iter_details, verbose=verbose, tab_level=tab_level+1) if DEBUG_DRIVE_TRHU_LOOP: return # FOR LOGGING total_iter_in_this_run += 1 state_tracker.store_loss_by_epoch(loss.item(), n_epoch) stop_iter, stop_epoch = DEBUG_train_loop_0002( DEBUG_N_ITER_MAX_PER_EPOCH, i, n_epoch - last_saved_epoch, tab_level=tab_level + 1, verbose=verbose) if stop_iter: break if DEBUG_DRIVE_TRHU_LOOP2: return state_tracker.update_epoch() if stop_epoch: break state_tracker.save_data_by_iter_details = save_data_by_iter_details state_tracker.update_state(total_iter_in_this_run, config_data) save_model_by_n_th_run(net, state_tracker, tab_level=tab_level, verbose=verbose) state_tracker.display_end_state(tab_level=tab_level + 1, verbose=verbose)
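# The drive-through loop above triggers an evaluation whenever
# (total_iter_in_this_run + 1) % eval_every_n_iter == 0, so get_eval_every_n_iter only needs to spread
# the desired number of evaluations evenly over the run. The helper below is a hypothetical sketch of
# that calculation for illustration only; the real helper also handles manual_specification and the
# DEBUG_N_ITER_MAX_PER_EPOCH cap, which are omitted here.
def get_eval_every_n_iter_sketch(n_batch_train, n_epoch, no_of_evaluations_desired):
    """Evenly space `no_of_evaluations_desired` evaluations over the whole run (hypothetical)."""
    total_iters = n_batch_train * n_epoch                     # iterations in the whole run
    return max(1, total_iters // max(1, no_of_evaluations_desired))

# example: 12500 batches/epoch, 2 epochs, 10 desired evaluations -> evaluate every 2500 iterations
print(get_eval_every_n_iter_sketch(12500, 2, 10))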
def test(cfg = None, data = None, weights=None, batch_size=32, imgsz=640, conf_thres=0.001, iou_thres=0.6, # for NMS save_json=False, single_cls=False, augment=False, verbose=False, model=None, dataloader=None, save_dir=Path(''), # for saving images save_txt=False, # for auto-labelling save_hybrid=False, # for hybrid auto-labelling save_conf=False, # save auto-label confidences plots=True): # Initialize/load model and set device training = model is not None if not training: # called by train.py # called directly set_logging() # Directories save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = Model(cfg) model.load(weights) model = model.fuse() imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size # Configure model.eval() is_coco = data.endswith('coco.yaml') # is COCO dataset with open(data) as f: data = yaml.load(f, Loader=yaml.FullLoader) # model dict check_dataset(data) # check nc = 1 if single_cls else int(data['nc']) # number of classes iouv = jt.linspace(0.5, 0.95, 10) # iou vector for [email protected]:0.95 niou = iouv.numel() # Dataloader if not training: img = jt.zeros((1, 3, imgsz, imgsz)) # init img path = data['test'] if opt.task == 'test' else data['val'] # path to val/test images dataloader = create_dataloader(path, imgsz, batch_size, model.stride.max(), opt, pad=0.5, rect=True, prefix=colorstr('test: ' if opt.task == 'test' else 'val: ')) seen = 0 confusion_matrix = ConfusionMatrix(nc=nc) names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} coco91class = coco80_to_coco91_class() s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', '[email protected]', '[email protected]:.95') p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. 
loss = jt.zeros((3,)) jdict, stats, ap, ap_class = [], [], [], [] for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): img = img.float32() # uint8 to fp16/32 img /= 255.0 # 0 - 255 to 0.0 - 1.0 targets = targets nb, _, height, width = img.shape # batch size, channels, height, width with jt.no_grad(): # Run model t = time_synchronized() inf_out, train_out = model(img, augment=augment) # inference and training outputs t0 += time_synchronized() - t # Compute loss if training: loss += compute_loss([x.float() for x in train_out], targets, model)[1][:3] # box, obj, cls # Run NMS targets[:, 2:] *= jt.array([width, height, width, height]) # to pixels lb = [targets[targets[:, 0] == i, 1:] for i in range(nb)] if save_hybrid else [] # for autolabelling t = time_synchronized() output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres, labels=lb) t1 += time_synchronized() - t # Statistics per image for si, pred in enumerate(output): labels = targets[targets[:, 0] == si, 1:] nl = len(labels) tcls = labels[:, 0].tolist() if nl else [] # target class path = Path(paths[si]) seen += 1 if len(pred) == 0: if nl: stats.append((jt.zeros((0, niou), dtype="bool"), jt.array([]), jt.array([]), tcls)) continue # Predictions predn = pred.clone() predn[:, :4] = scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred # Append to text file if save_txt: gn = jt.array(shapes[si][0])[jt.array([1, 0, 1, 0])] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(jt.array(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') # Append to pycocotools JSON dictionary if save_json: # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... 
image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for p, b in zip(pred.tolist(), box.tolist()): jdict.append({'image_id': image_id, 'category_id': coco91class[int(p[5])] if is_coco else int(p[5]), 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5)}) # Assign all predictions as incorrect correct = jt.zeros((pred.shape[0], niou), dtype="bool") if nl: detected = [] # target indices tcls_tensor = labels[:, 0] # target boxes tbox = xywh2xyxy(labels[:, 1:5]) tbox = scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels if plots: confusion_matrix.process_batch(predn, jt.contrib.concat((labels[:, 0:1], tbox), 1)) # Per target class for cls in jt.unique(tcls_tensor): ti = (cls == tcls_tensor).nonzero().view(-1) # prediction indices pi = (cls == pred[:, 5]).nonzero().view(-1) # target indices # Search for detections if pi.shape[0]: # Prediction to target ious i ,ious = box_iou(predn[pi, :4], tbox[ti]).argmax(1) # best ious, indices # Append detections detected_set = set() for j in (ious > iouv[0]).nonzero(): d = ti[i[j]] # detected target if d.item() not in detected_set: detected_set.add(d.item()) detected.append(d) correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn if len(detected) == nl: # all targets already located in image break # Append statistics (correct, conf, pcls, tcls) stats.append((correct.numpy(), pred[:, 4].numpy(), pred[:, 5].numpy(), tcls)) # Plot images if plots and batch_i < 3: f = save_dir / f'test_batch{batch_i}_labels.jpg' # labels Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start() f = save_dir / f'test_batch{batch_i}_pred.jpg' # predictions Thread(target=plot_images, args=(img, output_to_target(output), paths, f, names), daemon=True).start() # Compute statistics stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy if len(stats) and stats[0].any(): p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names) ap50, ap = ap[:, 0], ap.mean(1) # [email protected], [email protected]:0.95 mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean() nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class else: nt = np.zeros((1,)) # Print results pf = '%20s' + '%12.3g' * 6 # print format print(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) # Print results per class if (verbose or (nc <= 20 and not training)) and nc > 1 and len(stats): for i, c in enumerate(ap_class): print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) # Print speeds t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple if not training: print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t) # Plots if plots: confusion_matrix.plot(save_dir=save_dir, names=list(names.values())) # Save JSON if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights anno_json = '../coco/annotations/instances_val2017.json' # annotations json pred_json = str(save_dir / f"{w}_predictions.json") # predictions json print('\nEvaluating pycocotools mAP... saving %s...' 
% pred_json) with open(pred_json, 'w') as f: json.dump(jdict, f) try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval anno = COCO(anno_json) # init annotations api pred = anno.loadRes(pred_json) # init predictions api eval = COCOeval(anno, pred, 'bbox') if is_coco: eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate eval.evaluate() eval.accumulate() eval.summarize() map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5) except Exception as e: print(f'pycocotools unable to run: {e}') # Return results if not training: s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else '' print(f"Results saved to {save_dir}{s}") maps = np.zeros(nc) + map for i, c in enumerate(ap_class): maps[c] = ap[i] return (mp, mr, map50, map, *(loss.numpy() / len(dataloader)).tolist()), maps, t
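# Both evaluation loops in this section lean on box_iou to match predictions against ground-truth
# boxes before thresholding the result against iouv. The NumPy sketch below mirrors the usual
# broadcast formulation of pairwise IoU for boxes in xyxy format; it is only illustrative, since the
# repository's own box_iou operates on framework tensors rather than NumPy arrays.
import numpy as np

def box_iou_np(boxes1, boxes2):
    """Pairwise IoU between (N, 4) and (M, 4) boxes in (x1, y1, x2, y2) format -> (N, M) matrix."""
    area1 = (boxes1[:, 2] - boxes1[:, 0]) * (boxes1[:, 3] - boxes1[:, 1])
    area2 = (boxes2[:, 2] - boxes2[:, 0]) * (boxes2[:, 3] - boxes2[:, 1])
    lt = np.maximum(boxes1[:, None, :2], boxes2[None, :, :2])   # intersection top-left, (N, M, 2)
    rb = np.minimum(boxes1[:, None, 2:], boxes2[None, :, 2:])   # intersection bottom-right
    wh = np.clip(rb - lt, 0, None)                              # clamp negative overlaps to zero
    inter = wh[..., 0] * wh[..., 1]
    return inter / (area1[:, None] + area2[None, :] - inter + 1e-16)

# example: one prediction against one target with 50% horizontal overlap -> IoU = 1/3
print(box_iou_np(np.array([[0., 0., 10., 10.]]), np.array([[5., 0., 15., 10.]])))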
def test(data, weights=None, batch_size=16, imgsz=640, conf_thres=0.001, iou_thres=0.6, # for NMS save_json=False, single_cls=False, augment=False, verbose=False, model=None, dataloader=None, save_dir=Path(''), # for saving images save_txt=False, # for auto-labelling save_conf=False, plots=True, log_imgs=0): # number of logged images # Initialize/load model and set device training = model is not None if training: # called by train.py device = next(model.parameters()).device # get model device else: # called directly set_logging() device = select_device(opt.device, batch_size=batch_size) save_txt = opt.save_txt # save *.txt labels # Directories save_dir = Path(increment_path(Path(opt.project) / opt.name, exist_ok=opt.exist_ok)) # increment run (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir # Load model model = attempt_load(weights, map_location=device) # load FP32 model imgsz = check_img_size(imgsz, s=model.stride.max()) # check img_size # Multi-GPU disabled, incompatible with .half() https://github.com/ultralytics/yolov5/issues/99 # if device.type != 'cpu' and torch.cuda.device_count() > 1: # model = nn.DataParallel(model) # Half half = device.type != 'cpu' # half precision only supported on CUDA if half: model.half() # Configure model.eval() is_coco = data.endswith('coco.yaml') # is COCO dataset with open(data) as f: data = yaml.load(f, Loader=yaml.FullLoader) # model dict check_dataset(data) # check nc = 1 if single_cls else int(data['nc']) # number of classes iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for [email protected]:0.95 niou = iouv.numel() # Logging log_imgs, wandb = min(log_imgs, 100), None # ceil try: import wandb # Weights & Biases except ImportError: log_imgs = 0 # Dataloader if not training: img = torch.zeros((1, 3, imgsz, imgsz), device=device) # init img _ = model(img.half() if half else img) if device.type != 'cpu' else None # run once path = data['test'] if opt.task == 'test' else data['val'] # path to val/test images dataloader = create_dataloader(path, imgsz, batch_size, model.stride.max(), opt, pad=0.5, rect=True)[0] seen = 0 names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)} coco91class = coco80_to_coco91_class() s = ('%20s' + '%12s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', '[email protected]', '[email protected]:.95') p, r, f1, mp, mr, map50, map, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. 
loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class, wandb_images = [], [], [], [], [] for batch_i, (img, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): img = img.to(device, non_blocking=True) img = img.half() if half else img.float() # uint8 to fp16/32 img /= 255.0 # 0 - 255 to 0.0 - 1.0 targets = targets.to(device) nb, _, height, width = img.shape # batch size, channels, height, width whwh = torch.Tensor([width, height, width, height]).to(device) # Disable gradients with torch.no_grad(): # Run model t = time_synchronized() inf_out, train_out = model(img, augment=augment) # inference and training outputs t0 += time_synchronized() - t # Compute loss if training: # if model has loss hyperparameters loss += compute_loss([x.float() for x in train_out], targets, model)[1][:3] # box, obj, cls # Run NMS t = time_synchronized() output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres) t1 += time_synchronized() - t # Statistics per image for si, pred in enumerate(output): labels = targets[targets[:, 0] == si, 1:] nl = len(labels) tcls = labels[:, 0].tolist() if nl else [] # target class path = Path(paths[si]) seen += 1 if len(pred) == 0: if nl: stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) continue # Predictions predn = pred.clone() scale_coords(img[si].shape[1:], predn[:, :4], shapes[si][0], shapes[si][1]) # native-space pred # Append to text file if save_txt: gn = torch.tensor(shapes[si][0])[[1, 0, 1, 0]] # normalization gain whwh for *xyxy, conf, cls in predn.tolist(): xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format with open(save_dir / 'labels' / (path.stem + '.txt'), 'a') as f: f.write(('%g ' * len(line)).rstrip() % line + '\n') # W&B logging if plots and len(wandb_images) < log_imgs: box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]}, "class_id": int(cls), "box_caption": "%s %.3f" % (names[cls], conf), "scores": {"class_score": conf}, "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()] boxes = {"predictions": {"box_data": box_data, "class_labels": names}} # inference-space wandb_images.append(wandb.Image(img[si], boxes=boxes, caption=path.name)) # Append to pycocotools JSON dictionary if save_json: # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... 
image_id = int(path.stem) if path.stem.isnumeric() else path.stem box = xyxy2xywh(predn[:, :4]) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for p, b in zip(pred.tolist(), box.tolist()): jdict.append({'image_id': image_id, 'category_id': coco91class[int(p[5])] if is_coco else int(p[5]), 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5)}) # Assign all predictions as incorrect correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device) if nl: detected = [] # target indices tcls_tensor = labels[:, 0] # target boxes tbox = xywh2xyxy(labels[:, 1:5]) * whwh scale_coords(img[si].shape[1:], tbox, shapes[si][0], shapes[si][1]) # native-space labels # Per target class for cls in torch.unique(tcls_tensor): ti = (cls == tcls_tensor).nonzero(as_tuple=False).view(-1) # prediction indices pi = (cls == pred[:, 5]).nonzero(as_tuple=False).view(-1) # target indices # Search for detections if pi.shape[0]: # Prediction to target ious ious, i = box_iou(predn[pi, :4], tbox[ti]).max(1) # best ious, indices # Append detections detected_set = set() for j in (ious > iouv[0]).nonzero(as_tuple=False): d = ti[i[j]] # detected target if d.item() not in detected_set: detected_set.add(d.item()) detected.append(d) correct[pi[j]] = ious[j] > iouv # iou_thres is 1xn if len(detected) == nl: # all targets already located in image break # Append statistics (correct, conf, pcls, tcls) stats.append((correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # Plot images if plots and batch_i < 3: f = save_dir / f'test_batch{batch_i}_labels.jpg' # filename plot_images(img, targets, paths, f, names) # labels f = save_dir / f'test_batch{batch_i}_pred.jpg' plot_images(img, output_to_target(output, width, height), paths, f, names) # predictions # Compute statistics stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy if len(stats) and stats[0].any(): p, r, ap, f1, ap_class = ap_per_class(*stats, plot=plots, save_dir=save_dir, names=names) p, r, ap50, ap = p[:, 0], r[:, 0], ap[:, 0], ap.mean(1) # [P, R, [email protected], [email protected]:0.95] mp, mr, map50, map = p.mean(), r.mean(), ap50.mean(), ap.mean() nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per class else: nt = torch.zeros(1) # W&B logging if plots and wandb and wandb.run: wandb.log({"Images": wandb_images}) wandb.log({"Validation": [wandb.Image(str(x), caption=x.name) for x in sorted(save_dir.glob('test*.jpg'))]}) # Print results pf = '%20s' + '%12.3g' * 6 # print format print(pf % ('all', seen, nt.sum(), mp, mr, map50, map)) # Print results per class if verbose and nc > 1 and len(stats): for i, c in enumerate(ap_class): print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i])) # Print speeds t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + (imgsz, imgsz, batch_size) # tuple if not training: print('Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t) # Save JSON if save_json and len(jdict): w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights anno_json = glob.glob('../coco/annotations/instances_val*.json')[0] # annotations json pred_json = str(save_dir / f"{w}_predictions.json") # predictions json print('\nEvaluating pycocotools mAP... saving %s...' 
% pred_json) with open(pred_json, 'w') as f: json.dump(jdict, f) try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval anno = COCO(anno_json) # init annotations api pred = anno.loadRes(pred_json) # init predictions api eval = COCOeval(anno, pred, 'bbox') if is_coco: eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate eval.evaluate() eval.accumulate() eval.summarize() map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5) except Exception as e: print('ERROR: pycocotools unable to run: %s' % e) # Return results if not training: print('Results saved to %s' % save_dir) model.float() # for training maps = np.zeros(nc) + map for i, c in enumerate(ap_class): maps[c] = ap[i] return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t
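# When save_json is set, the test functions above convert predictions from xyxy corners to the
# COCO-style [x_top_left, y_top_left, width, height] layout before dumping them for pycocotools,
# going through the center-based xywh form exactly as the code does. A minimal NumPy sketch of that
# conversion follows; the function names are illustrative, not the repository's own helpers.
import numpy as np

def xyxy2xywh_np(x):
    """(x1, y1, x2, y2) -> (x_center, y_center, w, h)."""
    y = np.copy(x)
    y[:, 0] = (x[:, 0] + x[:, 2]) / 2
    y[:, 1] = (x[:, 1] + x[:, 3]) / 2
    y[:, 2] = x[:, 2] - x[:, 0]
    y[:, 3] = x[:, 3] - x[:, 1]
    return y

def xyxy2coco_np(x):
    """(x1, y1, x2, y2) -> COCO (x_top_left, y_top_left, w, h), as built for the JSON dictionary."""
    box = xyxy2xywh_np(x)
    box[:, :2] -= box[:, 2:] / 2        # shift box center to its top-left corner
    return box

# example: a 100x50 box with its top-left corner at (10, 20) -> [[10. 20. 100. 50.]]
print(xyxy2coco_np(np.array([[10., 20., 110., 70.]])))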
def test( cfg, names_file, path=None, weights=None, batch_size=16, img_size=416, conf_thres=0.001, iou_thres=0.6, # for nms save_json=False, single_cls=False, augment=False, model=None, dataloader=None, type='v3', save_dir=None): # Initialize/load model and set device assert (model or path) if model is None: device = torch_utils.select_device(opt.device, batch_size) verbose = opt.task == 'test' # Remove previous for f in glob.glob('test_batch*.jpg'): os.remove(f) # Initialize model model = Darknet(cfg, img_size, type) # Load weights # attempt_download(weights) if weights.endswith('.pt'): # pytorch format model.load_state_dict( torch.load(weights, map_location=device)['model'].state_dict()) else: # darknet format load_darknet_weights(model, weights) # Fuse model.fuse() model.to(device) if device.type != 'cpu' and torch.cuda.device_count() > 1: model = nn.DataParallel(model) else: # called by train.py device = next(model.parameters()).device # get model device verbose = False # Configure run nc = 1 if single_cls else int(len( open(names_file).readlines())) # number of classes names = load_classes(names_file) # class names iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for [email protected]:0.95 iouv = iouv[0].view(1) # comment for [email protected]:0.95 niou = iouv.numel() # Dataloader if dataloader is None: dataset = LoadImagesAndLabels(path, img_size, batch_size, rect=True, single_cls=opt.single_cls) batch_size = min(batch_size, len(dataset)) dataloader = DataLoader(dataset, batch_size=batch_size, num_workers=min([ os.cpu_count(), batch_size if batch_size > 1 else 0, 8 ]), pin_memory=True, collate_fn=dataset.collate_fn) seen = 0 model.eval() _ = model(torch.zeros( (1, 3, img_size, img_size), device=device)) if device.type != 'cpu' else None # run once coco91class = coco80_to_coco91_class() s = ('%20s' + '%10s' * 6) % ('Class', 'Images', 'Targets', 'P', 'R', '[email protected]', 'F1') p, r, f1, mp, mr, map, mf1, t0, t1 = 0., 0., 0., 0., 0., 0., 0., 0., 0. 
loss = torch.zeros(3, device=device) jdict, stats, ap, ap_class = [], [], [], [] for batch_i, (imgs, targets, paths, shapes) in enumerate(tqdm(dataloader, desc=s)): imgs = imgs.to( device).float() / 255.0 # uint8 to float32, 0 - 255 to 0.0 - 1.0 targets = targets.to(device) nb, _, height, width = imgs.shape # batch size, channels, height, width whwh = torch.Tensor([width, height, width, height]).to(device) # Plot images with bounding boxes f = 'test_batch%g.jpg' % batch_i # filename if batch_i < 1 and not os.path.exists(f): plot_images(images=imgs, targets=targets, paths=paths, fname=f) # Disable gradients with torch.no_grad(): # Run model t = torch_utils.time_synchronized() inf_out, train_out = model(imgs) # inference and training outputs t0 += torch_utils.time_synchronized() - t # Compute loss if hasattr(model, 'hyp'): # if model has loss hyperparameters loss += compute_loss(train_out, targets, model)[1][:3] # GIoU, obj, cls # Run NMS t = torch_utils.time_synchronized() output = non_max_suppression(inf_out, conf_thres=conf_thres, iou_thres=iou_thres) # nms t1 += torch_utils.time_synchronized() - t # Statistics per image for si, pred in enumerate(output): labels = targets[targets[:, 0] == si, 1:] nl = len(labels) tcls = labels[:, 0].tolist() if nl else [] # target class seen += 1 if pred is None: if nl: stats.append((torch.zeros(0, niou, dtype=torch.bool), torch.Tensor(), torch.Tensor(), tcls)) continue # Append to text file # with open('test.txt', 'a') as file: # [file.write('%11.5g' * 7 % tuple(x) + '\n') for x in pred] # Clip boxes to image bounds clip_coords(pred, (height, width)) # Append to pycocotools JSON dictionary if save_json: # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ... image_id = int(Path(paths[si]).stem.split('_')[-1]) box = pred[:, :4].clone() # xyxy scale_coords(imgs[si].shape[1:], box, shapes[si][0], shapes[si][1]) # to original shape box = xyxy2xywh(box) # xywh box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner for p, b in zip(pred.tolist(), box.tolist()): jdict.append({ 'image_id': image_id, 'category_id': coco91class[int(p[5])], 'bbox': [round(x, 3) for x in b], 'score': round(p[4], 5) }) # Assign all predictions as incorrect correct = torch.zeros(pred.shape[0], niou, dtype=torch.bool, device=device) if nl: detected = [] # target indices tcls_tensor = labels[:, 0] # target boxes tbox = xywh2xyxy(labels[:, 1:5]) * whwh # Per target class for cls in torch.unique(tcls_tensor): ti = (cls == tcls_tensor).nonzero().view( -1) # prediction indices pi = (cls == pred[:, 5]).nonzero().view(-1) # target indices # Search for detections if pi.shape[0]: # Prediction to target ious ious, i = box_iou(pred[pi, :4], tbox[ti]).max( 1) # best ious, indices # Append detections for j in (ious > iouv[0]).nonzero(): d = ti[i[j]] # detected target if d not in detected: detected.append(d) correct[ pi[j]] = ious[j] > iouv # iou_thres is 1xn if len( detected ) == nl: # all targets already located in image break # Append statistics (correct, conf, pcls, tcls) stats.append( (correct.cpu(), pred[:, 4].cpu(), pred[:, 5].cpu(), tcls)) # Compute statistics stats = [np.concatenate(x, 0) for x in zip(*stats)] # to numpy if len(stats): p, r, ap, f1, ap_class = ap_per_class(*stats) if niou > 1: p, r, ap, f1 = p[:, 0], r[:, 0], ap.mean( 1), ap[:, 0] # [P, R, [email protected]:0.95, [email protected]] mp, mr, map, mf1 = p.mean(), r.mean(), ap.mean(), f1.mean() nt = np.bincount(stats[3].astype(np.int64), minlength=nc) # number of targets per 
class else: nt = torch.zeros(1) # Print results pf = '%20s' + '%10.3g' * 6 # print format print(pf % ('all', seen, nt.sum(), mp, mr, map, mf1)) # Print results per class if verbose and nc > 1 and len(stats): for i, c in enumerate(ap_class): print(pf % (names[c], seen, nt[c], p[i], r[i], ap[i], f1[i])) # Print speeds if verbose or save_json: t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + ( img_size, img_size, batch_size) # tuple print( 'Speed: %.1f/%.1f/%.1f ms inference/NMS/total per %gx%g image at batch-size %g' % t) maps = np.zeros(nc) + map t = tuple(x / seen * 1E3 for x in (t0, t1, t0 + t1)) + ( img_size, img_size, batch_size) # tuple # Save JSON if save_json and map and len(jdict): print('\nCOCO mAP with pycocotools...') imgIds = [ int(Path(x).stem.split('_')[-1]) for x in dataloader.dataset.img_files ] with open('results.json', 'w') as file: json.dump(jdict, file) try: from pycocotools.coco import COCO from pycocotools.cocoeval import COCOeval except ImportError: print( 'WARNING: missing pycocotools package, can not compute official COCO mAP. See requirements.txt.' ) # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb cocoGt = COCO(glob.glob('../coco/annotations/instances_val*.json') [0]) # initialize COCO ground truth api cocoDt = cocoGt.loadRes('results.json') # initialize COCO pred api cocoEval = COCOeval(cocoGt, cocoDt, 'bbox') cocoEval.params.imgIds = imgIds # [:32] # only evaluate these images cocoEval.evaluate() cocoEval.accumulate() cocoEval.summarize() map, map50 = cocoEval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5) return (mp, mr, map50, map, *(loss.cpu() / len(dataloader)).tolist()), maps, t # Return results for i, c in enumerate(ap_class): maps[c] = ap[i] print("class {} ap is {}".format(i, ap[i])) # print("map is", map) return (mp, mr, map, mf1, *(loss.cpu() / len(dataloader)).tolist()), maps, t
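# The training loops earlier in this section select best.pt by comparing a scalar fitness(...) value
# computed from the results tuple that these test() functions return. The reduction sketched below
# uses the weights [0.0, 0.0, 0.1, 0.9] over [P, R, mAP@0.5, mAP@0.5:0.95], which follows the common
# YOLOv5 convention; treat both the function name and the weights as assumptions about this codebase.
import numpy as np

def fitness_sketch(x):
    """Weighted combination of [P, R, mAP@0.5, mAP@0.5:0.95] for each row of x (shape (n, 4+))."""
    w = np.array([0.0, 0.0, 0.1, 0.9])   # assumed weights: mAP@0.5:0.95 dominates the score
    return (x[:, :4] * w).sum(1)

# example: results = (P, R, mAP@0.5, mAP@0.5:0.95, val_box, val_obj, val_cls)
results = (0.7, 0.6, 0.55, 0.35, 0.04, 0.02, 0.01)
fi = fitness_sketch(np.array(results).reshape(1, -1))
print(fi)   # 0.1 * 0.55 + 0.9 * 0.35 = 0.37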