def train(config, ADMM, device, train_loader, criterion, optimizer, scheduler, epoch):
    config.model.train()
    ce_loss = None

    for batch_idx, (data, target) in enumerate(train_loader):
        # adjust learning rate
        if config.admm:
            admm.admm_adjust_learning_rate(optimizer, epoch, config)
        else:
            if scheduler is not None:
                scheduler.step()

        data, target = data.to(device), target.to(device)
        if config.gpu is not None:
            data = data.cuda(config.gpu, non_blocking=True)
            target = target.cuda(config.gpu, non_blocking=True)

        if config.mixup:
            data, target_a, target_b, lam = mixup_data(data, target, config.alpha)

        optimizer.zero_grad()
        output = config.model(data)

        if config.mixup:
            ce_loss = mixup_criterion(criterion, output, target_a, target_b, lam, config.smooth)
        else:
            ce_loss = criterion(output, target, smooth=config.smooth)

        if config.admm:
            admm.admm_update(config, ADMM, device, train_loader, optimizer, epoch, data, batch_idx)  # update Z and U
            ce_loss, admm_loss, mixed_loss = admm.append_admm_loss(config, ADMM, ce_loss)  # append admm loss

        if config.admm:
            mixed_loss.backward()
        else:
            ce_loss.backward()

        if config.masked_progressive:
            with torch.no_grad():
                for name, W in config.model.named_parameters():
                    if name in config.zero_masks:
                        W.grad *= config.zero_masks[name]

        if config.masked_retrain:
            with torch.no_grad():
                for name, W in config.model.named_parameters():
                    if name in config.masks:
                        W.grad *= config.masks[name]

        optimizer.step()

        if batch_idx % config.print_freq == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), ce_loss.item()))
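# NOTE (added sketch): admm.append_admm_loss is repo-specific and not shown in this listing.
# A minimal, hypothetical sketch of what it computes, assuming the ADMM object carries
# per-layer auxiliary variables (ADMM.ADMM_Z), scaled duals (ADMM.ADMM_U), penalty factors
# (ADMM.rhos) and target ratios (ADMM.prune_ratios): the mixed loss is the task loss plus
# the augmented-Lagrangian term (rho / 2) * ||W - Z + U||^2 summed over the pruned layers.
def append_admm_loss_sketch(config, ADMM, ce_loss):
    admm_loss = {}
    for name, W in config.model.named_parameters():
        if name not in ADMM.prune_ratios:
            continue
        # quadratic penalty pulling W toward the sparse target Z (U is the scaled dual)
        admm_loss[name] = 0.5 * ADMM.rhos[name] * (W - ADMM.ADMM_Z[name] + ADMM.ADMM_U[name]).norm() ** 2
    mixed_loss = ce_loss + sum(admm_loss.values())
    return ce_loss, admm_loss, mixed_loss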
def train(train_loader, config, ADMM, criterion, optimizer, scheduler, epoch):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to train mode
    config.model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        # adjust learning rate
        if config.admm:
            admm.admm_adjust_learning_rate(optimizer, epoch, config)
        else:
            scheduler.step()

        input = input.cuda(config.gpu, non_blocking=True)
        target = target.cuda(config.gpu)
        data = input

        if config.mixup:
            input, target_a, target_b, lam = mixup_data(input, target, config.alpha)

        # compute output
        output = config.model(input)

        if config.mixup:
            ce_loss = mixup_criterion(criterion, output, target_a, target_b, lam, config.smooth)
        else:
            ce_loss = criterion(output, target, smooth=config.smooth)

        if config.admm:
            # `device` is assumed to be defined at module scope
            admm.admm_update(config, ADMM, device, train_loader, optimizer, epoch, data, i)  # update Z and U
            ce_loss, admm_loss, mixed_loss = admm.append_admm_loss(config, ADMM, ce_loss)  # append admm loss

        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(ce_loss.item(), input.size(0))
        top1.update(acc1[0], input.size(0))
        top5.update(acc5[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        if config.admm:
            mixed_loss.backward()
        else:
            ce_loss.backward()

        if config.masked_progressive:
            with torch.no_grad():
                for name, W in config.model.named_parameters():
                    if name in config.zero_masks:
                        W.grad *= config.zero_masks[name]

        if config.masked_retrain:
            with torch.no_grad():
                for name, W in config.model.named_parameters():
                    if name in config.masks:
                        W.grad *= config.masks[name]

        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % config.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                      epoch, i, len(train_loader), batch_time=batch_time,
                      data_time=data_time, loss=losses, top1=top1, top5=top5))
            print("cross_entropy loss: {}".format(ce_loss))
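# NOTE (added sketch): AverageMeter is not defined anywhere in these snippets; the usual
# minimal implementation, as in the PyTorch ImageNet example, is:
class AverageMeter(object):
    """Computes and stores the average and current value."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count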
def train(hyp):
    # NOTE: opt, device, mixed_precision, results_file, tb_writer, last, best and wdir are
    # module-level globals, as in the ultralytics yolov3 training script this appears adapted from.
    cfg = opt.cfg
    data = opt.data
    epochs = opt.epochs  # 500200 batches at bs 64, 117263 images = 273 epochs
    batch_size = opt.batch_size
    accumulate = max(round(64 / batch_size), 1)  # accumulate n times before optimizer update (bs 64)
    weights = opt.weights  # initial training weights
    imgsz_min, imgsz_max, imgsz_test = opt.img_size  # img sizes (min, max, test)

    # Image Sizes
    gs = 32  # (pixels) grid size
    assert math.fmod(imgsz_min, gs) == 0, '--img-size %g must be a %g-multiple' % (imgsz_min, gs)
    opt.multi_scale |= imgsz_min != imgsz_max  # multi if different (min, max)
    if opt.multi_scale:
        if imgsz_min == imgsz_max:
            imgsz_min //= 1.5
            imgsz_max //= 0.667
        grid_min, grid_max = imgsz_min // gs, imgsz_max // gs
        imgsz_min, imgsz_max = int(grid_min * gs), int(grid_max * gs)
    img_size = imgsz_max  # initialize with max size

    # Configure run
    init_seeds()
    data_dict = parse_data_cfg(data)
    train_path = data_dict['train']
    test_path = data_dict['valid']
    nc = 1 if opt.single_cls else int(data_dict['classes'])  # number of classes
    hyp['cls'] *= nc / 80  # update coco-tuned hyp['cls'] to current dataset

    # Remove previous results
    for f in glob.glob('*_batch*.jpg') + glob.glob(results_file):
        os.remove(f)

    # Initialize model
    model = Darknet(cfg).to(device)

    # Optimizer
    pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
    for k, v in dict(model.named_parameters()).items():
        if '.bias' in k:
            pg2 += [v]  # biases
        elif 'Conv2d.weight' in k:
            pg1 += [v]  # apply weight_decay
        else:
            pg0 += [v]  # all else

    if opt.adam:
        # hyp['lr0'] *= 0.1  # reduce lr (i.e. SGD=5E-3, Adam=5E-4)
        optimizer = optim.Adam(pg0, lr=hyp['lr0'])
        # optimizer = AdaBound(pg0, lr=hyp['lr0'], final_lr=0.1)
    else:
        optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
    optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay
    optimizer.add_param_group({'params': pg2})  # add pg2 (biases)
    print('Optimizer groups: %g .bias, %g Conv2d.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
    del pg0, pg1, pg2

    start_epoch = 0
    best_fitness = 0.0
    # attempt_download(weights)

    if opt.freeze_layers:
        output_layer_indices = [idx - 1 for idx, module in enumerate(model.module_list)
                                if isinstance(module, YOLOLayer)]
        freeze_layer_indices = [x for x in range(len(model.module_list))
                                if (x not in output_layer_indices) and (x - 1 not in output_layer_indices)]
        for idx in freeze_layer_indices:
            for parameter in model.module_list[idx].parameters():
                parameter.requires_grad_(False)

    # Mixed precision training https://github.com/NVIDIA/apex
    if mixed_precision:
        model, optimizer = amp.initialize(model, optimizer, opt_level='O1', verbosity=0)

    # Scheduler https://arxiv.org/pdf/1812.01187.pdf
    lf = lambda x: (((1 + math.cos(x * math.pi / epochs)) / 2) ** 1.0) * 0.95 + 0.05  # cosine
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    scheduler.last_epoch = start_epoch - 1  # see link below
    # https://discuss.pytorch.org/t/a-problem-occured-when-resuming-an-optimizer/28822

    # Plot lr schedule
    # y = []
    # for _ in range(epochs):
    #     scheduler.step()
    #     y.append(optimizer.param_groups[0]['lr'])
    # plt.plot(y, '.-', label='LambdaLR')
    # plt.xlabel('epoch')
    # plt.ylabel('LR')
    # plt.tight_layout()
    # plt.savefig('LR.png', dpi=300)

    # Dataset
    dataset = LoadImagesAndLabels(train_path, img_size, batch_size,
                                  augment=True,
                                  hyp=hyp,  # augmentation hyperparameters
                                  rect=opt.rect,  # rectangular training
                                  cache_images=opt.cache_images,
                                  single_cls=opt.single_cls)

    # Dataloader
    batch_size = min(batch_size, len(dataset))
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])  # number of workers
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             num_workers=nw,
                                             shuffle=not opt.rect,  # Shuffle=True unless rectangular training is used
                                             pin_memory=True,
                                             collate_fn=dataset.collate_fn)

    # Testloader
    testloader = torch.utils.data.DataLoader(LoadImagesAndLabels(test_path, imgsz_test, batch_size,
                                                                 hyp=hyp,
                                                                 rect=True,
                                                                 cache_images=opt.cache_images,
                                                                 single_cls=opt.single_cls),
                                             batch_size=batch_size,
                                             num_workers=nw,
                                             pin_memory=True,
                                             collate_fn=dataset.collate_fn)

    initial_rho = opt.rho
    t0 = time.time()

    """====================="""
    """ multi-rho admm train"""
    """====================="""
    if opt.admm:
        opt.notest = True

        # possible weights are '*.pt', 'yolov3-spp.pt', 'yolov3-tiny.pt' etc.
        chkpt = torch.load(weights, map_location=device)  # load checkpoint
        try:
            # chkpt['model'] = {k: v for k, v in chkpt['model'].items() if model.state_dict()[k].numel() == v.numel()}
            model.load_state_dict(chkpt['model'], strict=False)
        except Exception as e:
            s = "%s is not compatible with %s. Specify --weights '' or specify a --cfg compatible with %s. " \
                "See https://github.com/ultralytics/yolov3/issues/657" % (opt.weights, opt.cfg, opt.weights)
            print(e)
            raise KeyError(s) from e
        del chkpt

        # Initialize distributed training
        if device.type != 'cpu' and torch.cuda.device_count() > 1 and torch.distributed.is_available():
            dist.init_process_group(backend='nccl',  # distributed backend
                                    init_method='tcp://127.0.0.1:9999',  # distributed training init method
                                    world_size=1,  # number of nodes for distributed training
                                    rank=0)  # distributed training node rank
            model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)
            model.yolo_layers = model.module.yolo_layers  # move yolo layer indices to top level

        # Model parameters
        model.nc = nc  # attach number of classes to model
        model.hyp = hyp  # attach hyperparameters to model
        model.gr = 1.0  # giou loss ratio (obj_loss = 1.0 or giou)
        model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device)  # attach class weights

        # Model EMA
        ema = torch_utils.ModelEMA(model)

        # Start training
        nb = len(dataloader)  # number of batches
        n_burn = max(int(0.7 * nb), 500)  # burn-in iterations, max(0.7 epochs, 500 iterations)
        maps = np.zeros(nc)  # mAP per class
        # torch.autograd.set_detect_anomaly(True)
        results = (0, 0, 0, 0, 0, 0, 0)  # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
        print('Image sizes %g - %g train, %g test' % (imgsz_min, imgsz_max, imgsz_test))
        print('Using %g dataloader workers' % nw)
        print('Starting training for %g epochs...' % epochs)

        for rho_idx in range(opt.rho_num):  # renamed from `i`, which the batch loop below shadows
            current_rho = initial_rho * 10 ** rho_idx
            ADMM = admm.ADMM(model, file_name="./prune_config/" + opt.config_file + ".yaml", rho=current_rho)
            admm.admm_initialization(opt, ADMM=ADMM, model=model)  # initialize Z variable

            for epoch in range(start_epoch, epochs):  # epoch --------------------------------------------------
                print("current rho: {}".format(current_rho))
                model.train()

                masks = {}
                if opt.masked_retrain and not opt.combine_progressive:
                    print("full acc re-train masking")
                    for name, W in (model.module.named_parameters()
                                    if type(model) is torch.nn.parallel.DistributedDataParallel
                                    else model.named_parameters()):
                        if name not in ADMM.prune_ratios:
                            continue
                        above_threshold, W = admm.weight_pruning(opt, W, ADMM.prune_ratios[name])
                        W.data = W
                        masks[name] = above_threshold
                elif opt.combine_progressive:
                    print("progressive admm-train/re-train masking")
                    for name, W in (model.module.named_parameters()
                                    if type(model) is torch.nn.parallel.DistributedDataParallel
                                    else model.named_parameters()):
                        weight = W.cpu().detach().numpy()
                        non_zeros = (weight != 0).astype(np.float32)
                        zero_mask = torch.from_numpy(non_zeros).cuda()
                        W = torch.from_numpy(weight).cuda()
                        W.data = W
                        masks[name] = zero_mask

                # Update image weights (optional)
                if dataset.image_weights:
                    w = model.class_weights.cpu().numpy() * (1 - maps) ** 2  # class weights
                    image_weights = labels_to_image_weights(dataset.labels, nc=nc, class_weights=w)
                    dataset.indices = random.choices(range(dataset.n), weights=image_weights, k=dataset.n)  # rand weighted idx

                mloss = torch.zeros(4).to(device)  # mean losses
                print(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'total', 'targets', 'img_size'))
                pbar = tqdm(enumerate(dataloader), total=nb)  # progress bar
                for i, (imgs, targets, paths, _) in pbar:  # batch ------------------------------------------
                    ni = i + nb * epoch  # number integrated batches (since train start)
                    imgs = imgs.to(device).float() / 255.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0
                    targets = targets.to(device)

                    # Burn-in
                    if ni <= n_burn:
                        xi = [0, n_burn]  # x interp
                        model.gr = np.interp(ni, xi, [0.0, 1.0])  # giou loss ratio (obj_loss = 1.0 or giou)
                        accumulate = max(1, np.interp(ni, xi, [1, 64 / batch_size]).round())
                        for j, x in enumerate(optimizer.param_groups):
                            # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                            x['lr'] = np.interp(ni, xi, [0.1 if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                            x['weight_decay'] = np.interp(ni, xi, [0.0, hyp['weight_decay'] if j == 1 else 0.0])
                            if 'momentum' in x:
                                x['momentum'] = np.interp(ni, xi, [0.9, hyp['momentum']])

                    # Multi-Scale
                    if opt.multi_scale:
                        if ni / accumulate % 1 == 0:  # adjust img_size (67% - 150%) every 1 batch
                            img_size = random.randrange(grid_min, grid_max + 1) * gs
                        sf = img_size / max(imgs.shape[2:])  # scale factor
                        if sf != 1:
                            ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to 32-multiple)
                            imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

                    # Forward
                    pred = model(imgs)

                    # Loss
                    loss, loss_items = compute_loss(pred, targets, model)
                    if not torch.isfinite(loss):
                        print('WARNING: non-finite loss, ending training ', loss_items)
                        return results

                    # Backward
                    loss *= batch_size / 64  # scale loss
                    admm.z_u_update(opt, ADMM, model, device, dataloader, optimizer, epoch, imgs, i, tb_writer)  # update Z and U variables
                    loss, admm_loss, mixed_loss = admm.append_admm_loss(opt, ADMM, model, loss)  # append admm loss
                    if mixed_precision:
                        with amp.scale_loss(mixed_loss, optimizer) as scaled_loss:
                            scaled_loss.backward()
                    else:
                        mixed_loss.backward()

                    if opt.combine_progressive:
                        with torch.no_grad():
                            for name, W in (model.module.named_parameters()
                                            if type(model) is torch.nn.parallel.DistributedDataParallel
                                            else model.named_parameters()):
                                if name in masks:
                                    W.grad *= masks[name]

                    # Optimize
                    if ni % accumulate == 0:
                        optimizer.step()
                        optimizer.zero_grad()
                        ema.update(model)

                    # Print
                    mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                    mem = '%.3gG' % (torch.cuda.memory_cached() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
                    s = ('%10s' * 2 + '%10.3g' * 6) % ('%g/%g' % (epoch, epochs - 1), mem, *mloss, len(targets), img_size)
                    pbar.set_description(s)

                    # Plot
                    # if ni < 1:
                    #     f = 'train_batch%g.jpg' % i  # filename
                    #     res = plot_images(images=imgs, targets=targets, paths=paths, fname=f)
                    #     if tb_writer:
                    #         tb_writer.add_image(f, res, dataformats='HWC', global_step=epoch)
                    #         # tb_writer.add_graph(model, imgs)  # add model to tensorboard
                    # end batch ----------------------------------------------------------------------------

                # Update scheduler
                if opt.admm:
                    admm.admm_adjust_learning_rate(optimizer, epoch, opt)
                else:
                    scheduler.step()

                # Process epoch results
                ema.update_attr(model)
                final_epoch = epoch + 1 == epochs
                if not opt.notest:  # or final_epoch:  # Calculate mAP
                    is_coco = any([x in data for x in ['coco.data', 'coco2014.data', 'coco2017.data']]) and model.nc == 80
                    results, maps = test.test(cfg, data,
                                              batch_size=batch_size,
                                              imgsz=imgsz_test,
                                              model=ema.ema,
                                              save_json=final_epoch and is_coco,
                                              single_cls=opt.single_cls,
                                              dataloader=testloader,
                                              multi_label=ni > n_burn)

                # Write
                with open(results_file, 'a') as f:
                    f.write(s + '%10.3g' * 7 % results + '\n')  # P, R, mAP, F1, test_losses=(GIoU, obj, cls)
                if len(opt.name) and opt.bucket:
                    os.system('gsutil cp results.txt gs://%s/results/results%s.txt' % (opt.bucket, opt.name))

                # Tensorboard
                if tb_writer:
                    tags = ['train/giou_loss', 'train/obj_loss', 'train/cls_loss',
                            'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/F1',
                            'val/giou_loss', 'val/obj_loss', 'val/cls_loss']
                    for x, tag in zip(list(mloss[:-1]) + list(results), tags):
                        tb_writer.add_scalar(tag, x, epoch)

                # Update best mAP
                fi = fitness(np.array(results).reshape(1, -1))  # fitness_i = weighted combination of [P, R, mAP, F1]
                if fi > best_fitness:
                    best_fitness = fi
                # end epoch ------------------------------------------------------------------------------------

            # end training for this rho: final LR adjustment, then save the ADMM-trained weights
            admm.admm_adjust_learning_rate(optimizer, epoch, opt)

            print("Saving model.")
            torch.save(model.module.state_dict() if type(model) is nn.parallel.DistributedDataParallel
                       else model.state_dict(),
                       "./model_pruned/yolov4_{}_{}_{}.pt".format(current_rho, opt.config_file, opt.sparsity_type))

        if not opt.evolve:
            plot_results()  # save as results.png
        print('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
        # dist.destroy_process_group() if torch.cuda.device_count() > 1 else None
        # torch.cuda.empty_cache()
        # return results

    """=============="""
    """masked retrain"""
    """=============="""
    if opt.masked_retrain:
        ADMM = admm.ADMM(model, file_name="./prune_config/" + opt.config_file + ".yaml", rho=initial_rho)

        if not opt.resume:
            # possible weights are '*.pt', 'yolov3-spp.pt', 'yolov3-tiny.pt' etc.
            print("\n>_ Loading file: ./model_pruned/yolov4_{}_{}_{}.pt".format(
                initial_rho * 10 ** (opt.rho_num - 1), opt.config_file, opt.sparsity_type))
            chkpt = torch.load("./model_pruned/yolov4_{}_{}_{}.pt".format(
                initial_rho * 10 ** (opt.rho_num - 1), opt.config_file, opt.sparsity_type),
                map_location=device)
            try:
                # chkpt['model'] = {k: v for k, v in chkpt['model'].items() if model.state_dict()[k].numel() == v.numel()}
                model.load_state_dict(chkpt, strict=False)  # ['model']
            except KeyError as e:
                raise KeyError() from e

            # ------------------------------------------ hard prune ------------------------------------------
            admm.hard_prune(opt, ADMM, model)
            # -------------------------------------------------------------------------------------------------
        else:
            try:
                chkpt = torch.load(weights, map_location=device)
                chkpt['model'] = {k: v for k, v in chkpt['model'].items()
                                  if model.state_dict()[k].numel() == v.numel()}
                model.load_state_dict(chkpt['model'], strict=False)
            except KeyError as e:
                raise KeyError() from e

            # load optimizer
            if chkpt['optimizer'] is not None:
                optimizer.load_state_dict(chkpt['optimizer'])
                best_fitness = chkpt['best_fitness']

            # load results
            if chkpt.get('training_results') is not None:
                with open(results_file, 'w') as file:
                    file.write(chkpt['training_results'])  # write results.txt

            start_epoch = chkpt['epoch'] + 1
            del chkpt

        # Initialize distributed training
        if device.type != 'cpu' and torch.cuda.device_count() > 1 and torch.distributed.is_available():
            dist.init_process_group(backend='nccl',  # distributed backend
                                    init_method='tcp://127.0.0.1:9999',  # distributed training init method
                                    world_size=1,  # number of nodes for distributed training
                                    rank=0)  # distributed training node rank
            model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)
            model.yolo_layers = model.module.yolo_layers  # move yolo layer indices to top level

        # Model parameters
        model.nc = nc  # attach number of classes to model
        model.hyp = hyp  # attach hyperparameters to model
        model.gr = 1.0  # giou loss ratio (obj_loss = 1.0 or giou)
        model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device)  # attach class weights

        # Model EMA
        ema = torch_utils.ModelEMA(model)

        # Start training
        nb = len(dataloader)  # number of batches
        n_burn = max(3 * nb, 500)  # burn-in iterations, max(3 epochs, 500 iterations)
        maps = np.zeros(nc)  # mAP per class
        # torch.autograd.set_detect_anomaly(True)
        results = (0, 0, 0, 0, 0, 0, 0)  # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
        print('Image sizes %g - %g train, %g test' % (imgsz_min, imgsz_max, imgsz_test))
        print('Using %g dataloader workers' % nw)
        print('Starting training for %g epochs...' % epochs)

        for epoch in range(start_epoch, epochs):  # epoch --------------------------------------------------------
            model.train()

            if opt.masked_retrain and not opt.combine_progressive:
                print("full acc re-train masking")
                masks = {}
                for name, W in (model.module.named_parameters()
                                if type(model) is torch.nn.parallel.DistributedDataParallel
                                else model.named_parameters()):
                    if name not in ADMM.prune_ratios:
                        continue
                    above_threshold, W = admm.weight_pruning(opt, W, ADMM.prune_ratios[name])
                    W.data = W
                    masks[name] = above_threshold
            elif opt.combine_progressive:
                print("progressive admm-train/re-train masking")
                masks = {}
                for name, W in (model.module.named_parameters()
                                if type(model) is torch.nn.parallel.DistributedDataParallel
                                else model.named_parameters()):
                    weight = W.cpu().detach().numpy()
                    non_zeros = (weight != 0).astype(np.float32)
                    zero_mask = torch.from_numpy(non_zeros).cuda()
                    W = torch.from_numpy(weight).cuda()
                    W.data = W
                    masks[name] = zero_mask

            # Update image weights (optional)
            if dataset.image_weights:
                w = model.class_weights.cpu().numpy() * (1 - maps) ** 2  # class weights
                image_weights = labels_to_image_weights(dataset.labels, nc=nc, class_weights=w)
                dataset.indices = random.choices(range(dataset.n), weights=image_weights, k=dataset.n)  # rand weighted idx

            mloss = torch.zeros(4).to(device)  # mean losses
            print(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'total', 'targets', 'img_size'))
            pbar = tqdm(enumerate(dataloader), total=nb)  # progress bar
            for i, (imgs, targets, paths, _) in pbar:  # batch ----------------------------------------------
                ni = i + nb * epoch  # number integrated batches (since train start)
                imgs = imgs.to(device).float() / 255.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0
                targets = targets.to(device)

                # Burn-in
                if ni <= n_burn:
                    xi = [0, n_burn]  # x interp
                    model.gr = np.interp(ni, xi, [0.0, 1.0])  # giou loss ratio (obj_loss = 1.0 or giou)
                    accumulate = max(1, np.interp(ni, xi, [1, 64 / batch_size]).round())
                    for j, x in enumerate(optimizer.param_groups):
                        # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                        x['lr'] = np.interp(ni, xi, [0.1 if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                        x['weight_decay'] = np.interp(ni, xi, [0.0, hyp['weight_decay'] if j == 1 else 0.0])
                        if 'momentum' in x:
                            x['momentum'] = np.interp(ni, xi, [0.9, hyp['momentum']])

                # Multi-Scale
                if opt.multi_scale:
                    if ni / accumulate % 1 == 0:  # adjust img_size (67% - 150%) every 1 batch
                        img_size = random.randrange(grid_min, grid_max + 1) * gs
                    sf = img_size / max(imgs.shape[2:])  # scale factor
                    if sf != 1:
                        ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to 32-multiple)
                        imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

                # Forward
                pred = model(imgs)

                # Loss
                loss, loss_items = compute_loss(pred, targets, model)
                if not torch.isfinite(loss):
                    print('WARNING: non-finite loss, ending training ', loss_items)
                    return results

                # Backward
                loss *= batch_size / 64  # scale loss
                if mixed_precision:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()

                if opt.combine_progressive:
                    with torch.no_grad():
                        for name, W in (model.module.named_parameters()
                                        if type(model) is torch.nn.parallel.DistributedDataParallel
                                        else model.named_parameters()):
                            if name in masks:
                                W.grad *= masks[name]
                if opt.masked_retrain:
                    with torch.no_grad():
                        for name, W in (model.module.named_parameters()
                                        if type(model) is torch.nn.parallel.DistributedDataParallel
                                        else model.named_parameters()):
                            if name in masks:
                                W.grad *= masks[name]

                # Optimize
                if ni % accumulate == 0:
                    optimizer.step()
                    optimizer.zero_grad()
                    ema.update(model)

                # Print
                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                mem = '%.3gG' % (torch.cuda.memory_cached() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
                s = ('%10s' * 2 + '%10.3g' * 6) % ('%g/%g' % (epoch, epochs - 1), mem, *mloss, len(targets), img_size)
                pbar.set_description(s)

                # Plot
                if ni < 1:
                    f = 'train_batch%g.jpg' % i  # filename
                    res = plot_images(images=imgs, targets=targets, paths=paths, fname=f)
                    if tb_writer:
                        tb_writer.add_image(f, res, dataformats='HWC', global_step=epoch)
                        # tb_writer.add_graph(model, imgs)  # add model to tensorboard
                # end batch --------------------------------------------------------------------------------

            # Update scheduler
            scheduler.step()

            # Process epoch results
            ema.update_attr(model)
            final_epoch = epoch + 1 == epochs
            if not opt.notest or final_epoch:  # Calculate mAP
                is_coco = any([x in data for x in ['coco.data', 'coco2014.data', 'coco2017.data']]) and model.nc == 80
                results, maps = test.test(cfg, data,
                                          batch_size=batch_size,
                                          imgsz=imgsz_test,
                                          model=ema.ema,
                                          save_json=final_epoch and is_coco,
                                          single_cls=opt.single_cls,
                                          dataloader=testloader,
                                          multi_label=ni > n_burn)

            # Write
            with open(results_file, 'a') as f:
                f.write(s + '%10.3g' * 7 % results + '\n')  # P, R, mAP, F1, test_losses=(GIoU, obj, cls)
            if len(opt.name) and opt.bucket:
                os.system('gsutil cp results.txt gs://%s/results/results%s.txt' % (opt.bucket, opt.name))

            # Tensorboard
            if tb_writer:
                tags = ['train/giou_loss', 'train/obj_loss', 'train/cls_loss',
                        'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/F1',
                        'val/giou_loss', 'val/obj_loss', 'val/cls_loss']
                for x, tag in zip(list(mloss[:-1]) + list(results), tags):
                    tb_writer.add_scalar(tag, x, epoch)

            # Update best mAP
            fi = fitness(np.array(results).reshape(1, -1))  # fitness_i = weighted combination of [P, R, mAP, F1]
            if fi > best_fitness:
                best_fitness = fi
                print("\n>_ Got better accuracy {:.3f}% now...\n".format(results[2]))
                # torch.save(ema.ema.module.state_dict() if hasattr(model, 'module') else ema.ema.state_dict(),
                #            "./model_retrained/yolov4_retrained_acc_{:.3f}_{}rhos_{}_{}.pt".format(
                #                results[2], opt.rho_num, opt.config_file, opt.sparsity_type))

            # Save model
            save = (not opt.nosave) or (final_epoch and not opt.evolve)
            if save:
                with open(results_file, 'r') as f:
                    # create checkpoint
                    chkpt = {'epoch': epoch,
                             'best_fitness': best_fitness,
                             'training_results': f.read(),
                             'model': ema.ema.module.state_dict() if hasattr(model, 'module')
                             else ema.ema.state_dict(),
                             'optimizer': None if final_epoch else optimizer.state_dict()}

                # Save last, best and delete
                torch.save(chkpt, last)
                if (best_fitness == fi) and not final_epoch:
                    torch.save(chkpt, best)
                del chkpt
            # end epoch ------------------------------------------------------------------------------------------

        # end training
        test_sparsity(model)
        print("Best Acc: {:.4f}".format(results[2]))

        n = opt.name
        if len(n):
            n = '_' + n if not n.isnumeric() else n
            fresults, flast, fbest = 'results%s.txt' % n, wdir + 'last%s.pt' % n, wdir + 'best%s.pt' % n
            for f1, f2 in zip([wdir + 'last.pt', wdir + 'best.pt', 'results.txt'], [flast, fbest, fresults]):
                if os.path.exists(f1):
                    os.rename(f1, f2)  # rename
                    ispt = f2.endswith('.pt')  # is *.pt
                    strip_optimizer(f2) if ispt else None  # strip optimizer
                    os.system('gsutil cp %s gs://%s/weights' % (f2, opt.bucket)) if opt.bucket and ispt else None  # upload

        if not opt.evolve:
            plot_results()  # save as results.png
        print('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
        # dist.destroy_process_group() if torch.cuda.device_count() > 1 else None
        # torch.cuda.empty_cache()

    return results
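# NOTE (added sketch): admm.z_u_update above is repo-specific. Conceptually it runs the two
# classic ADMM splitting steps once per ADMM interval: Z <- prune(W + U), the Euclidean
# projection of W + U onto the sparsity constraint (implemented by magnitude pruning), and
# U <- U + W - Z, the scaled dual ascent. A hypothetical sketch, assuming an opt.admm_epochs
# update interval and the same ADMM fields used in the sketch further above:
def z_u_update_sketch(opt, ADMM, model, epoch, batch_idx):
    if epoch % opt.admm_epochs != 0 or batch_idx != 0:
        return  # only update at the start of every admm_epochs-th epoch
    for name, W in model.named_parameters():
        if name not in ADMM.prune_ratios:
            continue
        Z = (W + ADMM.ADMM_U[name]).detach()
        _, Z = admm.weight_pruning(opt, Z, ADMM.prune_ratios[name])  # project onto the sparse set
        ADMM.ADMM_Z[name] = Z
        ADMM.ADMM_U[name] = ADMM.ADMM_U[name] + W.detach() - Z  # dual update: U := U + W - Z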
def train(train_loader, criterion, optimizer, epoch, config):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    nat_losses = AverageMeter()
    adv_losses = AverageMeter()
    nat_top1 = AverageMeter()
    adv_top1 = AverageMeter()

    # switch to train mode
    config.model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        # adjust learning rate
        if config.admm:
            admm.admm_adjust_learning_rate(optimizer, epoch, config)
        else:
            scheduler.step()  # `scheduler` (like `ADMM` and `device` below) is assumed to be in scope

        if config.gpu is not None:
            input = input.cuda(config.gpu, non_blocking=True)
            target = target.cuda(config.gpu, non_blocking=True)

        if config.mixup:
            input, target_a, target_b, lam = mixup_data(input, target, config.alpha)

        # compute output (AttackPGD returns natural logits, adversarial logits and the perturbed inputs)
        nat_output, adv_output, pert_inputs = config.model(input, target)

        if config.mixup:
            adv_loss = mixup_criterion(criterion, adv_output, target_a, target_b, lam, config.smooth)
            nat_loss = mixup_criterion(criterion, nat_output, target_a, target_b, lam, config.smooth)
        else:
            adv_loss = criterion(adv_output, target, smooth=config.smooth)
            nat_loss = criterion(nat_output, target, smooth=config.smooth)

        if config.admm:
            admm.admm_update(config, ADMM, device, train_loader, optimizer, epoch, input, i)  # update Z and U
            adv_loss, admm_loss, mixed_loss = admm.append_admm_loss(config, ADMM, adv_loss)  # append admm loss

        # measure accuracy and record loss
        nat_acc1, _ = accuracy(nat_output, target, topk=(1, 5))
        adv_acc1, _ = accuracy(adv_output, target, topk=(1, 5))
        nat_losses.update(nat_loss.item(), input.size(0))
        adv_losses.update(adv_loss.item(), input.size(0))
        adv_top1.update(adv_acc1[0], input.size(0))
        nat_top1.update(nat_acc1[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        if config.admm:
            mixed_loss.backward()
        else:
            adv_loss.backward()

        if config.masked_progressive:
            with torch.no_grad():
                for name, W in config.model.named_parameters():
                    if name in config.zero_masks:
                        W.grad *= config.zero_masks[name]

        if config.masked_retrain:
            with torch.no_grad():
                for name, W in config.model.named_parameters():
                    if name in config.masks:
                        W.grad *= config.masks[name]  # boolean mask: 1 where weights are above threshold

        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % config.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Nat_Loss {nat_loss.val:.4f} ({nat_loss.avg:.4f})\t'
                  'Nat_Acc@1 {nat_top1.val:.3f} ({nat_top1.avg:.3f})\t'
                  'Adv_Loss {adv_loss.val:.4f} ({adv_loss.avg:.4f})\t'
                  'Adv_Acc@1 {adv_top1.val:.3f} ({adv_top1.avg:.3f})\t'.format(
                      epoch, i, len(train_loader), batch_time=batch_time, data_time=data_time,
                      nat_loss=nat_losses, nat_top1=nat_top1, adv_loss=adv_losses, adv_top1=adv_top1))
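# NOTE (added sketch): mixup_data / mixup_criterion follow the mixup reference implementation
# (Zhang et al., 2018); threading the extra `smooth` argument through mixup_criterion is an
# assumption based on the calls above. np and torch are assumed imported at module level.
def mixup_data(x, y, alpha=1.0):
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1.0  # mixing coefficient
    index = torch.randperm(x.size(0), device=x.device)  # random pairing within the batch
    mixed_x = lam * x + (1 - lam) * x[index, :]
    return mixed_x, y, y[index], lam

def mixup_criterion(criterion, pred, y_a, y_b, lam, smooth):
    return (lam * criterion(pred, y_a, smooth=smooth)
            + (1 - lam) * criterion(pred, y_b, smooth=smooth))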
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
    parser.add_argument('--config_file', type=str, default='', help="config file")
    parser.add_argument('--stage', type=str, default='', help="select the pruning stage")
    args = parser.parse_args()

    config = Config(args)

    use_cuda = True
    init = Init_Func(config.init_func)
    torch.manual_seed(config.random_seed)
    device = torch.device("cuda" if use_cuda else "cpu")

    kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=True, download=True,
                       transform=transforms.Compose([
                           transforms.ToTensor()
                           # transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=64, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('../data', train=False,
                       transform=transforms.Compose([
                           transforms.ToTensor()
                           # transforms.Normalize((0.1307,), (0.3081,))
                       ])),
        batch_size=1000, shuffle=True, **kwargs)

    model = None
    if config.arch == 'lenet_bn':
        model = LeNet_BN().to(device)
    elif config.arch == 'lenet':
        model = LeNet().to(device)
    elif config.arch == 'lenet_adv':
        model = LeNet_adv(w=config.width_multiplier).to(device)
    if config.arch not in model_names:
        raise Exception("unknown model architecture")

    # for initialization experiments
    for name, W in model.named_parameters():
        if 'conv' in name and 'bias' not in name:
            print('initialization uniform')
            # W.data = torch.nn.init.uniform_(W.data)
            W.data = init.init(W.data)

    model = AttackPGD(model, config)

    # loading initialization (lottery-ticket experiments)
    # read_dict = np.load('lenet_adv_retrained_w16_1_cut.pt_init.npy').item()
    # for name, W in model.named_parameters():
    #     if name not in read_dict:
    #         continue
    #     print(name, read_dict[name].shape)
    #     W.data = torch.from_numpy(read_dict[name])

    config.model = model

    if config.load_model:
        # unlike resume, load_model does not care about optimizer status or start_epoch
        print('==> Loading from {}'.format(config.load_model))
        config.model.load_state_dict(torch.load(config.load_model,
                                                map_location=lambda storage, loc: storage))

    torch.cuda.set_device(config.gpu)
    config.model.cuda(config.gpu)
    test(config, device, test_loader)

    ADMM = None
    config.prepare_pruning()
    if config.admm:
        ADMM = admm.ADMM(config)

    # Assumption: the train() above expects a label-smoothing-aware criterion
    # (it calls criterion(output, target, smooth=config.smooth)); CrossEntropyLossMaybeSmooth
    # and config.smooth_eps are assumed from the sibling ADMM-pruning repos.
    criterion = CrossEntropyLossMaybeSmooth(smooth_eps=config.smooth_eps).cuda(config.gpu)

    optimizer = None
    if config.optimizer == 'sgd':
        optimizer = torch.optim.SGD(config.model.parameters(), config.lr,
                                    momentum=0.9, weight_decay=1e-6)
    elif config.optimizer == 'adam':
        optimizer = torch.optim.Adam(config.model.parameters(), config.lr)

    scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                     T_max=config.epochs * len(train_loader),
                                                     eta_min=4e-08)

    if config.resume:
        if os.path.isfile(config.resume):
            checkpoint = torch.load(config.resume)
            config.start_epoch = checkpoint['epoch']
            best_adv_acc = checkpoint['best_adv_acc']
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            print("=> loaded checkpoint '{}' (epoch {})".format(config.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(config.resume))

    if config.masked_retrain:
        # make sure small weights are pruned and confirm the accuracy
        print("<============ masking both weights and gradients for retrain")
        admm.masking(config)
        print("<============ testing sparsity before retrain")
        admm.test_sparsity(config)
        test(config, device, test_loader)

    if config.masked_progressive:
        admm.zero_masking(config)

    for epoch in range(0, config.epochs + 1):
        if config.admm:
            admm.admm_adjust_learning_rate(optimizer, epoch, config)
        else:
            if config.lr_scheduler == 'cosine':
                scheduler.step()  # note: train() may also step the scheduler per batch
            elif config.lr_scheduler == 'sgd':
                if epoch == 20:
                    config.lr /= 10
                    for param_group in optimizer.param_groups:
                        param_group['lr'] = config.lr
            else:
                pass  # adam needs no schedule here

        train(config, ADMM, device, train_loader, criterion, optimizer, scheduler, epoch)
        test(config, device, test_loader)

    admm.test_sparsity(config)
    test(config, device, test_loader)

    if config.save_model and config.admm:
        print('saving model {}'.format(config.save_model))
        torch.save(config.model.state_dict(), config.save_model)
def prune_train(args, pre_mask, ADMM, train_loader, criterion, optimizer, scheduler, epoch):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    idx_loss_dict = {}

    # switch to train mode (`model`, `device` and `writer` are assumed to be module-level globals)
    model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        target = target.long().cuda()

        # measure data loading time
        data_time.update(time.time() - end)

        # adjust learning rate
        if args.admm:
            admm.admm_adjust_learning_rate(optimizer, epoch, args)
        else:
            scheduler.step()

        input = input.float().cuda()
        if args.mixup:
            input, target_a, target_b, lam = mixup_data(input, target, args.alpha)

        # compute output
        output = model(input)

        if args.mixup:
            ce_loss = mixup_criterion(criterion, output, target_a, target_b, lam, args.smooth)
        else:
            ce_loss = criterion(output, target, smooth=args.smooth)

        mixed_loss = ce_loss
        if args.admm:
            admm.z_u_update(args, ADMM, model, device, train_loader, optimizer, epoch, input, i, writer)  # update Z and U
            ce_loss, admm_loss, mixed_loss = admm.append_admm_loss(args, ADMM, model, ce_loss)  # append admm loss
        if args.admm_mask:
            admm.y_k_update(args, ADMM, model, device, train_loader, optimizer, epoch, input, i, writer)  # update Y and K
            ce_loss, admm_loss, mixed_loss = admm.append_mask_loss(args, ADMM, model, mixed_loss)

        # measure accuracy and record loss
        acc1, _ = accuracy(output, target, topk=(1, 5))
        losses.update(ce_loss.item(), input.size(0))
        top1.update(acc1[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        if args.admm or args.admm_mask:
            mixed_loss.backward(retain_graph=True)
        else:
            ce_loss.backward()

        if pre_mask:
            with torch.no_grad():
                for name, W in model.named_parameters():
                    # shared layers: freeze entirely
                    if name in args.fixed_layer:
                        W.grad *= 0
                        continue
                    # pruned weight layers: fix the weights kept for the previous task
                    if name in args.pruned_layer and name in pre_mask:
                        W.grad *= pre_mask[name].cuda()
                    # adaptively learn the mask: fix the mask for the trainable weight part
                    if args.adaptive_mask and 'mask' in name and args.admm:
                        W.grad *= args.mask[name.replace('w_mask', 'weight')].cuda()

        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.log_interval == 0:
            for param_group in optimizer.param_groups:
                current_lr = param_group['lr']
            print('({0}) lr:[{1:.5f}] '
                  'Epoch: [{2}][{3}/{4}]\t'
                  'Status: admm-[{5}] retrain-[{6}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Acc@1 {top1.val:.3f}% ({top1.avg:.3f}%)\t'.format(
                      args.optmzr, current_lr, epoch, i, len(train_loader),
                      args.admm, args.masked_retrain,
                      batch_time=batch_time,  # fixed: the original passed data_time here
                      loss=losses, top1=top1))
        if i % 100 == 0:
            idx_loss_dict[i] = losses.avg

    return idx_loss_dict
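# NOTE (added sketch): admm.weight_pruning returns a keep-mask and the pruned weight tensor.
# For element-wise ('irregular') sparsity it is typically magnitude pruning; a hypothetical
# sketch consistent with how (above_threshold, W) is consumed in the retrain loops above:
def weight_pruning_sketch(W, prune_ratio):
    flat = W.detach().abs().flatten()
    k = int(flat.numel() * prune_ratio)  # number of weights to prune
    threshold = flat.kthvalue(k).values if k > 0 else flat.new_tensor(0.0)
    above_threshold = (W.detach().abs() > threshold).to(W.dtype)  # 1.0 = keep, 0.0 = prune
    return above_threshold, W.detach() * above_threshold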