import configparser
import glob
import math
import os
import random
import sys
import threading
import time
from distutils.util import strtobool

import numpy as np
import torch
import torch.distributed as dist
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.optim import lr_scheduler
from tqdm import tqdm

# Project-specific helpers used below (admm, mixup_data, mixup_criterion,
# AverageMeter, accuracy, Darknet, LoadImagesAndLabels, ...) are assumed to be
# provided by the surrounding repos.
import admm


def train(args, ADMM, model, device, train_loader, optimizer, epoch, writer, masks):
    model.train()
    # print(masks)
    ce_loss = None
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.cuda(), target.cuda()
        optimizer.zero_grad()
        output = model(data)
        ce_loss = F.cross_entropy(output, target)

        admm.z_u_update(args, ADMM, model, device, train_loader, optimizer,
                        epoch, data, batch_idx, writer)  # update Z and U variables
        ce_loss, admm_loss, mixed_loss = admm.append_admm_loss(args, ADMM, model, ce_loss)  # append admm loss

        mixed_loss.backward()

        # zero out gradients of pruned weights so they stay pruned
        # (`masks` is an iterable of {param_name: mask_tensor} dicts)
        for name, W in model.named_parameters():
            for mask in masks:
                if name in mask:
                    W.grad *= mask[name]

        optimizer.step()
        if batch_idx % args.log_interval == 0:
            print("({}) cross_entropy loss: {}, mixed_loss : {}".format(args.optmzr, ce_loss, mixed_loss))
            print('admm Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), ce_loss.item()))

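# The loops in this file lean on admm.z_u_update / admm.append_admm_loss, whose
# exact signatures vary between the codebases collected here. For orientation
# only -- a minimal sketch of the classic ADMM weight-pruning updates, assuming
# `ADMM` carries per-layer dicts ADMM_Z (auxiliary variables), ADMM_U (scaled
# duals) and rhos (penalty weights), and that `euclidean_project` is a
# hypothetical projection onto the sparsity constraint (e.g. magnitude pruning):
#
#     Z <- project(W + U),  U <- U + W - Z,
#     mixed_loss = ce_loss + sum_l rho_l / 2 * ||W_l - Z_l + U_l||_F^2

def sketch_z_u_update(ADMM, model, euclidean_project):
    """Sketch of a z_u_update: refresh Z (projection) and U (dual) per layer."""
    with torch.no_grad():
        for name, W in model.named_parameters():
            if name not in ADMM.ADMM_Z:
                continue
            ADMM.ADMM_Z[name] = euclidean_project(W + ADMM.ADMM_U[name])
            ADMM.ADMM_U[name] = ADMM.ADMM_U[name] + W - ADMM.ADMM_Z[name]


def sketch_append_admm_loss(ADMM, model, ce_loss):
    """Sketch of an append_admm_loss: add the rho/2 * ||W - Z + U||^2 terms."""
    admm_loss = 0.0
    for name, W in model.named_parameters():
        if name in ADMM.ADMM_Z:
            admm_loss = admm_loss + 0.5 * ADMM.rhos[name] * \
                (W - ADMM.ADMM_Z[name] + ADMM.ADMM_U[name]).norm() ** 2
    return ce_loss, admm_loss, ce_loss + admm_loss
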
def train(config, ADMM, device, train_loader, optimizer, epoch):
    config.model.train()
    adv_loss = None
    for batch_idx, (data, target) in enumerate(train_loader):
        data, target = data.to(device), target.to(device)
        if config.gpu is not None:
            data = data.cuda(config.gpu, non_blocking=True)
            target = target.cuda(config.gpu, non_blocking=True)

        optimizer.zero_grad()
        nat_output, adv_output, pert_inputs = config.model(data, target)
        nat_loss = F.cross_entropy(nat_output, target)
        adv_loss = F.cross_entropy(adv_output, target)

        if config.admm:
            admm.admm_update(config, ADMM, device, train_loader, optimizer,
                             epoch, data, batch_idx)  # update Z and U
            adv_loss, admm_loss, mixed_loss = admm.append_admm_loss(config, ADMM, adv_loss)  # append admm loss

        if config.admm:
            mixed_loss.backward()
        else:
            adv_loss.backward()
            # nat_loss.backward()

        if config.masked_progressive:
            with torch.no_grad():
                for name, W in config.model.named_parameters():
                    if name in config.zero_masks:
                        W.grad *= config.zero_masks[name]

        if config.masked_retrain:
            with torch.no_grad():
                for name, W in config.model.named_parameters():
                    if name in config.masks:
                        W.grad *= config.masks[name]

        optimizer.step()
        if batch_idx % config.print_freq == 0:
            print("nat_cross_entropy loss: {} adv_cross_entropy loss: {}".format(nat_loss, adv_loss))
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), adv_loss.item()))

def train(lr, epoch=0):
    # Turn on training mode which enables dropout.
    model.train()
    total_loss = 0.
    start_time = time.time()
    ntokens = len(corpus.dictionary)
    hidden = model.init_hidden(args.batch_size)
    for batch, i in enumerate(range(0, train_data.size(0) - 1, args.bptt)):
        data, targets = get_batch(train_data, i)
        # Starting each batch, we detach the hidden state from how it was previously produced.
        # If we didn't, the model would try backpropagating all the way to the start of the dataset.
        hidden = repackage_hidden(hidden)
        model.zero_grad()
        output, hidden = model(data, hidden)
        loss = criterion(output.view(-1, ntokens), targets)

        # if args.admm:
        if stage == 'admm':
            ce_loss = loss
            admm.admm_update(args, ADMM, model, None, None, None, epoch, None, batch)  # update Z and U
            ce_loss, admm_loss, mixed_loss = admm.append_admm_loss(args, ADMM, model, ce_loss)  # append admm loss
            loss = mixed_loss

        loss.backward()

        # `clip_grad_norm_` helps prevent the exploding gradient problem in RNNs / LSTMs.
        torch.nn.utils.clip_grad_norm_(model.parameters(), args.clip)

        if stage == 'masked_retrain':
            for name, W in model.named_parameters():
                if name in config.masks:
                    W.grad.data *= config.masks[name]

        # manual SGD update (modern `add_` signature; the old positional
        # `p.data.add_(-lr, p.grad.data)` form is deprecated)
        for p in model.parameters():
            p.data.add_(p.grad.data, alpha=-lr)

        total_loss += loss.item()
        if batch % args.log_interval == 0 and batch > 0:
            cur_loss = total_loss / args.log_interval
            elapsed = time.time() - start_time
            print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.2f} | ms/batch {:5.2f} | '
                  'loss {:5.2f} | ppl {:8.2f}'.format(
                      epoch, batch, len(train_data) // args.bptt, lr,
                      elapsed * 1000 / args.log_interval, cur_loss, math.exp(cur_loss)))
            total_loss = 0
            start_time = time.time()

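# The LSTM loop above calls repackage_hidden. In the PyTorch word_language_model
# example this code descends from, the helper detaches hidden-state tensors from
# the autograd graph; a sketch matching that example:

def repackage_hidden(h):
    """Wrap hidden states in new Tensors, detaching them from their history."""
    if isinstance(h, torch.Tensor):
        return h.detach()
    return tuple(repackage_hidden(v) for v in h)
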
def train(config, ADMM, device, train_loader, criterion, optimizer, scheduler, epoch):
    config.model.train()
    ce_loss = None
    for batch_idx, (data, target) in enumerate(train_loader):
        # adjust learning rate
        if config.admm:
            admm.admm_adjust_learning_rate(optimizer, epoch, config)
        else:
            if scheduler is not None:
                scheduler.step()

        data, target = data.to(device), target.to(device)
        if config.gpu is not None:
            data = data.cuda(config.gpu, non_blocking=True)
            target = target.cuda(config.gpu, non_blocking=True)
        if config.mixup:
            data, target_a, target_b, lam = mixup_data(data, target, config.alpha)

        optimizer.zero_grad()
        output = config.model(data)
        if config.mixup:
            ce_loss = mixup_criterion(criterion, output, target_a, target_b, lam, config.smooth)
        else:
            ce_loss = criterion(output, target, smooth=config.smooth)

        if config.admm:
            admm.admm_update(config, ADMM, device, train_loader, optimizer,
                             epoch, data, batch_idx)  # update Z and U
            ce_loss, admm_loss, mixed_loss = admm.append_admm_loss(config, ADMM, ce_loss)  # append admm loss

        if config.admm:
            mixed_loss.backward()
        else:
            ce_loss.backward()

        if config.masked_progressive:
            with torch.no_grad():
                for name, W in config.model.named_parameters():
                    if name in config.zero_masks:
                        W.grad *= config.zero_masks[name]

        if config.masked_retrain:
            with torch.no_grad():
                for name, W in config.model.named_parameters():
                    if name in config.masks:
                        W.grad *= config.masks[name]

        optimizer.step()
        if batch_idx % config.print_freq == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), ce_loss.item()))

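# mixup_data / mixup_criterion above follow the mixup recipe of Zhang et al.
# (2018). A sketch assuming the canonical mixup-cifar10 implementation; the
# `smooth` argument forwarded to the criterion is this file's convention and an
# assumption on my part:

def mixup_data(x, y, alpha=1.0):
    """Sketch: convex-combine a batch with a shuffled copy of itself."""
    lam = np.random.beta(alpha, alpha) if alpha > 0 else 1.0
    index = torch.randperm(x.size(0), device=x.device)
    mixed_x = lam * x + (1 - lam) * x[index, :]
    return mixed_x, y, y[index], lam


def mixup_criterion(criterion, pred, y_a, y_b, lam, smooth=False):
    """Sketch: mixup loss as a lambda-weighted sum of the two label losses."""
    return lam * criterion(pred, y_a, smooth=smooth) + \
        (1 - lam) * criterion(pred, y_b, smooth=smooth)
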
def train(train_loader, config, ADMM, criterion, optimizer, scheduler, epoch):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()

    # switch to train mode
    config.model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        # adjust learning rate
        if config.admm:
            admm.admm_adjust_learning_rate(optimizer, epoch, config)
        else:
            scheduler.step()

        input = input.cuda(config.gpu, non_blocking=True)
        target = target.cuda(config.gpu)
        data = input  # keep the raw batch for the ADMM update below
        if config.mixup:
            input, target_a, target_b, lam = mixup_data(input, target, config.alpha)

        # compute output
        output = config.model(input)
        if config.mixup:
            ce_loss = mixup_criterion(criterion, output, target_a, target_b, lam, config.smooth)
        else:
            ce_loss = criterion(output, target, smooth=config.smooth)

        if config.admm:
            # NOTE: `device` is assumed to be defined at module scope here
            admm.admm_update(config, ADMM, device, train_loader, optimizer,
                             epoch, data, i)  # update Z and U
            ce_loss, admm_loss, mixed_loss = admm.append_admm_loss(config, ADMM, ce_loss)  # append admm loss

        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(ce_loss.item(), input.size(0))
        top1.update(acc1[0], input.size(0))
        top5.update(acc5[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        if config.admm:
            mixed_loss.backward()
        else:
            ce_loss.backward()

        if config.masked_progressive:
            with torch.no_grad():
                for name, W in config.model.named_parameters():
                    if name in config.zero_masks:
                        W.grad *= config.zero_masks[name]

        if config.masked_retrain:
            with torch.no_grad():
                for name, W in config.model.named_parameters():
                    if name in config.masks:
                        W.grad *= config.masks[name]

        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % config.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Acc@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Acc@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                      epoch, i, len(train_loader), batch_time=batch_time,
                      data_time=data_time, loss=losses, top1=top1, top5=top5))
            print("cross_entropy loss: {}".format(ce_loss))

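# AverageMeter is used throughout these loops; the repos collected here
# typically copy it verbatim from the PyTorch ImageNet example. A sketch
# matching that implementation:

class AverageMeter(object):
    """Computes and stores the average and current value."""

    def __init__(self):
        self.reset()

    def reset(self):
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        self.val = val
        self.sum += val * n
        self.count += n
        self.avg = self.sum / self.count
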
def train(hyp):
    # batch_time = AverageMeter()
    # data_time = AverageMeter()
    # losses = AverageMeter()
    cfg = opt.cfg
    data = opt.data
    epochs = opt.epochs  # 500200 batches at bs 64, 117263 images = 273 epochs
    batch_size = opt.batch_size
    accumulate = max(round(64 / batch_size), 1)  # accumulate n times before optimizer update (bs 64)
    weights = opt.weights  # initial training weights
    imgsz_min, imgsz_max, imgsz_test = opt.img_size  # img sizes (min, max, test)

    # Image Sizes
    gs = 32  # (pixels) grid size
    assert math.fmod(imgsz_min, gs) == 0, '--img-size %g must be a %g-multiple' % (imgsz_min, gs)
    opt.multi_scale |= imgsz_min != imgsz_max  # multi if different (min, max)
    if opt.multi_scale:
        if imgsz_min == imgsz_max:
            imgsz_min //= 1.5
            imgsz_max //= 0.667
        grid_min, grid_max = imgsz_min // gs, imgsz_max // gs
        imgsz_min, imgsz_max = int(grid_min * gs), int(grid_max * gs)
    img_size = imgsz_max  # initialize with max size

    # Configure run
    init_seeds()
    data_dict = parse_data_cfg(data)
    train_path = data_dict['train']
    test_path = data_dict['valid']
    nc = 1 if opt.single_cls else int(data_dict['classes'])  # number of classes
    hyp['cls'] *= nc / 80  # update coco-tuned hyp['cls'] to current dataset

    # Remove previous results
    for f in glob.glob('*_batch*.jpg') + glob.glob(results_file):
        os.remove(f)

    # Initialize model
    model = Darknet(cfg).to(device)

    # Optimizer
    pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
    for k, v in dict(model.named_parameters()).items():
        if '.bias' in k:
            pg2 += [v]  # biases
        elif 'Conv2d.weight' in k:
            pg1 += [v]  # apply weight_decay
        else:
            pg0 += [v]  # all else

    if opt.adam:
        # hyp['lr0'] *= 0.1  # reduce lr (i.e. SGD=5E-3, Adam=5E-4)
        optimizer = optim.Adam(pg0, lr=hyp['lr0'])
        # optimizer = AdaBound(pg0, lr=hyp['lr0'], final_lr=0.1)
    else:
        optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
    optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay
    optimizer.add_param_group({'params': pg2})  # add pg2 (biases)
    print('Optimizer groups: %g .bias, %g Conv2d.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
    del pg0, pg1, pg2

    start_epoch = 0
    best_fitness = 0.0
    # attempt_download(weights)

    if opt.freeze_layers:
        output_layer_indices = [idx - 1 for idx, module in enumerate(model.module_list)
                                if isinstance(module, YOLOLayer)]
        freeze_layer_indices = [x for x in range(len(model.module_list))
                                if (x not in output_layer_indices) and (x - 1 not in output_layer_indices)]
        for idx in freeze_layer_indices:
            for parameter in model.module_list[idx].parameters():
                parameter.requires_grad_(False)

    # Mixed precision training https://github.com/NVIDIA/apex
    if mixed_precision:
        model, optimizer = amp.initialize(model, optimizer, opt_level='O1', verbosity=0)

    # Scheduler https://arxiv.org/pdf/1812.01187.pdf
    lf = lambda x: (((1 + math.cos(x * math.pi / epochs)) / 2) ** 1.0) * 0.95 + 0.05  # cosine
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    scheduler.last_epoch = start_epoch - 1  # see link below
    # https://discuss.pytorch.org/t/a-problem-occured-when-resuming-an-optimizer/28822

    # Plot lr schedule
    # y = []
    # for _ in range(epochs):
    #     scheduler.step()
    #     y.append(optimizer.param_groups[0]['lr'])
    # plt.plot(y, '.-', label='LambdaLR')
    # plt.xlabel('epoch')
    # plt.ylabel('LR')
    # plt.tight_layout()
    # plt.savefig('LR.png', dpi=300)

    # Dataset
    dataset = LoadImagesAndLabels(train_path, img_size, batch_size,
                                  augment=True,
                                  hyp=hyp,  # augmentation hyperparameters
                                  rect=opt.rect,  # rectangular training
                                  cache_images=opt.cache_images,
                                  single_cls=opt.single_cls)

    # Dataloader
    batch_size = min(batch_size, len(dataset))
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])  # number of workers
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             num_workers=nw,
                                             shuffle=not opt.rect,  # Shuffle=True unless rectangular training is used
                                             pin_memory=True,
                                             collate_fn=dataset.collate_fn)

    # Testloader
    testloader = torch.utils.data.DataLoader(LoadImagesAndLabels(test_path, imgsz_test, batch_size,
                                                                 hyp=hyp,
                                                                 rect=True,
                                                                 cache_images=opt.cache_images,
                                                                 single_cls=opt.single_cls),
                                             batch_size=batch_size,
                                             num_workers=nw,
                                             pin_memory=True,
                                             collate_fn=dataset.collate_fn)

    initial_rho = opt.rho
    t0 = time.time()

    """====================="""
    """ multi-rho admm train"""
    """====================="""
    if opt.admm:
        opt.notest = True
        # possible weights are '*.pt', 'yolov3-spp.pt', 'yolov3-tiny.pt' etc.
        chkpt = torch.load(weights, map_location=device)

        # load model
        try:
            # chkpt['model'] = {k: v for k, v in chkpt['model'].items() if model.state_dict()[k].numel() == v.numel()}
            model.load_state_dict(chkpt['model'], strict=False)
        except Exception as e:
            s = "%s is not compatible with %s. Specify --weights '' or specify a --cfg compatible with %s. " \
                "See https://github.com/ultralytics/yolov3/issues/657" % (opt.weights, opt.cfg, opt.weights)
            print(e)
            raise KeyError(s) from e
        del chkpt

        # Initialize distributed training
        if device.type != 'cpu' and torch.cuda.device_count() > 1 and torch.distributed.is_available():
            dist.init_process_group(backend='nccl',  # distributed backend
                                    init_method='tcp://127.0.0.1:9999',  # distributed training init method
                                    world_size=1,  # number of nodes for distributed training
                                    rank=0)  # distributed training node rank
            model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)
            model.yolo_layers = model.module.yolo_layers  # move yolo layer indices to top level

        # Model parameters
        model.nc = nc  # attach number of classes to model
        model.hyp = hyp  # attach hyperparameters to model
        model.gr = 1.0  # giou loss ratio (obj_loss = 1.0 or giou)
        model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device)  # attach class weights

        # Model EMA
        ema = torch_utils.ModelEMA(model)

        # Start training
        nb = len(dataloader)  # number of batches
        n_burn = max(int(0.7 * nb), 500)  # burn-in iterations, max(0.7 epochs, 500 iterations)
        maps = np.zeros(nc)  # mAP per class
        # torch.autograd.set_detect_anomaly(True)
        results = (0, 0, 0, 0, 0, 0, 0)  # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
        print('Image sizes %g - %g train, %g test' % (imgsz_min, imgsz_max, imgsz_test))
        print('Using %g dataloader workers' % nw)
        print('Starting training for %g epochs...' % epochs)

        for i in range(opt.rho_num):
            current_rho = initial_rho * 10 ** i
            ADMM = admm.ADMM(model, file_name="./prune_config/" + opt.config_file + ".yaml", rho=current_rho)
            admm.admm_initialization(opt, ADMM=ADMM, model=model)  # initialize Z variable

            for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------
                print("current rho: {}".format(current_rho))
                model.train()

                masks = {}
                if opt.masked_retrain and not opt.combine_progressive:
                    print("full acc re-train masking")
                    for name, W in (model.module.named_parameters()
                                    if type(model) is torch.nn.parallel.DistributedDataParallel
                                    else model.named_parameters()):
                        if name not in ADMM.prune_ratios:
                            continue
                        above_threshold, pruned_weight = admm.weight_pruning(opt, W, ADMM.prune_ratios[name])
                        W.data = pruned_weight  # hard-apply the pruning to the parameter
                        masks[name] = above_threshold
                elif opt.combine_progressive:
                    print("progressive admm-train/re-train masking")
                    for name, W in (model.module.named_parameters()
                                    if type(model) is torch.nn.parallel.DistributedDataParallel
                                    else model.named_parameters()):
                        weight = W.cpu().detach().numpy()
                        non_zeros = (weight != 0).astype(np.float32)
                        zero_mask = torch.from_numpy(non_zeros).cuda()  # 1/0 mask of currently non-zero weights
                        masks[name] = zero_mask

                # Update image weights (optional)
                if dataset.image_weights:
                    w = model.class_weights.cpu().numpy() * (1 - maps) ** 2  # class weights
                    image_weights = labels_to_image_weights(dataset.labels, nc=nc, class_weights=w)
                    dataset.indices = random.choices(range(dataset.n), weights=image_weights, k=dataset.n)  # rand weighted idx

                mloss = torch.zeros(4).to(device)  # mean losses
                print(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'total', 'targets', 'img_size'))
                pbar = tqdm(enumerate(dataloader), total=nb)  # progress bar
                for i, (imgs, targets, paths, _) in pbar:  # batch -------------------- (NOTE: shadows the rho-loop `i`)
                    ni = i + nb * epoch  # number integrated batches (since train start)
                    imgs = imgs.to(device).float() / 255.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0
                    targets = targets.to(device)

                    # Burn-in
                    if ni <= n_burn:
                        xi = [0, n_burn]  # x interp
                        model.gr = np.interp(ni, xi, [0.0, 1.0])  # giou loss ratio (obj_loss = 1.0 or giou)
                        accumulate = max(1, np.interp(ni, xi, [1, 64 / batch_size]).round())
                        for j, x in enumerate(optimizer.param_groups):
                            # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                            x['lr'] = np.interp(ni, xi, [0.1 if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                            x['weight_decay'] = np.interp(ni, xi, [0.0, hyp['weight_decay'] if j == 1 else 0.0])
                            if 'momentum' in x:
                                x['momentum'] = np.interp(ni, xi, [0.9, hyp['momentum']])

                    # Multi-Scale
                    if opt.multi_scale:
                        if ni / accumulate % 1 == 0:  # adjust img_size (67% - 150%) every 1 batch
                            img_size = random.randrange(grid_min, grid_max + 1) * gs
                        sf = img_size / max(imgs.shape[2:])  # scale factor
                        if sf != 1:
                            ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to 32-multiple)
                            imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

                    # Forward
                    pred = model(imgs)

                    # Loss
                    loss, loss_items = compute_loss(pred, targets, model)
                    if not torch.isfinite(loss):
                        print('WARNING: non-finite loss, ending training ', loss_items)
                        return results

                    # Backward
                    loss *= batch_size / 64  # scale loss
                    admm.z_u_update(opt, ADMM, model, device, dataloader, optimizer,
                                    epoch, imgs, i, tb_writer)  # update Z and U variables
                    loss, admm_loss, mixed_loss = admm.append_admm_loss(opt, ADMM, model, loss)  # append admm loss
                    if mixed_precision:
                        with amp.scale_loss(mixed_loss, optimizer) as scaled_loss:
                            scaled_loss.backward()
                    else:
                        mixed_loss.backward()

                    if opt.combine_progressive:
                        with torch.no_grad():
                            for name, W in (model.module.named_parameters()
                                            if type(model) is torch.nn.parallel.DistributedDataParallel
                                            else model.named_parameters()):
                                if name in masks:
                                    W.grad *= masks[name]

                    # Optimize
                    if ni % accumulate == 0:
                        optimizer.step()
                        optimizer.zero_grad()
                        ema.update(model)

                    # Print
                    mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                    mem = '%.3gG' % (torch.cuda.memory_cached() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
                    s = ('%10s' * 2 + '%10.3g' * 6) % ('%g/%g' % (epoch, epochs - 1), mem, *mloss, len(targets), img_size)
                    pbar.set_description(s)

                    # Plot
                    # if ni < 1:
                    #     f = 'train_batch%g.jpg' % i  # filename
                    #     res = plot_images(images=imgs, targets=targets, paths=paths, fname=f)
                    #     if tb_writer:
                    #         tb_writer.add_image(f, res, dataformats='HWC', global_step=epoch)
                    #         # tb_writer.add_graph(model, imgs)  # add model to tensorboard

                    # end batch --------------------------------------------------------------------------------------

                # Update scheduler
                if opt.admm:
                    admm.admm_adjust_learning_rate(optimizer, epoch, opt)
                else:
                    scheduler.step()

                # Process epoch results
                ema.update_attr(model)
                final_epoch = epoch + 1 == epochs
                if not opt.notest:  # Calculate mAP  # or final_epoch
                    is_coco = any([x in data for x in ['coco.data', 'coco2014.data', 'coco2017.data']]) and model.nc == 80
                    results, maps = test.test(cfg,
                                              data,
                                              batch_size=batch_size,
                                              imgsz=imgsz_test,
                                              model=ema.ema,
                                              save_json=final_epoch and is_coco,
                                              single_cls=opt.single_cls,
                                              dataloader=testloader,
                                              multi_label=ni > n_burn)

                # Write
                with open(results_file, 'a') as f:
                    f.write(s + '%10.3g' * 7 % results + '\n')  # P, R, mAP, F1, test_losses=(GIoU, obj, cls)
                if len(opt.name) and opt.bucket:
                    os.system('gsutil cp results.txt gs://%s/results/results%s.txt' % (opt.bucket, opt.name))

                # Tensorboard
                if tb_writer:
                    tags = ['train/giou_loss', 'train/obj_loss', 'train/cls_loss',
                            'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/F1',
                            'val/giou_loss', 'val/obj_loss', 'val/cls_loss']
                    for x, tag in zip(list(mloss[:-1]) + list(results), tags):
                        tb_writer.add_scalar(tag, x, epoch)

                # Update best mAP
                fi = fitness(np.array(results).reshape(1, -1))  # fitness_i = weighted combination of [P, R, mAP, F1]
                if fi > best_fitness:
                    best_fitness = fi
                # end epoch ------------------------------------------------------------------------------------------
            # end training

            # admm_adjust_learning_rate ------------------------------------------------------------------------------
            admm.admm_adjust_learning_rate(optimizer, epoch, opt)
            # end admm_adjust_learning_rate --------------------------------------------------------------------------

            print("Saving model.")
            torch.save(model.module.state_dict() if type(model) is nn.parallel.DistributedDataParallel
                       else model.state_dict(),
                       "./model_pruned/yolov4_{}_{}_{}.pt".format(current_rho, opt.config_file, opt.sparsity_type))

        if not opt.evolve:
            plot_results()  # save as results.png
        print('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
        # dist.destroy_process_group() if torch.cuda.device_count() > 1 else None
        # torch.cuda.empty_cache()
        # return results

    """=============="""
    """masked retrain"""
    """=============="""
    if opt.masked_retrain:
        ADMM = admm.ADMM(model, file_name="./prune_config/" + opt.config_file + ".yaml", rho=initial_rho)
        if not opt.resume:
            # possible weights are '*.pt', 'yolov3-spp.pt', 'yolov3-tiny.pt' etc.
            print("\n>_ Loading file: ./model_pruned/yolov4_{}_{}_{}.pt".format(
                initial_rho * 10 ** (opt.rho_num - 1), opt.config_file, opt.sparsity_type))
            chkpt = torch.load("./model_pruned/yolov4_{}_{}_{}.pt".format(
                initial_rho * 10 ** (opt.rho_num - 1), opt.config_file, opt.sparsity_type), map_location=device)
            # chkpt = torch.load(weights, map_location=device)

            # load model
            try:
                # chkpt['model'] = {k: v for k, v in chkpt['model'].items() if model.state_dict()[k].numel() == v.numel()}
                model.load_state_dict(chkpt, strict=False)  # ['model']
            except KeyError as e:
                # s = "%s is not compatible with %s. Specify --weights '' or specify a --cfg compatible with %s. " \
                #     "See https://github.com/ultralytics/yolov3/issues/657" % (opt.weights, opt.cfg, opt.weights)
                raise KeyError() from e

            # ---------------------------------------------- hard prune ----------------------------------------------
            admm.hard_prune(opt, ADMM, model)
            # ---------------------------------------------- hard prune ----------------------------------------------
        else:
            try:
                chkpt = torch.load(weights, map_location=device)
                chkpt['model'] = {k: v for k, v in chkpt['model'].items()
                                  if model.state_dict()[k].numel() == v.numel()}
                model.load_state_dict(chkpt['model'], strict=False)
            except KeyError as e:
                # s = "%s is not compatible with %s. Specify --weights '' or specify a --cfg compatible with %s. " \
                #     "See https://github.com/ultralytics/yolov3/issues/657" % (opt.weights, opt.cfg, opt.weights)
                raise KeyError() from e

            # load optimizer
            if chkpt['optimizer'] is not None:
                optimizer.load_state_dict(chkpt['optimizer'])
                best_fitness = chkpt['best_fitness']

            # load results
            if chkpt.get('training_results') is not None:
                with open(results_file, 'w') as file:
                    file.write(chkpt['training_results'])  # write results.txt

            start_epoch = chkpt['epoch'] + 1
            del chkpt

        # Initialize distributed training
        if device.type != 'cpu' and torch.cuda.device_count() > 1 and torch.distributed.is_available():
            dist.init_process_group(backend='nccl',  # distributed backend
                                    init_method='tcp://127.0.0.1:9999',  # distributed training init method
                                    world_size=1,  # number of nodes for distributed training
                                    rank=0)  # distributed training node rank
            model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)
            model.yolo_layers = model.module.yolo_layers  # move yolo layer indices to top level

        # Model parameters
        model.nc = nc  # attach number of classes to model
        model.hyp = hyp  # attach hyperparameters to model
        model.gr = 1.0  # giou loss ratio (obj_loss = 1.0 or giou)
        model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device)  # attach class weights

        # Model EMA
        ema = torch_utils.ModelEMA(model)

        # Start training
        nb = len(dataloader)  # number of batches
        n_burn = max(3 * nb, 500)  # burn-in iterations, max(3 epochs, 500 iterations)
        maps = np.zeros(nc)  # mAP per class
        # torch.autograd.set_detect_anomaly(True)
        results = (0, 0, 0, 0, 0, 0, 0)  # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
        print('Image sizes %g - %g train, %g test' % (imgsz_min, imgsz_max, imgsz_test))
        print('Using %g dataloader workers' % nw)
        print('Starting training for %g epochs...' % epochs)

        for epoch in range(start_epoch, epochs):  # epoch ----------------------------------------------------------
            model.train()

            if opt.masked_retrain and not opt.combine_progressive:
                print("full acc re-train masking")
                masks = {}
                for name, W in (model.module.named_parameters()
                                if type(model) is torch.nn.parallel.DistributedDataParallel
                                else model.named_parameters()):
                    if name not in ADMM.prune_ratios:
                        continue
                    above_threshold, pruned_weight = admm.weight_pruning(opt, W, ADMM.prune_ratios[name])
                    W.data = pruned_weight  # hard-apply the pruning to the parameter
                    masks[name] = above_threshold
            elif opt.combine_progressive:
                print("progressive admm-train/re-train masking")
                masks = {}
                for name, W in (model.module.named_parameters()
                                if type(model) is torch.nn.parallel.DistributedDataParallel
                                else model.named_parameters()):
                    weight = W.cpu().detach().numpy()
                    non_zeros = (weight != 0).astype(np.float32)
                    zero_mask = torch.from_numpy(non_zeros).cuda()  # 1/0 mask of currently non-zero weights
                    masks[name] = zero_mask

            # Update image weights (optional)
            if dataset.image_weights:
                w = model.class_weights.cpu().numpy() * (1 - maps) ** 2  # class weights
                image_weights = labels_to_image_weights(dataset.labels, nc=nc, class_weights=w)
                dataset.indices = random.choices(range(dataset.n), weights=image_weights, k=dataset.n)  # rand weighted idx

            mloss = torch.zeros(4).to(device)  # mean losses
            print(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'total', 'targets', 'img_size'))
            pbar = tqdm(enumerate(dataloader), total=nb)  # progress bar
            for i, (imgs, targets, paths, _) in pbar:  # batch -----------------------------------------------------
                ni = i + nb * epoch  # number integrated batches (since train start)
                imgs = imgs.to(device).float() / 255.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0
                targets = targets.to(device)

                # Burn-in
                if ni <= n_burn:
                    xi = [0, n_burn]  # x interp
                    model.gr = np.interp(ni, xi, [0.0, 1.0])  # giou loss ratio (obj_loss = 1.0 or giou)
                    accumulate = max(1, np.interp(ni, xi, [1, 64 / batch_size]).round())
                    for j, x in enumerate(optimizer.param_groups):
                        # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                        x['lr'] = np.interp(ni, xi, [0.1 if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                        x['weight_decay'] = np.interp(ni, xi, [0.0, hyp['weight_decay'] if j == 1 else 0.0])
                        if 'momentum' in x:
                            x['momentum'] = np.interp(ni, xi, [0.9, hyp['momentum']])

                # Multi-Scale
                if opt.multi_scale:
                    if ni / accumulate % 1 == 0:  # adjust img_size (67% - 150%) every 1 batch
                        img_size = random.randrange(grid_min, grid_max + 1) * gs
                    sf = img_size / max(imgs.shape[2:])  # scale factor
                    if sf != 1:
                        ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to 32-multiple)
                        imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

                # Forward
                pred = model(imgs)

                # Loss
                loss, loss_items = compute_loss(pred, targets, model)
                if not torch.isfinite(loss):
                    print('WARNING: non-finite loss, ending training ', loss_items)
                    return results

                # Backward
                loss *= batch_size / 64  # scale loss
                if mixed_precision:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()

                if opt.combine_progressive:
                    with torch.no_grad():
                        for name, W in (model.module.named_parameters()
                                        if type(model) is torch.nn.parallel.DistributedDataParallel
                                        else model.named_parameters()):
                            if name in masks:
                                W.grad *= masks[name]
                if opt.masked_retrain:
                    with torch.no_grad():
                        for name, W in (model.module.named_parameters()
                                        if type(model) is torch.nn.parallel.DistributedDataParallel
                                        else model.named_parameters()):
                            if name in masks:
                                W.grad *= masks[name]

                # Optimize
                if ni % accumulate == 0:
                    optimizer.step()
                    optimizer.zero_grad()
                    ema.update(model)

                # Print
                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                mem = '%.3gG' % (torch.cuda.memory_cached() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
                s = ('%10s' * 2 + '%10.3g' * 6) % ('%g/%g' % (epoch, epochs - 1), mem, *mloss, len(targets), img_size)
                pbar.set_description(s)

                # Plot
                if ni < 1:
                    f = 'train_batch%g.jpg' % i  # filename
                    res = plot_images(images=imgs, targets=targets, paths=paths, fname=f)
                    if tb_writer:
                        tb_writer.add_image(f, res, dataformats='HWC', global_step=epoch)
                        # tb_writer.add_graph(model, imgs)  # add model to tensorboard

                # end batch ------------------------------------------------------------------------------------------

            # Update scheduler
            scheduler.step()

            # Process epoch results
            ema.update_attr(model)
            final_epoch = epoch + 1 == epochs
            if not opt.notest or final_epoch:  # Calculate mAP
                is_coco = any([x in data for x in ['coco.data', 'coco2014.data', 'coco2017.data']]) and model.nc == 80
                results, maps = test.test(cfg,
                                          data,
                                          batch_size=batch_size,
                                          imgsz=imgsz_test,
                                          model=ema.ema,
                                          save_json=final_epoch and is_coco,
                                          single_cls=opt.single_cls,
                                          dataloader=testloader,
                                          multi_label=ni > n_burn)

            # Write
            with open(results_file, 'a') as f:
                f.write(s + '%10.3g' * 7 % results + '\n')  # P, R, mAP, F1, test_losses=(GIoU, obj, cls)
            if len(opt.name) and opt.bucket:
                os.system('gsutil cp results.txt gs://%s/results/results%s.txt' % (opt.bucket, opt.name))

            # Tensorboard
            if tb_writer:
                tags = ['train/giou_loss', 'train/obj_loss', 'train/cls_loss',
                        'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/F1',
                        'val/giou_loss', 'val/obj_loss', 'val/cls_loss']
                for x, tag in zip(list(mloss[:-1]) + list(results), tags):
                    tb_writer.add_scalar(tag, x, epoch)

            # Update best mAP
            fi = fitness(np.array(results).reshape(1, -1))  # fitness_i = weighted combination of [P, R, mAP, F1]
            if fi > best_fitness:  # results[2]
                best_fitness = fi  # results[2]
                print("\n>_ Got better accuracy {:.3f}% now...\n".format(results[2]))
                # torch.save(ema.ema.module.state_dict() if hasattr(model, 'module') else ema.ema.state_dict(),
                #            "./model_retrained/yolov4_retrained_acc_{:.3f}_{}rhos_{}_{}.pt".format(
                #                results[2], opt.rho_num, opt.config_file, opt.sparsity_type))

            # Save model
            save = (not opt.nosave) or (final_epoch and not opt.evolve)
            if save:
                with open(results_file, 'r') as f:
                    # create checkpoint
                    chkpt = {'epoch': epoch,
                             'best_fitness': best_fitness,
                             'training_results': f.read(),
                             'model': ema.ema.module.state_dict() if hasattr(model, 'module')
                                      else ema.ema.state_dict(),
                             'optimizer': None if final_epoch else optimizer.state_dict()}

                # Save last, best and delete
                torch.save(chkpt, last)
                if (best_fitness == fi) and not final_epoch:
                    torch.save(chkpt, best)
                del chkpt
            # end epoch ----------------------------------------------------------------------------------------------
        # end training

        test_sparsity(model)
        print("Best Acc: {:.4f}".format(results[2]))

        n = opt.name
        if len(n):
            n = '_' + n if not n.isnumeric() else n
            fresults, flast, fbest = 'results%s.txt' % n, wdir + 'last%s.pt' % n, wdir + 'best%s.pt' % n
            for f1, f2 in zip([wdir + 'last.pt', wdir + 'best.pt', 'results.txt'], [flast, fbest, fresults]):
                if os.path.exists(f1):
                    os.rename(f1, f2)  # rename
                    ispt = f2.endswith('.pt')  # is *.pt
                    strip_optimizer(f2) if ispt else None  # strip optimizer
                    os.system('gsutil cp %s gs://%s/weights' % (f2, opt.bucket)) if opt.bucket and ispt else None  # upload

    if not opt.evolve:
        plot_results()  # save as results.png
    print('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
    # dist.destroy_process_group() if torch.cuda.device_count() > 1 else None
    # torch.cuda.empty_cache()
    return results

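# The retrain masking above relies on admm.weight_pruning returning a binary
# mask plus the pruned weights. A minimal sketch for the irregular
# (element-wise magnitude) sparsity case, assuming `prune_ratio` is the
# fraction of weights to zero; other opt.sparsity_type values (filter, channel,
# column, ...) prune whole structures instead:

def sketch_weight_pruning(W, prune_ratio):
    """Sketch: zero the smallest-magnitude `prune_ratio` fraction of W and
    return (above_threshold_mask, pruned_weights)."""
    flat = W.detach().abs().flatten()
    k = int(flat.numel() * prune_ratio)
    if k == 0:
        return torch.ones_like(W), W.detach().clone()
    threshold = flat.kthvalue(k).values  # k-th smallest magnitude
    mask = (W.detach().abs() > threshold).to(W.dtype)
    return mask, W.detach() * mask
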
def train(train_loader, criterion, optimizer, epoch, config):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    nat_losses = AverageMeter()
    adv_losses = AverageMeter()
    nat_loss = 0
    adv_loss = 0
    nat_top1 = AverageMeter()
    adv_top1 = AverageMeter()

    # switch to train mode
    config.model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        # adjust learning rate
        if config.admm:
            admm.admm_adjust_learning_rate(optimizer, epoch, config)
        else:
            scheduler.step()  # NOTE: `scheduler` is assumed to live at module scope

        if config.gpu is not None:
            input = input.cuda(config.gpu, non_blocking=True)
            target = target.cuda(config.gpu, non_blocking=True)
        if config.mixup:
            input, target_a, target_b, lam = mixup_data(input, target, config.alpha)

        # compute output
        nat_output, adv_output, pert_inputs = config.model(input, target)
        if config.mixup:
            adv_loss = mixup_criterion(criterion, adv_output, target_a, target_b, lam, config.smooth)
            nat_loss = mixup_criterion(criterion, nat_output, target_a, target_b, lam, config.smooth)
        else:
            adv_loss = criterion(adv_output, target, smooth=config.smooth)
            nat_loss = criterion(nat_output, target, smooth=config.smooth)

        if config.admm:
            # NOTE: `ADMM` and `device` are assumed to live at module scope here
            admm.admm_update(config, ADMM, device, train_loader, optimizer,
                             epoch, input, i)  # update Z and U
            adv_loss, admm_loss, mixed_loss = admm.append_admm_loss(config, ADMM, adv_loss)  # append admm loss

        # measure accuracy and record loss
        nat_acc1, _ = accuracy(nat_output, target, topk=(1, 5))
        adv_acc1, _ = accuracy(adv_output, target, topk=(1, 5))
        nat_losses.update(nat_loss.item(), input.size(0))
        adv_losses.update(adv_loss.item(), input.size(0))
        adv_top1.update(adv_acc1[0], input.size(0))
        nat_top1.update(nat_acc1[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        if config.admm:
            mixed_loss.backward()
        else:
            adv_loss.backward()

        if config.masked_progressive:
            with torch.no_grad():
                for name, W in config.model.named_parameters():
                    if name in config.zero_masks:
                        W.grad *= config.zero_masks[name]

        if config.masked_retrain:
            with torch.no_grad():
                for name, W in config.model.named_parameters():
                    if name in config.masks:
                        # config.masks holds boolean masks marking weights above the pruning threshold
                        W.grad *= config.masks[name]

        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % config.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Nat_Loss {nat_loss.val:.4f} ({nat_loss.avg:.4f})\t'
                  'Nat_Acc@1 {nat_top1.val:.3f} ({nat_top1.avg:.3f})\t'
                  'Adv_Loss {adv_loss.val:.4f} ({adv_loss.avg:.4f})\t'
                  'Adv_Acc@1 {adv_top1.val:.3f} ({adv_top1.avg:.3f})\t'.format(
                      epoch, i, len(train_loader), batch_time=batch_time,
                      data_time=data_time, nat_loss=nat_losses, nat_top1=nat_top1,
                      adv_loss=adv_losses, adv_top1=adv_top1))

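# The accuracy(output, target, topk=...) helper used throughout these loops is
# generally carried over from the PyTorch ImageNet example; a sketch matching
# that implementation:

def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k."""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        _, pred = output.topk(maxk, 1, True, True)  # top-k class indices per sample
        pred = pred.t()
        correct = pred.eq(target.view(1, -1).expand_as(pred))

        res = []
        for k in topk:
            correct_k = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            res.append(correct_k.mul_(100.0 / batch_size))
        return res
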
def run_admm(data_name, data_set, data_end_index, fea_dict, lab_dict, arch_dict, cfg_file,
             processed_first, next_config_file, ADMM, masks, ep, ck):
    # This function processes the current chunk using the information in cfg_file.
    # In parallel, the next chunk is loaded into CPU memory.

    # Reading chunk-specific cfg file (first argument, mandatory)
    if not os.path.exists(cfg_file):
        sys.stderr.write('ERROR: The config file %s does not exist!\n' % cfg_file)
        sys.exit(0)
    else:
        config = configparser.ConfigParser()
        config.read(cfg_file)

    # Setting torch seed
    seed = int(config['exp']['seed'])
    torch.manual_seed(seed)
    random.seed(seed)
    np.random.seed(seed)

    # Reading config parameters
    output_folder = config['exp']['out_folder']
    multi_gpu = strtobool(config['exp']['multi_gpu'])
    to_do = config['exp']['to_do']
    info_file = config['exp']['out_info']
    model = config['model']['model'].split('\n')
    forward_outs = config['forward']['forward_out'].split(',')
    forward_normalize_post = list(map(strtobool, config['forward']['normalize_posteriors'].split(',')))
    forward_count_files = config['forward']['normalize_with_counts_from'].split(',')
    require_decodings = list(map(strtobool, config['forward']['require_decoding'].split(',')))
    use_cuda = strtobool(config['exp']['use_cuda'])
    save_gpumem = strtobool(config['exp']['save_gpumem'])
    is_production = strtobool(config['exp']['production'])

    if to_do == 'train':
        batch_size = int(config['batches']['batch_size_train'])
    if to_do == 'valid':
        batch_size = int(config['batches']['batch_size_valid'])
    if to_do == 'forward':
        batch_size = 1

    # ***** Reading the Data *****
    if processed_first:
        # The ADMM initialization work is also done here, on the first processed chunk.
        # Reading all the features and labels for this chunk
        shared_list = []
        p = threading.Thread(target=read_lab_fea, args=(cfg_file, is_production, shared_list, output_folder,))
        p.start()
        p.join()

        data_name = shared_list[0]
        data_end_index = shared_list[1]
        fea_dict = shared_list[2]
        lab_dict = shared_list[3]
        arch_dict = shared_list[4]
        data_set = shared_list[5]

        # converting numpy tensors into pytorch tensors and putting them on GPUs if specified
        if not save_gpumem and use_cuda:
            data_set = torch.from_numpy(data_set).float().cuda()
        else:
            data_set = torch.from_numpy(data_set).float()

    # Reading all the features and labels for the next chunk
    shared_list = []
    p = threading.Thread(target=read_lab_fea, args=(next_config_file, is_production, shared_list, output_folder,))
    p.start()

    # Reading model and initializing networks
    inp_out_dict = fea_dict
    [nns, costs] = model_init(inp_out_dict, model, config, arch_dict, use_cuda, multi_gpu, to_do)
    if processed_first:
        ADMM = admm.ADMM(config, nns)

    # optimizers initialization
    optimizers = optimizer_init(nns, config, arch_dict)

    # pre-training and multi-gpu init
    for net in nns.keys():
        pt_file_arch = config[arch_dict[net][0]]['arch_pretrain_file']
        if pt_file_arch != 'none':
            checkpoint_load = torch.load(pt_file_arch)
            nns[net].load_state_dict(checkpoint_load['model_par'])
            optimizers[net].load_state_dict(checkpoint_load['optimizer_par'])
            optimizers[net].param_groups[0]['lr'] = float(config[arch_dict[net][0]]['arch_lr'])  # loading lr of the cfg file for pt
        if multi_gpu:
            nns[net] = torch.nn.DataParallel(nns[net])

    if to_do == 'forward':
        post_file = {}
        for out_id in range(len(forward_outs)):
            if require_decodings[out_id]:
                out_file = info_file.replace('.info', '_' + forward_outs[out_id] + '_to_decode.ark')
            else:
                out_file = info_file.replace('.info', '_' + forward_outs[out_id] + '.ark')
            post_file[forward_outs[out_id]] = open_or_fd(out_file, output_folder, 'wb')

    if strtobool(config['exp']['retrain']) and processed_first and strtobool(config['exp']['masked_progressive']):
        # make sure small weights are pruned and confirm the acc
        print("<============masking both weights and gradients for retrain")
        masks = admm.masking(config, ADMM, nns)
        print("<============all masking statistics")
        masks = admm.zero_masking(config, nns)
        print("<============testing sparsity before retrain")
        admm.test_sparsity(config, nns, ADMM)

    if strtobool(config['exp']['masked_progressive']) and processed_first and strtobool(config['exp']['admm']):
        masks = admm.zero_masking(config, nns)

    # check automatically if the model is sequential
    seq_model = is_sequential_dict(config, arch_dict)

    # ***** Minibatch Processing loop *****
    if seq_model or to_do == 'forward':
        N_snt = len(data_name)
        N_batches = int(N_snt / batch_size)
    else:
        N_ex_tr = data_set.shape[0]
        N_batches = int(N_ex_tr / batch_size)

    beg_batch = 0
    end_batch = batch_size
    snt_index = 0
    beg_snt = 0
    start_time = time.time()

    # array of sentence lengths
    arr_snt_len = shift(shift(data_end_index, -1, 0) - data_end_index, 1, 0)
    arr_snt_len[0] = data_end_index[0]

    loss_sum = 0
    err_sum = 0
    inp_dim = data_set.shape[1]
    for i in range(N_batches):
        max_len = 0
        if seq_model:
            max_len = int(max(arr_snt_len[snt_index:snt_index + batch_size]))
            inp = torch.zeros(max_len, batch_size, inp_dim).contiguous()
            for k in range(batch_size):
                snt_len = data_end_index[snt_index] - beg_snt
                N_zeros = max_len - snt_len
                # Prepending a random number of initial zeros; the others go at the end.
                # Randomizing the padding position could have a regularization effect.
                N_zeros_left = random.randint(0, N_zeros)
                inp[N_zeros_left:N_zeros_left + snt_len, k, :] = data_set[beg_snt:beg_snt + snt_len, :]
                beg_snt = data_end_index[snt_index]
                snt_index = snt_index + 1
        else:
            # features and labels for batch i
            if to_do != 'forward':
                inp = data_set[beg_batch:end_batch, :].contiguous()
            else:
                snt_len = data_end_index[snt_index] - beg_snt
                inp = data_set[beg_snt:beg_snt + snt_len, :].contiguous()
                beg_snt = data_end_index[snt_index]
                snt_index = snt_index + 1

        # use cuda
        if use_cuda:
            inp = inp.cuda()

        if to_do == 'train':
            # Forward input, with autograd graph active
            outs_dict = forward_model(fea_dict, lab_dict, arch_dict, model, nns, costs, inp,
                                      inp_out_dict, max_len, batch_size, to_do, forward_outs)

            if strtobool(config['exp']['admm']):
                batch_idx = i + ck
                admm.admm_update(config, ADMM, nns, ep, batch_idx)  # update Z and U
                outs_dict['loss_final'], admm_loss, mixed_loss = admm.append_admm_loss(
                    config, ADMM, nns, outs_dict['loss_final'])  # append admm loss

            for opt in optimizers.keys():
                optimizers[opt].zero_grad()

            if strtobool(config['exp']['admm']):
                mixed_loss.backward()
            else:
                outs_dict['loss_final'].backward()

            if strtobool(config['exp']['masked_progressive']) and not strtobool(config['exp']['retrain']):
                with torch.no_grad():
                    for net in nns.keys():
                        for name, W in nns[net].named_parameters():
                            if name in masks:
                                W.grad *= masks[name]
                        break  # only the first net is masked here, as in the original

            if strtobool(config['exp']['retrain']):
                with torch.no_grad():
                    for net in nns.keys():
                        for name, W in nns[net].named_parameters():
                            if name in masks:
                                W.grad *= masks[name]
                        break  # only the first net is masked here, as in the original

            # Gradient Clipping (th 0.1)
            # for net in nns.keys():
            #     torch.nn.utils.clip_grad_norm_(nns[net].parameters(), 0.1)

            for opt in optimizers.keys():
                if not strtobool(config[arch_dict[opt][0]]['arch_freeze']):
                    optimizers[opt].step()
        else:
            with torch.no_grad():  # Forward input without autograd graph (saves memory)
                outs_dict = forward_model(fea_dict, lab_dict, arch_dict, model, nns, costs, inp,
                                          inp_out_dict, max_len, batch_size, to_do, forward_outs)

        if to_do == 'forward':
            for out_id in range(len(forward_outs)):
                out_save = outs_dict[forward_outs[out_id]].data.cpu().numpy()
                if forward_normalize_post[out_id]:
                    # read the count file and normalize the posteriors
                    counts = load_counts(forward_count_files[out_id])
                    out_save = out_save - np.log(counts / np.sum(counts))
                # save the output
                write_mat(output_folder, post_file[forward_outs[out_id]], out_save, data_name[i])
        else:
            loss_sum = loss_sum + outs_dict['loss_final'].detach()
            err_sum = err_sum + outs_dict['err_final'].detach()

        # update indices for the next batch
        beg_batch = end_batch
        end_batch = beg_batch + batch_size

        # Progress bar
        if to_do == 'train':
            status_string = "Training | (Batch " + str(i + 1) + "/" + str(N_batches) + ")" + \
                            " | L:" + str(round(loss_sum.cpu().item() / (i + 1), 3))
            if i == N_batches - 1:
                status_string = "Training | (Batch " + str(i + 1) + "/" + str(N_batches) + ")"
        if to_do == 'valid':
            status_string = "Validating | (Batch " + str(i + 1) + "/" + str(N_batches) + ")"
        if to_do == 'forward':
            status_string = "Forwarding | (Batch " + str(i + 1) + "/" + str(N_batches) + ")"
        progress(i, N_batches, status=status_string)

    elapsed_time_chunk = time.time() - start_time
    loss_tot = loss_sum / N_batches
    err_tot = err_sum / N_batches

    # clearing memory
    del inp, outs_dict, data_set

    # save the model
    if to_do == 'train':
        for net in nns.keys():
            checkpoint = {}
            if multi_gpu:
                checkpoint['model_par'] = nns[net].module.state_dict()
            else:
                checkpoint['model_par'] = nns[net].state_dict()
            checkpoint['optimizer_par'] = optimizers[net].state_dict()
            out_file = info_file.replace('.info', '_' + arch_dict[net][0] + '.pkl')
            torch.save(checkpoint, out_file)

    if to_do == 'forward':
        for out_name in forward_outs:
            post_file[out_name].close()

    # Write info file
    with open(info_file, "w") as text_file:
        text_file.write("[results]\n")
        if to_do != 'forward':
            text_file.write("loss=%s\n" % loss_tot.cpu().numpy())
            text_file.write("err=%s\n" % err_tot.cpu().numpy())
        text_file.write("elapsed_time_chunk=%f\n" % elapsed_time_chunk)

    # Getting the data for the next chunk (read in parallel)
    p.join()
    data_name = shared_list[0]
    data_end_index = shared_list[1]
    fea_dict = shared_list[2]
    lab_dict = shared_list[3]
    arch_dict = shared_list[4]
    data_set = shared_list[5]

    # converting numpy tensors into pytorch tensors and putting them on GPUs if specified
    if not save_gpumem and use_cuda:
        data_set = torch.from_numpy(data_set).float().cuda()
    else:
        data_set = torch.from_numpy(data_set).float()

    return [data_name, data_set, data_end_index, fea_dict, lab_dict, arch_dict, masks, ADMM]

def admm_quant_train_by_step(config, audio_processor, model, criterion, optimizer, epoch,
                             model_settings, time_shift_samples, sess, name_list, device):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    ce_losses = AverageMeter()
    mixed_losses = AverageMeter()
    top1 = AverageMeter()

    # switch to train mode
    model.train()

    train_set_size = audio_processor.set_size('training')
    max_step_epoch = train_set_size // config.batch_size

    input_frequency_size = model_settings['dct_coefficient_count']  # sequence length 10
    input_time_size = model_settings['spectrogram_length']  # input_size 25

    end = time.time()
    for i in range(0, train_set_size, config.batch_size):
        input, target = audio_processor.get_data(
            config.batch_size, 0, model_settings, config.background_frequency,
            config.background_volume, time_shift_samples, 'training', sess)

        # measure data loading time
        data_time.update(time.time() - end)

        target = torch.Tensor(target).cuda()
        _, target = target.max(dim=1)  # one-hot -> class indices
        input = input.reshape((-1, input_time_size, input_frequency_size))
        input = torch.Tensor(input).cuda()
        input_var = torch.autograd.Variable(input)
        target_var = torch.autograd.Variable(target)

        # compute output
        output = model(input_var)
        ce_loss = criterion(output, target_var)

        admm.z_u_update(config, model, device, epoch, i, name_list, print)  # update Z and U variables (`print` serves as the logger)
        ce_loss, admm_loss, mixed_loss = admm.append_admm_loss(model, ce_loss)  # append admm loss

        # compute gradient
        optimizer.zero_grad()
        mixed_loss.backward()
        optimizer.step()

        # measure accuracy and record loss
        prec1 = accuracy(output.data, target)[0]
        ce_losses.update(ce_loss.data, input.size(0))
        mixed_losses.update(mixed_loss.data, input.size(0))
        top1.update(prec1, input.size(0))

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if (i // config.batch_size) % config.print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Cross Entropy Loss {ce_loss.val:.4f} ({ce_loss.avg:.4f})\t'
                  'Mixed Loss {mixed_loss.val:.4f} ({mixed_loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})'.format(
                      epoch, i // config.batch_size, max_step_epoch,
                      batch_time=batch_time, data_time=data_time,
                      ce_loss=ce_losses, mixed_loss=mixed_losses, top1=top1))

def prune_train(args, pre_mask, ADMM, train_loader, criterion, optimizer, scheduler, epoch):
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    idx_loss_dict = {}

    # switch to train mode
    model.train()

    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        target = target.long().cuda()

        # measure data loading time
        data_time.update(time.time() - end)

        # adjust learning rate
        if args.admm:
            admm.admm_adjust_learning_rate(optimizer, epoch, args)
        else:
            scheduler.step()

        input = input.float().cuda()
        if args.mixup:
            input, target_a, target_b, lam = mixup_data(input, target, args.alpha)

        # compute output
        output = model(input)
        if args.mixup:
            ce_loss = mixup_criterion(criterion, output, target_a, target_b, lam, args.smooth)
        else:
            ce_loss = criterion(output, target, smooth=args.smooth)
        mixed_loss = ce_loss

        if args.admm:
            admm.z_u_update(args, ADMM, model, device, train_loader, optimizer,
                            epoch, input, i, writer)  # update Z and U
            ce_loss, admm_loss, mixed_loss = admm.append_admm_loss(args, ADMM, model, ce_loss)  # append admm loss
        if args.admm_mask:
            admm.y_k_update(args, ADMM, model, device, train_loader, optimizer,
                            epoch, input, i, writer)  # update Y and K
            ce_loss, admm_loss, mixed_loss = admm.append_mask_loss(args, ADMM, model, mixed_loss)

        # measure accuracy and record loss
        acc1, _ = accuracy(output, target, topk=(1, 5))
        losses.update(ce_loss.item(), input.size(0))
        top1.update(acc1[0], input.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        if args.admm or args.admm_mask:
            mixed_loss.backward(retain_graph=True)
        else:
            ce_loss.backward()

        if pre_mask:
            with torch.no_grad():
                for name, W in model.named_parameters():
                    # shared layers: freeze entirely
                    if name in args.fixed_layer:
                        W.grad *= 0
                        continue
                    # pruned weight layers: fix weights learned for previous tasks
                    if name in args.pruned_layer and name in pre_mask:
                        W.grad *= pre_mask[name].cuda()
                    # adaptively learn the mask: fix the mask over the trainable weight part
                    if args.adaptive_mask and 'mask' in name and args.admm:
                        W.grad *= args.mask[name.replace('w_mask', 'weight')].cuda()
                        # W.grad *= 100

        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.log_interval == 0:
            for param_group in optimizer.param_groups:
                current_lr = param_group['lr']
            print('({0}) lr:[{1:.5f}] '
                  'Epoch: [{2}][{3}/{4}]\t'
                  'Status: admm-[{5}] retrain-[{6}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Acc@1 {top1.val:.3f}% ({top1.avg:.3f}%)\t'.format(
                      args.optmzr, current_lr, epoch, i, len(train_loader),
                      args.admm, args.masked_retrain,
                      batch_time=batch_time,  # was data_time; report batch time under its own label
                      loss=losses, top1=top1))
        if i % 100 == 0:
            idx_loss_dict[i] = losses.avg

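# Several loops above call admm.admm_adjust_learning_rate. In the reference
# ADMM-pruning repos this schedule is periodic: the LR resets at the start of
# each admm_epoch interval and step-decays within it. A sketch of that shape,
# assuming `config.admm_epoch` (interval length) and `config.lr` (base LR):

def sketch_admm_adjust_learning_rate(optimizer, epoch, config):
    """Sketch: periodic LR schedule for ADMM training. The LR resets to
    config.lr whenever a new admm_epoch interval begins, then decays by
    10x roughly every third of the interval."""
    admm_epoch = config.admm_epoch
    if epoch % admm_epoch == 0:
        lr = config.lr
    else:
        offset = epoch % admm_epoch
        admm_step = admm_epoch / 3  # three decay steps per interval
        lr = config.lr * (0.1 ** (offset // admm_step))
    for param_group in optimizer.param_groups:
        param_group['lr'] = lr
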