def do_admmtrain(args, model, train_loader, test_loader, sparsity_type, prune_ratios, masks, base_model_path, admm_path):
    """====================="""
    """ multi-rho admm train"""
    """====================="""
    initial_rho = args.rho
    current_rho = initial_rho
    if args.admm:
        for i in range(args.rho_num):
            current_rho = initial_rho * 10 ** i
            if i == 0:
                print("Loading: " + base_model_path)
                model.load_state_dict(torch.load(base_model_path))  # ADMM training starts from the baseline model
                model.cuda()
            else:
                print("Loading: " + admm_path + "/cifar_vgg{}_{}_{}_{}.pt".format(args.depth, current_rho / 10, args.config_file, args.optmzr))
                model.load_state_dict(torch.load(admm_path + "/cifar_vgg{}_{}_{}_{}.pt".format(args.depth, current_rho / 10, args.config_file, args.optmzr)))
                model.cuda()

            ADMM = admm.ADMM(model, sparsity_type, prune_ratios, rho=current_rho)
            admm.admm_initialization(args, ADMM=ADMM, model=model)  # initialize Z variable

            # admm train
            best_prec1 = 0.
            lr = args.lr / 10
            if args.optmzr == "adam":
                optimizer = optim.Adam(model.parameters(), lr=lr, weight_decay=args.weight_decay)
            if args.optmzr == "sgd":
                optimizer = optim.SGD(model.parameters(), lr=lr, momentum=args.momentum, weight_decay=args.weight_decay)

            for epoch in range(1, args.epochs + 1):
                print("current rho: {}".format(current_rho))
                train(args, ADMM, model, device, train_loader, optimizer, epoch, writer, masks)
                prec1 = test(args, model, device, test_loader)
                best_prec1 = max(prec1, best_prec1)

            print("Best Acc: {:.4f}".format(best_prec1))
            print("Saving model: " + admm_path + "/cifar_vgg{}_{}_{}_{}.pt".format(args.depth, current_rho, args.config_file, args.optmzr))
            torch.save(model.state_dict(), admm_path + "/cifar_vgg{}_{}_{}_{}.pt".format(args.depth, current_rho, args.config_file, args.optmzr))

    return admm_path + "/cifar_vgg{}_{}_{}_{}.pt".format(args.depth, current_rho, args.config_file, args.optmzr)
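# ---------------------------------------------------------------------------
# NOTE (illustrative sketch): the loop above delegates the actual ADMM updates
# to the `admm` module. For reference, a minimal, self-contained version of
# the three ingredients it relies on is sketched below, assuming unstructured
# (magnitude-based) pruning. The names `project_sparse`, `admm_penalty`, and
# `z_u_step` are illustrative only; they are not this repo's admm API.
# ---------------------------------------------------------------------------
import torch


def project_sparse(W, prune_ratio):
    """Euclidean projection onto the sparsity constraint: zero out the
    smallest-magnitude `prune_ratio` fraction of entries, keep the rest."""
    k = int(W.numel() * prune_ratio)
    if k == 0:
        return W.clone()
    threshold = torch.kthvalue(W.abs().flatten(), k).values
    return W * (W.abs() > threshold).float()


def admm_penalty(model, Z, U, rho):
    """Augmented-Lagrangian term added to the task loss:
    (rho / 2) * sum_i ||W_i - Z_i + U_i||_F^2."""
    penalty = 0.0
    for name, W in model.named_parameters():
        if name in Z:
            penalty = penalty + 0.5 * rho * torch.norm(W - Z[name] + U[name]) ** 2
    return penalty


def z_u_step(model, Z, U, prune_ratios):
    """Periodic dual update: Z <- project(W + U), then U <- U + W - Z."""
    with torch.no_grad():
        for name, W in model.named_parameters():
            if name in Z:
                Z[name] = project_sparse(W + U[name], prune_ratios[name])
                U[name] = U[name] + W - Z[name]

# Multiplying rho by 10 per outer iteration (as in do_admmtrain) progressively
# tightens W toward the sparse target Z before the final hard prune.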
def train(hyp):
    # batch_time = AverageMeter()
    # data_time = AverageMeter()
    # losses = AverageMeter()
    cfg = opt.cfg
    data = opt.data
    epochs = opt.epochs  # 500200 batches at bs 64, 117263 images = 273 epochs
    batch_size = opt.batch_size
    accumulate = max(round(64 / batch_size), 1)  # accumulate n times before optimizer update (bs 64)
    weights = opt.weights  # initial training weights
    imgsz_min, imgsz_max, imgsz_test = opt.img_size  # img sizes (min, max, test)

    # Image Sizes
    gs = 32  # (pixels) grid size
    assert math.fmod(imgsz_min, gs) == 0, '--img-size %g must be a %g-multiple' % (imgsz_min, gs)
    opt.multi_scale |= imgsz_min != imgsz_max  # multi if different (min, max)
    if opt.multi_scale:
        if imgsz_min == imgsz_max:
            imgsz_min //= 1.5
            imgsz_max //= 0.667
        grid_min, grid_max = imgsz_min // gs, imgsz_max // gs
        imgsz_min, imgsz_max = int(grid_min * gs), int(grid_max * gs)
    img_size = imgsz_max  # initialize with max size

    # Configure run
    init_seeds()
    data_dict = parse_data_cfg(data)
    train_path = data_dict['train']
    test_path = data_dict['valid']
    nc = 1 if opt.single_cls else int(data_dict['classes'])  # number of classes
    hyp['cls'] *= nc / 80  # update coco-tuned hyp['cls'] to current dataset

    # Remove previous results
    for f in glob.glob('*_batch*.jpg') + glob.glob(results_file):
        os.remove(f)

    # Initialize model
    model = Darknet(cfg).to(device)

    # Optimizer
    pg0, pg1, pg2 = [], [], []  # optimizer parameter groups
    for k, v in dict(model.named_parameters()).items():
        if '.bias' in k:
            pg2 += [v]  # biases
        elif 'Conv2d.weight' in k:
            pg1 += [v]  # apply weight_decay
        else:
            pg0 += [v]  # all else

    if opt.adam:
        # hyp['lr0'] *= 0.1  # reduce lr (i.e. SGD=5E-3, Adam=5E-4)
        optimizer = optim.Adam(pg0, lr=hyp['lr0'])
        # optimizer = AdaBound(pg0, lr=hyp['lr0'], final_lr=0.1)
    else:
        optimizer = optim.SGD(pg0, lr=hyp['lr0'], momentum=hyp['momentum'], nesterov=True)
    optimizer.add_param_group({'params': pg1, 'weight_decay': hyp['weight_decay']})  # add pg1 with weight_decay
    optimizer.add_param_group({'params': pg2})  # add pg2 (biases)
    print('Optimizer groups: %g .bias, %g Conv2d.weight, %g other' % (len(pg2), len(pg1), len(pg0)))
    del pg0, pg1, pg2

    start_epoch = 0
    best_fitness = 0.0
    # attempt_download(weights)

    if opt.freeze_layers:
        output_layer_indices = [idx - 1 for idx, module in enumerate(model.module_list) if isinstance(module, YOLOLayer)]
        freeze_layer_indices = [x for x in range(len(model.module_list))
                                if (x not in output_layer_indices) and (x - 1 not in output_layer_indices)]
        for idx in freeze_layer_indices:
            for parameter in model.module_list[idx].parameters():
                parameter.requires_grad_(False)

    # Mixed precision training https://github.com/NVIDIA/apex
    if mixed_precision:
        model, optimizer = amp.initialize(model, optimizer, opt_level='O1', verbosity=0)

    # Scheduler https://arxiv.org/pdf/1812.01187.pdf
    lf = lambda x: (((1 + math.cos(x * math.pi / epochs)) / 2) ** 1.0) * 0.95 + 0.05  # cosine
    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)
    scheduler.last_epoch = start_epoch - 1  # see link below
    # https://discuss.pytorch.org/t/a-problem-occured-when-resuming-an-optimizer/28822

    # Plot lr schedule
    # y = []
    # for _ in range(epochs):
    #     scheduler.step()
    #     y.append(optimizer.param_groups[0]['lr'])
    # plt.plot(y, '.-', label='LambdaLR')
    # plt.xlabel('epoch')
    # plt.ylabel('LR')
    # plt.tight_layout()
    # plt.savefig('LR.png', dpi=300)

    # Dataset
    dataset = LoadImagesAndLabels(train_path, img_size, batch_size,
                                  augment=True,
                                  hyp=hyp,  # augmentation hyperparameters
                                  rect=opt.rect,  # rectangular training
                                  cache_images=opt.cache_images,
                                  single_cls=opt.single_cls)

    # Dataloader
    batch_size = min(batch_size, len(dataset))
    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])  # number of workers
    dataloader = torch.utils.data.DataLoader(dataset,
                                             batch_size=batch_size,
                                             num_workers=nw,
                                             shuffle=not opt.rect,  # Shuffle=True unless rectangular training is used
                                             pin_memory=True,
                                             collate_fn=dataset.collate_fn)

    # Testloader
    testloader = torch.utils.data.DataLoader(LoadImagesAndLabels(test_path, imgsz_test, batch_size,
                                                                 hyp=hyp,
                                                                 rect=True,
                                                                 cache_images=opt.cache_images,
                                                                 single_cls=opt.single_cls),
                                             batch_size=batch_size,
                                             num_workers=nw,
                                             pin_memory=True,
                                             collate_fn=dataset.collate_fn)

    initial_rho = opt.rho
    t0 = time.time()

    """====================="""
    """ multi-rho admm train"""
    """====================="""
    if opt.admm:
        opt.notest = True
        # possible weights are '*.pt', 'yolov3-spp.pt', 'yolov3-tiny.pt' etc.
        chkpt = torch.load(weights, map_location=device)  # load model
        try:
            # chkpt['model'] = {k: v for k, v in chkpt['model'].items() if model.state_dict()[k].numel() == v.numel()}
            model.load_state_dict(chkpt['model'], strict=False)
        except Exception as e:
            s = "%s is not compatible with %s. Specify --weights '' or specify a --cfg compatible with %s. " \
                "See https://github.com/ultralytics/yolov3/issues/657" % (opt.weights, opt.cfg, opt.weights)
            print(e)
            raise KeyError(s) from e
        del chkpt

        # Initialize distributed training
        if device.type != 'cpu' and torch.cuda.device_count() > 1 and torch.distributed.is_available():
            dist.init_process_group(backend='nccl',  # 'distributed backend'
                                    init_method='tcp://127.0.0.1:9999',  # distributed training init method
                                    world_size=1,  # number of nodes for distributed training
                                    rank=0)  # distributed training node rank
            model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)
            model.yolo_layers = model.module.yolo_layers  # move yolo layer indices to top level

        # Model parameters
        model.nc = nc  # attach number of classes to model
        model.hyp = hyp  # attach hyperparameters to model
        model.gr = 1.0  # giou loss ratio (obj_loss = 1.0 or giou)
        model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device)  # attach class weights

        # Model EMA
        ema = torch_utils.ModelEMA(model)

        # Start training
        nb = len(dataloader)  # number of batches
        n_burn = max(int(0.7 * nb), 500)  # burn-in iterations, max(0.7 epochs, 500 iterations)
        maps = np.zeros(nc)  # mAP per class
        # torch.autograd.set_detect_anomaly(True)
        results = (0, 0, 0, 0, 0, 0, 0)  # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
        print('Image sizes %g - %g train, %g test' % (imgsz_min, imgsz_max, imgsz_test))
        print('Using %g dataloader workers' % nw)
        print('Starting training for %g epochs...' % epochs)
        for i in range(opt.rho_num):
            current_rho = initial_rho * 10 ** i
            ADMM = admm.ADMM(model, file_name="./prune_config/" + opt.config_file + ".yaml", rho=current_rho)
            admm.admm_initialization(opt, ADMM=ADMM, model=model)  # initialize Z variable

            for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------
                print("current rho: {}".format(current_rho))
                model.train()

                masks = {}
                if opt.masked_retrain and not opt.combine_progressive:
                    print("full acc re-train masking")
                    for name, W in (model.module.named_parameters() if type(model) is torch.nn.parallel.DistributedDataParallel else model.named_parameters()):
                        if name not in ADMM.prune_ratios:
                            continue
                        above_threshold, W = admm.weight_pruning(opt, W, ADMM.prune_ratios[name])
                        W.data = W
                        masks[name] = above_threshold
                elif opt.combine_progressive:
                    print("progressive admm-train/re-train masking")
                    for name, W in (model.module.named_parameters() if type(model) is torch.nn.parallel.DistributedDataParallel else model.named_parameters()):
                        weight = W.cpu().detach().numpy()
                        non_zeros = weight != 0
                        non_zeros = non_zeros.astype(np.float32)
                        zero_mask = torch.from_numpy(non_zeros).cuda()
                        W = torch.from_numpy(weight).cuda()
                        W.data = W
                        masks[name] = zero_mask

                # Update image weights (optional)
                if dataset.image_weights:
                    w = model.class_weights.cpu().numpy() * (1 - maps) ** 2  # class weights
                    image_weights = labels_to_image_weights(dataset.labels, nc=nc, class_weights=w)
                    dataset.indices = random.choices(range(dataset.n), weights=image_weights, k=dataset.n)  # rand weighted idx

                mloss = torch.zeros(4).to(device)  # mean losses
                print(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'total', 'targets', 'img_size'))
                pbar = tqdm(enumerate(dataloader), total=nb)  # progress bar
                for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------
                    ni = i + nb * epoch  # number integrated batches (since train start)
                    imgs = imgs.to(device).float() / 255.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0
                    targets = targets.to(device)

                    # Burn-in
                    if ni <= n_burn:
                        xi = [0, n_burn]  # x interp
                        model.gr = np.interp(ni, xi, [0.0, 1.0])  # giou loss ratio (obj_loss = 1.0 or giou)
                        accumulate = max(1, np.interp(ni, xi, [1, 64 / batch_size]).round())
                        for j, x in enumerate(optimizer.param_groups):
                            # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                            x['lr'] = np.interp(ni, xi, [0.1 if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                            x['weight_decay'] = np.interp(ni, xi, [0.0, hyp['weight_decay'] if j == 1 else 0.0])
                            if 'momentum' in x:
                                x['momentum'] = np.interp(ni, xi, [0.9, hyp['momentum']])

                    # Multi-Scale
                    if opt.multi_scale:
                        if ni / accumulate % 1 == 0:  # adjust img_size (67% - 150%) every 1 batch
                            img_size = random.randrange(grid_min, grid_max + 1) * gs
                        sf = img_size / max(imgs.shape[2:])  # scale factor
                        if sf != 1:
                            ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to 32-multiple)
                            imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

                    # Forward
                    pred = model(imgs)

                    # Loss
                    loss, loss_items = compute_loss(pred, targets, model)
                    if not torch.isfinite(loss):
                        print('WARNING: non-finite loss, ending training ', loss_items)
                        return results

                    # Backward
                    loss *= batch_size / 64  # scale loss
                    admm.z_u_update(opt, ADMM, model, device, dataloader, optimizer, epoch, imgs, i, tb_writer)  # update Z and U variables
                    loss, admm_loss, mixed_loss = admm.append_admm_loss(opt, ADMM, model, loss)  # append ADMM loss
                    if mixed_precision:
                        with amp.scale_loss(mixed_loss, optimizer) as scaled_loss:
                            scaled_loss.backward()
                    else:
                        mixed_loss.backward()

                    if opt.combine_progressive:
                        with torch.no_grad():
                            for name, W in (model.module.named_parameters() if type(model) is torch.nn.parallel.DistributedDataParallel else model.named_parameters()):
                                if name in masks:
                                    W.grad *= masks[name]

                    # Optimize
                    if ni % accumulate == 0:
                        optimizer.step()
                        optimizer.zero_grad()
                        ema.update(model)

                    # Print
                    mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                    mem = '%.3gG' % (torch.cuda.memory_cached() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
                    s = ('%10s' * 2 + '%10.3g' * 6) % ('%g/%g' % (epoch, epochs - 1), mem, *mloss, len(targets), img_size)
                    pbar.set_description(s)

                    # Plot
                    # if ni < 1:
                    #     f = 'train_batch%g.jpg' % i  # filename
                    #     res = plot_images(images=imgs, targets=targets, paths=paths, fname=f)
                    #     if tb_writer:
                    #         tb_writer.add_image(f, res, dataformats='HWC', global_step=epoch)
                    #         # tb_writer.add_graph(model, imgs)  # add model to tensorboard

                    # end batch ------------------------------------------------------------------------------------------------

                # Update scheduler
                if opt.admm:
                    admm.admm_adjust_learning_rate(optimizer, epoch, opt)
                else:
                    scheduler.step()

                # Process epoch results
                ema.update_attr(model)
                final_epoch = epoch + 1 == epochs
                if not opt.notest:  # Calculate mAP  # or final_epoch
                    is_coco = any([x in data for x in ['coco.data', 'coco2014.data', 'coco2017.data']]) and model.nc == 80
                    results, maps = test.test(cfg,
                                              data,
                                              batch_size=batch_size,
                                              imgsz=imgsz_test,
                                              model=ema.ema,
                                              save_json=final_epoch and is_coco,
                                              single_cls=opt.single_cls,
                                              dataloader=testloader,
                                              multi_label=ni > n_burn)

                # Write
                with open(results_file, 'a') as f:
                    f.write(s + '%10.3g' * 7 % results + '\n')  # P, R, mAP, F1, test_losses=(GIoU, obj, cls)
                if len(opt.name) and opt.bucket:
                    os.system('gsutil cp results.txt gs://%s/results/results%s.txt' % (opt.bucket, opt.name))

                # Tensorboard
                if tb_writer:
                    tags = ['train/giou_loss', 'train/obj_loss', 'train/cls_loss',
                            'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/F1',
                            'val/giou_loss', 'val/obj_loss', 'val/cls_loss']
                    for x, tag in zip(list(mloss[:-1]) + list(results), tags):
                        tb_writer.add_scalar(tag, x, epoch)

                # Update best mAP
                fi = fitness(np.array(results).reshape(1, -1))  # fitness_i = weighted combination of [P, R, mAP, F1]
                if fi > best_fitness:
                    best_fitness = fi
                # end epoch ----------------------------------------------------------------------------------------------------
            # end training

            # admm_adjust_learning_rate --------------------------------------------------------------------------------
            admm.admm_adjust_learning_rate(optimizer, epoch, opt)
            # end admm_adjust_learning_rate ----------------------------------------------------------------------------

            print("Saving model.")
            torch.save(model.module.state_dict() if type(model) is nn.parallel.DistributedDataParallel else model.state_dict(),
                       "./model_pruned/yolov4_{}_{}_{}.pt".format(current_rho, opt.config_file, opt.sparsity_type))

        if not opt.evolve:
            plot_results()  # save as results.png
        print('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
        # dist.destroy_process_group() if torch.cuda.device_count() > 1 else None
        # torch.cuda.empty_cache()
        # return results

    """=============="""
    """masked retrain"""
    """=============="""
    if opt.masked_retrain:
        ADMM = admm.ADMM(model, file_name="./prune_config/" + opt.config_file + ".yaml", rho=initial_rho)
        if not opt.resume:
            # possible weights are '*.pt', 'yolov3-spp.pt', 'yolov3-tiny.pt' etc.
            print("\n>_ Loading file: ./model_pruned/yolov4_{}_{}_{}.pt".format(initial_rho * 10 ** (opt.rho_num - 1), opt.config_file, opt.sparsity_type))
            chkpt = torch.load("./model_pruned/yolov4_{}_{}_{}.pt".format(initial_rho * 10 ** (opt.rho_num - 1), opt.config_file, opt.sparsity_type),
                               map_location=device)
            # chkpt = torch.load(weights, map_location=device)  # load model
            try:
                # chkpt['model'] = {k: v for k, v in chkpt['model'].items() if model.state_dict()[k].numel() == v.numel()}
                model.load_state_dict(chkpt, strict=False)  # ['model']
            except KeyError as e:
                # s = "%s is not compatible with %s. Specify --weights '' or specify a --cfg compatible with %s. " \
                #     "See https://github.com/ultralytics/yolov3/issues/657" % (opt.weights, opt.cfg, opt.weights)
                raise KeyError() from e

            # ---------------------------------------------- hard prune ------------------------------------------------
            admm.hard_prune(opt, ADMM, model)
            # ---------------------------------------------- hard prune ------------------------------------------------
        else:
            try:
                chkpt = torch.load(weights, map_location=device)
                chkpt['model'] = {k: v for k, v in chkpt['model'].items() if model.state_dict()[k].numel() == v.numel()}
                model.load_state_dict(chkpt['model'], strict=False)
            except KeyError as e:
                # s = "%s is not compatible with %s. Specify --weights '' or specify a --cfg compatible with %s. " \
                #     "See https://github.com/ultralytics/yolov3/issues/657" % (opt.weights, opt.cfg, opt.weights)
                raise KeyError() from e

            # load optimizer
            if chkpt['optimizer'] is not None:
                optimizer.load_state_dict(chkpt['optimizer'])
                best_fitness = chkpt['best_fitness']

            # load results
            if chkpt.get('training_results') is not None:
                with open(results_file, 'w') as file:
                    file.write(chkpt['training_results'])  # write results.txt

            start_epoch = chkpt['epoch'] + 1
            del chkpt

        # Initialize distributed training
        if device.type != 'cpu' and torch.cuda.device_count() > 1 and torch.distributed.is_available():
            dist.init_process_group(backend='nccl',  # 'distributed backend'
                                    init_method='tcp://127.0.0.1:9999',  # distributed training init method
                                    world_size=1,  # number of nodes for distributed training
                                    rank=0)  # distributed training node rank
            model = torch.nn.parallel.DistributedDataParallel(model, find_unused_parameters=True)
            model.yolo_layers = model.module.yolo_layers  # move yolo layer indices to top level

        # Model parameters
        model.nc = nc  # attach number of classes to model
        model.hyp = hyp  # attach hyperparameters to model
        model.gr = 1.0  # giou loss ratio (obj_loss = 1.0 or giou)
        model.class_weights = labels_to_class_weights(dataset.labels, nc).to(device)  # attach class weights

        # Model EMA
        ema = torch_utils.ModelEMA(model)

        # Start training
        nb = len(dataloader)  # number of batches
        n_burn = max(3 * nb, 500)  # burn-in iterations, max(3 epochs, 500 iterations)
        maps = np.zeros(nc)  # mAP per class
        # torch.autograd.set_detect_anomaly(True)
        results = (0, 0, 0, 0, 0, 0, 0)  # 'P', 'R', 'mAP', 'F1', 'val GIoU', 'val Objectness', 'val Classification'
        print('Image sizes %g - %g train, %g test' % (imgsz_min, imgsz_max, imgsz_test))
        print('Using %g dataloader workers' % nw)
        print('Starting training for %g epochs...' % epochs)

        for epoch in range(start_epoch, epochs):  # epoch ------------------------------------------------------------------
            model.train()

            if opt.masked_retrain and not opt.combine_progressive:
                print("full acc re-train masking")
                masks = {}
                for name, W in (model.module.named_parameters() if type(model) is torch.nn.parallel.DistributedDataParallel else model.named_parameters()):
                    if name not in ADMM.prune_ratios:
                        continue
                    above_threshold, W = admm.weight_pruning(opt, W, ADMM.prune_ratios[name])
                    W.data = W
                    masks[name] = above_threshold
            elif opt.combine_progressive:
                print("progressive admm-train/re-train masking")
                masks = {}
                for name, W in (model.module.named_parameters() if type(model) is torch.nn.parallel.DistributedDataParallel else model.named_parameters()):
                    weight = W.cpu().detach().numpy()
                    non_zeros = weight != 0
                    non_zeros = non_zeros.astype(np.float32)
                    zero_mask = torch.from_numpy(non_zeros).cuda()
                    W = torch.from_numpy(weight).cuda()
                    W.data = W
                    masks[name] = zero_mask

            # Update image weights (optional)
            if dataset.image_weights:
                w = model.class_weights.cpu().numpy() * (1 - maps) ** 2  # class weights
                image_weights = labels_to_image_weights(dataset.labels, nc=nc, class_weights=w)
                dataset.indices = random.choices(range(dataset.n), weights=image_weights, k=dataset.n)  # rand weighted idx

            mloss = torch.zeros(4).to(device)  # mean losses
            print(('\n' + '%10s' * 8) % ('Epoch', 'gpu_mem', 'GIoU', 'obj', 'cls', 'total', 'targets', 'img_size'))
            pbar = tqdm(enumerate(dataloader), total=nb)  # progress bar
            for i, (imgs, targets, paths, _) in pbar:  # batch -------------------------------------------------------------
                ni = i + nb * epoch  # number integrated batches (since train start)
                imgs = imgs.to(device).float() / 255.0  # uint8 to float32, 0 - 255 to 0.0 - 1.0
                targets = targets.to(device)

                # Burn-in
                if ni <= n_burn:
                    xi = [0, n_burn]  # x interp
                    model.gr = np.interp(ni, xi, [0.0, 1.0])  # giou loss ratio (obj_loss = 1.0 or giou)
                    accumulate = max(1, np.interp(ni, xi, [1, 64 / batch_size]).round())
                    for j, x in enumerate(optimizer.param_groups):
                        # bias lr falls from 0.1 to lr0, all other lrs rise from 0.0 to lr0
                        x['lr'] = np.interp(ni, xi, [0.1 if j == 2 else 0.0, x['initial_lr'] * lf(epoch)])
                        x['weight_decay'] = np.interp(ni, xi, [0.0, hyp['weight_decay'] if j == 1 else 0.0])
                        if 'momentum' in x:
                            x['momentum'] = np.interp(ni, xi, [0.9, hyp['momentum']])

                # Multi-Scale
                if opt.multi_scale:
                    if ni / accumulate % 1 == 0:  # adjust img_size (67% - 150%) every 1 batch
                        img_size = random.randrange(grid_min, grid_max + 1) * gs
                    sf = img_size / max(imgs.shape[2:])  # scale factor
                    if sf != 1:
                        ns = [math.ceil(x * sf / gs) * gs for x in imgs.shape[2:]]  # new shape (stretched to 32-multiple)
                        imgs = F.interpolate(imgs, size=ns, mode='bilinear', align_corners=False)

                # Forward
                pred = model(imgs)

                # Loss
                loss, loss_items = compute_loss(pred, targets, model)
                if not torch.isfinite(loss):
                    print('WARNING: non-finite loss, ending training ', loss_items)
                    return results

                # Backward
                loss *= batch_size / 64  # scale loss
                if mixed_precision:
                    with amp.scale_loss(loss, optimizer) as scaled_loss:
                        scaled_loss.backward()
                else:
                    loss.backward()

                if opt.combine_progressive:
                    with torch.no_grad():
                        for name, W in (model.module.named_parameters() if type(model) is torch.nn.parallel.DistributedDataParallel else model.named_parameters()):
                            if name in masks:
                                W.grad *= masks[name]
                if opt.masked_retrain:
                    with torch.no_grad():
                        for name, W in (model.module.named_parameters() if type(model) is torch.nn.parallel.DistributedDataParallel else model.named_parameters()):
                            if name in masks:
                                W.grad *= masks[name]
                # Optimize
                if ni % accumulate == 0:
                    optimizer.step()
                    optimizer.zero_grad()
                    ema.update(model)

                # Print
                mloss = (mloss * i + loss_items) / (i + 1)  # update mean losses
                mem = '%.3gG' % (torch.cuda.memory_cached() / 1E9 if torch.cuda.is_available() else 0)  # (GB)
                s = ('%10s' * 2 + '%10.3g' * 6) % ('%g/%g' % (epoch, epochs - 1), mem, *mloss, len(targets), img_size)
                pbar.set_description(s)

                # Plot
                if ni < 1:
                    f = 'train_batch%g.jpg' % i  # filename
                    res = plot_images(images=imgs, targets=targets, paths=paths, fname=f)
                    if tb_writer:
                        tb_writer.add_image(f, res, dataformats='HWC', global_step=epoch)
                        # tb_writer.add_graph(model, imgs)  # add model to tensorboard

                # end batch ------------------------------------------------------------------------------------------------

            # Update scheduler
            scheduler.step()

            # Process epoch results
            ema.update_attr(model)
            final_epoch = epoch + 1 == epochs
            if not opt.notest or final_epoch:  # Calculate mAP
                is_coco = any([x in data for x in ['coco.data', 'coco2014.data', 'coco2017.data']]) and model.nc == 80
                results, maps = test.test(cfg,
                                          data,
                                          batch_size=batch_size,
                                          imgsz=imgsz_test,
                                          model=ema.ema,
                                          save_json=final_epoch and is_coco,
                                          single_cls=opt.single_cls,
                                          dataloader=testloader,
                                          multi_label=ni > n_burn)

            # Write
            with open(results_file, 'a') as f:
                f.write(s + '%10.3g' * 7 % results + '\n')  # P, R, mAP, F1, test_losses=(GIoU, obj, cls)
            if len(opt.name) and opt.bucket:
                os.system('gsutil cp results.txt gs://%s/results/results%s.txt' % (opt.bucket, opt.name))

            # Tensorboard
            if tb_writer:
                tags = ['train/giou_loss', 'train/obj_loss', 'train/cls_loss',
                        'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/F1',
                        'val/giou_loss', 'val/obj_loss', 'val/cls_loss']
                for x, tag in zip(list(mloss[:-1]) + list(results), tags):
                    tb_writer.add_scalar(tag, x, epoch)

            # Update best mAP
            fi = fitness(np.array(results).reshape(1, -1))  # fitness_i = weighted combination of [P, R, mAP, F1]
            if fi > best_fitness:  # results[2]
                best_fitness = fi  # results[2]
                print("\n>_ Got better accuracy {:.3f}% now...\n".format(results[2]))
                # torch.save(ema.ema.module.state_dict() if hasattr(model, 'module') else ema.ema.state_dict(),
                #            "./model_retrained/yolov4_retrained_acc_{:.3f}_{}rhos_{}_{}.pt".format(results[2], opt.rho_num, opt.config_file, opt.sparsity_type))

            # Save model
            save = (not opt.nosave) or (final_epoch and not opt.evolve)
            if save:
                with open(results_file, 'r') as f:
                    # create checkpoint
                    chkpt = {'epoch': epoch,
                             'best_fitness': best_fitness,
                             'training_results': f.read(),
                             'model': ema.ema.module.state_dict() if hasattr(model, 'module') else ema.ema.state_dict(),
                             'optimizer': None if final_epoch else optimizer.state_dict()}

                # Save last, best and delete
                torch.save(chkpt, last)
                if (best_fitness == fi) and not final_epoch:
                    torch.save(chkpt, best)
                del chkpt
            # end epoch ----------------------------------------------------------------------------------------------------
        # end training

        test_sparsity(model)
        print("Best Acc: {:.4f}".format(results[2]))

        n = opt.name
        if len(n):
            n = '_' + n if not n.isnumeric() else n
            fresults, flast, fbest = 'results%s.txt' % n, wdir + 'last%s.pt' % n, wdir + 'best%s.pt' % n
            for f1, f2 in zip([wdir + 'last.pt', wdir + 'best.pt', 'results.txt'], [flast, fbest, fresults]):
                if os.path.exists(f1):
                    os.rename(f1, f2)  # rename
                    ispt = f2.endswith('.pt')  # is *.pt
                    strip_optimizer(f2) if ispt else None  # strip optimizer
                    os.system('gsutil cp %s gs://%s/weights' % (f2, opt.bucket)) if opt.bucket and ispt else None  # upload

        if not opt.evolve:
            plot_results()  # save as results.png
        print('%g epochs completed in %.3f hours.\n' % (epoch - start_epoch + 1, (time.time() - t0) / 3600))
        # dist.destroy_process_group() if torch.cuda.device_count() > 1 else None
        # torch.cuda.empty_cache()

    return results
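# ---------------------------------------------------------------------------
# NOTE (illustrative sketch): `train` emulates a nominal batch size of 64 on
# smaller hardware batches by scaling the loss with `batch_size / 64` and only
# stepping the optimizer every `accumulate` micro-batches. A stripped-down,
# runnable version of that pattern (the model and data are toy placeholders):
# ---------------------------------------------------------------------------
import torch
from torch import nn, optim

nominal_bs, micro_bs = 64, 16
accumulate = max(round(nominal_bs / micro_bs), 1)  # 4 micro-batches per update

net = nn.Linear(10, 1)
sgd = optim.SGD(net.parameters(), lr=0.01)

for step in range(8):  # stand-in for enumerate(dataloader)
    x, y = torch.randn(micro_bs, 10), torch.randn(micro_bs, 1)
    loss = nn.functional.mse_loss(net(x), y)
    loss = loss * micro_bs / nominal_bs  # scale so summed gradients match one bs=64 step
    loss.backward()                      # gradients accumulate in .grad
    if (step + 1) % accumulate == 0:
        sgd.step()
        sgd.zero_grad()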
def main():
    global args, best_prec1
    args = parser.parse_args()
    args.save_name = 'loushu'

    if args.logger:
        logging.basicConfig(level=logging.INFO, format='%(message)s')
        logger = logging.getLogger()
        try:
            os.makedirs("logger", exist_ok=True)
        except TypeError:
            raise Exception("Could not create the logger directory!")
        logger.addHandler(logging.FileHandler(strftime('logger/GSC_%m-%d-%Y-%H:%M_id_') + str(uuid.uuid4()) + '.log', 'a'))
        global print
        print = logger.info

    print("The config arguments showed as below:")
    print(args)

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    device = torch.device("cuda" if use_cuda else "cpu")

    # Check whether the save_dir exists or not
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)

    print("Current network is {}".format(args.arch))

    # Start a new TensorFlow session.
    sess = tf.InteractiveSession()

    # Begin by making sure we have the training data we need. If you already have
    # training data of your own, use `--data_url= ` on the command line to avoid
    # downloading.
    model_settings = models.prepare_model_settings(
        len(input_data.prepare_words_list(args.wanted_words.split(','))),
        args.sample_rate, args.clip_duration_ms, args.window_size_ms,
        args.window_stride_ms, args.dct_coefficient_count)
    print(model_settings)

    audio_processor = input_data.AudioProcessor(args.data_url, args.data_dir,
                                                args.silence_percentage, args.unknown_percentage,
                                                args.wanted_words.split(','), args.validation_percentage,
                                                args.testing_percentage, model_settings)

    # fingerprint_size = model_settings['fingerprint_size']
    # label_count = model_settings['label_count']
    # train_set_size = audio_processor.set_size('training')
    # print('set_size=%d', train_set_size)
    # valid_set_size = audio_processor.set_size('validation')
    # print('set_size=%d', valid_set_size)

    time_shift_samples = int((args.time_shift_ms * args.sample_rate) / 1000)

    # train_loader = torch.utils.data.DataLoader(
    #     GSCDataset(args.data_url, args.data_dir, args.silence_percentage, args.unknown_percentage,
    #                args.wanted_words.split(','), args.validation_percentage, args.testing_percentage,
    #                model_settings, sess, args.arch, mode="training", background_frequency=args.background_frequency,
    #                background_volume_range=args.background_frequency, time_shift=time_shift_samples), shuffle=True,
    #     batch_size=args.batch_size, num_workers=args.workers)
    # print("train set size: {}".format(len(train_loader.dataset)))

    val_loader = torch.utils.data.DataLoader(GSCDataset(
        args.data_url, args.data_dir, args.silence_percentage, args.unknown_percentage,
        args.wanted_words.split(','), args.validation_percentage, args.testing_percentage,
        model_settings, sess, args.arch, mode="validation"),
        batch_size=args.batch_size, num_workers=args.workers)
    print("validation set size: {}".format(len(val_loader.dataset)))

    test_loader = torch.utils.data.DataLoader(GSCDataset(
        args.data_url, args.data_dir, args.silence_percentage, args.unknown_percentage,
        args.wanted_words.split(','), args.validation_percentage, args.testing_percentage,
        model_settings, sess, args.arch, mode="testing"),
        batch_size=args.batch_size, num_workers=args.workers)
    print("test set size: {}".format(len(test_loader.dataset)))

    # model = models.create_model(model_settings, args.arch, args.model_size_info)
    model = models.create_model(model_settings, args.arch, args.model_size_info,
                                args.save_act_value, args.save_act_dir)
    model.cuda()

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            checkpoint = torch.load(args.resume, map_location='cuda:0')
            try:
                model.load_state_dict(checkpoint)
            except:
                print("Trying to load with dict 'state_dict'")
                try:
                    model.load_state_dict(checkpoint['state_dict'])
                except:
                    print("Cannot load model")
                    return
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))
            return

    cudnn.benchmark = True

    # define loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    if args.optimizer_type == "sgd":
        optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                    momentum=args.momentum,
                                    weight_decay=args.weight_decay,
                                    nesterov=True)
    elif args.optimizer_type == "adam":
        optimizer = torch.optim.Adam(model.parameters(), args.lr, weight_decay=args.weight_decay)
    elif args.optimizer_type == "adamw":
        optimizer = AdamW(model.parameters(), lr=args.lr, weight_decay=args.weight_decay)
    elif args.optimizer_type == "rmsprop":
        optimizer = torch.optim.RMSprop(model.parameters(), args.lr, weight_decay=args.weight_decay)
    elif args.optimizer_type == "adagrad":
        optimizer = torch.optim.Adagrad(model.parameters(), args.lr, weight_decay=args.weight_decay)
    elif args.optimizer_type == "adadelta":
        optimizer = torch.optim.Adadelta(model.parameters(), args.lr, weight_decay=args.weight_decay)
    else:
        raise ValueError("The optimizer type is not defined!")

    if args.evaluate:
        # validate(val_loader, model, criterion)
        validate_by_step(args, audio_processor, model, criterion, model_settings, sess)
        # test(test_loader, model, criterion)
        test_by_step(args, audio_processor, model, criterion, model_settings, sess)
        return

    # scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs * len(train_loader),
    #                                                        eta_min=4e-08)

    if args.admm_quant:
        name_list = []
        for name, w in model.named_parameters():
            # note: the original condition `if "weight" or "bias" in name` was always true;
            # the intended membership test is spelled out below
            if "weight" in name or "bias" in name:
                name_list.append(name)
        print("Quantized Layer name list is :")
        print(", ".join(name_list))

        print("Before quantized:")
        validate_by_step(args, audio_processor, model, criterion, model_settings, sess)

        admm.admm_initialization(args, model, device, name_list, print)
        print("After quantized:")
        validate_quant_by_step(args, audio_processor, model, criterion, model_settings, sess, name_list, device)

        for epoch in range(args.start_epoch, args.epochs):
            if args.lr_scheduler == 'default':
                adjust_learning_rate(optimizer, epoch)
            elif args.lr_scheduler == 'cosine':
                pass

            admm_quant_train_by_step(args, audio_processor, model, criterion, optimizer, epoch,
                                     model_settings, time_shift_samples, sess, name_list, device)

            # evaluate on validation set
            print("After Quantized:")
            prec1, quantized_model = validate_quant_by_step(
                args, audio_processor, model, criterion, model_settings, sess, name_list, device)

            # remember best prec@1 and save checkpoint
            is_best = prec1 > best_prec1
            if is_best:
                path_name = os.path.join(
                    args.save_dir,
                    '{arch}_{type}_{num_bits}bits_quantized_GSC_acc_{prec1:.3f}_{add}.pt'.format(
                        arch=args.arch, type=args.quant_type, num_bits=args.num_bits,
                        prec1=best_prec1, add=args.save_name))
                new_path_name = os.path.join(
                    args.save_dir,
                    '{arch}_{type}_{num_bits}bits_quantized_GSC_acc_{prec1:.3f}_{add}.pt'.format(
                        arch=args.arch, type=args.quant_type, num_bits=args.num_bits,
                        prec1=prec1, add=args.save_name))
                if os.path.isfile(path_name):
                    os.remove(path_name)
                best_prec1 = prec1
                save_checkpoint(quantized_model, new_path_name)
                print("Admm training, best top 1 acc {best_prec1:.3f}".format(best_prec1=best_prec1))
                print("Best testing dataset:")
                test_by_step(args, audio_processor, quantized_model, criterion, model_settings, sess)
            else:
                print("Admm training, best top 1 acc {best_prec1:.3f}, current top 1 acc {prec1:.3f}"
                      .format(best_prec1=best_prec1, prec1=prec1))
top 1 acc {prec1:.3f}" .format(best_prec1=best_prec1, prec1=prec1)) else: for epoch in range(args.start_epoch, args.epochs): if args.lr_scheduler == 'default': adjust_learning_rate(optimizer, epoch) elif args.lr_scheduler == 'cosine': pass # scheduler.step() # train for one epoch train_by_step(args, audio_processor, model, criterion, optimizer, epoch, model_settings, time_shift_samples, sess) # evaluate on validation set # prec1 = validate(val_loader, model, criterion) prec1 = validate_by_step(args, audio_processor, model, criterion, model_settings, sess) # remember best prec@1 and save checkpoint is_best = prec1 > best_prec1 if is_best: path_name = os.path.join( args.save_dir, '{arch}_GSC_acc_{prec1:.3f}_{add}.pt'.format( arch=args.arch, prec1=best_prec1, add=args.save_name)) new_path_name = os.path.join( args.save_dir, '{arch}_GSC_acc_{prec1:.3f}_{add}.pt'.format( arch=args.arch, prec1=prec1, add=args.save_name)) if os.path.isfile(path_name): os.remove(path_name) best_prec1 = prec1 save_checkpoint(model, new_path_name) print( "Current best validation accuracy {best_prec1:.3f}".format( best_prec1=best_prec1)) else: print("Current validation accuracy {prec1:.3f}, " "best validation accuracy {best_prec1:.3f}".format( prec1=prec1, best_prec1=best_prec1)) # test(test_loader, model, criterion) test_by_step(args, audio_processor, model, criterion, model_settings, sess)
criterion = nn.CrossEntropyLoss()

ADMM = None
config = None
if args.admm or args.masked_retrain:
    config = admm.Config(args, model)
    print(config.prune_ratios)
    for name, _ in model.named_parameters():
        if name in config.prune_ratios:
            print('{} will be pruned'.format(name))
        else:
            print('{} will not be pruned'.format(name))

if args.admm:
    ADMM = admm.ADMM(model, config)
    admm.admm_initialization(args, ADMM, model)  # initialize Z, U variables

###############################################################################
# Training code
###############################################################################

def repackage_hidden(h):
    """Wraps hidden states in new Tensors, to detach them from their history."""
    if isinstance(h, torch.Tensor):
        return h.detach()
    else:
        return tuple(repackage_hidden(v) for v in h)

# get_batch subdivides the source data into chunks of length args.bptt.
# If source is equal to the example output of the batchify function, with
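# ---------------------------------------------------------------------------
# NOTE (illustrative sketch): `repackage_hidden` matters because truncated BPTT
# must cut the autograd graph between batches; otherwise the second backward
# pass would try to propagate through the already-freed graph of the first.
# A minimal, runnable usage example with a toy LSTM (all sizes are arbitrary):
# ---------------------------------------------------------------------------
import torch
import torch.nn as nn

lstm = nn.LSTM(input_size=8, hidden_size=16)
hidden = (torch.zeros(1, 4, 16), torch.zeros(1, 4, 16))  # (h0, c0)

for step in range(3):                  # stand-in for iterating over batches
    x = torch.randn(10, 4, 8)          # (seq_len, batch, features)
    hidden = repackage_hidden(hidden)  # detach: gradients stop at this batch
    out, hidden = lstm(x, hidden)
    out.sum().backward()               # would raise on step 2 without the detach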
def admm_prune(args, pre_mask, task, train_loader):
    """ bag of tricks set-ups """
    initial_rho = args.rho
    criterion = CrossEntropyLossMaybeSmooth(smooth_eps=args.smooth_eps).cuda()
    args.smooth = args.smooth_eps > 0.0
    args.mixup = args.alpha > 0.0

    optimizer_init_lr = args.warmup_lr if args.warmup else args.lr
    optimizer = None
    if args.optmzr == 'sgd':
        optimizer = torch.optim.SGD(model.parameters(), optimizer_init_lr, momentum=0.9, weight_decay=1e-4)
    elif args.optmzr == 'adam':
        optimizer = torch.optim.Adam(model.parameters(), optimizer_init_lr)

    '''Set learning rate'''
    scheduler = None
    if args.lr_scheduler == 'cosine':
        scheduler = optim.lr_scheduler.CosineAnnealingLR(optimizer,
                                                         T_max=args.epochs_prune * len(train_loader),
                                                         eta_min=4e-08)
    elif args.lr_scheduler == 'default':
        # my learning rate scheduler for cifar, following https://github.com/kuangliu/pytorch-cifar
        epoch_milestones = [65, 100, 130, 190, 220, 250, 280]
        # Set the learning rate of each parameter group to the initial lr decayed
        # by gamma once the number of epochs reaches one of the milestones.
        scheduler = optim.lr_scheduler.MultiStepLR(optimizer,
                                                   milestones=[i * len(train_loader) for i in epoch_milestones],
                                                   gamma=0.5)
    else:
        raise Exception("unknown lr scheduler")

    if args.warmup:
        scheduler = GradualWarmupScheduler(optimizer,
                                           multiplier=args.lr / args.warmup_lr,
                                           total_iter=args.warmup_epochs * len(train_loader),
                                           after_scheduler=scheduler)

    # backup model weights
    if args.heritage_weight or args.adaptive_mask:
        model_backup = copy.deepcopy(model.state_dict())

    # get mask for training & set pre-trained (for previous tasks) weights to zero
    if pre_mask:
        pre_mask = mask_reverse(args, pre_mask)
        set_model_mask(model, pre_mask)

    # if heritage or adaptive, copy weights back to the model (not for the first task)
    if args.heritage_weight or args.adaptive_mask:
        if args.mask:
            with torch.no_grad():
                for name, W in model.named_parameters():
                    if name in args.pruned_layer:
                        W.data += model_backup[name].data * args.mask[name].cuda()

    '''Start Pruning...'''
    for i in range(args.rho_num):
        current_rho = initial_rho * 10 ** i
        if args.config_file:
            config = "./profile/" + args.config_file + ".yaml"
        elif args.config_setting:
            config = args.prune_ratios
        else:
            raise Exception("must provide a config setting.")

        ADMM = admm.ADMM(args, model, config=config, rho=current_rho)
        admm.admm_initialization(args, ADMM=ADMM, model=model)  # initialize Z variable

        # admm train
        best_prec1 = 0.
        for epoch in range(1, args.epochs_prune + 1):
            print("current rho: {}".format(current_rho))
            prune_train(args, pre_mask, ADMM, train_loader, criterion, optimizer, scheduler, epoch)
            prec1 = pipeline.test_model(args, model)
            best_prec1 = max(prec1, best_prec1)

        print("Best Acc: {:.4f}%".format(best_prec1))
        save_path = os.path.join(args.save_path_exp, 'task' + str(task))
        torch.save(model.state_dict(),
                   save_path + "/prunned_{}{}_{}_{}_{}_{}.pt".format(
                       args.arch, args.depth, current_rho, args.config_file, args.optmzr, args.sparsity_type))
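# ---------------------------------------------------------------------------
# NOTE (illustrative sketch): admm_prune keeps previously-allocated weights at
# zero by masking them (`set_model_mask`) and multiplying gradients by the
# stored masks during retraining. A common alternative is a gradient hook that
# enforces the mask automatically; this is a generic pattern, not the
# mechanism this repo uses:
# ---------------------------------------------------------------------------
import torch
import torch.nn as nn


def apply_mask(module, masks):
    """Zero masked weights once, then register hooks so their gradients stay
    zero and optimizer updates cannot revive them (weight decay aside)."""
    for name, W in module.named_parameters():
        if name in masks:
            mask = masks[name].to(W.device)
            with torch.no_grad():
                W.mul_(mask)
            W.register_hook(lambda grad, m=mask: grad * m)


# Example: prune roughly half the entries of a small linear layer and verify
# that the masked positions receive zero gradient.
layer = nn.Linear(4, 4, bias=False)
masks = {"weight": (torch.rand(4, 4) > 0.5).float()}
apply_mask(layer, masks)
layer(torch.randn(2, 4)).sum().backward()
assert torch.all(layer.weight.grad[masks["weight"] == 0] == 0)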