def finetune(opt, input, output):
    """Fine-tune every pruning strategy listed in a search-result CSV.

    Args:
        opt: parsed options; must provide ``net_path`` and the settings
            consumed by ``custom_get_dataloaders``.
        input: path to a CSV of channel configurations, one strategy per
            row with exactly 37 columns. NOTE(review): the name shadows
            the builtin ``input``; kept unchanged for caller compatibility.
        output: path where the per-strategy result CSV is written.

    Raises:
        ValueError: if the strategy file does not have 37 columns.
    """
    dataloader_train, dataloader_val = custom_get_dataloaders(opt)
    best_config = pd.read_csv(input)
    channel_config = np.array(best_config)
    print(channel_config.shape)
    if channel_config.shape[1] != 37:
        # Was a bare ``raise Exception`` with no message; ValueError is a
        # subclass of Exception, so existing handlers still match.
        raise ValueError(
            'expected 37 columns per strategy, got {}'.format(
                channel_config.shape[1]))
    result = []
    pertrain_net_path = opt.net_path
    for i in range(channel_config.shape[0]):
        print(i + 1)  # 1-based progress counter over strategies
        temp = main(opt, channel_config[i, :], dataloader_train,
                    dataloader_val, pertrain_net_path)
        result.append(temp)
    # Build the frame once; only the write can fail, so only it is retried.
    columns = 'before_acc after_acc kappa Flops Params'.split(' ')
    data = pd.DataFrame(result, columns=columns)
    try:
        data.to_csv(output)
    except Exception as err:
        # Best-effort fallback so a long run's results are not lost when
        # ``output`` is unwritable; the error is now reported instead of
        # silently swallowed. NOTE(review): '/flag.csv' needs root on most
        # systems — confirm this fallback path is intended.
        print('failed to write {}: {}; falling back to /flag.csv'.format(
            output, err))
        data.to_csv('/flag.csv')
def main():
    """Compare a pruned checkpoint against the baseline model.

    Prints FLOPs/params of both models plus the pruned model's top-1
    accuracy on the test set. No return value; output goes to stdout.
    """
    # get options
    opt = BaseOptions().parse()
    # basic settings: expose only the requested GPUs to CUDA
    # (str(list)[1:-1] turns e.g. [0, 1] into "0, 1")
    os.environ["CUDA_VISIBLE_DEVICES"] = str(opt.gpu_ids)[1:-1]
    if torch.cuda.is_available():
        device = 'cuda'
        torch.backends.cudnn.benchmark = True
    else:
        device = 'cpu'

    ##################### Get Dataloader ####################
    _, dataloader_test = custom_get_dataloaders(opt)
    # dummy_input is a sample input of the dataloader, used only for the
    # FLOPs/params summary (batch dimension added via unsqueeze)
    if hasattr(dataloader_test, 'dataset'):
        dummy_input = dataloader_test.dataset.__getitem__(0)
        dummy_input = dummy_input[0]
        dummy_input = dummy_input.unsqueeze(0)
    else:
        # for imagenet dali loader: no .dataset attribute, so fall back
        # to a fixed 224x224 RGB tensor
        dummy_input = torch.rand(1, 3, 224, 224)

    ##################### Evaluate Baseline Model ####################
    net = ModelWrapper(opt)
    net = net.to(device)
    net.parallel(opt.gpu_ids)
    flops_before, params_before = model_summary(net.get_compress_part(),
                                                dummy_input)
    # free the baseline before constructing the pruned model
    del net

    ##################### Evaluate Pruned Model ####################
    net = ModelWrapper(opt)
    net.load_checkpoint(opt.pruned_model)
    net = net.to(device)
    # summary is taken before DataParallel wrapping, eval after
    flops_after, params_after = model_summary(net.get_compress_part(),
                                              dummy_input)
    net.parallel(opt.gpu_ids)
    acc_after = net.get_eval_scores(dataloader_test)

    #################### Report #####################
    print('######### Report #########')
    print('Model:{}'.format(opt.model_name))
    print('Checkpoint:{}'.format(opt.pruned_model))
    print('FLOPs of Original Model:{:.3f}G;Params of Original Model:{:.2f}M'.
          format(flops_before / 1e9, params_before / 1e6))
    print('FLOPs of Pruned Model:{:.3f}G;Params of Pruned Model:{:.2f}M'.
          format(flops_after / 1e9, params_after / 1e6))
    print('Top-1 Acc of Pruned Model on {}:{}'.format(opt.dataset_name,
                                                      acc_after['accuracy']))
    print('##########################')
def main(opt):
    """Sample one random pruning strategy, prune, and score it.

    Generates a random per-layer channel configuration, applies it via a
    distiller compression scheduler, rejects strategies whose FLOPs ratio
    misses ``opt.flops_target`` by more than 0.5%, recalibrates BN
    statistics on ~50 training batches (Adaptive-BN), evaluates on the
    validation set, and appends "<score> <ratio> <config...>" as one line
    to ``opt.output_file``. Returns None.
    """
    # basic settings: expose only the requested GPUs to CUDA
    os.environ["CUDA_VISIBLE_DEVICES"] = str(opt.gpu_ids)[1:-1]
    if torch.cuda.is_available():
        device = 'cuda'
        torch.backends.cudnn.benchmark = True
    else:
        device = 'cpu'

    ##################### Get Dataloader ####################
    dataloader_train, dataloader_val = custom_get_dataloaders(opt)
    # dummy_input is a sample input of the dataloader, used for thinning
    # and the FLOPs/params summary
    if hasattr(dataloader_val, 'dataset'):
        dummy_input = dataloader_val.dataset.__getitem__(0)
        dummy_input = dummy_input[0]
        dummy_input = dummy_input.unsqueeze(0)
    else:
        # for imagenet dali loader
        dummy_input = torch.rand(1, 3, 224, 224)

    ##################### Create Baseline Model ####################
    net = ModelWrapper(opt)
    net.load_checkpoint(opt.checkpoint)
    flops_before, params_before = model_summary(net.get_compress_part(),
                                                dummy_input)

    ##################### Pruning Strategy Generation ###############
    compression_scheduler = distiller.file_config(net.get_compress_part(),
                                                  net.optimizer,
                                                  opt.compress_schedule_path)
    num_layer = len(compression_scheduler.policies[1])
    channel_config = get_pruning_strategy(opt, num_layer)  # pruning strategy
    compression_scheduler = random_compression_scheduler(
        compression_scheduler, channel_config)

    ###### Adaptive-BN-based Candidate Evaluation of Pruning Strategy ###
    thinning(net, compression_scheduler, input_tensor=dummy_input)
    print(net)
    flops_after, params_after = model_summary(net.get_compress_part(),
                                              dummy_input)
    ratio = flops_after / flops_before
    print('FLOPs ratio:', ratio)
    if ratio < opt.flops_target - 0.005 or ratio > opt.flops_target + 0.005:
        # illegal pruning strategy: misses the FLOPs target by > 0.5%
        return
    net = net.to(device)
    net.parallel(opt.gpu_ids)
    net.get_compress_part().train()
    # Adaptive-BN: forward ~50 training batches in train mode (no grad)
    # so BatchNorm running statistics adapt to the pruned architecture
    with torch.no_grad():
        for index, sample in enumerate(tqdm(dataloader_train, leave=False)):
            _ = net.get_loss(sample)
            if index > 50:
                break
    strategy_score = net.get_eval_scores(dataloader_val)['accuracy']

    #################### Save Pruning Strategy and Score #########
    # BUGFIX: the original format string contained a raw embedded newline,
    # which both broke the one-strategy-per-line log format and is invalid
    # in a single-quoted literal. A context manager also guarantees the
    # handle is closed even if a write fails.
    with open(opt.output_file, 'a+') as log_file:
        log_file.write("{} {} ".format(strategy_score, ratio))
        for item in channel_config:
            log_file.write("{} ".format(str(item)))
        log_file.write('\n')
    print('Eval Score:{}'.format(strategy_score))
def train():
    """Parse options, build the dataloaders, and launch fine-tuning."""
    opt = BaseOptions().parse()
    print('lr:', opt.lr, 'weight_decay:', opt.weight_decay)
    # custom_get_dataloaders returns (train_loader, val_loader)
    loaders = custom_get_dataloaders(opt)
    main(opt, loaders[0], loaders[1], opt.net_path)
def main(opt):
    """Load a searched pruning strategy, prune the model, and fine-tune it.

    Applies strategy ``opt.strategy_id`` from ``opt.search_result``,
    recalibrates BN statistics (Adaptive-BN) on ~100 training batches,
    reports the evaluation score, then fine-tunes for ``opt.epoch`` epochs
    with cosine-annealed LR, logging metrics via Reporter and saving
    checkpoints as accuracy improves. No return value.
    """
    # basic settings: expose only the requested GPUs to CUDA
    os.environ["CUDA_VISIBLE_DEVICES"] = str(opt.gpu_ids)[1:-1]
    if torch.cuda.is_available():
        device = "cuda"
        torch.backends.cudnn.benchmark = True
    else:
        device = "cpu"

    ##################### Get Dataloader ####################
    dataloader_train, dataloader_val = custom_get_dataloaders(opt)
    # dummy_input is a sample input of the dataloader, used for thinning
    # and the FLOPs/params summary
    if hasattr(dataloader_val, "dataset"):
        dummy_input = dataloader_val.dataset.__getitem__(0)
        dummy_input = dummy_input[0]
        dummy_input = dummy_input.unsqueeze(0)
    else:
        # for imagenet dali loader: no .dataset attribute, fall back to a
        # fixed 224x224 RGB tensor
        dummy_input = torch.rand(1, 3, 224, 224)

    ##################### Create Baseline Model ####################
    net = ModelWrapper(opt)
    net.load_checkpoint(opt.checkpoint)
    flops_before, params_before = model_summary(net.get_compress_part(),
                                                dummy_input)

    ##################### Load Pruning Strategy ###############
    compression_scheduler = distiller.file_config(net.get_compress_part(),
                                                  net.optimizer,
                                                  opt.compress_schedule_path)
    # pruning strategy picked from the search-result file by id
    channel_config = get_channel_config(opt.search_result, opt.strategy_id)
    compression_scheduler = random_compression_scheduler(
        compression_scheduler, channel_config)

    ###### Adaptive-BN-based Candidate Evaluation of Pruning Strategy ###
    thinning(net, compression_scheduler, input_tensor=dummy_input)
    flops_after, params_after = model_summary(net.get_compress_part(),
                                              dummy_input)
    ratio = flops_after / flops_before
    print("FLOPs ratio:", ratio)
    net = net.to(device)
    net.parallel(opt.gpu_ids)
    net.get_compress_part().train()
    # Adaptive-BN: forward ~100 training batches in train mode (no grad)
    # so BatchNorm running statistics adapt to the pruned architecture
    with torch.no_grad():
        for index, sample in enumerate(tqdm(dataloader_train, leave=False)):
            _ = net.get_loss(sample)
            if index > 100:
                break
    strategy_score = net.get_eval_scores(dataloader_val)["accuracy"]
    print("Result file:{}, Strategy ID:{}, Evaluation score:{}".format(
        opt.search_result, opt.strategy_id, strategy_score))

    ##################### Fine-tuning #########################
    lr_scheduler = optim.lr_scheduler.CosineAnnealingLR(
        net.optimizer, opt.epoch)
    reporter = Reporter(opt)
    best_acc = 0
    net._net.train()
    for epoch in range(1, opt.epoch + 1):
        # current LR is logged before training so it reflects this epoch
        reporter.log_metric("lr", net.optimizer.param_groups[0]["lr"], epoch)
        train_loss = train_epoch(
            net,
            dataloader_train,
            net.optimizer,
        )
        reporter.log_metric("train_loss", train_loss, epoch)
        lr_scheduler.step()
        scores = net.get_eval_scores(dataloader_val)
        print("==> Evaluation: Epoch={} Acc={}".format(epoch, str(scores)))
        reporter.log_metric("eval_acc", scores["accuracy"], epoch)
        if scores["accuracy"] > best_acc:
            best_acc = scores["accuracy"]
        # best_acc is logged every epoch, even when it did not improve
        reporter.log_metric("best_acc", best_acc, epoch)
        # NOTE(review): checkpoint is saved every epoch, not only on
        # improvement — confirm save_checkpoints dedupes internally
        save_checkpoints(
            scores["accuracy"],
            net._net,
            reporter,
            opt.exp_name,
            epoch,
        )
        print("==> Training epoch %d" % epoch)