def quantize_process(model):
    """Quantize *model* via weight sharing, retrain the shared centroids,
    and log/save artifacts at every stage.

    Relies on module-level ``args``, ``util``, ``test_loader``,
    ``train_loader``, ``use_cuda`` and ``apply_weight_sharing``.

    Returns the retrained, quantized model.
    """
    print('---------------------- Before weight sharing ---------------------------')
    criterion = nn.CrossEntropyLoss().cuda()
    acc = util.validate(args, test_loader, model, criterion)
    util.log(f"{args.save_dir}/{args.log}",
             f"accuracy before weight sharing\t{acc}")

    # Weight sharing: cluster each layer's weights into shared centroids
    # (args.bits bits per weight), returning the centroid/index bookkeeping
    # needed for the constrained retraining below.
    old_weight_list, new_weight_list, quantized_index_list, quantized_center_list = \
        apply_weight_sharing(model, args.model_mode, args.bits)

    print('----------------------- After weight sharing ---------------------------')
    acc = util.validate(args, test_loader, model, criterion)
    torch.save(model, f"{args.save_dir}/model_quantized.ptmodel")
    util.log(f"{args.save_dir}/{args.log}",
             f"weight\t{args.save_dir}/{args.out_quantized_folder}")
    util.log(f"{args.save_dir}/{args.log}",
             f"model\t{args.save_dir}/model_quantized.ptmodel")
    util.log(f"{args.save_dir}/{args.log}",
             f"accuracy after weight sharing {args.bits}bits\t{acc}")
    util.layer2torch(model, f"{args.save_dir}/{args.out_quantized_folder}")
    util.save_parameters(f"{args.save_dir}/{args.out_quantized_folder}",
                         new_weight_list)

    print('----------------------- quantize retrain -------------------------------')
    util.quantized_retrain(model, args, quantized_index_list,
                           quantized_center_list, train_loader, use_cuda)
    acc = util.validate(args, test_loader, model, criterion)
    torch.save(model,
               f"{args.save_dir}/model_quantized_retrain{args.reepochs}.ptmodel")
    util.layer2torch(model, f"{args.save_dir}/{args.out_quantized_re_folder}")
    util.log(f"{args.save_dir}/{args.log}",
             f"weight\t{args.save_dir}/{args.out_quantized_re_folder}")
    # BUG FIX: this log line previously advertised
    # "model_quantized_bit{bits}_retrain{reepochs}.ptmodel", a file that is
    # never written; log the path torch.save actually used above.
    util.log(f"{args.save_dir}/{args.log}",
             f"model\t{args.save_dir}/model_quantized_retrain{args.reepochs}.ptmodel")
    util.log(f"{args.save_dir}/{args.log}",
             f"accuracy retrain after weight sharing\t{acc}")

    weight_list = util.parameters2list(model.children())
    util.save_parameters(f"{args.save_dir}/{args.out_quantized_re_folder}",
                         weight_list)
    return model
def quantize_process(model):
    """Un-permute, quantize (weight sharing), re-permute, retrain and
    checkpoint *model*.

    The fc1-fc3 weight matrices are stored row/column permuted; they are
    restored to canonical order with the inverse permutations
    (``invrow*``/``invcol*``) before clustering and permuted back
    (``rowp*``/``colp*``) afterwards.

    Relies on module-level ``args``, ``util``, ``val_loader``,
    ``train_loader`` and ``apply_weight_sharing``.  Returns the retrained,
    quantized model.
    """
    print('------------------------------- accuracy before weight sharing ----------------------------------')
    acc = util.validate(val_loader, model, args)
    util.log(f"{args.save_dir}/{args.log}",
             f"accuracy before weight sharing\t{acc}")

    # (typo fix: was "accuacy")
    print('------------------------------- accuracy after weight sharing -------------------------------')
    # Undo the stored row/column permutations so clustering sees the weights
    # in canonical order.
    tempfc1 = torch.index_select(model.fc1.weight, 0, model.invrow1.cuda())
    model.fc1.weight = torch.nn.Parameter(
        torch.index_select(tempfc1, 1, model.invcol1.cuda()))
    tempfc2 = torch.index_select(model.fc2.weight, 0, model.invrow2.cuda())
    model.fc2.weight = torch.nn.Parameter(
        torch.index_select(tempfc2, 1, model.invcol2.cuda()))
    tempfc3 = torch.index_select(model.fc3.weight, 0, model.invrow3.cuda())
    model.fc3.weight = torch.nn.Parameter(
        torch.index_select(tempfc3, 1, model.invcol3.cuda()))

    old_weight_list, new_weight_list, quantized_index_list, quantized_center_list = \
        apply_weight_sharing(model, args.model_mode, args.bits)

    # Re-apply the permutations so the model matches its runtime layout.
    temp1 = torch.index_select(model.fc1.weight, 0, model.rowp1.cuda())
    model.fc1.weight = torch.nn.Parameter(
        torch.index_select(temp1, 1, model.colp1.cuda()))
    temp2 = torch.index_select(model.fc2.weight, 0, model.rowp2.cuda())
    model.fc2.weight = torch.nn.Parameter(
        torch.index_select(temp2, 1, model.colp2.cuda()))
    temp3 = torch.index_select(model.fc3.weight, 0, model.rowp3.cuda())
    model.fc3.weight = torch.nn.Parameter(
        torch.index_select(temp3, 1, model.colp3.cuda()))

    acc = util.validate(val_loader, model, args)
    quantized_ckpt = os.path.join(
        args.save_dir,
        'checkpoint_{}_alpha_{}.tar'.format('quantized', args.alpha))
    util.save_checkpoint({
        'state_dict': model.state_dict(),
        'best_prec1': acc,
    }, True, filename=quantized_ckpt)
    util.log(f"{args.save_dir}/{args.log}",
             f"weight\t{args.save_dir}/{args.out_quantized_folder}")
    # BUG FIX: previously logged ".../model_quantized.ptmodel", a file this
    # function never writes; log the checkpoint actually saved above.
    util.log(f"{args.save_dir}/{args.log}", f"model\t{quantized_ckpt}")
    util.log(f"{args.save_dir}/{args.log}",
             f"accuracy after weight sharing {args.bits}bits\t{acc}")
    util.layer2torch(f"{args.save_dir}/{args.out_quantized_folder}", model)
    util.save_parameters(f"{args.save_dir}/{args.out_quantized_folder}",
                         new_weight_list)

    print('------------------------------- retraining -------------------------------------------')
    util.quantized_retrain(model, args, quantized_index_list,
                           quantized_center_list, train_loader, val_loader)
    acc = util.validate(val_loader, model, args)
    retrain_ckpt = os.path.join(
        args.save_dir,
        'checkpoint_{}_alpha_{}.tar'.format('quantized_re', args.alpha))
    util.save_checkpoint({
        'state_dict': model.state_dict(),
        'best_prec1': acc,
    }, True, filename=retrain_ckpt)
    util.layer2torch(f"{args.save_dir}/{args.out_quantized_re_folder}", model)
    util.log(f"{args.save_dir}/{args.log}",
             f"weight:{args.save_dir}/{args.out_quantized_re_folder}")
    # BUG FIX: previously logged a "model_quantized_bit..." .ptmodel path that
    # is never written; log the retrain checkpoint actually saved above.
    util.log(f"{args.save_dir}/{args.log}", f"model:{retrain_ckpt}")
    # (typo fix: was "qauntize")
    util.log(f"{args.save_dir}/{args.log}",
             f"acc after quantize and retrain\t{acc}")

    weight_list = util.parameters2list(model.children())
    util.save_parameters(f"{args.save_dir}/{args.out_quantized_re_folder}",
                         weight_list)
    return model
def quantize_process(model):
    """Quantize *model* via weight sharing, retrain the shared centroids,
    and save/log artifacts at each stage.

    Relies on module-level ``args``, ``util``, ``val_loader``,
    ``train_loader`` and ``apply_weight_sharing``.  Returns the retrained,
    quantized model.
    """
    print('------------------------------- accuracy before weight sharing ----------------------------------')
    acc = util.validate(val_loader, model, args)
    util.log(f"{args.save_dir}/{args.log}",
             f"accuracy before weight sharing\t{acc}")

    print('------------------------------- accuacy after weight sharing -------------------------------')
    old_weight_list, new_weight_list, quantized_index_list, quantized_center_list = \
        apply_weight_sharing(model, args.model_mode, args.bits)
    acc = util.validate(val_loader, model, args)
    # BUG FIX: the "model" log line below advertises model_quantized.ptmodel,
    # but this variant never wrote it; save the model (as the sibling
    # pipelines do) so the logged path exists.
    torch.save(model, f"{args.save_dir}/model_quantized.ptmodel")
    util.log(f"{args.save_dir}/{args.log}",
             f"weight\t{args.save_dir}/{args.out_quantized_folder}")
    util.log(f"{args.save_dir}/{args.log}",
             f"model\t{args.save_dir}/model_quantized.ptmodel")
    util.log(f"{args.save_dir}/{args.log}",
             f"accuracy after weight sharing {args.bits}bits\t{acc}")
    util.layer2torch(f"{args.save_dir}/{args.out_quantized_folder}", model)
    util.save_parameters(f"{args.save_dir}/{args.out_quantized_folder}",
                         new_weight_list)

    print('------------------------------- retraining -------------------------------------------')
    util.quantized_retrain(model, args, quantized_index_list,
                           quantized_center_list, train_loader, val_loader)
    acc = util.validate(val_loader, model, args)
    # BUG FIX: likewise persist the retrained model that the log references.
    torch.save(model,
               f"{args.save_dir}/model_quantized_retrain{args.reepochs}.ptmodel")
    util.layer2torch(f"{args.save_dir}/{args.out_quantized_re_folder}", model)
    util.log(f"{args.save_dir}/{args.log}",
             f"weight:{args.save_dir}/{args.out_quantized_re_folder}")
    util.log(f"{args.save_dir}/{args.log}",
             f"model:{args.save_dir}/model_quantized_retrain{args.reepochs}.ptmodel")
    util.log(f"{args.save_dir}/{args.log}",
             f"acc after qauntize and retrain\t{acc}")

    weight_list = util.parameters2list(model.children())
    util.save_parameters(f"{args.save_dir}/{args.out_quantized_re_folder}",
                         weight_list)
    return model
# Quantization driver: load the pruned+retrained model, apply weight sharing,
# report accuracy before and after, then persist the quantized model.
print(use_cuda)

# Model produced by the earlier pruning/retraining stage.
model = torch.load("saves/model_after_retraining.ptmodel")
print(model)

print('accuracy before weight sharing')
util.test(model, use_cuda)
print("---------------------------------")

# Cluster the weights into shared centroids (modifies the model in place).
apply_weight_sharing(model)
print('accuacy after weight sharing')
acc_after_sharing = util.test(model, use_cuda)

# Save the quantized model to the path given on the command line.
os.makedirs('saves', exist_ok=True)
torch.save(model, args.output)
def quantize_process(model):
    """Quantize *model* by weight sharing, retrain the shared centroids, and
    save every intermediate artifact.

    The fc1/fc2 weight matrices are stored row/column permuted; they are
    restored with the inverse permutations (``invrow*``/``invcol*``) before
    clustering and re-permuted (``rowp*``/``colp*``) afterwards.

    Relies on module-level ``args``, ``util``, ``val_loader``,
    ``train_loader`` and ``apply_weight_sharing``.  Returns the retrained,
    quantized model.
    """

    def permuted(weight, row_idx, col_idx):
        # Reorder rows then columns of a weight matrix by the given indices.
        by_row = torch.index_select(weight, 0, row_idx.cuda())
        return torch.nn.Parameter(torch.index_select(by_row, 1, col_idx.cuda()))

    log_file = f"{args.save_dir}/{args.log}"
    quantized_dir = f"{args.save_dir}/{args.out_quantized_folder}"
    retrain_dir = f"{args.save_dir}/{args.out_quantized_re_folder}"

    print('------------------------------- accuracy before weight sharing ----------------------------------')
    acc = util.validate(val_loader, model, args)
    util.log(log_file, f"accuracy before weight sharing\t{acc}")

    print('------------------------------- accuacy after weight sharing -------------------------------')
    # Undo the stored permutations so clustering sees canonical weight order.
    model.fc1.weight = permuted(model.fc1.weight, model.invrow1, model.invcol1)
    model.fc2.weight = permuted(model.fc2.weight, model.invrow2, model.invcol2)

    old_weight_list, new_weight_list, quantized_index_list, quantized_center_list = \
        apply_weight_sharing(model, args.model_mode, args.bits)

    # Restore the runtime permutation.
    model.fc1.weight = permuted(model.fc1.weight, model.rowp1, model.colp1)
    model.fc2.weight = permuted(model.fc2.weight, model.rowp2, model.colp2)

    acc = util.validate(val_loader, model, args)
    torch.save(model, f"{args.save_dir}/model_quantized.ptmodel")
    util.log(log_file, f"weight\t{quantized_dir}")
    util.log(log_file, f"model\t{args.save_dir}/model_quantized.ptmodel")
    util.log(log_file, f"accuracy after weight sharing {args.bits}bits\t{acc}")
    util.layer2torch(quantized_dir, model)
    util.save_parameters(quantized_dir, new_weight_list)

    print('------------------------------- retraining -------------------------------------------')
    util.quantized_retrain(model, args, quantized_index_list,
                           quantized_center_list, train_loader, val_loader)
    acc = util.validate(val_loader, model, args)
    torch.save(model,
               f"{args.save_dir}/model_quantized_retrain{args.reepochs}.ptmodel")
    util.layer2torch(retrain_dir, model)
    util.log(log_file, f"weight:{retrain_dir}")
    util.log(log_file,
             f"model:{args.save_dir}/model_quantized_retrain{args.reepochs}.ptmodel")
    util.log(log_file, f"acc after qauntize and retrain\t{acc}")

    weight_list = util.parameters2list(model.children())
    util.save_parameters(retrain_dir, weight_list)
    return model