def quantize_process(model):
    """Quantize `model` by weight sharing, evaluate, retrain, and save artifacts.

    Pipeline: validate baseline -> cluster weights into shared centroids
    (`apply_weight_sharing`) -> validate/save quantized model -> retrain with
    centroids fixed -> validate/save retrained model.

    Relies on module-level `args`, `util`, `nn`, `torch`, `test_loader`,
    `train_loader`, `use_cuda`, and `apply_weight_sharing`.

    Returns the (mutated, quantized and retrained) model.
    """
    #util.print_model_parameters(model)
    print('---------------------- Before weight sharing ---------------------------')
    criterion = nn.CrossEntropyLoss().cuda()
    acc = util.validate(args, test_loader, model, criterion)
    util.log(f"{args.save_dir}/{args.log}",
             f"accuracy before weight sharing\t{acc}")

    # Weight sharing: replaces each layer's weights with 2**args.bits shared
    # centroids; index/center lists are needed later for constrained retraining.
    old_weight_list, new_weight_list, quantized_index_list, \
        quantized_center_list = apply_weight_sharing(model, args.model_mode,
                                                     args.bits)
    print('----------------------- After weight sharing ---------------------------')
    acc = util.validate(args, test_loader, model, criterion)
    torch.save(model, f"{args.save_dir}/model_quantized.ptmodel")
    util.log(f"{args.save_dir}/{args.log}",
             f"weight\t{args.save_dir}/{args.out_quantized_folder}")
    util.log(f"{args.save_dir}/{args.log}",
             f"model\t{args.save_dir}/model_quantized.ptmodel")
    util.log(f"{args.save_dir}/{args.log}",
             f"accuracy after weight sharing {args.bits}bits\t{acc}")
    util.layer2torch(model, f"{args.save_dir}/{args.out_quantized_folder}")
    util.save_parameters(f"{args.save_dir}/{args.out_quantized_folder}",
                         new_weight_list)

    print('----------------------- quantize retrain -------------------------------')
    util.quantized_retrain(model, args, quantized_index_list,
                           quantized_center_list, train_loader, use_cuda)
    #acc = util.test(model, test_loader, use_cuda=True)
    acc = util.validate(args, test_loader, model, criterion)
    retrained_path = f"{args.save_dir}/model_quantized_retrain{args.reepochs}.ptmodel"
    torch.save(model, retrained_path)
    util.layer2torch(model, f"{args.save_dir}/{args.out_quantized_re_folder}")
    util.log(f"{args.save_dir}/{args.log}",
             f"weight\t{args.save_dir}/{args.out_quantized_re_folder}")
    # BUG FIX: the log previously claimed
    # "model_quantized_bit{bits}_retrain{reepochs}.ptmodel", which is NOT the
    # file torch.save() wrote above; log the path actually saved.
    util.log(f"{args.save_dir}/{args.log}", f"model\t{retrained_path}")
    util.log(f"{args.save_dir}/{args.log}",
             f"accuracy retrain after weight sharing\t{acc}")
    weight_list = util.parameters2list(model.children())
    util.save_parameters(f"{args.save_dir}/{args.out_quantized_re_folder}",
                         weight_list)
    return model
def quantize_process(model):
    """Weight-share quantize a permuted 3-FC-layer model, retrain, save checkpoints.

    The fc1/fc2/fc3 weights are first un-permuted using the inverse index
    vectors (`invrow*`/`invcol*`) stored on the model, quantized via
    `apply_weight_sharing`, then re-permuted (`rowp*`/`colp*`) so downstream
    validation/retraining sees the expected layout.

    Relies on module-level `args`, `util`, `torch`, `os`, `val_loader`,
    `train_loader`, and `apply_weight_sharing`.

    Returns the (mutated, quantized and retrained) model.
    """
    print('------------------------------- accuracy before weight sharing ----------------------------------')
    acc = util.validate(val_loader, model, args)
    util.log(f"{args.save_dir}/{args.log}",
             f"accuracy before weight sharing\t{acc}")

    # Typo fix: message previously read "accuacy".
    print('------------------------------- accuracy after weight sharing -------------------------------')
    # Undo the stored row/column permutation of each fc layer before clustering.
    tempfc1 = torch.index_select(model.fc1.weight, 0, model.invrow1.cuda())
    model.fc1.weight = torch.nn.Parameter(
        torch.index_select(tempfc1, 1, model.invcol1.cuda()))
    tempfc2 = torch.index_select(model.fc2.weight, 0, model.invrow2.cuda())
    model.fc2.weight = torch.nn.Parameter(
        torch.index_select(tempfc2, 1, model.invcol2.cuda()))
    tempfc3 = torch.index_select(model.fc3.weight, 0, model.invrow3.cuda())
    model.fc3.weight = torch.nn.Parameter(
        torch.index_select(tempfc3, 1, model.invcol3.cuda()))

    old_weight_list, new_weight_list, quantized_index_list, \
        quantized_center_list = apply_weight_sharing(model, args.model_mode,
                                                     args.bits)

    # Re-apply the permutation after quantization.
    temp1 = torch.index_select(model.fc1.weight, 0, model.rowp1.cuda())
    model.fc1.weight = torch.nn.Parameter(
        torch.index_select(temp1, 1, model.colp1.cuda()))
    temp2 = torch.index_select(model.fc2.weight, 0, model.rowp2.cuda())
    model.fc2.weight = torch.nn.Parameter(
        torch.index_select(temp2, 1, model.colp2.cuda()))
    temp3 = torch.index_select(model.fc3.weight, 0, model.rowp3.cuda())
    model.fc3.weight = torch.nn.Parameter(
        torch.index_select(temp3, 1, model.colp3.cuda()))

    acc = util.validate(val_loader, model, args)
    quantized_ckpt = os.path.join(
        args.save_dir,
        'checkpoint_{}_alpha_{}.tar'.format('quantized', args.alpha))
    util.save_checkpoint({
        'state_dict': model.state_dict(),
        'best_prec1': acc,
    }, True, filename=quantized_ckpt)
    util.log(f"{args.save_dir}/{args.log}",
             f"weight\t{args.save_dir}/{args.out_quantized_folder}")
    # BUG FIX: previously logged "model_quantized.ptmodel", a file this
    # function never writes; log the checkpoint actually saved above.
    util.log(f"{args.save_dir}/{args.log}", f"model\t{quantized_ckpt}")
    util.log(f"{args.save_dir}/{args.log}",
             f"accuracy after weight sharing {args.bits}bits\t{acc}")
    util.layer2torch(f"{args.save_dir}/{args.out_quantized_folder}", model)
    util.save_parameters(f"{args.save_dir}/{args.out_quantized_folder}",
                         new_weight_list)

    print('------------------------------- retraining -------------------------------------------')
    util.quantized_retrain(model, args, quantized_index_list,
                           quantized_center_list, train_loader, val_loader)
    acc = util.validate(val_loader, model, args)
    retrain_ckpt = os.path.join(
        args.save_dir,
        'checkpoint_{}_alpha_{}.tar'.format('quantized_re', args.alpha))
    util.save_checkpoint({
        'state_dict': model.state_dict(),
        'best_prec1': acc,
    }, True, filename=retrain_ckpt)
    util.layer2torch(f"{args.save_dir}/{args.out_quantized_re_folder}", model)
    util.log(f"{args.save_dir}/{args.log}",
             f"weight:{args.save_dir}/{args.out_quantized_re_folder}")
    # BUG FIX: previously logged a non-existent
    # "model_quantized_bit…_retrain….ptmodel"; log the checkpoint written.
    util.log(f"{args.save_dir}/{args.log}", f"model:{retrain_ckpt}")
    # Typo fix: message previously read "qauntize".
    util.log(f"{args.save_dir}/{args.log}",
             f"acc after quantize and retrain\t{acc}")
    weight_list = util.parameters2list(model.children())
    util.save_parameters(f"{args.save_dir}/{args.out_quantized_re_folder}",
                         weight_list)
    return model
def quantize_process(model):
    """Quantize `model` by weight sharing, evaluate, retrain, and save artifacts.

    Pipeline: validate baseline -> `apply_weight_sharing` -> validate/save the
    quantized model -> constrained retraining -> validate/save the retrained
    model.

    Relies on module-level `args`, `util`, `torch`, `val_loader`,
    `train_loader`, and `apply_weight_sharing`.

    Returns the (mutated, quantized and retrained) model.
    """
    print('------------------------------- accuracy before weight sharing ----------------------------------')
    acc = util.validate(val_loader, model, args)
    util.log(f"{args.save_dir}/{args.log}",
             f"accuracy before weight sharing\t{acc}")

    # Typo fix: message previously read "accuacy".
    print('------------------------------- accuracy after weight sharing -------------------------------')
    old_weight_list, new_weight_list, quantized_index_list, \
        quantized_center_list = apply_weight_sharing(model, args.model_mode,
                                                     args.bits)
    acc = util.validate(val_loader, model, args)
    # BUG FIX: the "model" log below referenced model_quantized.ptmodel but the
    # file was never written; save it (matches the sibling quantize_process
    # variants in this file).
    torch.save(model, f"{args.save_dir}/model_quantized.ptmodel")
    util.log(f"{args.save_dir}/{args.log}",
             f"weight\t{args.save_dir}/{args.out_quantized_folder}")
    util.log(f"{args.save_dir}/{args.log}",
             f"model\t{args.save_dir}/model_quantized.ptmodel")
    util.log(f"{args.save_dir}/{args.log}",
             f"accuracy after weight sharing {args.bits}bits\t{acc}")
    util.layer2torch(f"{args.save_dir}/{args.out_quantized_folder}", model)
    util.save_parameters(f"{args.save_dir}/{args.out_quantized_folder}",
                         new_weight_list)

    print('------------------------------- retraining -------------------------------------------')
    util.quantized_retrain(model, args, quantized_index_list,
                           quantized_center_list, train_loader, val_loader)
    acc = util.validate(val_loader, model, args)
    # BUG FIX: same mismatch for the retrained model — the logged .ptmodel was
    # never saved; write it before logging its path.
    torch.save(model,
               f"{args.save_dir}/model_quantized_retrain{args.reepochs}.ptmodel")
    util.layer2torch(f"{args.save_dir}/{args.out_quantized_re_folder}", model)
    util.log(f"{args.save_dir}/{args.log}",
             f"weight:{args.save_dir}/{args.out_quantized_re_folder}")
    util.log(
        f"{args.save_dir}/{args.log}",
        f"model:{args.save_dir}/model_quantized_retrain{args.reepochs}.ptmodel")
    # Typo fix: message previously read "qauntize".
    util.log(f"{args.save_dir}/{args.log}",
             f"acc after quantize and retrain\t{acc}")
    weight_list = util.parameters2list(model.children())
    util.save_parameters(f"{args.save_dir}/{args.out_quantized_re_folder}",
                         weight_list)
    return model
def quantize_process():
    """Quantize the global `model` by weight sharing, then retrain it.

    Logs top-1/top-5 accuracy before sharing, after sharing, and after
    retraining; saves "initial" and "end" quantized checkpoints via
    `util.save_masked_checkpoint`.

    Relies on module-level `model`, `args`, `util`, `val_loader`,
    `train_loader`, and `apply_weight_sharing`.
    """
    util.topic_log("Accuracy before weight sharing")
    top1_acc, top5_acc = util.val_epoch(val_loader, model, args, topk=(1, 5))
    util.log(args.log_file_path,
             f"accuracy before weight sharing\t{top1_acc} ({top5_acc})")

    util.topic_log("Accuracy after weight sharing")
    # Typo fix: local was previously named `layer_mame_to_quan_indices`.
    layer_name_to_quan_indices = apply_weight_sharing(model, args)
    top1_acc, top5_acc = util.val_epoch(val_loader, model, args, topk=(1, 5))
    util.save_masked_checkpoint(model, "quantized", top1_acc, "initial", args)
    util.log(
        args.log_file_path,
        f"accuracy after weight sharing {args.bits}bits\t{top1_acc} ({top5_acc})"
    )

    util.topic_log("Quantize retraining")
    util.quantized_retrain(model, args, layer_name_to_quan_indices,
                           train_loader, val_loader)
    top1_acc, top5_acc = util.val_epoch(val_loader, model, args, topk=(1, 5))
    util.save_masked_checkpoint(model, "quantized", top1_acc, "end", args)
    # Typo fix: logged message previously read "qauntize".
    util.log(args.log_file_path,
             f"accuracy after quantize and retrain\t{top1_acc} ({top5_acc})")
def quantize_process(model):
    """Weight-share quantize a permuted 2-FC-layer model, retrain, save artifacts.

    The fc1/fc2 weights are first un-permuted using the inverse index vectors
    (`invrow*`/`invcol*`) stored on the model, quantized via
    `apply_weight_sharing`, then re-permuted (`rowp*`/`colp*`) so downstream
    validation/retraining sees the expected layout.

    Relies on module-level `args`, `util`, `torch`, `val_loader`,
    `train_loader`, and `apply_weight_sharing`.

    Returns the (mutated, quantized and retrained) model.
    """
    print('------------------------------- accuracy before weight sharing ----------------------------------')
    acc = util.validate(val_loader, model, args)
    util.log(f"{args.save_dir}/{args.log}",
             f"accuracy before weight sharing\t{acc}")

    # Typo fix: message previously read "accuacy".
    print('------------------------------- accuracy after weight sharing -------------------------------')
    # Undo the stored row/column permutation of each fc layer before clustering.
    tempfc1 = torch.index_select(model.fc1.weight, 0, model.invrow1.cuda())
    model.fc1.weight = torch.nn.Parameter(
        torch.index_select(tempfc1, 1, model.invcol1.cuda()))
    tempfc2 = torch.index_select(model.fc2.weight, 0, model.invrow2.cuda())
    model.fc2.weight = torch.nn.Parameter(
        torch.index_select(tempfc2, 1, model.invcol2.cuda()))

    old_weight_list, new_weight_list, quantized_index_list, \
        quantized_center_list = apply_weight_sharing(model, args.model_mode,
                                                     args.bits)

    # Re-apply the permutation after quantization.
    temp1 = torch.index_select(model.fc1.weight, 0, model.rowp1.cuda())
    model.fc1.weight = torch.nn.Parameter(
        torch.index_select(temp1, 1, model.colp1.cuda()))
    temp2 = torch.index_select(model.fc2.weight, 0, model.rowp2.cuda())
    model.fc2.weight = torch.nn.Parameter(
        torch.index_select(temp2, 1, model.colp2.cuda()))

    acc = util.validate(val_loader, model, args)
    torch.save(model, f"{args.save_dir}/model_quantized.ptmodel")
    util.log(f"{args.save_dir}/{args.log}",
             f"weight\t{args.save_dir}/{args.out_quantized_folder}")
    util.log(f"{args.save_dir}/{args.log}",
             f"model\t{args.save_dir}/model_quantized.ptmodel")
    util.log(f"{args.save_dir}/{args.log}",
             f"accuracy after weight sharing {args.bits}bits\t{acc}")
    util.layer2torch(f"{args.save_dir}/{args.out_quantized_folder}", model)
    util.save_parameters(f"{args.save_dir}/{args.out_quantized_folder}",
                         new_weight_list)

    print('------------------------------- retraining -------------------------------------------')
    util.quantized_retrain(model, args, quantized_index_list,
                           quantized_center_list, train_loader, val_loader)
    acc = util.validate(val_loader, model, args)
    torch.save(model,
               f"{args.save_dir}/model_quantized_retrain{args.reepochs}.ptmodel")
    util.layer2torch(f"{args.save_dir}/{args.out_quantized_re_folder}", model)
    util.log(f"{args.save_dir}/{args.log}",
             f"weight:{args.save_dir}/{args.out_quantized_re_folder}")
    util.log(
        f"{args.save_dir}/{args.log}",
        f"model:{args.save_dir}/model_quantized_retrain{args.reepochs}.ptmodel")
    # Typo fix: message previously read "qauntize".
    util.log(f"{args.save_dir}/{args.log}",
             f"acc after quantize and retrain\t{acc}")
    weight_list = util.parameters2list(model.children())
    util.save_parameters(f"{args.save_dir}/{args.out_quantized_re_folder}",
                         weight_list)
    return model