import csv
import os

import torch
from torch import nn, optim

# Project-local imports. The module names below are assumptions based on the
# helpers this file uses; adjust them to wherever these live in the repo.
import models
from dataloader import load_mnist, load_cifar10               # assumed data helpers
from training import fit, evaluate, distributed_evaluate      # assumed train/eval helpers
from pruning import ePruning, Pruning, write_pruning_result   # assumed pruning utilities


def evaluation_baseline(option):
    """Evaluate a single baseline model from a saved checkpoint."""
    if option["model"] == "VGG16":
        model = models.VGG(layers=16)
    elif option["model"] == "MobileNet":
        model = models.MobileNet(alpha=option["alpha"])
    elif option["model"] == "LeNet5":
        model = models.LeNet5()
    elif option["model"] == "ConvNet":
        model = models.ConvNet()
    elif option["model"] == "ConvNet_s":
        model = models.ConvNet_s()

    # LeNet5 runs on MNIST; every other architecture runs on CIFAR-10.
    if option["model"] == "LeNet5":
        test_dl = load_mnist("test", 1, 1, option["batch"])
    else:
        test_dl = load_cifar10("test", 1, 1, option["batch"])

    # Restore the (possibly pruned) module structure first, then the weights.
    load_model = torch.load(option["model_path"][0], map_location=option["dev"])
    model._modules = load_model['_modules']
    model.load_state_dict(load_model['state_dict'])
    model = model.to(option["dev"])

    print("Evaluation")
    print("\t{}".format(option["model_path"][0]))
    result = evaluate(model, test_dl)

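# Usage sketch (assumption, not part of the original pipeline): a minimal
# `option` dict for evaluation_baseline(). The keys mirror the lookups above;
# the checkpoint path and device values are purely illustrative.
_EXAMPLE_EVAL_OPTION = {
    "model": "VGG16",      # one of: VGG16, MobileNet, LeNet5, ConvNet, ConvNet_s
    "alpha": 1.0,          # MobileNet width multiplier (ignored for VGG16)
    "batch": 128,          # test DataLoader batch size
    "dev": "cuda" if torch.cuda.is_available() else "cpu",
    "model_path": ["./weights/vgg16_baseline.pth"],  # hypothetical checkpoint
}
# evaluation_baseline(_EXAMPLE_EVAL_OPTION)
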
def eprune_baseline(option):
    """Element-wise-prune a single baseline model, then evaluate it."""
    _save_name = option["save_name"]
    if option["model"] == "VGG16":
        model = models.VGG(layers=16)
    elif option["model"] == "MobileNet":
        model = models.MobileNet(alpha=option["alpha"])
        if option["alpha"] == 1.0:
            _save_name = _save_name + "x1.0"
        else:
            _save_name = _save_name + "x{:n}".format(option["alpha"])
    elif option["model"] == "LeNet5":
        model = models.LeNet5()
    elif option["model"] == "ConvNet":
        model = models.ConvNet()
    elif option["model"] == "ConvNet_s":
        model = models.ConvNet_s()
    _save_name = _save_name + "_ep_baseline"

    if option["model"] == "LeNet5":
        train_dl = load_mnist("train", 1, 1, option["batch"])  # (train_dl, valid_dl)
        test_dl = load_mnist("test", 1, 1, option["batch"])
    else:
        train_dl = load_cifar10("train", 1, 1, option["batch"])
        test_dl = load_cifar10("test", 1, 1, option["batch"])

    load_model = torch.load(option["model_path"][0], map_location=option["dev"])
    model._modules = load_model['_modules']
    model.load_state_dict(load_model['state_dict'])
    model = model.to(option["dev"])

    print("Element-wise Pruning - Baseline")
    print("\t{}".format(option["model_path"][0]))
    pruning = ePruning(model, train_dl[0], train_dl[1], option["threshold_ratio"])
    save_name = _save_name + "_{:03d}".format(option["threshold_ratio"])
    history = pruning.iterative_pruning(finetuning=option["retraining"],
                                        epoch=option["epoch"],
                                        lr=option["lr"],
                                        schedule=option["schedule"],
                                        save_name=save_name,
                                        save_mode=option["save_option"])
    print("* Evaluating Test Sets")
    result = evaluate(model, test_dl)

def train_baseline(option):
    _save_name = option["save_name"]
    if option["model"] == "VGG16":
        model = models.VGG(layers=16)
    elif option["model"] == "MobileNet":
        model = models.MobileNet(alpha=option["alpha"])
        if option["alpha"] == 1.0:
            _save_name = _save_name + "x1.0"
        else:
            _save_name = _save_name + "x{:n}".format(option["alpha"])
    elif option["model"] == "LeNet5":
        model = models.LeNet5()
        lenet_size = 32
    elif option["model"] == "LeNet300100":
        model = models.LeNet300100()
        lenet_size = 28
    elif option["model"] == "ConvNet":
        model = models.ConvNet()
    elif option["model"] == "ConvNet_s":
        model = models.ConvNet_s()
    elif option["model"] == "ResNet20":
        model = models.ResNet20()
    save_name = _save_name + "_baseline"

    if option["model"] == "LeNet5" or option["model"] == "LeNet300100":
        train_dl = load_mnist("train", 1, 1, option["batch"], lenet_size)  # (train_dl, valid_dl)
        test_dl = load_mnist("test", 1, 1, option["batch"], lenet_size)
    else:
        train_dl = load_cifar10("train", 1, 1, option["batch"])
        test_dl = load_cifar10("test", 1, 1, option["batch"])

    model = model.to(option["dev"])
    loss_fn = nn.CrossEntropyLoss()
    opt = optim.Adam(model.parameters(), lr=option["lr"])
    history = fit(model, train_dl[0], train_dl[1], loss_fn, opt, option["epoch"],
                  option["schedule"], save_name, option["save_option"])
    result = evaluate(model, test_dl)

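# Usage sketch (assumption): the extra keys train_baseline() consumes on top of
# the evaluation ones. "schedule" and "save_option" are passed straight through
# to fit(); LR-decay milestones and a boolean save flag are assumed here.
_EXAMPLE_TRAIN_OPTION = {
    "model": "ResNet20",
    "alpha": 1.0,
    "batch": 128,
    "lr": 1e-3,               # Adam learning rate
    "epoch": 200,
    "schedule": [100, 150],   # assumed LR-decay milestones understood by fit()
    "save_name": "resnet20",  # checkpoints are saved under "resnet20_baseline"
    "save_option": True,      # assumed save flag forwarded to fit()
    "dev": "cuda" if torch.cuda.is_available() else "cpu",
}
# train_baseline(_EXAMPLE_TRAIN_OPTION)
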
def finetuning_baseline(option):
    _save_name = option["save_name"]
    if option["model"] == "VGG16":
        model = models.VGG(layers=16)
    elif option["model"] == "MobileNet":
        model = models.MobileNet(alpha=option["alpha"])
        if option["alpha"] == 1.0:
            _save_name = _save_name + "x1.0"
        else:
            _save_name = _save_name + "x{:n}".format(option["alpha"])
    elif option["model"] == "LeNet5":
        model = models.LeNet5()
    elif option["model"] == "ConvNet":
        model = models.ConvNet()
    elif option["model"] == "ConvNet_s":
        model = models.ConvNet_s()
    save_name = _save_name + "_ft_baseline"

    if option["model"] == "LeNet5":
        train_dl = load_mnist("train", 1, 1, option["batch"])  # (train_dl, valid_dl)
        test_dl = load_mnist("test", 1, 1, option["batch"])
    else:
        train_dl = load_cifar10("train", 1, 1, option["batch"])
        test_dl = load_cifar10("test", 1, 1, option["batch"])

    load_model = torch.load(option["model_path"][0], map_location=option["dev"])
    model._modules = load_model['_modules']
    model.load_state_dict(load_model['state_dict'])
    model = model.to(option["dev"])

    loss_fn = nn.CrossEntropyLoss()
    opt = optim.Adam(model.parameters(), lr=option["lr"])
    history = fit(model, train_dl[0], train_dl[1], loss_fn, opt, option["epoch"],
                  option["schedule"], save_name, option["save_option"])
    result = evaluate(model, test_dl)

def evaluation_rcm(option):
    """Evaluate a set of Reduced Classification Models (RCMs) jointly."""
    if option["model"] == "VGG16":
        model = [
            models.VGG(layers=16, classification=10) for _ in range(option["rcm"])
        ]
    elif option["model"] == "MobileNet":
        model = [
            models.MobileNet(alpha=option["alpha"], classification=10)
            for _ in range(option["rcm"])
        ]
    elif option["model"] == "LeNet5":
        model = [models.LeNet5(classification=10) for _ in range(option["rcm"])]
    elif option["model"] == "ConvNet":
        model = [models.ConvNet(classification=10) for _ in range(option["rcm"])]
    elif option["model"] == "ConvNet_s":
        model = [models.ConvNet_s(classification=10) for _ in range(option["rcm"])]

    if option["model"] == "LeNet5":
        test_dl = load_mnist("test", 1, 1, option["batch"])
    else:
        test_dl = load_cifar10("test", 1, 1, option["batch"])

    # Restore each sub-model's structure and weights, then move it to the device.
    for i, m in enumerate(model):
        load_model = torch.load(option["model_path"][i], map_location=option["dev"])
        m._modules = load_model['_modules']
        m.load_state_dict(load_model['state_dict'])
        model[i] = m.to(option["dev"])

    result = distributed_evaluate(model, test_dl)

def train_rcm(option):
    """Train option["rcm"] Reduced Classification Models from one baseline checkpoint."""
    _save_name = option["save_name"]
    if option["model"] == "VGG16":
        model = [
            models.VGG(layers=16, classification=10) for _ in range(option["rcm"])
        ]
    elif option["model"] == "MobileNet":
        model = [
            models.MobileNet(alpha=option["alpha"], classification=10)
            for _ in range(option["rcm"])
        ]
        if option["alpha"] == 1.0:
            _save_name = _save_name + "x1.0"
        else:
            _save_name = _save_name + "x{:n}".format(option["alpha"])
    elif option["model"] == "LeNet5":
        model = [models.LeNet5(classification=10) for _ in range(option["rcm"])]
        lenet_size = 32
    elif option["model"] == "LeNet300100":
        model = [models.LeNet300100() for _ in range(option["rcm"])]
        lenet_size = 28
    elif option["model"] == "ConvNet":
        model = [models.ConvNet(classification=10) for _ in range(option["rcm"])]
    elif option["model"] == "ConvNet_s":
        model = [models.ConvNet_s(classification=10) for _ in range(option["rcm"])]
    elif option["model"] == "ResNet20":
        model = [models.ResNet20(classification=10) for _ in range(option["rcm"])]
    _save_name = _save_name + "_rcm{:d}".format(option["rcm"])

    if option["model"] == "LeNet5" or option["model"] == "LeNet300100":
        train_dl = [
            load_mnist("train", option["rcm"], i + 1, option["batch"], lenet_size)
            for i in range(option["rcm"])
        ]  # ([train_dl[0], ...], [valid_dl[0], ...])
        test_dl = load_mnist("test", 1, 1, option["batch"], lenet_size)
    else:
        train_dl = [
            load_cifar10("train", option["rcm"], i + 1, option["batch"])
            for i in range(option["rcm"])
        ]
        test_dl = load_cifar10("test", 1, 1, option["batch"])

    load_model = torch.load(option["model_path"][0], map_location=option["dev"])
    for i in range(option["rcm"]):
        print("Reduced Classification model #{:d}".format(i + 1))
        classification = int(10 / option["rcm"]) + 1
        # Start from the baseline weights, then swap in a smaller classifier
        # head sized for this sub-model's share of the classes.
        model[i].load_state_dict(load_model["state_dict"])
        last_in_features = model[i].classifier[-1].in_features
        model[i].classifier[-1] = nn.Linear(in_features=last_in_features,
                                            out_features=classification)
        model[i] = model[i].to(option["dev"])
        loss_fn = nn.CrossEntropyLoss()
        opt = optim.Adam(model[i].parameters(), lr=option["lr"])
        save_name = _save_name + "_{:d}".format(i + 1)
        history = fit(model[i], train_dl[i][0], train_dl[i][1], loss_fn, opt,
                      option["epoch"], option["schedule"], save_name,
                      option["save_option"])
    result = distributed_evaluate(model, test_dl)

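# Note on the head size used in train_rcm(): classification = int(10 / rcm) + 1,
# i.e. each sub-model's share of the 10 classes plus (presumably) one extra
# catch-all output for samples assigned to the other sub-models. Pure
# arithmetic check of the formula:
#     >>> [int(10 / rcm) + 1 for rcm in (2, 5)]
#     [6, 3]
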
def finetuning_rcm(option):
    """Fine-tune each RCM sub-model from its own checkpoint."""
    _save_name = option["save_name"]
    if option["model"] == "VGG16":
        model = [
            models.VGG(layers=16, classification=10) for _ in range(option["rcm"])
        ]
    elif option["model"] == "MobileNet":
        model = [
            models.MobileNet(alpha=option["alpha"], classification=10)
            for _ in range(option["rcm"])
        ]
        if option["alpha"] == 1.0:
            _save_name = _save_name + "x1.0"
        else:
            _save_name = _save_name + "x{:n}".format(option["alpha"])
    elif option["model"] == "LeNet5":
        model = [models.LeNet5(classification=10) for _ in range(option["rcm"])]
    elif option["model"] == "ConvNet":
        model = [models.ConvNet(classification=10) for _ in range(option["rcm"])]
    elif option["model"] == "ConvNet_s":
        model = [models.ConvNet_s(classification=10) for _ in range(option["rcm"])]
    _save_name = _save_name + "_ft_rcm{:d}".format(option["rcm"])

    if option["model"] == "LeNet5":
        train_dl = [
            load_mnist("train", option["rcm"], i + 1, option["batch"])
            for i in range(option["rcm"])
        ]  # ([train_dl[0], ...], [valid_dl[0], ...])
        test_dl = load_mnist("test", 1, 1, option["batch"])
    else:
        train_dl = [
            load_cifar10("train", option["rcm"], i + 1, option["batch"])
            for i in range(option["rcm"])
        ]
        test_dl = load_cifar10("test", 1, 1, option["batch"])

    for i, m in enumerate(model):
        load_model = torch.load(option["model_path"][i], map_location=option["dev"])
        m._modules = load_model['_modules']
        m.load_state_dict(load_model['state_dict'])
        model[i] = m.to(option["dev"])

    for i in range(option["rcm"]):
        print("Reduced Classification model #{:d}".format(i + 1))
        loss_fn = nn.CrossEntropyLoss()
        opt = optim.Adam(model[i].parameters(), lr=option["lr"])
        save_name = _save_name + "_{:d}".format(i + 1)
        history = fit(model[i], train_dl[i][0], train_dl[i][1], loss_fn, opt,
                      option["epoch"], option["schedule"], save_name,
                      option["save_option"])
    result = distributed_evaluate(model, test_dl)

def eprune_rcm(option):
    """Element-wise-prune each RCM sub-model, then evaluate the set jointly."""
    _save_name = option["save_name"]
    classification = int(10 / option["rcm"]) + 1
    if option["model"] == "VGG16":
        model = [
            models.VGG(layers=16, classification=classification)
            for _ in range(option["rcm"])
        ]
    elif option["model"] == "MobileNet":
        model = [
            models.MobileNet(alpha=option["alpha"], classification=classification)
            for _ in range(option["rcm"])
        ]
        if option["alpha"] == 1.0:
            _save_name = _save_name + "x1.0"
        else:
            _save_name = _save_name + "x{:n}".format(option["alpha"])
    elif option["model"] == "LeNet5":
        model = [
            models.LeNet5(classification=classification)
            for _ in range(option["rcm"])
        ]
    elif option["model"] == "ConvNet":
        model = [
            models.ConvNet(classification=classification)
            for _ in range(option["rcm"])
        ]
    elif option["model"] == "ConvNet_s":
        model = [
            models.ConvNet_s(classification=classification)
            for _ in range(option["rcm"])
        ]
    _save_name = _save_name + "_ep_rcm{:d}".format(option["rcm"])

    if option["model"] == "LeNet5":
        train_dl = [
            load_mnist("train", option["rcm"], i + 1, option["batch"])
            for i in range(option["rcm"])
        ]  # ([train_dl[0], ...], [valid_dl[0], ...])
        test_dl = load_mnist("test", 1, 1, option["batch"])
    else:
        train_dl = [
            load_cifar10("train", option["rcm"], i + 1, option["batch"])
            for i in range(option["rcm"])
        ]
        test_dl = load_cifar10("test", 1, 1, option["batch"])

    for i, m in enumerate(model):
        load_model = torch.load(option["model_path"][i], map_location=option["dev"])
        m._modules = load_model['_modules']
        m.load_state_dict(load_model['state_dict'])
        model[i] = m.to(option["dev"])

    print("Element-wise Pruning - RCM{}".format(option["rcm"]))
    for i in range(option["rcm"]):
        print("Reduced Classification model #{:d}".format(i + 1))
        print("\t{}".format(option["model_path"][i]))
        pruning = ePruning(model[i], train_dl[i][0], train_dl[i][1],
                           option["threshold_ratio"])
        save_name = _save_name + "_{:03d}_{:d}".format(option["threshold_ratio"],
                                                       i + 1)
        history = pruning.iterative_pruning(finetuning=option["retraining"],
                                            epoch=option["epoch"],
                                            lr=option["lr"],
                                            schedule=option["schedule"],
                                            save_name=save_name,
                                            save_mode=option["save_option"])
    print("* Evaluating Test Sets")
    result = distributed_evaluate(model, test_dl)

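# Usage sketch (assumption): the keys specific to the element-wise pruning
# entry points. threshold_ratio is formatted with "{:03d}" above, so an integer
# (e.g. a percentage) is assumed; checkpoint paths are one per sub-model.
_EXAMPLE_EPRUNE_OPTION = {
    "model": "LeNet5",
    "rcm": 2,
    "batch": 128,
    "threshold_ratio": 80,  # assumed integer pruning threshold (see "{:03d}")
    "retraining": True,     # fine-tune between pruning rounds
    "epoch": 20,
    "lr": 1e-4,
    "schedule": [10],
    "save_name": "lenet5",
    "save_option": True,
    "dev": "cpu",
    "model_path": [         # hypothetical per-sub-model checkpoints
        "./weights/lenet5_rcm2_1.pth",
        "./weights/lenet5_rcm2_2.pth",
    ],
}
# eprune_rcm(_EXAMPLE_EPRUNE_OPTION)
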
def prune_rcm(option):
    """Iteratively filter-prune each RCM sub-model, logging curves to CSV."""
    _save_name = option["save_name"]
    classification = int(10 / option["rcm"]) + 1
    if option["model"] == "VGG16":
        model = [
            models.VGG(layers=16, classification=classification)
            for _ in range(option["rcm"])
        ]
    elif option["model"] == "MobileNet":
        model = [
            models.MobileNet(alpha=option["alpha"], classification=classification)
            for _ in range(option["rcm"])
        ]
        if option["alpha"] == 1.0:
            _save_name = _save_name + "x1.0"
        else:
            _save_name = _save_name + "x{:n}".format(option["alpha"])
    elif option["model"] == "LeNet5":
        model = [
            models.LeNet5(classification=classification)
            for _ in range(option["rcm"])
        ]
    elif option["model"] == "ConvNet":
        model = [
            models.ConvNet(classification=classification)
            for _ in range(option["rcm"])
        ]
    elif option["model"] == "ConvNet_s":
        model = [
            models.ConvNet_s(classification=classification)
            for _ in range(option["rcm"])
        ]
    _save_name = _save_name + "_fp_rcm{:d}".format(option["rcm"])

    for i, m in enumerate(model):
        load_model = torch.load(option["model_path"][i], map_location=option["dev"])
        m._modules = load_model['_modules']
        m.load_state_dict(load_model['state_dict'])
        model[i] = m.to(option["dev"])

    if option["model"] == "LeNet5":
        train_dl = [
            load_mnist("train", option["rcm"], i + 1, option["batch"])
            for i in range(option["rcm"])
        ]  # ([train_dl[0], ...], [valid_dl[0], ...])
        test_dl = load_mnist("test", 1, 1, option["batch"])
    else:
        train_dl = [
            load_cifar10("train", option["rcm"], i + 1, option["batch"])
            for i in range(option["rcm"])
        ]
        test_dl = load_cifar10("test", 1, 1, option["batch"])

    csv_name = _save_name
    write_pruning_result(model, test_dl, option["rcm"], 0, csv_name)
    for i in range(option["prune_step"]):
        print("Filter Pruning #{:d}".format(i + 1))
        for j in range(option["rcm"]):
            print("Reduced Classification model #{:d}".format(j + 1))
            save_name = _save_name + "_{:03d}_{:d}".format(i + 1, j + 1)
            pruning = Pruning(model[j], train_dl[j][0], train_dl[j][1])
            history = pruning.iterative_pruning(
                one_epoch_remove=option["filters_removed"],
                finetuning=option["retraining"],
                epoch=option["epoch"],
                lr=option["lr"],
                save_name=save_name,
                save_mode=option["save_option"])
            if option["retraining"] and save_name is not None:
                os.makedirs("analysis", exist_ok=True)
                # Append this round's training/validation curves to per-model logs.
                for suffix, row in (("train_loss", history['train'][0]),
                                    ("train_acc", history['train'][1]),
                                    ("valid_loss", history['valid'][0]),
                                    ("valid_acc", history['valid'][1])):
                    with open('./analysis/{}_{:d}_{}.csv'.format(csv_name, j + 1, suffix),
                              'a', newline='') as csvfile:
                        csv.writer(csvfile).writerows([row])
        write_pruning_result(model, test_dl, option["rcm"], i + 1, csv_name)

def prune_baseline(option):
    """Iteratively filter-prune a single baseline model, logging curves to CSV."""
    _save_name = option["save_name"]
    if option["model"] == "VGG16":
        model = models.VGG(layers=16)
    elif option["model"] == "MobileNet":
        model = models.MobileNet(alpha=option["alpha"])
        # Match the other entry points: "{:n}" renders 1.0 as "1", so
        # special-case alpha == 1.0 to keep checkpoint names consistent.
        if option["alpha"] == 1.0:
            _save_name = _save_name + "x1.0"
        else:
            _save_name = _save_name + "x{:n}".format(option["alpha"])
    elif option["model"] == "LeNet5":
        model = models.LeNet5()
    elif option["model"] == "ConvNet":
        model = models.ConvNet()
    elif option["model"] == "ConvNet_s":
        model = models.ConvNet_s()
    _save_name = _save_name + "_fp_baseline"

    if option["model"] == "LeNet5":
        train_dl = load_mnist("train", 1, 1, option["batch"])  # (train_dl, valid_dl)
        test_dl = load_mnist("test", 1, 1, option["batch"])
    else:
        train_dl = load_cifar10("train", 1, 1, option["batch"])
        test_dl = load_cifar10("test", 1, 1, option["batch"])

    load_model = torch.load(option["model_path"][0], map_location=option["dev"])
    model._modules = load_model['_modules']
    model.load_state_dict(load_model['state_dict'])
    model = model.to(option["dev"])

    csv_name = _save_name
    write_pruning_result(model, test_dl, option["rcm"], 0, csv_name)
    for i in range(option["prune_step"]):
        print("Filter Pruning #{:d}".format(i + 1))
        save_name = _save_name + "_{:03d}".format(i + 1)
        pruning = Pruning(model, train_dl[0], train_dl[1])
        history = pruning.iterative_pruning(
            one_epoch_remove=option["filters_removed"],
            finetuning=option["retraining"],
            epoch=option["epoch"],
            lr=option["lr"],
            save_name=save_name,
            save_mode=option["save_option"])
        if option["retraining"] and save_name is not None:
            os.makedirs("analysis", exist_ok=True)
            # Append this round's training/validation curves to the CSV logs.
            for suffix, row in (("train_loss", history['train'][0]),
                                ("train_acc", history['train'][1]),
                                ("valid_loss", history['valid'][0]),
                                ("valid_acc", history['valid'][1])):
                with open('./analysis/{}_{}.csv'.format(csv_name, suffix),
                          'a', newline='') as csvfile:
                    csv.writer(csvfile).writerows([row])
        write_pruning_result(model, test_dl, option["rcm"], i + 1, csv_name)

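# Usage sketch (assumption): the filter-pruning entry points additionally read
# prune_step (number of outer pruning rounds) and filters_removed (filters
# dropped per round, forwarded as one_epoch_remove). Each round appends its
# curves to ./analysis/*.csv via the loop above.
_EXAMPLE_FPRUNE_OPTION = {
    "model": "VGG16",
    "rcm": 1,               # write_pruning_result() receives this even for the baseline
    "alpha": 1.0,
    "batch": 128,
    "prune_step": 10,
    "filters_removed": 64,  # assumed number of filters pruned per round
    "retraining": True,
    "epoch": 10,
    "lr": 1e-4,
    "save_name": "vgg16",
    "save_option": True,
    "dev": "cuda" if torch.cuda.is_available() else "cpu",
    "model_path": ["./weights/vgg16_baseline.pth"],  # hypothetical checkpoint
}
# prune_baseline(_EXAMPLE_FPRUNE_OPTION)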