# Example no. 1
def write_pruning_result(model, test_dl, rcm, sequence, save_name):
    """Evaluate *model* and append the accuracy/F1 result to a CSV file.

    Args:
        model: a single model (when ``rcm == 1``) or a list of RCM
            sub-models handed to ``distributed_evaluate``.
        test_dl: test DataLoader passed to the evaluation routine.
        rcm: number of reduced-classification models; 1 selects the plain
            ``evaluate`` path, anything else ``distributed_evaluate``.
        sequence: identifier written as the first CSV column.
        save_name: base name of the CSV file; when None, nothing is written.
    """
    if rcm == 1:
        result = evaluate(model, test_dl)
    else:
        result = distributed_evaluate(model, test_dl)

    # Bug fix: the CSV write used to sit OUTSIDE this guard, so a None
    # save_name produced a file literally named "None_eval.csv" (or crashed
    # when the "analysis" directory did not yet exist). Keep the whole
    # save path inside the guard, and let makedirs tolerate reruns.
    if save_name is not None:
        os.makedirs("analysis", exist_ok=True)
        with open('./analysis/{}_eval.csv'.format(save_name), 'a',
                  newline='') as csvfile:
            acc_file = csv.writer(csvfile)
            acc_file.writerow(
                [sequence,
                 float(result["acc"]),
                 float(result["f1score"])])
# Example no. 2
def evaluation_rcm(option):
    """Load pre-trained RCM sub-models from disk and evaluate the ensemble.

    Args:
        option: configuration dict with keys "model", "rcm", "alpha",
            "batch", "model_path" (one checkpoint per sub-model) and "dev".

    Returns:
        The evaluation result produced by ``distributed_evaluate``.

    Raises:
        ValueError: if ``option["model"]`` is not a supported architecture.
    """
    rcm = option["rcm"]
    model_name = option["model"]

    # Dispatch table instead of the if/elif chain. Previously an unknown
    # model name fell through silently and crashed later with a NameError
    # on `model`; now we fail fast with a clear message.
    builders = {
        "VGG16": lambda: models.VGG(layers=16, classification=10),
        "MobileNet": lambda: models.MobileNet(alpha=option["alpha"],
                                              classification=10),
        "LeNet5": lambda: models.LeNet5(classification=10),
        "ConvNet": lambda: models.ConvNet(classification=10),
        "ConvNet_s": lambda: models.ConvNet_s(classification=10),
    }
    try:
        build = builders[model_name]
    except KeyError:
        raise ValueError("Unsupported model: {}".format(model_name))
    model = [build() for _ in range(rcm)]

    # LeNet5 evaluates on MNIST; every other architecture on CIFAR-10.
    if model_name == "LeNet5":
        test_dl = load_mnist("test", 1, 1, option["batch"])
    else:
        test_dl = load_cifar10("test", 1, 1, option["batch"])

    # Restore each sub-model from its own checkpoint.
    for i, m in enumerate(model):
        load_model = torch.load(option["model_path"][i],
                                map_location=option["dev"])
        m._modules = load_model['_modules']
        m.load_state_dict(load_model['state_dict'])
        m.to(option["dev"])  # nn.Module.to moves in place; no rebind needed

    result = distributed_evaluate(model, test_dl)
    return result
# Example no. 3
def train_rcm(option):
    """Train a set of Reduced Classification Models from one backbone.

    One pre-trained checkpoint (``option["model_path"][0]``) is loaded into
    every sub-model, the final classifier layer is replaced so it predicts
    ``int(10 / rcm) + 1`` classes (the sub-model's share of the 10 classes
    plus one extra bucket -- presumably an "other" class; verify against the
    RCM design), and each sub-model is trained on its own shard of the
    training set. The ensemble is then evaluated on the full test set.

    Args:
        option: configuration dict with keys "model", "rcm", "alpha",
            "batch", "lr", "epoch", "schedule", "save_name", "save_option",
            "model_path" and "dev".

    Returns:
        The evaluation result produced by ``distributed_evaluate``.

    Raises:
        ValueError: if ``option["model"]`` is not a supported architecture.
    """
    _save_name = option["save_name"]
    rcm = option["rcm"]
    model_name = option["model"]

    if model_name == "VGG16":
        model = [models.VGG(layers=16, classification=10) for _ in range(rcm)]
    elif model_name == "MobileNet":
        model = [
            models.MobileNet(alpha=option["alpha"], classification=10)
            for _ in range(rcm)
        ]
        # Encode the width multiplier in the save name.
        if option["alpha"] == 1.0:
            _save_name = _save_name + "x1.0"
        else:
            _save_name = _save_name + "x{:n}".format(option["alpha"])
    elif model_name == "LeNet5":
        model = [models.LeNet5(classification=10) for _ in range(rcm)]
        lenet_size = 32
    elif model_name == "LeNet300100":
        model = [models.LeNet300100() for _ in range(rcm)]
        lenet_size = 28
    elif model_name == "ConvNet":
        model = [models.ConvNet(classification=10) for _ in range(rcm)]
    elif model_name == "ConvNet_s":
        model = [models.ConvNet_s(classification=10) for _ in range(rcm)]
    elif model_name == "ResNet20":
        model = [models.ResNet20(classification=10) for _ in range(rcm)]
    else:
        # Previously an unknown name fell through and crashed later with a
        # NameError on `model`; fail fast with a clear message instead.
        raise ValueError("Unsupported model: {}".format(model_name))
    _save_name = _save_name + "_rcm{:d}".format(rcm)

    # MNIST-based models get MNIST loaders at their input size; the rest
    # use CIFAR-10. Sub-model i trains on shard i+1 of `rcm` shards.
    if model_name in ("LeNet5", "LeNet300100"):
        train_dl = [
            load_mnist("train", rcm, i + 1, option["batch"], lenet_size)
            for i in range(rcm)
        ]  # ([train_dl[0], ...], [valid_dl[0], ...])
        test_dl = load_mnist("test", 1, 1, option["batch"], lenet_size)
    else:
        train_dl = [
            load_cifar10("train", rcm, i + 1, option["batch"])
            for i in range(rcm)
        ]
        test_dl = load_cifar10("test", 1, 1, option["batch"])

    # All sub-models start from the same pre-trained checkpoint.
    load_model = torch.load(option["model_path"][0],
                            map_location=option["dev"])

    # Loop-invariant: identical for every sub-model, so compute it once.
    classification = int(10 / rcm) + 1

    for i in range(rcm):
        print("Reduced Classification model #{:d}".format(i + 1))

        model[i].load_state_dict(load_model["state_dict"])
        # Swap the final FC layer for one with the reduced class count.
        last_in_features = model[i].classifier[-1].in_features
        model[i].classifier[-1] = nn.Linear(in_features=last_in_features,
                                            out_features=classification)
        model[i] = model[i].to(option["dev"])

        loss_fn = nn.CrossEntropyLoss()
        opt = optim.Adam(model[i].parameters(), lr=option["lr"])

        save_name = _save_name + "_{:d}".format(i + 1)
        # fit's returned history was previously bound and never used.
        fit(model[i], train_dl[i][0], train_dl[i][1], loss_fn, opt,
            option["epoch"], option["schedule"], save_name,
            option["save_option"])

    result = distributed_evaluate(model, test_dl)
    return result
# Example no. 4
def finetuning_rcm(option):
    """Fine-tune previously saved RCM sub-models, one checkpoint per model.

    Unlike ``train_rcm``, each sub-model is restored from its own checkpoint
    (``option["model_path"][i]``) with its classifier already sized, then
    trained further on its own shard of the training data. The ensemble is
    evaluated on the full test set afterwards.

    Args:
        option: configuration dict with keys "model", "rcm", "alpha",
            "batch", "lr", "epoch", "schedule", "save_name", "save_option",
            "model_path" and "dev".

    Returns:
        The evaluation result produced by ``distributed_evaluate``.

    Raises:
        ValueError: if ``option["model"]`` is not a supported architecture.
    """
    _save_name = option["save_name"]
    rcm = option["rcm"]
    model_name = option["model"]

    if model_name == "VGG16":
        model = [models.VGG(layers=16, classification=10) for _ in range(rcm)]
    elif model_name == "MobileNet":
        model = [
            models.MobileNet(alpha=option["alpha"], classification=10)
            for _ in range(rcm)
        ]
        # Encode the width multiplier in the save name.
        if option["alpha"] == 1.0:
            _save_name = _save_name + "x1.0"
        else:
            _save_name = _save_name + "x{:n}".format(option["alpha"])
    elif model_name == "LeNet5":
        model = [models.LeNet5(classification=10) for _ in range(rcm)]
    elif model_name == "ConvNet":
        model = [models.ConvNet(classification=10) for _ in range(rcm)]
    elif model_name == "ConvNet_s":
        model = [models.ConvNet_s(classification=10) for _ in range(rcm)]
    else:
        # Previously an unknown name fell through and crashed later with a
        # NameError on `model`; fail fast with a clear message instead.
        raise ValueError("Unsupported model: {}".format(model_name))
    _save_name = _save_name + "_ft_rcm{:d}".format(rcm)

    # LeNet5 fine-tunes on MNIST; everything else on CIFAR-10.
    if model_name == "LeNet5":
        train_dl = [
            load_mnist("train", rcm, i + 1, option["batch"])
            for i in range(rcm)
        ]  # ([train_dl[0], ...], [valid_dl[0], ...])
        test_dl = load_mnist("test", 1, 1, option["batch"])
    else:
        train_dl = [
            load_cifar10("train", rcm, i + 1, option["batch"])
            for i in range(rcm)
        ]
        test_dl = load_cifar10("test", 1, 1, option["batch"])

    # Restore each sub-model from its own checkpoint.
    for i, m in enumerate(model):
        load_model = torch.load(option["model_path"][i],
                                map_location=option["dev"])
        m._modules = load_model['_modules']
        m.load_state_dict(load_model['state_dict'])
        m.to(option["dev"])  # nn.Module.to moves in place; no rebind needed

    for i in range(rcm):
        print("Reduced Classification model #{:d}".format(i + 1))

        loss_fn = nn.CrossEntropyLoss()
        opt = optim.Adam(model[i].parameters(), lr=option["lr"])

        save_name = _save_name + "_{:d}".format(i + 1)
        # fit's returned history was previously bound and never used.
        fit(model[i], train_dl[i][0], train_dl[i][1], loss_fn, opt,
            option["epoch"], option["schedule"], save_name,
            option["save_option"])

    result = distributed_evaluate(model, test_dl)
    return result
# Example no. 5
def eprune_rcm(option):
    """Apply element-wise pruning to each RCM sub-model, then evaluate.

    Each sub-model is built with the reduced class count
    ``int(10 / rcm) + 1``, restored from its own checkpoint
    (``option["model_path"][i]``), iteratively pruned with ``ePruning``
    (optionally retraining between pruning rounds), and finally the whole
    ensemble is evaluated on the full test set.

    Args:
        option: configuration dict with keys "model", "rcm", "alpha",
            "batch", "lr", "epoch", "schedule", "save_name", "save_option",
            "model_path", "dev", "threshold_ratio" and "retraining".

    Returns:
        The evaluation result produced by ``distributed_evaluate``.

    Raises:
        ValueError: if ``option["model"]`` is not a supported architecture.
    """
    _save_name = option["save_name"]
    rcm = option["rcm"]
    model_name = option["model"]
    classification = int(10 / rcm) + 1

    if model_name == "VGG16":
        model = [
            models.VGG(layers=16, classification=classification)
            for _ in range(rcm)
        ]
    elif model_name == "MobileNet":
        model = [
            models.MobileNet(alpha=option["alpha"],
                             classification=classification)
            for _ in range(rcm)
        ]
        # Encode the width multiplier in the save name.
        if option["alpha"] == 1.0:
            _save_name = _save_name + "x1.0"
        else:
            _save_name = _save_name + "x{:n}".format(option["alpha"])
    elif model_name == "LeNet5":
        model = [
            models.LeNet5(classification=classification)
            for _ in range(rcm)
        ]
    elif model_name == "ConvNet":
        model = [
            models.ConvNet(classification=classification)
            for _ in range(rcm)
        ]
    elif model_name == "ConvNet_s":
        model = [
            models.ConvNet_s(classification=classification)
            for _ in range(rcm)
        ]
    else:
        # Previously an unknown name fell through and crashed later with a
        # NameError on `model`; fail fast with a clear message instead.
        raise ValueError("Unsupported model: {}".format(model_name))
    _save_name = _save_name + "_ep_rcm{:d}".format(rcm)

    # LeNet5 prunes against MNIST; everything else against CIFAR-10.
    if model_name == "LeNet5":
        train_dl = [
            load_mnist("train", rcm, i + 1, option["batch"])
            for i in range(rcm)
        ]  # ([train_dl[0], ...], [valid_dl[0], ...])
        test_dl = load_mnist("test", 1, 1, option["batch"])
    else:
        train_dl = [
            load_cifar10("train", rcm, i + 1, option["batch"])
            for i in range(rcm)
        ]
        test_dl = load_cifar10("test", 1, 1, option["batch"])

    # Restore each sub-model from its own checkpoint.
    for i, m in enumerate(model):
        load_model = torch.load(option["model_path"][i],
                                map_location=option["dev"])
        m._modules = load_model['_modules']
        m.load_state_dict(load_model['state_dict'])
        m.to(option["dev"])  # nn.Module.to moves in place; no rebind needed

    print("Element-wise Pruning - RCM{}".format(rcm))
    for i in range(rcm):
        print("Reduced Classification model #{:d}".format(i + 1))
        print("\t{}".format(option["model_path"][i]))
        pruning = ePruning(model[i], train_dl[i][0], train_dl[i][1],
                           option["threshold_ratio"])
        save_name = _save_name + "_{:03d}_{:d}".format(
            (option["threshold_ratio"]), i + 1)
        # iterative_pruning's returned history was previously never used.
        pruning.iterative_pruning(finetuning=option["retraining"],
                                  epoch=option["epoch"],
                                  lr=option["lr"],
                                  schedule=option["schedule"],
                                  save_name=save_name,
                                  save_mode=option["save_option"])

    print("* Evaluating Test Sets")
    result = distributed_evaluate(model, test_dl)
    return result