Example #1
def run_alex_prune_compare_soft(dataset_params):
    multi_history = MultiHistory()
    exec_param = ExecParams(n_pretrain_epoch=3,
                            n_epoch_retrain=1,
                            n_epoch_total=15,
                            pruner=TaylorExpansionFilterPruner)
    exec_param_no_prune = ExecParams(n_pretrain_epoch=0,
                                     n_epoch_retrain=0,
                                     n_epoch_total=15,
                                     pruner=TaylorExpansionFilterPruner)

    exec_name = "Alexnet 0%"
    h, s = exec_alexnet(exec_name,
                        PruningParams(max_percent_per_iteration=0.0,
                                      prune_ratio=None),
                        exec_params=exec_param_no_prune,
                        dataset_params=dataset_params)
    multi_history.append_history(exec_name, h)

    exec_name = "Alexnet 10%"
    h, s = exec_alexnet(exec_name,
                        PruningParams(max_percent_per_iteration=0.01,
                                      prune_ratio=0.1),
                        exec_params=exec_param,
                        dataset_params=dataset_params)
    multi_history.append_history(exec_name, h)

    exec_name = "Alexnet 30%"
    h, s = exec_alexnet(exec_name,
                        PruningParams(max_percent_per_iteration=0.03,
                                      prune_ratio=0.3),
                        exec_params=exec_param,
                        dataset_params=dataset_params)
    multi_history.append_history(exec_name, h)

    exec_name = "Alexnet 50%"
    h, s = exec_alexnet(exec_name,
                        PruningParams(max_percent_per_iteration=0.05,
                                      prune_ratio=0.5),
                        exec_params=exec_param,
                        dataset_params=dataset_params)
    multi_history.append_history(exec_name, h)

    exec_name = "Alexnet 75%"
    h, s = exec_alexnet(exec_name,
                        PruningParams(max_percent_per_iteration=0.075,
                                      prune_ratio=0.75),
                        exec_params=exec_param,
                        dataset_params=dataset_params)
    multi_history.append_history(exec_name, h)
    save_obj(multi_history, "history_alex_soft")
    multi_history.display_single_key(
        History.VAL_ACC_KEY, title="Comparing AlexNet by Level of Pruning")
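Every pruned run above keeps max_percent_per_iteration at one tenth of prune_ratio, so each target is reached over roughly ten pruning passes. As a rough sketch only (not part of the original code), the same comparison can be written as a loop; sweep_alexnet_prune_ratios is a hypothetical name, and the sketch assumes the project helpers used above (exec_alexnet, ExecParams, PruningParams, MultiHistory, History, TaylorExpansionFilterPruner, save_obj) are already in scope.

def sweep_alexnet_prune_ratios(dataset_params, ratios=(0.0, 0.1, 0.3, 0.5, 0.75)):
    # Hypothetical rewrite of run_alex_prune_compare_soft as a single loop.
    multi_history = MultiHistory()
    for ratio in ratios:
        if ratio == 0.0:
            # Unpruned baseline: no pretraining or retraining epochs needed.
            pruning = PruningParams(max_percent_per_iteration=0.0,
                                    prune_ratio=None)
            exec_params = ExecParams(n_pretrain_epoch=0, n_epoch_retrain=0,
                                     n_epoch_total=15,
                                     pruner=TaylorExpansionFilterPruner)
        else:
            # One tenth of the target ratio per pruning pass, as above.
            pruning = PruningParams(max_percent_per_iteration=ratio / 10.0,
                                    prune_ratio=ratio)
            exec_params = ExecParams(n_pretrain_epoch=3, n_epoch_retrain=1,
                                     n_epoch_total=15,
                                     pruner=TaylorExpansionFilterPruner)
        exec_name = "Alexnet {:.0%}".format(ratio)
        h, _ = exec_alexnet(exec_name,
                            pruning_params=pruning,
                            exec_params=exec_params,
                            dataset_params=dataset_params)
        multi_history.append_history(exec_name, h)
    save_obj(multi_history, "history_alex_soft")
    multi_history.display_single_key(
        History.VAL_ACC_KEY, title="Comparing AlexNet by Level of Pruning")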
Example #2
def run_fast_validation(dataset_params):
    multi_history = MultiHistory()
    exec_param = ExecParams(n_pretrain_epoch=1,
                            n_epoch_retrain=1,
                            n_epoch_total=3,
                            pruner=ActivationMeanFilterPruner)

    exec_name = "Alexnet test"
    h, s = exec_alexnet(exec_name,
                        PruningParams(max_percent_per_iteration=0.2,
                                      prune_ratio=0.2),
                        exec_params=exec_param,
                        dataset_params=dataset_params)
    multi_history.append_history(exec_name, h)
    multi_history.display_single_key(History.VAL_ACC_KEY, title="TEST_RUN")
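run_fast_validation is a quick smoke test: one pretraining epoch, three epochs in total, and a single 20% pruning pass. A possible variant, not in the original code, takes the pruner class and epoch budget as parameters so the same check can also be run with TaylorExpansionFilterPruner from the other examples; run_fast_validation_with is a hypothetical name, and the same project helpers are assumed to be in scope.

def run_fast_validation_with(dataset_params,
                             pruner_cls=ActivationMeanFilterPruner,
                             n_epoch_total=3):
    # Hypothetical variant of run_fast_validation with the pruner class and
    # epoch budget exposed as parameters.
    multi_history = MultiHistory()
    exec_param = ExecParams(n_pretrain_epoch=1,
                            n_epoch_retrain=1,
                            n_epoch_total=n_epoch_total,
                            pruner=pruner_cls)
    exec_name = "Alexnet test ({})".format(pruner_cls.__name__)
    h, _ = exec_alexnet(exec_name,
                        PruningParams(max_percent_per_iteration=0.2,
                                      prune_ratio=0.2),
                        exec_params=exec_param,
                        dataset_params=dataset_params)
    multi_history.append_history(exec_name, h)
    multi_history.display_single_key(History.VAL_ACC_KEY, title="TEST_RUN")

For example, run_fast_validation_with(dataset_params, pruner_cls=TaylorExpansionFilterPruner) would run the same smoke test with the Taylor-expansion criterion.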
Example #3
def run_compare_pruning(dataset_params, retrain, total, split_ratio=1.0):
    debug_params = DebugHelper(only_test_end=True)
    pruning_param_no_prune = PruningParams(max_percent_per_iteration=0.0,
                                           prune_ratio=0.0)
    exec_param = ExecParams(n_pretrain_epoch=0,
                            n_epoch_retrain=retrain,
                            n_epoch_total=total,
                            batch_size=64,
                            pruner=CompleteTaylorExpansionFilterPruner)
    exec_param_large = ExecParams(n_pretrain_epoch=0,
                                  n_epoch_retrain=retrain,
                                  n_epoch_total=total,
                                  batch_size=32,
                                  pruner=CompleteTaylorExpansionFilterPruner)

    all_scores = {}
    exec_name = "AlexNet-degrad"
    score = []
    exec_param.best_result_save_path = "../saved/AlexNet-base/Pruned.pth"
    exec_param.retrain_if_weight_loaded = True
    for i in range(0, 11):
        desired_pruning = (5.0 * i) / 100.0
        if desired_pruning != 0.0:
            pruning_param_no_prune.max_percent_per_iteration = desired_pruning * split_ratio
            pruning_param_no_prune.prune_ratio = desired_pruning
        else:
            pruning_param_no_prune.max_percent_per_iteration = None
            pruning_param_no_prune.prune_ratio = None
        h, s = exec_alexnet(exec_name,
                            pruning_params=pruning_param_no_prune,
                            exec_params=exec_param,
                            dataset_params=dataset_params,
                            debug_params=debug_params)
        score.append(s)

    all_scores["AlexNet"] = score
    display_graphs(score, all_scores, "AlexNet")

    exec_name = "ResNet18-degrad"
    score = []
    exec_param.best_result_save_path = "../saved/ResNet18-base/Pruned.pth"
    exec_param.retrain_if_weight_loaded = True
    for i in range(0, 11):
        desired_pruning = (5.0 * i) / 100.0
        if desired_pruning != 0.0:
            pruning_param_no_prune.max_percent_per_iteration = desired_pruning * split_ratio
            pruning_param_no_prune.prune_ratio = desired_pruning
        else:
            pruning_param_no_prune.max_percent_per_iteration = None
            pruning_param_no_prune.prune_ratio = None
        h, s = exec_resnet18(exec_name,
                             pruning_params=pruning_param_no_prune,
                             exec_params=exec_param,
                             dataset_params=dataset_params,
                             debug_params=debug_params)
        score.append(s)

    all_scores["ResNet18"] = score
    display_graphs(score, all_scores, "ResNet18")

    # exec_name = "ResNet34-degrad"
    # score = []
    # exec_param.best_result_save_path = "../saved/ResNet34-base/Pruned.pth".format(exec_name)
    # exec_param.retrain_if_weight_loaded = True
    # for i in range(0, 11):
    #     desired_pruning = (5.0 * i)/100.0
    #     if desired_pruning != 0.0:
    #         pruning_param_no_prune.max_percent_per_iteration = desired_pruning * split_ratio
    #         pruning_param_no_prune.prune_ratio = desired_pruning
    #     else:
    #         pruning_param_no_prune.max_percent_per_iteration = None
    #         pruning_param_no_prune.prune_ratio = None
    #     h, s = exec_resnet34(exec_name, pruning_params=pruning_param_no_prune, exec_params=exec_param,
    #                          dataset_params=dataset_params, debug_params=debug_params)
    #     score.append(s)
    #
    # all_scores["ResNet34"] = score
    # display_graphs(score, all_scores, "ResNet34")
    #
    # exec_name = "ResNet50-degrad"
    # score = []
    # exec_param.best_result_save_path = "../saved/ResNet50-base/Pruned.pth".format(exec_name)
    # exec_param.retrain_if_weight_loaded = True
    # for i in range(0, 11):
    #     desired_pruning = (5.0 * i)/100.0
    #     if desired_pruning != 0.0:
    #         pruning_param_no_prune.max_percent_per_iteration = desired_pruning * split_ratio
    #         pruning_param_no_prune.prune_ratio = desired_pruning
    #     else:
    #         pruning_param_no_prune.max_percent_per_iteration = None
    #         pruning_param_no_prune.prune_ratio = None
    #     h, s = exec_resnet50(exec_name, pruning_params=pruning_param_no_prune, exec_params=exec_param,
    #                          dataset_params=dataset_params, debug_params=debug_params)
    #     score.append(s)
    #
    # all_scores["ResNet50"] = score
    # display_graphs(score, all_scores, "ResNet50")

    exec_name = "VGG16-degrad"
    score = []
    exec_param_large.best_result_save_path = "../saved/VGG16-base/Pruned.pth"
    exec_param_large.retrain_if_weight_loaded = True
    for i in range(0, 11):
        desired_pruning = (5.0 * i) / 100.0
        if desired_pruning != 0.0:
            pruning_param_no_prune.max_percent_per_iteration = desired_pruning * split_ratio
            pruning_param_no_prune.prune_ratio = desired_pruning
        else:
            pruning_param_no_prune.max_percent_per_iteration = None
            pruning_param_no_prune.prune_ratio = None
        h, s = exec_vgg16(exec_name,
                          pruning_params=pruning_param_no_prune,
                          exec_params=exec_param_large,
                          dataset_params=dataset_params,
                          debug_params=debug_params)
        score.append(s)

    all_scores["VGG16"] = score
    display_graphs(score, all_scores, "VGG16")

    exec_name = "DenseNet121-degrad"
    score = []
    exec_param_large.best_result_save_path = "../saved/DenseNet121-base/Pruned.pth"
    exec_param_large.retrain_if_weight_loaded = True
    for i in range(0, 11):
        desired_pruning = (5.0 * i) / 100.0
        if desired_pruning != 0.0:
            pruning_param_no_prune.max_percent_per_iteration = desired_pruning * split_ratio
            pruning_param_no_prune.prune_ratio = desired_pruning
        else:
            pruning_param_no_prune.max_percent_per_iteration = None
            pruning_param_no_prune.prune_ratio = None
        h, s = exec_dense_net(exec_name,
                              pruning_params=pruning_param_no_prune,
                              exec_params=exec_param_large,
                              dataset_params=dataset_params,
                              debug_params=debug_params)
        score.append(s)

    all_scores["DenseNet121"] = score
    display_graphs(score, all_scores, "DenseNet121")
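Every architecture in run_compare_pruning repeats the same eleven-step sweep (0%, 5%, ..., 50% pruning, with max_percent_per_iteration scaled by split_ratio). A sketch of how that loop could be factored out follows; sweep_degradation is a hypothetical name, and it relies only on the keyword arguments the exec_* helpers already accept in the code above.

def sweep_degradation(exec_fn, exec_name, save_path, exec_params,
                      dataset_params, debug_params, split_ratio=1.0):
    # Hypothetical refactoring of the repeated loop above: prune 0%, 5%, ...,
    # 50% of the filters and collect the score of each run.
    exec_params.best_result_save_path = save_path
    exec_params.retrain_if_weight_loaded = True
    scores = []
    for i in range(0, 11):
        desired_pruning = (5.0 * i) / 100.0
        if desired_pruning != 0.0:
            pruning = PruningParams(
                max_percent_per_iteration=desired_pruning * split_ratio,
                prune_ratio=desired_pruning)
        else:
            pruning = PruningParams(max_percent_per_iteration=None,
                                    prune_ratio=None)
        _, s = exec_fn(exec_name,
                       pruning_params=pruning,
                       exec_params=exec_params,
                       dataset_params=dataset_params,
                       debug_params=debug_params)
        scores.append(s)
    return scores

The AlexNet block above would then reduce to all_scores["AlexNet"] = sweep_degradation(exec_alexnet, "AlexNet-degrad", "../saved/AlexNet-base/Pruned.pth", exec_param, dataset_params, debug_params, split_ratio), followed by the display_graphs call.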
Example #4
def run_strategy_prune_compare(dataset_params):
    exec_param_no_prune_large = ExecParams(n_pretrain_epoch=0,
                                           n_epoch_retrain=0,
                                           n_epoch_total=15,
                                           batch_size=16,
                                           pruner=TaylorExpansionFilterPruner)
    exec_param_no_prune_medium = ExecParams(n_pretrain_epoch=0,
                                            n_epoch_retrain=0,
                                            n_epoch_total=15,
                                            batch_size=32,
                                            pruner=TaylorExpansionFilterPruner)
    exec_param_w_prune_large = ExecParams(n_pretrain_epoch=5,
                                          n_epoch_retrain=1,
                                          n_epoch_total=15,
                                          batch_size=16,
                                          pruner=TaylorExpansionFilterPruner)
    exec_param_w_prune_medium = ExecParams(n_pretrain_epoch=5,
                                           n_epoch_retrain=1,
                                           n_epoch_total=15,
                                           batch_size=32,
                                           pruner=TaylorExpansionFilterPruner)
    exec_param_no_prune = ExecParams(n_pretrain_epoch=0,
                                     n_epoch_retrain=0,
                                     n_epoch_total=15,
                                     batch_size=64,
                                     pruner=TaylorExpansionFilterPruner)
    exec_param_w_prune = ExecParams(n_pretrain_epoch=5,
                                    n_epoch_retrain=1,
                                    n_epoch_total=15,
                                    batch_size=64,
                                    pruner=TaylorExpansionFilterPruner)
    exec_param_w_prune_squeeze = ExecParams(n_pretrain_epoch=5,
                                            n_epoch_retrain=1,
                                            n_epoch_total=15,
                                            batch_size=64,
                                            pruner=TaylorExpansionFilterPruner,
                                            force_forward_view=True)
    pruning_param_no_prune = PruningParams(max_percent_per_iteration=0.0,
                                           prune_ratio=None)
    pruning_param_w_prune = PruningParams(max_percent_per_iteration=0.05,
                                          prune_ratio=0.3)

    multi_history = MultiHistory()
    exec_name = "SqueezeNet-0"
    h, s = exec_squeeze_net(exec_name,
                            pruning_params=pruning_param_no_prune,
                            exec_params=exec_param_no_prune,
                            dataset_params=dataset_params)
    multi_history.append_history(exec_name, h)
    exec_name = "SqueezeNet-30"
    h, s = exec_squeeze_net(exec_name,
                            pruning_params=pruning_param_w_prune,
                            exec_params=exec_param_w_prune_squeeze,
                            dataset_params=dataset_params)
    multi_history.append_history(exec_name, h)
    multi_history.display_single_key(History.VAL_ACC_KEY,
                                     title="Comparing Models at 30% Pruning")

    exec_name = "densenet 121-0"
    h, s = exec_dense_net(exec_name,
                          pruning_params=pruning_param_no_prune,
                          exec_params=exec_param_no_prune_large,
                          dataset_params=dataset_params)
    multi_history.append_history(exec_name, h)
    exec_name = "densenet 121-30"
    h, s = exec_dense_net(exec_name,
                          pruning_params=pruning_param_w_prune,
                          exec_params=exec_param_w_prune_large,
                          dataset_params=dataset_params)
    multi_history.append_history(exec_name, h)
    multi_history.display_single_key(History.VAL_ACC_KEY,
                                     title="Comparing Models at 30% Pruning")

    exec_name = "Resnet 50-0"
    h, s = exec_resnet50(exec_name,
                         pruning_params=pruning_param_no_prune,
                         exec_params=exec_param_no_prune_medium,
                         dataset_params=dataset_params,
                         out_count=10)
    multi_history.append_history(exec_name, h)
    exec_name = "Resnet 50-30"
    h, s = exec_resnet50(exec_name,
                         pruning_params=pruning_param_w_prune,
                         exec_params=exec_param_w_prune_medium,
                         dataset_params=dataset_params,
                         out_count=10)
    multi_history.append_history(exec_name, h)
    multi_history.display_single_key(History.VAL_ACC_KEY,
                                     title="Comparing Models at 30% Pruning")

    # create a second history, since it may not all look nice in one graph
    multi_history2 = MultiHistory()
    exec_name = "vgg16 0"
    h, s = exec_vgg16(exec_name,
                      pruning_params=pruning_param_no_prune,
                      exec_params=exec_param_no_prune_medium,
                      dataset_params=dataset_params)
    multi_history.append_history(exec_name, h)
    multi_history2.append_history(exec_name, h)
    exec_name = "vgg16 30"
    h, s = exec_vgg16(exec_name,
                      pruning_params=pruning_param_w_prune,
                      exec_params=exec_param_w_prune_medium,
                      dataset_params=dataset_params)
    multi_history.append_history(exec_name, h)
    multi_history2.append_history(exec_name, h)

    exec_name = "Resnet 18-0"
    h, s = exec_resnet18(exec_name,
                         pruning_params=pruning_param_no_prune,
                         exec_params=exec_param_no_prune,
                         dataset_params=dataset_params,
                         out_count=10)
    multi_history.append_history(exec_name, h)
    multi_history2.append_history(exec_name, h)
    exec_name = "Resnet 18-30"
    h, s = exec_resnet18(exec_name,
                         pruning_params=pruning_param_w_prune,
                         exec_params=exec_param_w_prune,
                         dataset_params=dataset_params,
                         out_count=10)
    multi_history.append_history(exec_name, h)
    multi_history2.append_history(exec_name, h)

    exec_name = "Alexnet 0"
    h, s = exec_alexnet(exec_name,
                        pruning_params=pruning_param_no_prune,
                        exec_params=exec_param_no_prune,
                        dataset_params=dataset_params)
    multi_history.append_history(exec_name, h)
    multi_history2.append_history(exec_name, h)
    exec_name = "Alexnet 30"
    h, s = exec_alexnet(exec_name,
                        pruning_params=pruning_param_w_prune,
                        exec_params=exec_param_w_prune,
                        dataset_params=dataset_params)
    multi_history.append_history(exec_name, h)
    multi_history2.append_history(exec_name, h)

    save_obj(multi_history, "history_compare")
    multi_history.display_single_key(History.VAL_ACC_KEY,
                                     title="Comparing Models at 30% Pruning")
    multi_history2.display_single_key(History.VAL_ACC_KEY,
                                      title="Comparing Models at 30% Pruning")
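run_strategy_prune_compare repeats the same "unpruned versus 30% pruned" pair for each architecture, differing only in the exec_* function, the batch-size-specific ExecParams, and an optional out_count. A condensed sketch of that pattern follows; compare_at_30_percent is a hypothetical helper, and the save_obj and display calls are left to the caller.

def compare_at_30_percent(exec_fn, label, multi_history, exec_no_prune,
                          exec_w_prune, dataset_params, **extra):
    # Hypothetical helper: run one architecture unpruned and at 30% pruning,
    # appending both runs to the shared MultiHistory.
    no_prune = PruningParams(max_percent_per_iteration=0.0, prune_ratio=None)
    w_prune = PruningParams(max_percent_per_iteration=0.05, prune_ratio=0.3)
    for suffix, pruning, exec_params in (("-0", no_prune, exec_no_prune),
                                         ("-30", w_prune, exec_w_prune)):
        exec_name = label + suffix
        h, _ = exec_fn(exec_name,
                       pruning_params=pruning,
                       exec_params=exec_params,
                       dataset_params=dataset_params,
                       **extra)
        multi_history.append_history(exec_name, h)

For example, compare_at_30_percent(exec_resnet50, "Resnet 50", multi_history, exec_param_no_prune_medium, exec_param_w_prune_medium, dataset_params, out_count=10) reproduces the ResNet-50 pair above; for SqueezeNet, exec_param_w_prune_squeeze would be passed as exec_w_prune.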
Example #5
def train_models(dataset_params):
    pruning_param_no_prune = PruningParams(max_percent_per_iteration=0.0,
                                           prune_ratio=None)
    exec_param_no_prune = ExecParams(n_pretrain_epoch=0,
                                     n_epoch_retrain=0,
                                     n_epoch_total=15,
                                     batch_size=32,
                                     pruner=None)

    multi_history = MultiHistory()

    exec_name = "AlexNet-base"
    h, s = exec_alexnet(exec_name,
                        pruning_params=pruning_param_no_prune,
                        exec_params=exec_param_no_prune,
                        dataset_params=dataset_params)
    multi_history.append_history(exec_name, h)
    multi_history.display_single_key(History.VAL_ACC_KEY,
                                     title="Models training without pruning")

    exec_name = "ResNet18-base"
    h, s = exec_resnet18(exec_name,
                         pruning_params=pruning_param_no_prune,
                         exec_params=exec_param_no_prune,
                         dataset_params=dataset_params)
    multi_history.append_history(exec_name, h)
    multi_history.display_single_key(History.VAL_ACC_KEY,
                                     title="Models training without pruning")

    exec_name = "ResNet34-base"
    h, s = exec_resnet34(exec_name,
                         pruning_params=pruning_param_no_prune,
                         exec_params=exec_param_no_prune,
                         dataset_params=dataset_params)
    multi_history.append_history(exec_name, h)
    multi_history.display_single_key(History.VAL_ACC_KEY,
                                     title="Models training without pruning")

    exec_name = "ResNet50-base"
    h, s = exec_resnet50(exec_name,
                         pruning_params=pruning_param_no_prune,
                         exec_params=exec_param_no_prune,
                         dataset_params=dataset_params)
    multi_history.append_history(exec_name, h)
    multi_history.display_single_key(History.VAL_ACC_KEY,
                                     title="Models training without pruning")

    exec_name = "Squeeze-base"
    h, s = exec_squeeze_net(exec_name,
                            pruning_params=pruning_param_no_prune,
                            exec_params=exec_param_no_prune,
                            dataset_params=dataset_params)
    multi_history.append_history(exec_name, h)
    multi_history.display_single_key(History.VAL_ACC_KEY,
                                     title="Models training without pruning")

    exec_name = "DenseNet121-base"
    h, s = exec_dense_net(exec_name,
                          pruning_params=pruning_param_no_prune,
                          exec_params=exec_param_no_prune,
                          dataset_params=dataset_params)
    multi_history.append_history(exec_name, h)
    multi_history.display_single_key(History.VAL_ACC_KEY,
                                     title="Models training without pruning")

    exec_name = "VGG16-base"
    h, s = exec_vgg16(exec_name,
                      pruning_params=pruning_param_no_prune,
                      exec_params=exec_param_no_prune,
                      dataset_params=dataset_params)
    multi_history.append_history(exec_name, h)
    multi_history.display_single_key(History.VAL_ACC_KEY,
                                     title="Models training without pruning")
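The seven baseline runs in train_models differ only in the run name and the exec_* function, so the whole routine can also be expressed as a loop. A sketch under the same assumptions as the original (train_baselines is a hypothetical name):

def train_baselines(dataset_params):
    # Hypothetical condensed version of train_models: same parameters and
    # models, expressed as a loop over (name, exec_fn) pairs.
    pruning = PruningParams(max_percent_per_iteration=0.0, prune_ratio=None)
    exec_params = ExecParams(n_pretrain_epoch=0, n_epoch_retrain=0,
                             n_epoch_total=15, batch_size=32, pruner=None)
    runs = [("AlexNet-base", exec_alexnet),
            ("ResNet18-base", exec_resnet18),
            ("ResNet34-base", exec_resnet34),
            ("ResNet50-base", exec_resnet50),
            ("Squeeze-base", exec_squeeze_net),
            ("DenseNet121-base", exec_dense_net),
            ("VGG16-base", exec_vgg16)]
    multi_history = MultiHistory()
    for exec_name, exec_fn in runs:
        h, _ = exec_fn(exec_name,
                       pruning_params=pruning,
                       exec_params=exec_params,
                       dataset_params=dataset_params)
        multi_history.append_history(exec_name, h)
        multi_history.display_single_key(History.VAL_ACC_KEY,
                                         title="Models training without pruning")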