Example #1
    def train_session(idx, train_index, val_index):
        os.environ['CUDA_VISIBLE_DEVICES'] = str(idx)
        train_list = [imgs[x] for x in train_index]
        val_list = [imgs[x] for x in val_index]
        train_loader = get_loader(train_list, config, 'train')
        val_loader = get_loader(val_list, config, 'val')
        model = models.__dict__[config['model']](pretrained=config['pretrain'])
        model, last_layer, feature_layer = finetune(model, config)
        model = torch.nn.DataParallel(model).cuda()
        criterion = nn.CrossEntropyLoss().cuda()
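        # Two learning rates: 1e-3 for the new classifier head,
        # 1e-4 for the pretrained feature extractor.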
        optimizer = torch.optim.Adam([{
            'params': last_layer.parameters(),
            'lr': 1e-3
        }, {
            'params': feature_layer.parameters(),
            'lr': 1e-4
        }])
        trainer = Trainer(model,
                          optimizer,
                          criterion,
                          config,
                          train_loader,
                          val_loader,
                          regime=None)

        trainer.run()
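Example #1 is only the per-fold worker; a driver has to supply the fold index and the index arrays. A minimal sketch of such a caller, assuming the module-level imgs and config used above and scikit-learn's KFold for the splits (the n_folds key is a hypothetical addition to config):

from sklearn.model_selection import KFold

# Hypothetical driver: one training session per cross-validation fold.
# The fold index doubles as the GPU id via CUDA_VISIBLE_DEVICES above.
kf = KFold(n_splits=config['n_folds'], shuffle=True, random_state=42)
for idx, (train_index, val_index) in enumerate(kf.split(imgs)):
    train_session(idx, train_index, val_index)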
Example #2
    def test_session(log_folder, idx):
        os.environ['CUDA_VISIBLE_DEVICES'] = str(idx)
        model = models.__dict__[config['model']]()
        model, last_layer, feature_layer = finetune(model, config)
        model = torch.nn.DataParallel(model).cuda()
        best_model = sorted(glob.glob(log_folder + '/model_best*'))[-1]
        checkpoint = torch.load(best_model)
        model.load_state_dict(checkpoint['state_dict'])
        model.eval()

        test_loader = get_loader(test_list, config, 'val')
        softmax = nn.Softmax(dim=1)  # dim must be explicit on newer PyTorch
        probs_list = []
        names_list = []
        with tqdm(total=len(test_loader)) as pbar:
            for i, (input, target, names) in enumerate(test_loader):
                # torch.no_grad() replaces the deprecated volatile=True flag
                with torch.no_grad():
                    prob = softmax(model(input)).cpu().numpy()
                img_id = [item.split('/')[-1].split('.')[0] for item in names]
                names_list += img_id
                probs_list += [item for item in prob]
                pbar.update(1)
        result = np.array(probs_list)
        output = zip(names_list, result)

        with open(
                os.path.join(log_folder,
                             'result_%s.csv' % checkpoint['epoch']), 'w') as f:
            f.write('name,invasive\n')
            for name, prob in output:
                f.write('%s,%f\n' % (name, prob[1]))
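Examples #1 and #2 both rely on a finetune(model, config) helper that is not shown here. The sketch below is one plausible shape for it, assuming a ResNet-style torchvision model with a final fc head; the num_classes config key is an assumption, not taken from the source:

import torch.nn as nn

def finetune(model, config):
    # Replace the classifier head with a fresh layer sized for the task.
    in_features = model.fc.in_features
    model.fc = nn.Linear(in_features, config['num_classes'])
    last_layer = model.fc
    # Everything except the new head acts as the feature extractor,
    # wrapped in a ModuleList so .parameters() works as in Example #1.
    feature_layer = nn.ModuleList(
        [m for name, m in model.named_children() if name != 'fc'])
    return model, last_layer, feature_layer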
Example #3
def main():

    args = parse_args_finetuning_pruning()
    print('------ Parameters for finetuning ------')
    for parameter, value in args.__dict__.items():
        print(f'{parameter}: {value}')
    print('---------------------------------------')

    if args.model_path is None:
        if args.verbose:
            print(f"No model was given, training {args.model} on {args.dataset} with {args.n_epochs} epochs.")
        model = train_model(args)
    else:
        model = torch.load(args.model_path)

    try:
        os.mkdir("temp")
    except FileExistsError:
        pass
    torch.save(model, "temp/model_finetuning_parameters.pt")


    if not args.download and args.data_dir == '../data':
        raise("ERROR: please provide the data directory from which to take the data.")

    kwargs = {'num_workers': 1, 'pin_memory': True} if (torch.cuda.is_available() and args.use_cuda) else {}
    device = torch.device("cuda:0" if (torch.cuda.is_available() and args.use_cuda) else "cpu")

    loader_class = get_loader(args.dataset)
    loader_object = loader_class(args.data_dir, args.batch_size, args.test_batch_size, 
                                 args.custom_transforms, args.crop_size)

    loader_train = loader_object.get_loader(train=True, download=args.download, kwargs=kwargs)
    loader_eval = loader_object.get_loader(train=False, download=args.download, kwargs=kwargs)

    baseline_accuracy = eval(model, loader_eval, device, args.verbose)
    accuracy_list = [baseline_accuracy]

    n_epochs_retrain = args.n_epochs_retrain

    for n_pruning_epochs in range(1, n_epochs_retrain + 1):
        model_ = torch.load("temp/model_finetuning_parameters.pt")
        accuracy_list.append(gradual_linear_pruning(model_, args.final_sparsity, loader_train, loader_eval, 
                                                    n_epochs_retrain, n_pruning_epochs, 1, device, args.optimizer,
                                                    args.loss, args.lr, args.verbose, baseline_accuracy, args.save_to, 
                                                    False, args.pruning_method))

    if args.show_plot:
        plt.plot(np.arange(n_epochs_retrain + 1), accuracy_list, label='Accuracy')
        plt.xlabel('Number of pruning epochs')
        plt.ylabel('Accuracy')
        plt.legend(loc="lower left")
        plt.show()
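gradual_linear_pruning itself is defined elsewhere. The sketch below only illustrates the underlying idea with torch.nn.utils.prune: raise the cumulative sparsity toward final_sparsity in equal steps, retraining after each one. The retrain_fn callback is a hypothetical stand-in for the train/eval loop used in these examples:

import torch.nn as nn
import torch.nn.utils.prune as prune

def gradual_linear_pruning_sketch(model, final_sparsity, n_pruning_epochs,
                                  retrain_fn):
    modules = [(m, 'weight') for m in model.modules()
               if isinstance(m, (nn.Linear, nn.Conv2d))]
    pruned_so_far = 0.0
    for step in range(1, n_pruning_epochs + 1):
        target = final_sparsity * step / n_pruning_epochs
        # Applied repeatedly, l1_unstructured prunes a fraction of the
        # *remaining* weights, so convert the cumulative target into an
        # incremental amount.
        amount = (target - pruned_so_far) / (1.0 - pruned_so_far)
        for module, name in modules:
            prune.l1_unstructured(module, name=name, amount=amount)
        pruned_so_far = target
        retrain_fn(model)
    return model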
Example #4
def main():

    args = parse_args_test_norm_pruning()
    print('------ Parameters for test_norm_pruning ------')
    for parameter, value in args.__dict__.items():
        print(f'{parameter}: {value}')
    print('------------------------------------------')

    ### Get the model, train it if none was given
    if args.model_path is None:
        model = train_model(args)
    else:
        model = torch.load(args.model_path)

    ### Save the trained model to make sure to have the same model before pruning.
    try:
        os.mkdir("temp")
    except FileExistsError:
        pass
    torch.save(model, "temp/model_norm_pruning.pt")

    ### Get the loaders
    if not args.download and args.data_dir == '../data':
        raise ValueError(
            "ERROR: please provide the data directory from which to take the data."
        )

    kwargs = {
        'num_workers': 1,
        'pin_memory': True
    } if (torch.cuda.is_available() and args.use_cuda) else {}
    device = torch.device("cuda:0" if (
        torch.cuda.is_available() and args.use_cuda) else "cpu")

    loader_class = get_loader(args.dataset)
    loader_object = loader_class(args.data_dir, args.batch_size,
                                 args.test_batch_size, args.custom_transforms,
                                 args.crop_size)

    loader_train = loader_object.get_loader(train=True,
                                            download=args.download,
                                            kwargs=kwargs)
    loader_eval = loader_object.get_loader(train=False,
                                           download=args.download,
                                           kwargs=kwargs)

    ### Testing all the combination between the methods and pruning_rates given
    pruning_rates = args.pruning_rates
    methods = args.pruning_methods

    baseline_accuracy = eval(model, loader_eval, device, args.verbose)

    for method in methods:

        accs = []

        for pruning_rate in pruning_rates:
            model_ = torch.load("temp/model_norm_pruning.pt")
            accs.append(
                one_shot_pruning(model_, pruning_rate, loader_train,
                                 loader_eval, args.n_epochs_retrain, device,
                                 args.optimizer, args.loss, args.lr,
                                 args.verbose, baseline_accuracy, args.save_to,
                                 args.show_plot, method))

        plt.plot(pruning_rates, accs, label='Accuracy')
        plt.title('Accuracy w.r.t pruning rate ' + method)
        plt.xlabel('Pruning rate')
        plt.ylabel('Accuracy')
        plt.show()
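one_shot_pruning reaches the target sparsity in a single step instead. One way to express that idea is prune.global_unstructured, which ranks weights across all layers at once; again this is an illustrative sketch, not the actual implementation:

import torch.nn as nn
import torch.nn.utils.prune as prune

def one_shot_pruning_sketch(model, pruning_rate):
    # Collect every prunable weight tensor in the network.
    parameters_to_prune = [(m, 'weight') for m in model.modules()
                           if isinstance(m, (nn.Linear, nn.Conv2d))]
    # Remove the globally smallest weights by L1 magnitude in one shot.
    prune.global_unstructured(parameters_to_prune,
                              pruning_method=prune.L1Unstructured,
                              amount=pruning_rate)
    # Fold the masks into the weights to make the pruning permanent.
    for module, name in parameters_to_prune:
        prune.remove(module, name)
    return model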
Example #5
def train_model(args):

    if not args.download and args.data_dir == '../data':
        raise ValueError(
            "ERROR: please provide the data directory from which to take the data."
        )

    kwargs = {
        'num_workers': 1,
        'pin_memory': True
    } if (torch.cuda.is_available() and args.use_cuda) else {}
    device = torch.device("cuda:0" if (
        torch.cuda.is_available() and args.use_cuda) else "cpu")

    loader_class = get_loader(args.dataset)
    loader_object = loader_class(args.data_dir, args.batch_size,
                                 args.test_batch_size, args.custom_transforms,
                                 args.crop_size)

    loader_train = loader_object.get_loader(train=True,
                                            download=args.download,
                                            kwargs=kwargs)
    loader_eval = loader_object.get_loader(train=False,
                                           download=args.download,
                                           kwargs=kwargs)

    try:
        if args.save_model and args.save_to is None:
            os.mkdir(args.model)
        elif args.save_to is not None:
            os.mkdir(args.save_to)
    except FileExistsError:
        pass

    model = get_model(args.model)

    learning_curve = np.zeros((3, args.n_epochs))
    learning_curve[0, :] = np.arange(1, args.n_epochs + 1)

    for epoch in range(1, args.n_epochs + 1):
        if args.verbose:
            print("Epoch n°", epoch, ":")
        learning_curve[1,
                       epoch - 1] = train(model, loader_train, device,
                                          args.optimizer, args.loss, args.lr)
        learning_curve[2, epoch - 1] = eval(model, loader_eval, device)

        if args.save_model:
            torch.save(model, f"{args.model}/epoch_{epoch}.pt")

    if args.show_plot:
        ax = plt.figure().gca()  # gca() returns the Axes, not the Figure

        plt.plot(learning_curve[0, :], learning_curve[1, :], label='Training')
        plt.plot(learning_curve[0, :],
                 learning_curve[2, :],
                 label='Evaluation')

        plt.title('Learning curve')
        plt.xlabel('Epochs')
        plt.ylabel('Accuracy')
        plt.legend(loc="lower left")
        ax.xaxis.set_major_locator(MaxNLocator(integer=True, min_n_ticks=10))

        plt.show()

    return model
def main():

    args = parse_args_test_gradual_pruning()
    print('------ Parameters for test_gradual_pruning ------')
    for parameter, value in args.__dict__.items():
        print(f'{parameter}: {value}')
    print('------------------------------------------')

    if args.model_path is None:
        if args.verbose:
            print(
                f"No model was given, training {args.model} on {args.dataset} with {args.n_epochs} epochs."
            )
        model = train_model(args)
    else:
        model = torch.load(args.model_path)

    try:
        os.mkdir("temp")
    except FileExistsError:
        pass
    torch.save(model, "temp/model_gradual_pruning.pt")

    if not args.download and args.data_dir == '../data':
        raise ValueError(
            "ERROR: please provide the data directory from which to take the data."
        )

    kwargs = {
        'num_workers': 1,
        'pin_memory': True
    } if (torch.cuda.is_available() and args.use_cuda) else {}
    device = torch.device("cuda:0" if (
        torch.cuda.is_available() and args.use_cuda) else "cpu")

    loader_class = get_loader(args.dataset)
    loader_object = loader_class(args.data_dir, args.batch_size,
                                 args.test_batch_size, args.custom_transforms,
                                 args.crop_size)

    loader_train = loader_object.get_loader(train=True,
                                            download=args.download,
                                            kwargs=kwargs)
    loader_eval = loader_object.get_loader(train=False,
                                           download=args.download,
                                           kwargs=kwargs)

    baseline_accuracy = eval(model, loader_eval, device, args.verbose)

    results_one_shot = one_shot_pruning(
        model, args.final_sparsity, loader_train, loader_eval,
        args.n_epochs_retrain, device, args.optimizer, args.loss, args.lr,
        args.verbose, baseline_accuracy, args.save_to, args.show_plot,
        args.pruning_method)
    if args.verbose:
        print(f"Accuracy obtained with one-shot pruning: {results_one_shot}")

    results_linear_pruning = gradual_linear_pruning(
        model, args.final_sparsity, loader_train, loader_eval,
        args.n_epochs_retrain, args.pruning_epochs, args.pruning_frequency,
        device, args.optimizer, args.loss, args.lr, args.verbose,
        baseline_accuracy, args.save_to, args.show_plot, args.pruning_method)
    if args.verbose:
        print(
            f"Accuracy obtained with linear gradual pruning: {results_one_shot}"
        )

    results_AGP = automated_gradual_pruning(
        model, args.final_sparsity, loader_train, loader_eval,
        args.n_epochs_retrain, args.pruning_epochs, args.pruning_frequency,
        device, args.optimizer, args.loss, args.lr, args.verbose,
        baseline_accuracy, args.save_to, args.show_plot, args.pruning_method)
    if args.verbose:
        print(
            f"Accuracy obtained with automated gradual pruning: {results_AGP}")