# Example #1 (label: 0)
def select_model(m):
    """Build and return a CUDA MNIST model for the given architecture name.

    Parameters
    ----------
    m : str
        Architecture selector: 'large', 'wide', or 'deep'. Any other value
        falls back to the default small model. The 'wide' and 'deep'
        branches read the module-level ``args.model_factor``.

    Returns
    -------
    torch.nn.Module
        The selected model, moved to the GPU via ``.cuda()``.

    Notes
    -----
    The previous version also constructed a ``test_loader`` inside each
    branch and then discarded it — dead work (and a possible dataset
    download) that no caller could observe. That has been removed; only
    the model is built and returned.
    """
    if m == 'large':
        model = pblm.mnist_model_large().cuda()
    elif m == 'wide':
        print("Using wide model with model_factor={}".format(args.model_factor))
        model = pblm.mnist_model_wide(args.model_factor).cuda()
    elif m == 'deep':
        print("Using deep model with model_factor={}".format(args.model_factor))
        model = pblm.mnist_model_deep(args.model_factor).cuda()
    else:
        model = pblm.mnist_model().cuda()
    return model
# Example #2 (label: 0) — fragment; the enclosing `def` header is missing.
        model = pblm.mnist_model_deep_custom().cuda()
    elif m == '500':
        model = pblm.mnist_500().cuda()
    else:
        model = pblm.mnist_model().cuda()
    return model


if __name__ == "__main__":
    # Parse CLI arguments with Adam-optimizer defaults and a small
    # starting epsilon for the robust-training schedule.
    args = pblm.argparser(opt='adam', verbose=200, starting_epsilon=0.01)
    print("saving file to {}".format(args.prefix))
    # Rename the OS process so it is identifiable in `ps`/`top`.
    setproctitle.setproctitle(args.prefix)
    train_log = open(args.prefix + "_train.log", "w")
    test_log = open(args.prefix + "_test.log", "w")

    # Separate loaders so train and test can use different batch sizes.
    train_loader, _ = pblm.mnist_loaders(args.batch_size)
    _, test_loader = pblm.mnist_loaders(args.test_batch_size)

    # Seed both CPU and GPU RNGs for reproducibility.
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    # Grab a single batch (X, y) just to give args2kwargs a sample input;
    # presumably used to size projection vectors — TODO confirm.
    for X, y in train_loader:
        break
    kwargs = pblm.args2kwargs(args, X=Variable(X.cuda()))
    best_err = 1

    # Cascade training keeps a list of models; start with one.
    sampler_indices = []
    model = [select_model(args.model)]

    # NOTE(review): the body of this loop is corrupted. Lines below are an
    # orphaned tail of a `print('...'.format(...))` call whose opening was
    # lost when this snippet was extracted — this is NOT valid Python as-is.
    # ('Sechduled' is also a typo for 'Scheduled' in the original string.)
    for _ in range(0, args.cascade):
        if _ > 0:
          'Starting epsilon: {4}\t\t'
          'Sechduled length: {5}'.format(args.type, args.category, args.epochs,
                                         args.epsilon, args.starting_epsilon,
                                         args.schedule_length),
          end='\n')
    # Report the L1 projection configuration when projection is enabled.
    if args.l1_proj is not None:
        print('Projection vectors: {0}\t\t'
              'Train estimation: {1}\t\t'
              'Test estimation: {2}'.format(args.l1_proj, args.l1_train,
                                            args.l1_test),
              end='\n')

    # train-validation split
    # NOTE(review): this re-binds train_loader/test_loader created above,
    # discarding the earlier loaders.
    train_loader, valid_loader, test_loader = pblm.mnist_loaders(
        batch_size=args.batch_size,
        path='../data',
        ratio=args.ratio,
        seed=args.seed)
    # NOTE(review): `model` was a list above but is re-bound to a single
    # module here; `model[-1]` below then indexes the Sequential's last
    # layer, not the list — verify this is intended.
    model = pblm.mnist_model().cuda()
    num_classes = model[-1].out_features

    # specify the task and construct the corresponding cost matrix
    folder_path = os.path.dirname(args.proctitle)
    if args.type == 'binary':
        # NOTE(review): np.int is deprecated (removed in NumPy 1.24+);
        # should be `int` or `np.int_`.
        input_mat = np.zeros((num_classes, num_classes), dtype=np.int)
        if args.category == 'single_seed':
            # Mark class 9 as the seed: its row is all ones except the
            # diagonal entry.
            seed_clas = 9
            input_mat[seed_clas, :] = np.ones(num_classes)
            input_mat[seed_clas, seed_clas] = 0
            folder_path += '/class_' + str(seed_clas)
        # NOTE(review): the `else:` branch body is missing — this snippet
        # is truncated here.
        else:
def plot_certificates():
    """Plot the fraction of certified MNIST test samples across epochs.

    Compares AMLS certification rates (read from a pickled results file)
    at sigma in {0.1, 0.2, 0.3} against the Wong & Kolter robust error at
    epsilon = 0.1 evaluated on saved model checkpoints, and writes the
    figure to ./results/robust/mnist_certificates.svg.

    Relies on module-level names: pblm, robust_loss, CUDA, cm2inch,
    np, torch, pickle, plt, sns.
    """
    # Fix random seeds for reproducibility of the data batch.
    seed = 0
    np.random.seed(seed)
    torch.manual_seed(seed)

    _, test_loader = pblm.mnist_loaders(50)

    # Take a single batch of 50 test samples as (x, y).
    for data, target in test_loader:
        if CUDA:
            data, target = data.float().cuda(), target.long().cuda()
        else:
            data, target = data.float(), target.long()
        x = data.view(-1, 1, 28, 28)
        y = target
        break

    # Checkpoints at which the W&K robust error is evaluated.
    epochs = np.array([32, 41, 49, 61, 74, 86, 99])
    epsilon = 0.1  # certification radius for the W&K bound (loop-invariant)
    robust_errs = []
    for epoch in epochs:
        model = pblm.mnist_model()
        if epoch == -1:
            # Baseline (non-robust) checkpoint; unreachable with the
            # current `epochs` array but kept so -1 can be re-enabled.
            model.load_state_dict(
                torch.load(
                    './snapshots/mnist_baseline_batch_size_50_epochs_100_lr_0.001_opt_adam_real_time_False_seed_0_checkpoint_99.pth'
                ))
        else:
            model.load_state_dict(
                torch.load(
                    f'./snapshots/mnist_robustified_robust_batch_size_50_epochs_100_epsilon_0.1_l1_test_exact_l1_train_exact_lr_0.001_opt_adam_real_time_False_schedule_length_50_seed_0_starting_epsilon_0.01_checkpoint_{epoch}.pth'
                ))
        if CUDA:
            model.cuda()

        _, robust_err = robust_loss(model, epsilon, x, y)
        robust_errs.append(robust_err)

    robust_errs = np.array(robust_errs)

    # AMLS results: results[(sample_id, sigma)] holds per-epoch
    # log-probability estimates; a mean of exactly -250.0 is the sentinel
    # for "not certified".
    with open('./snapshots/mnist_extracted_exp_results.pickle', 'rb') as fh:
        results = pickle.load(fh)
    xs = np.array([-1, 0, 2, 4, 6, 9, 14, 19, 24, 32, 41, 49, 61, 74, 86, 99])

    n_samples = 50
    our_results = {}
    for sigma in [0.1, 0.2, 0.3]:
        # One certification-count per epoch in `xs` (was hard-coded 16).
        our_results[sigma] = np.zeros(len(xs))
        for sample_id in range(n_samples):
            lg_ps = np.array(results[(sample_id, sigma)])
            mean_ps = np.mean(lg_ps, axis=1)
            # Count the sample as certified at each epoch unless the
            # -250.0 sentinel was recorded.
            our_results[sigma] += mean_ps != -250.0
        our_results[sigma] /= 50.0

    fig = plt.figure(figsize=(cm2inch(8.0), cm2inch(6.0)))
    ax = fig.add_subplot(1,
                         1,
                         1,
                         xlabel='epoch',
                         ylabel='fraction certified',
                         ylim=(-0.05, 1.0))

    # AMLS curves, one per noise level (plotted largest sigma first).
    ax.plot(xs,
            1. - our_results[0.3],
            color='navy',
            marker='.',
            linewidth=1.0,
            label=r'AMLS $\epsilon=0.3$')
    ax.plot(xs,
            1. - our_results[0.2],
            color='seagreen',
            marker='.',
            linewidth=1.0,
            label=r'AMLS $\epsilon=0.2$')
    ax.plot(xs,
            1. - our_results[0.1],
            color='firebrick',
            marker='.',
            linewidth=1.0,
            label=r'AMLS $\epsilon=0.1$')
    # Wong & Kolter baseline (1 - robust error).
    ax.plot(epochs,
            1. - robust_errs,
            color='grey',
            marker='.',
            linestyle='--',
            linewidth=1.0,
            label=r'W\&K $\epsilon=0.1$')

    ax.legend(bbox_to_anchor=(0.9, 0.15),
              loc="lower right",
              bbox_transform=fig.transFigure)
    # Thin tick marks and spines for the small (8cm x 6cm) figure.
    ax.xaxis.set_tick_params(width=0.5)
    ax.yaxis.set_tick_params(width=0.5)
    ax.spines['left'].set_linewidth(0.5)
    ax.spines['bottom'].set_linewidth(0.5)
    sns.despine()

    fig.savefig('./results/robust/mnist_certificates.svg',
                bbox_inches='tight')
    plt.close(fig)
# Example #5 (label: 0) — fragment; enclosing scope header is missing.
    # NOTE(review): this snippet starts mid-scope (presumably inside an
    # `if __name__ == "__main__":` block — the header is not visible here).
    # Derive per-run log and model directories from the process title.
    saved_filepath = ('./saved_log/' + args.proctitle)
    model_filepath = os.path.dirname('./models/' + args.proctitle)
    if not os.path.exists(saved_filepath):
        os.makedirs(saved_filepath)
    if not os.path.exists(model_filepath):
        os.makedirs(model_filepath)
    model_path = ('./models/' + args.proctitle)

    # Result files for train/test metrics (opened without a context
    # manager; presumably closed elsewhere — verify).
    train_res = open(saved_filepath + '/train_res.txt', "w")
    test_res = open(saved_filepath + '/test_res.txt', "w")

    # load the data
    # Train and test loaders are built in separate calls so they can use
    # different batch sizes.
    if args.prefix == "mnist":
        train_loader, _ = pblm.mnist_loaders(
            args.batch_size,
            '../data/mnist',
        )
        _, test_loader = pblm.mnist_loaders(args.batch_size_test,
                                            '../data/mnist')

    elif args.prefix == "custom_mnist":
        # Custom (GAN-generated) MNIST variants stored as .npz archives,
        # keyed by args.gan_type.
        train_loader, _ = pblm.custom_mnist_loaders(
            batch_size=args.batch_size,
            train_path='../data/mnist/' + args.gan_type + '/train.npz',
            test_path='../data/mnist/' + args.gan_type + '/test.npz')
        _, test_loader = pblm.custom_mnist_loaders(
            batch_size=args.batch_size_test,
            train_path='../data/mnist/' + args.gan_type + '/train.npz',
            test_path='../data/mnist/' + args.gan_type + '/test.npz')

    # NOTE(review): the `else:` body is truncated — this snippet ends here.
    else: