import waitGPU
waitGPU.wait(utilization=20, available_memory=10000, interval=10)

import problems as pblm
from trainer import *
import setproctitle

if __name__ == "__main__":
    # Build the experiment configuration: MNIST task with a
    # task-specification-robust training method, Adam optimizer, and an
    # epsilon schedule ramping from 0.05 up to 0.2.
    args = pblm.argparser(prefix='mnist',
                          method='task_spec_robust',
                          opt='adam',
                          starting_epsilon=0.05,
                          epsilon=0.2,
                          thres=0.035)
    # Convert the parsed namespace into keyword arguments for downstream calls.
    kwargs = pblm.args2kwargs(args)
    # Rename the OS process so it appears as 'python' in ps/top.
    setproctitle.setproctitle('python')
    print("threshold for classification error: {:.1%}".format(args.thres))

    # Echo the main hyperparameters of this run.
    # NOTE(review): "Sechduled" is a typo for "Scheduled" in the output
    # string below; left unchanged here because it is runtime output, not
    # a comment.
    print('Matrix type: {0}\t\t'
          'Category: {1}\t\t'
          'Epoch number: {2}\t\t'
          'Targeted epsilon: {3}\t\t'
          'Starting epsilon: {4}\t\t'
          'Sechduled length: {5}'.format(args.type, args.category, args.epochs,
                                         args.epsilon, args.starting_epsilon,
                                         args.schedule_length),
          end='\n')
    # Optional L1-projection settings (only printed when projection is enabled).
    # NOTE(review): this print call is truncated in the visible source — the
    # closing arguments/parenthesis lie beyond this fragment.
    if args.l1_proj is not None:
        print('Projection vectors: {0}\t\t'
              'Train estimation: {1}\t\t'
              'Test estimation: {2}'.format(args.l1_proj, args.l1_train,
# ----- Example no. 2 (scraped-snippet separator; original marker: "Ejemplo n.º 2") -----
if __name__ == "__main__":
    # Parse run configuration; defaults: Adam optimizer, verbose logging
    # every 200 steps, epsilon schedule starting at 0.01.
    args = pblm.argparser(opt='adam', verbose=200, starting_epsilon=0.01)
    print("saving file to {}".format(args.prefix))
    # Use the run prefix as the visible OS process title.
    setproctitle.setproctitle(args.prefix)
    # NOTE(review): these log files are opened but never closed in the
    # visible fragment; a `with` block or explicit close() presumably
    # happens (or should happen) later.
    train_log = open(args.prefix + "_train.log", "w")
    test_log = open(args.prefix + "_test.log", "w")

    # Two loader pairs so train and test can use different batch sizes.
    train_loader, _ = pblm.mnist_loaders(args.batch_size)
    _, test_loader = pblm.mnist_loaders(args.test_batch_size)

    # Seed both CPU and CUDA RNGs for reproducibility.
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    # Grab a single batch; X is only needed to size/initialize kwargs below.
    for X, y in train_loader:
        break
    kwargs = pblm.args2kwargs(args, X=Variable(X.cuda()))
    # Track the best (lowest) error seen so far; initialized to 100%.
    best_err = 1

    sampler_indices = []
    # The cascade is a list of models; start with a single freshly built one.
    model = [select_model(args.model)]

    # Train a cascade of models; `_` is the cascade stage index.
    # NOTE(review): this loop is truncated in the visible source — the
    # sampler_robust_cascade call's closing arguments lie beyond this fragment.
    for _ in range(0, args.cascade):
        if _ > 0:
            # reduce dataset to just uncertified examples
            print("Reducing dataset...")
            train_loader = sampler_robust_cascade(train_loader,
                                                  model,
                                                  args.epsilon,
                                                  args.test_batch_size,
                                                  norm_type=args.norm_test,
                                                  bounded_input=True,
# ----- Example no. 3 (scraped-snippet separator; original marker: "Ejemplo n.º 3") -----
    # NOTE(review): the enclosing `if __name__ == "__main__":` header of this
    # fragment is missing from the visible source (lost in scraping).
    # Parse run configuration; defaults: Adam optimizer, verbose logging
    # every 200 steps, epsilon schedule starting at 0.01.
    args = pblm.argparser(opt='adam', verbose=200, starting_epsilon=0.01)
    print("saving file to {}".format(args.prefix))
    # Use the run prefix as the visible OS process title.
    setproctitle.setproctitle(args.prefix)
    # NOTE(review): these log files are opened but never closed in the
    # visible fragment.
    train_log = open(args.prefix + "_train.log", "w")
    test_log = open(args.prefix + "_test.log", "w")

    # Two loader pairs so train and test can use different batch sizes.
    train_loader, _ = pblm.mnist_loaders(args.batch_size)
    _, test_loader = pblm.mnist_loaders(args.test_batch_size)

    # NOTE(review): BUG — torch.manual_seed is called twice; by comparison
    # with the analogous fragment above, the second call was almost
    # certainly meant to be torch.cuda.manual_seed(args.seed). As written,
    # the CUDA RNG is left unseeded. Left unchanged because this truncated
    # fragment cannot be safely rewritten; confirm and fix at the origin.
    torch.manual_seed(args.seed)
    torch.manual_seed(args.seed)

    # Grab a single batch; X is only needed to size/initialize kwargs below.
    for X, y in train_loader:
        break
    # NOTE(review): `device` is not defined anywhere in the visible
    # fragment — presumably assigned earlier in the original file; verify.
    kwargs = pblm.args2kwargs(args, X=Variable(X.to(device)))
    # Track the best (lowest) error seen so far; initialized to 100%.
    best_err = 1

    sampler_indices = []
    # The cascade is a list of models; start with a single freshly built one.
    model = [select_model(args.model)]

    # Train a cascade of models; `_` is the cascade stage index.
    # NOTE(review): this loop is truncated in the visible source — the
    # sampler_robust_cascade call's closing arguments lie beyond this fragment.
    for _ in range(0, args.cascade):
        if _ > 0:
            # reduce dataset to just uncertified examples
            print("Reducing dataset...")
            train_loader = sampler_robust_cascade(train_loader,
                                                  model,
                                                  args.epsilon,
                                                  args.test_batch_size,
                                                  norm_type=args.norm_test,
                                                  bounded_input=True,