Ejemplo n.º 1
0
def select_model_mnist(m):
    """Build and return a CUDA-resident MNIST model chosen by keyword *m*.

    Recognized keywords: 'large', 'wide', 'deep', '500', 'tiny'; anything
    else falls back to the default `pblm.mnist_model()`.

    NOTE: the 'large'/'wide'/'deep' branches also construct a test loader,
    but it is a local and is discarded — only the model is returned.
    """
    if m == 'large':
        print('Pick large size model')
        net = pblm.mnist_model_large().cuda()
        _, test_loader = pblm.mnist_loaders(8)
        return net
    if m == 'wide':
        print("Using wide model with model_factor={}".format(
            args.model_factor))
        # Batch size shrinks as the model widens to fit in memory.
        _, test_loader = pblm.mnist_loaders(64 // args.model_factor)
        return pblm.mnist_model_wide(args.model_factor).cuda()
    if m == 'deep':
        print("Using deep model with model_factor={}".format(
            args.model_factor))
        # Depth scales memory use exponentially, hence 2**factor.
        _, test_loader = pblm.mnist_loaders(64 // (2**args.model_factor))
        return pblm.mnist_model_deep(args.model_factor).cuda()
    if m == '500':
        return pblm.mnist_500().cuda()
    if m == 'tiny':
        return pblm.mnist_tiny().cuda()
    return pblm.mnist_model().cuda()
Ejemplo n.º 2
0
    #                 'epoch': t
    #             }, now_prefix + "_checkpoint.pth")
    #
    #         train_log.close()
    #         test_log.close()

    # # ----- Train M2 & M2M -----
    # NOTE(review): fragment of a larger training script; the enclosing scope
    # (and the earlier checkpointing code commented out above) is not visible.
    m2_time = time.time()  # wall-clock start of the M2/M2M training phase

    print('----- Train M2 & M2M -----')
    setproctitle.setproctitle(args.M2_prefix)  # label the process for ps/top
    # Two separately-initialized models of the same architecture — presumably
    # a mutual-learning pair (M2 and its "mutual" partner M2M); confirm
    # against the training loop that follows this fragment.
    model_M2 = select_model(args.dataset, args.model)
    model_M2M = select_model(args.dataset, args.model)

    # Distinct loaders so train and test batch sizes can differ.
    if args.dataset == 'MNIST':
        train_loader, _ = pblm.mnist_loaders(args.M2_batch_size)
        _, test_loader = pblm.mnist_loaders(args.M2_test_batch_size)
    elif args.dataset == 'CIFAR10':
        train_loader, _ = pblm.cifar_loaders(args.M2_batch_size)
        _, test_loader = pblm.cifar_loaders(args.M2_test_batch_size)

    # Linear ramp of the robust-training epsilon from its starting value up
    # to the target over `schedule_length` steps.
    eps_schedule = np.linspace(args.starting_epsilon, args.epsilon,
                               args.schedule_length)

    # Optimizer is built for model_M2 only here; presumably model_M2M gets
    # its own optimizer later (not visible in this fragment) — TODO confirm.
    if args.opt == 'adam':
        opt = optim.Adam(model_M2.parameters(), lr=args.lr)
    elif args.opt == 'sgd':
        opt = optim.SGD(model_M2.parameters(),
                        lr=args.lr,
                        momentum=args.momentum,
                        weight_decay=args.weight_decay)
from examples.trainer import *
import examples.problems as pblm
import setproctitle

if __name__ == '__main__':
    args = pblm.argparser(prefix='mnist',
                          method='task_spec_robust',
                          opt='adam',
                          starting_epsilon=0.05,
                          epsilon=0.2)
    kwargs = pblm.args2kwargs(args)
    setproctitle.setproctitle('python')

    # train-validation split
    _, _, test_loader = pblm.mnist_loaders(batch_size=args.batch_size,
                                           path='./data',
                                           ratio=args.ratio,
                                           seed=args.seed)

    model = pblm.mnist_model().cuda()
    num_classes = model[-1].out_features

    # specify the task and the corresponding class semantic
    folder_path = os.path.dirname(args.proctitle)
    if args.type == 'binary':
        input_mat = np.zeros((num_classes, num_classes), dtype=np.int)
        if args.category == 'single_seed':
            seed_clas = 9
            input_mat[seed_clas, :] = np.ones(num_classes)
            input_mat[seed_clas, seed_clas] = 0
            folder_path += '/class_' + str(seed_clas)
        else:
Ejemplo n.º 4
0
                nn.ReLU(),
                nn.Linear(1024, 10)
            )
            return net
    # CIFAR10 variants map the `model` keyword to a problems-module factory.
    elif dataset == 'CIFAR10':
        if model == 'small':
            return pblm.cifar_model()
        elif model == 'large':
            return pblm.cifar_model_large()
        elif model == 'resnet':
            # (1, 1) — presumably depth/width factors; confirm against
            # pblm.cifar_model_resnet's signature.
            return pblm.cifar_model_resnet(1, 1)


if __name__ == '__main__':
    args = parse_args()

    model = select_model(args.dataset, args.model)

    # Checkpoints are named "<dataset>_<epsilon>_<model>_*.pth"; evaluate
    # every file in final_models/ that matches the requested configuration.
    # Hoisted out of the loop: the prefix is loop-invariant.
    prefix = '%s_%s_%s_' % (args.dataset, args.epsilon, args.model)
    for fname in os.listdir('final_models'):
        if fname.endswith('.pth') and fname.startswith(prefix):
            print('Find weights in path: final_models/%s' % fname)
            model.load_state_dict(torch.load('final_models/' + fname)['state_dict'])
            model = model.cuda()
            # Batch size 1: robust evaluation is run one example at a time.
            if args.dataset == 'MNIST':
                _, test_loader = pblm.mnist_loaders(1)
            else:
                _, test_loader = pblm.cifar_loaders(1)
            # FIX: the original opened the log without ever closing it,
            # leaking one handle per matching checkpoint; the context
            # manager guarantees the append is flushed and the file closed.
            with open('linear_bound_tmp.txt', 'a') as res_log:
                evaluate_robust(test_loader, model, float(args.epsilon), 100,
                                res_log, 20)

Ejemplo n.º 5
0
            _psbl_path = args.M1_prefix + '_best.pth'
        elif m_type == 'M1P':
            _psbl_path = args.M1P_prefix + '_best.pth'
        elif m_type == 'M2P':
            _psbl_path = args.M2P_prefix + '_best.pth'
        elif m_type == 'M2PM':
            _psbl_path = args.M2PM_prefix + '_best.pth'
        elif m_type == 'M2':
            _psbl_path = args.M2_prefix + '_best.pth'
        elif m_type == 'M2M':
            # M2M shares the M2 prefix but loads the mutual-model checkpoint.
            _psbl_path = args.M2_prefix + '_mutual_model_best.pth'
        # Only variants whose checkpoint actually exists are loaded into the
        # `model` dict; missing ones are silently skipped.
        if os.path.exists(_psbl_path):
            model[m_type] = select_model(args.model)
            model[m_type].load_state_dict(torch.load(_psbl_path)['state_dict'])

    # Separate loaders because each evaluation mode uses its own batch size.
    _, test_loader = pblm.mnist_loaders(params['batch_size'])
    _, robust_test_loader = pblm.mnist_loaders(params['robust_batch_size'])
    _, robust_transfer_test_loader = pblm.mnist_loaders(
        params['robust_transfer_batch_size'])
    _, actual_test_loader = pblm.mnist_loaders(
        params['actual_attack_batch_size'])

    # NOTE(review): this handle is never closed in the visible fragment —
    # confirm it is closed later (or wrap it in a `with` block).
    res_log = open(args.prefix + '_test.txt', "w")
    setproctitle.setproctitle('test_clean_accuracy')
    print('Clean accuracy')
    print('Clean accuracy', file=res_log)
    for k in model:
        # epsilon=None => plain (non-robust) evaluation; results are written
        # to both stdout and the log file.
        losses, errors = evaluate(test_loader, model[k], None, res_log, 10)
        print('\n')
        print(k, 'loss', losses, 'error', errors)
        print(k, 'loss', losses, 'error', errors, file=res_log)
Ejemplo n.º 6
0
def get_mnist_test_loader():
    """Build the MNIST splits from module-level config and return only the
    test loader (the train and validation loaders are discarded)."""
    _, _, test = pblm.mnist_loaders(batch_size=BATCH_SIZE,
                                    path='../data',
                                    ratio=VAL_RATIO,
                                    seed=SEED)
    return test