# Example #1
# 0
def main(config):
    """Train a model on the configured dataset with the solver chosen by config.mode.

    Args:
        config: Namespace-like settings object. Fields read here: log_dir,
            model_save_dir, extractor_save_dir, dataset, data_dir, batch_size,
            num_workers, mode, attack_method.
    """
    # For fast training: let cuDNN autotune convolution algorithms.
    cudnn.benchmark = True

    # Create output directories. exist_ok=True avoids the check-then-create
    # race of the os.path.exists + os.makedirs pattern.
    os.makedirs(config.log_dir, exist_ok=True)
    os.makedirs(config.model_save_dir, exist_ok=True)
    os.makedirs(config.extractor_save_dir, exist_ok=True)

    # Data loaders for the train and validation splits.
    train_loader = get_loader(config.dataset,
                              config.data_dir,
                              batch_size=config.batch_size,
                              mode='train',
                              shuffle=True,
                              num_workers=config.num_workers)
    val_loader = get_loader(config.dataset,
                            config.data_dir,
                            batch_size=config.batch_size,
                            mode='val',
                            shuffle=False,
                            num_workers=config.num_workers)

    # Select the solver class for the requested mode and run training.
    Solver = solver_selector[config.mode]
    model = get_model(config.dataset)
    solver = Solver(train_loader, val_loader, config)
    solver.train(model, config.attack_method)
# Example #2
# 0
def evaluate(config):
    """Evaluate a saved model on the validation and test splits.

    Runs clean, FGSM and 50-iteration PGD evaluation on both splits, then
    CW, loss sensitivity and a PGD iteration-count sweep on the test split
    only, printing each accuracy as it is computed.

    Returns:
        (val_acc, test_acc): dicts mapping attack name to accuracy.
    """
    # NOTE(review): batch_size=np.inf presumably asks get_loader for the whole
    # split in a single batch — confirm against get_loader's handling.
    val_loader = get_loader(config.dataset,
                            config.data_dir,
                            batch_size=np.inf,
                            mode='val',
                            shuffle=False,
                            num_workers=config.num_workers)
    test_loader = get_loader(config.dataset,
                             config.data_dir,
                             batch_size=np.inf,
                             mode='test',
                             shuffle=False,
                             num_workers=config.num_workers)

    # Load the trained model and wrap it in the evaluator.
    model = get_model(config.dataset, model_save_dir=config.model_save_dir)
    evaluator = Evaluator(model, config.dataset)

    def _run_common(data_loader, split_name):
        # Clean / FGSM / PGD(50) evaluation shared by both splits.
        acc = {}
        print('{} set:'.format(split_name))
        acc['cln'] = evaluator.evaluate(data_loader)
        print('Clean:', acc['cln'])
        acc['fgsm'] = evaluator.evaluate_fgsm(data_loader)
        print('FGSM:', acc['fgsm'])
        acc['pgd'] = evaluator.evaluate_pgd(data_loader, num_iter=50)
        print('PGD:', acc['pgd'])
        return acc

    val_acc = _run_common(val_loader, 'Val')
    test_acc = _run_common(test_loader, 'Test')

    # Attacks run on the test split only.
    test_acc['cw'] = evaluator.evaluate_cw(test_loader)
    print('CW:', test_acc['cw'])

    test_acc['loss_sensitivity'] = evaluator.evaluate_robust(test_loader)
    print('loss_sensitivity:', test_acc['loss_sensitivity'])

    # PGD strength sweep: 5 iterations, then 10..150 in steps of 10.
    for num_iter in [5] + list(range(10, 151, 10)):
        acc_pgd = evaluator.evaluate_pgd(test_loader, num_iter=num_iter)
        print('PGD_{}: {}'.format(num_iter, acc_pgd))

    return val_acc, test_acc
# Example #3
# 0
def main(config):
    """Train with the ATMR solver: extract features, build a graph, then train.

    Args:
        config: Namespace-like settings object. Fields read here: log_dir,
            model_save_dir, extractor_save_dir, dataset, data_dir, batch_size,
            num_workers, anchor_size, attack_method.
    """
    # For fast training: let cuDNN autotune convolution algorithms.
    cudnn.benchmark = True

    # Create output directories. exist_ok=True avoids the check-then-create
    # race of the os.path.exists + os.makedirs pattern.
    os.makedirs(config.log_dir, exist_ok=True)
    os.makedirs(config.model_save_dir, exist_ok=True)
    os.makedirs(config.extractor_save_dir, exist_ok=True)

    # Feature extractor runs once up front.
    extractor = FeatureExtractor(config)
    extractor.run()

    # Graph constructor (consumed by the train loader and the solver).
    graph = GraphConstructor(config)

    # Data loaders; the train loader additionally samples anchors via the graph.
    train_loader = get_loader(config.dataset,
                              config.data_dir,
                              batch_size=config.batch_size,
                              mode='train',
                              shuffle=True,
                              num_workers=config.num_workers,
                              graph=graph,
                              anchor_size=config.anchor_size)
    val_loader = get_loader(config.dataset,
                            config.data_dir,
                            batch_size=config.batch_size,
                            mode='val',
                            shuffle=False,
                            num_workers=config.num_workers)

    # Train with the ATMR solver (fixed for this entry point).
    Solver = Solver_ATMR
    model = get_model(config.dataset)
    solver = Solver(train_loader, val_loader, graph, config)
    solver.train(model, config.attack_method)
# Start from a clean output directory.
if os.path.exists(out_dir):
    shutil.rmtree(out_dir)

os.makedirs(out_dir + '/examples')

# Checkpoint for the requested network architecture.
ckpt = net_to_ckpt[args.net]

# Parse the ImageNet val metadata: each non-empty line is
# "ILSVRC2012_val_NNNNNNNN.JPEG <label>"; keep [image_number, label] pairs.
pairs = []
for line in open('./ilsvrc_metadata/val.txt').read().split('\n'):
    if not line:
        continue
    fields = line.split(' ')
    # Image number is the third '_'-separated token, minus the 5-char '.JPEG'.
    image_number = int(fields[0].split('_')[2][:-5])
    pairs.append([image_number, int(fields[1])])
img_nums = np.array(pairs)


# Input placeholders: a batch of 224x224 RGB images and their integer labels.
img = tf.placeholder(shape=[None, 224, 224, 3], dtype=tf.float32)
lab = tf.placeholder(shape=[None], dtype=tf.int32)

sess = tf.InteractiveSession()
logits, xent = loader.get_model(sess, img, lab, ckpt, 224)

# Gradient of the cross-entropy loss with respect to the input image.
g, = tf.gradients(xent, [img])
def load_img(k):
    """Return (image, label) for row k of the parsed val metadata table."""
    image_number = img_nums[k, 0]
    label = img_nums[k, 1]
    image, _ = loader.load_img(image_number)
    return image, label

def get_top5(x):
    if len(x.shape) == 3:
        x = np.expand_dims(x, 0)

    np_logits = logits.eval(feed_dict={
        img:x,
    })