# Example no. 1
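# NOTE: this excerpt begins inside a loop over layer names; the enclosing
# `for layer_name in ...`, the `layer_values_*` dictionaries, the helper
# `triplet_loss_dict`, and the `tensorflow`/`json`/`dataloader` imports are
# assumed to live earlier in the original file.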
        neg = layer_values_Ap[layer_name]

        triplet_loss_data_B_Bp_A[layer_name] = triplet_loss_dict(
            anchor, pos, neg, triplet_loss_type, regularize_lambda,
            triplet_loss_margins[layer_name]['B_Bp_A'])

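# Tensor bundles handed to the training and attack helpers; x_Aadv, a_Axent,
# a_Aaccuracy and n_Apredict are defined earlier in the original file.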
model_var_B_Bp_A = x_Anat, y_Ainput, x_Bnat, layer_values_A[
    'x4'], layer_values_A['pre_softmax'], layer_values_B['x4'], layer_values_B[
        'pre_softmax'], is_training
model_var_attack = x_Aadv, a_Axent, y_Ainput, is_training, a_Aaccuracy
# model_var = n_Anum_correct, n_Axent, a_Anum_correct, a_Axent, x_Anat, x_Aadv, y_Ainput, is_training

model_var = n_Anum_correct, n_Axent, x_Anat, y_Ainput, is_training, n_Apredict

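# Checkpointing: keep only the three most recent checkpoints, and collect the
# trainable variables of the `main_encoder` scope for (optional) fine-tuning.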
saver = tf.train.Saver(max_to_keep=3)
var_main_encoder = trainable_in('main_encoder')

if is_finetune:
    print('finetuning')
    # The same set of encoder variables is restored regardless of dataset_type;
    # for ImageNet an alternative is to exclude the 'logits' scope, e.g.:
    # restore_var_list = slim.get_variables_to_restore(exclude=tf.get_collection(tf.GraphKeys.MODEL_VARIABLES, scope='logits'))
    var_main_encoder_var = tf.get_collection(tf.GraphKeys.MODEL_VARIABLES,
                                             scope='main_encoder')
    restore_var_list = remove_duplicate_node_from_list(
        var_main_encoder, var_main_encoder_var)


def one_test(dataset_type,
             model_load_direction,
             attack_steps,
             attack_step_size,
             loss_func,
             rand_start=1,
             use_rand=True,
             model_name=None,
             momentum=0.0,
             save_filename=None,
             black_attack=False,
             vis_lossland_scape=False,
             model_type='ConvNet'):
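    """Build the evaluation graph for `dataset_type` and run the requested attack.

    Parameter roles, as inferred from their use below: `model_load_direction`
    is the checkpoint directory; `attack_steps`, `attack_step_size`, `loss_func`,
    `rand_start`, `use_rand` and `momentum` configure the attack;
    `black_attack` selects the black-box evaluation path and `vis_lossland_scape`
    the loss-landscape visualisation path.
    """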

    # dataset_type = 'cifar10'
    # model_load_direction = 'models/model_0_renamed'
    # model_load_direction = '/mnt/md0/FSRobust/cifar_models/triplet/switch_adv_only/cifar10,A_Ap_B,A1_Ap_B_1,'

    # model_load_direction ='/mnt/md0/FSRobust/cifar_models/triplet/April15/switch_adv_only_hardneg_mar0.03_lam10/cifar10,A_Ap_B,A1_Ap_B_1,'
    # model_load_direction = '/mnt/md0/FSRobust/cifar_models/triplet/backup/ml2_only/cifar10,A_Ap_B,A1_Ap_B_1,_0'

    # dataset_type = 'mnist'
    # model_load_direction = 'mnist_models/reproduce-secret'
    # model_load_direction = '/mnt/md0/FSRobust/mnist_models/April2/new_schedule_multilayer/mnist,A_Ap_B,A1_Ap_B_1,' #93.21%

    # model_load_direction = '/mnt/md0/FSRobust/mnist_models/April2/ml2_only_train_both/mnist,A_Ap_B,A1_Ap_B_1,'  #ALP l2

    precision = tf.float32

    model = None
    input_shape = None

    if dataset_type == 'cifar10':
        input_shape = [None, 32, 32, 3]
        with open('config_cifar.json') as config_file:
            config = json.load(config_file)
        data_path = config['data_path']

        from learning.model_vanilla import ModelVani
        if model_type == 'Res20':
            from learning.model_cifar10_resnet import CifarResNet
            model = CifarResNet(precision=precision,
                                ratio=config['mask_ratio'])
        elif model_type == 'ConvNet':
            from learning.convnet_cifar import CifarConvNet
            model = CifarConvNet(precision=precision,
                                 ratio=config['mask_ratio'])
        elif model_type == 'Res50':
            from learning.model_cifar10_resnet import CifarResNet
            model = CifarResNet(precision=precision,
                                ratio=config['mask_ratio'],
                                mode='50')
        elif model_type == 'Res101':
            from learning.model_cifar10_res101 import CifarResNetUpdate
            model = CifarResNetUpdate(precision=precision,
                                      ratio=config['mask_ratio'],
                                      mode='101')
        else:
            model = ModelVani(precision=precision)

        raw_dataset = dataloader.cifar10_input.CIFAR10Data(data_path)

    elif dataset_type == 'mnist':
        with open('config_mnist.json') as config_file:
            config = json.load(config_file)
        input_shape = [None, 28, 28]
        if config['model_type'] == 'MLP':
            from learning.model_mnist_mlp import ModelMNISTMLP
            model = ModelMNISTMLP(precision=precision,
                                  ratio=config['mask_ratio'])
        else:
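            # ModelMNIST is assumed to be imported near the top of the original file.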
            model = ModelMNIST(precision=precision)

        data_path = config['data_path']
        raw_dataset = dataloader.mnist_input.MNISTData(data_path,
                                                       dataset=dataset_type)

    elif dataset_type == 'imagenet':
        with open('config_imagenet.json') as config_file:
            config = json.load(config_file)

        # config["epsilon"] = config["epsilon"] / 255.0
        # config["step_size"] = config["step_size"] / 255.0

        input_shape = [None, 64, 64, 3]
        raw_dataset = dataloader.mnist_input.MNISTData(
            config['tiny_imagenet_data_dir_np'], dataset="imagenet")
        # For black-box attacks keep the caller-supplied model_type; otherwise
        # use the architecture recorded in the config.
        if not black_attack:
            model_type = config['model_type']

        if model_type == 'Res20':
            from learning.model_imagenet_res20 import ModelImagenet
            model = ModelImagenet(batch_size=None,
                                  precision=precision,
                                  label_smoothing=0.1)
        elif model_type == 'Res50':
            from learning.model_imagenet_res50 import ModelImagenet
            model = ModelImagenet(batch_size=None,
                                  precision=precision,
                                  label_smoothing=0.1)

    elif dataset_type == 'imagenet_01':
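        # The 'imagenet_01' variant appears to expect inputs in [0, 1], hence
        # the epsilon rescaling by 1/255 below.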
        with open('config_imagenet.json') as config_file:
            config = json.load(config_file)
        input_shape = [None, 64, 64, 3]
        raw_dataset = dataloader.mnist_input.MNISTData(
            config['tiny_imagenet_data_dir_np'], dataset="imagenet")

        if model_name.startswith('res101'):
            from learning.model_imagenet_res101 import ModelImagenet
            model = ModelImagenet(0)
            config["epsilon"] = config["epsilon"] / 255.0
        elif model_name.startswith('res50'):
            from learning.model_imagenet_res50 import ModelImagenet
            model = ModelImagenet(0)
            config["epsilon"] = config["epsilon"] / 255.0

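    # Graph inputs and a forward pass through the encoder, which returns the
    # per-layer activations together with cross-entropy and accuracy tensors.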
    x_Anat = tf.placeholder(precision, shape=input_shape)
    y_Ainput = tf.placeholder(tf.int64, shape=None)
    is_training = tf.placeholder(tf.bool, shape=None)

    layer_values_A, n_Axent, n_Amean_xent, _, n_Anum_correct, n_Aaccuracy, _, n_mask = model._encoder(
        x_Anat, y_Ainput, is_training)
    xent_loss = model.y_xent

    model_var_attack = x_Anat, n_Axent, y_Ainput, is_training, n_Aaccuracy
    var_main_encoder = trainable_in('main_encoder')

    print("model dir", model_load_direction)

    if vis_lossland_scape:
        from vis_loss_landscape import visualize_landscape
        visualize_landscape(model_load_direction, model_var_attack,
                            var_main_encoder, config, raw_dataset.eval_data,
                            config["epsilon"], 300, xent_loss, dataset_type)

    elif black_attack:
        test_black_attack(model_load_direction,
                          model_var_attack,
                          n_Anum_correct,
                          var_main_encoder,
                          attack_steps,
                          attack_step_size,
                          config,
                          dataset_type,
                          raw_dataset,
                          n_mask,
                          layer_values_A['x4'],
                          layer_values_A['pre_softmax'],
                          loss_func,
                          rand_start,
                          use_rand=use_rand,
                          momentum=momentum,
                          load_filename=save_filename)
    else:
        test_model(model_load_direction,
                   model_var_attack,
                   n_Anum_correct,
                   var_main_encoder,
                   attack_steps,
                   attack_step_size,
                   config,
                   dataset_type,
                   raw_dataset,
                   n_mask,
                   layer_values_A['x4'],
                   layer_values_A['pre_softmax'],
                   loss_func,
                   rand_start,
                   use_rand=use_rand,
                   momentum=momentum,
                   save_filename=save_filename)

    print("model dir", model_load_direction, 'rand start', rand_start,
          'loss_func', loss_func, 'step num', attack_steps, 'step size',
          attack_step_size, save_filename)
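

if __name__ == '__main__':
    # Example invocation, as a minimal sketch: the attack settings and loss name
    # below are illustrative placeholders, not values taken from this file.
    one_test(dataset_type='cifar10',
             model_load_direction='models/model_0_renamed',
             attack_steps=20,
             attack_step_size=2.0 / 255,
             loss_func='xent',
             model_type='ConvNet')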