Example #1
def SAFA(x_sat, x_grd, keep_prob, dimension, trainable):

    vgg_grd = VGG16()
    grd_local = vgg_grd.VGG16_conv(x_grd, keep_prob, trainable, 'VGG_grd')
    grd_local = tf.nn.max_pool(grd_local, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
    batch, g_height, g_width, channel = grd_local.get_shape().as_list()

    grd_w = spatial_aware(grd_local, dimension, trainable, name='spatial_grd')
    grd_local = tf.reshape(grd_local, [-1, g_height * g_width, channel])

    grd_global = tf.einsum('bic, bid -> bdc', grd_local, grd_w)
    grd_global = tf.reshape(grd_global, [-1, dimension*channel])


    vgg_sat = VGG16()
    sat_local = vgg_sat.VGG16_conv(x_sat, keep_prob, trainable, 'VGG_sat')
    sat_local = tf.nn.max_pool(sat_local, [1, 2, 2, 1], [1, 2, 2, 1], padding='SAME')
    batch, s_height, s_width, channel = sat_local.get_shape().as_list()

    sat_w = spatial_aware(sat_local, dimension, trainable, name='spatial_sat')
    sat_local = tf.reshape(sat_local, [-1, s_height * s_width, channel])

    sat_global = tf.einsum('bic, bid -> bdc', sat_local, sat_w)
    sat_global = tf.reshape(sat_global, [-1, dimension*channel])

    return tf.nn.l2_normalize(sat_global, dim=1), tf.nn.l2_normalize(grd_global, dim=1)
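The einsum above is the SAFA aggregation step: each of the `dimension` spatial masks produced by spatial_aware takes a weighted sum over the H*W positions. A minimal NumPy shape check of that contraction, with made-up sizes (batch 2, a 4x20 feature map, 512 channels, dimension 8):

import numpy as np

feat = np.random.rand(2, 4 * 20, 512)    # grd_local after reshape: [batch, H*W, C]
masks = np.random.rand(2, 4 * 20, 8)     # grd_w from spatial_aware: [batch, H*W, dimension]

agg = np.einsum('bic, bid -> bdc', feat, masks)   # weighted sum over the spatial index i
print(agg.shape)                 # (2, 8, 512)
print(agg.reshape(2, -1).shape)  # (2, 4096) = dimension * channel, as in grd_global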
Example #2
def two_stream_baseline(x_sat, x_grd, keep_prob, trainable):
    with tf.device('/gpu:1'):
        vgg_grd = VGG16()
        grd_local = vgg_grd.eight_layer_conv_multiscale(
            x_grd, keep_prob, trainable, 'VGG_grd')

        vgg_sat = VGG16()
        sat_local = vgg_sat.eight_layer_conv_multiscale(
            x_sat, keep_prob, trainable, 'VGG_sat')

    with tf.device('/gpu:0'):
        fc = Siamese_FC()
        sat_global, grd_global = fc.my_siamese_fc_multiscale(
            sat_local, grd_local, trainable, 'dim_reduction')
    return sat_global, grd_global
Example #3
def attack_all():
    if args.model == 'resnet':
        model = ResNet50(enable_lat=args.enable_lat,
                         epsilon=args.lat_epsilon,
                         pro_num=args.lat_pronum,
                         batch_size=args.model_batchsize,
                         num_classes=10,
                         if_dropout=args.dropout)
    elif args.model == 'vgg':
        model = VGG16(enable_lat=args.enable_lat,
                      epsilon=args.lat_epsilon,
                      pro_num=args.lat_pronum,
                      batch_size=args.model_batchsize,
                      num_classes=10,
                      if_dropout=args.dropout)
    elif args.model == 'resnet18':
        model = ResNet18(enable_lat=args.enable_lat,
                         epsilon=args.lat_epsilon,
                         pro_num=args.lat_pronum,
                         batch_size=args.model_batchsize,
                         num_classes=10,
                         if_dropout=args.dropout)
    model.cuda()
    model.load_state_dict(torch.load(args.modelpath))
    
    for eps in range(4,16+1):
        test_data_cln, test_data_adv, test_label, test_label_adv = attack_one(model,eps)
        if args.generate:
            save_data_label(args.savepath, eps, test_data_cln, test_data_adv, test_label, test_label_adv)
Example #4
def cvm_net_I(x_sat, x_grd, coords_geo, keep_prob, trainable):
    with tf.device('/gpu:1'):
        # local descriptors / local feature extraction; use only the convolution layers (until conv_5) of vgg for this
        vgg_grd = VGG16()
        # output of conv_5 layer; the input dimension and output dimension of conv2 layer (512 x 512)
        grd_local = vgg_grd.VGG16_conv(x_grd, keep_prob, False, 'VGG_grd')
        with tf.variable_scope('netvlad_grd', reuse=tf.AUTO_REUSE):
            # embed netvlad on each cnn branch to get global descriptor

            # constructor!
            # feature size is the depth of the final convolution layer
            # grd local
            netvlad_grd = lp.NetVLAD(feature_size=512,
                                     max_samples=tf.shape(grd_local)[1] *
                                     tf.shape(grd_local)[2],
                                     cluster_size=64,
                                     output_dim=4096,
                                     gating=True,
                                     add_batch_norm=False,
                                     is_training=trainable)
            # forward prop; give input of conv_5 layer
            # output is the batch*(vlad vector) i.e 12 * [(K*D) x 1] (64*512) x 1
            grd_vlad = netvlad_grd.forward(grd_local)

        vgg_sat = VGG16()
        # local satellite descriptors ;
        sat_local = vgg_sat.VGG16_conv(x_sat, keep_prob, False, 'VGG_sat')
        with tf.variable_scope('netvlad_sat', reuse=tf.AUTO_REUSE):
            # shape of sat_local is batch_sz x 14 x 14 x 512 (i guess)
            # so max samples is 14*14 = 196? is this equal to N?
            # is cluster size equal to K or N?
            netvlad_sat = lp.NetVLAD(feature_size=512,
                                     max_samples=tf.shape(sat_local)[1] *
                                     tf.shape(sat_local)[2],
                                     cluster_size=64,
                                     output_dim=4096,
                                     gating=True,
                                     add_batch_norm=False,
                                     is_training=trainable)
            sat_vlad = netvlad_sat.forward(sat_local)

    with tf.device('/gpu:0'):
        fc = Siamese_FC()
        sat_global, grd_global = fc.siamese_fc(sat_vlad, grd_vlad, trainable,
                                               'dim_reduction')
        # grd_global = fc.new_fc_layer(sat_global, coords_geo)
    return sat_global, grd_global
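On the shape questions in the comments above: in the usual NetVLAD convention N is the number of local descriptors per image (the max_samples = H*W of the conv_5 map), K is cluster_size and D is feature_size, so the raw VLAD vector is K*D long before the 4096-d output projection. A quick sketch of that arithmetic (the 14x14 map size is only the guess from the comment, not something this code fixes):

H, W = 14, 14        # guessed spatial size of the conv_5 feature map
D = 512              # feature_size: depth of conv_5
K = 64               # cluster_size (number of VLAD clusters)
N = H * W            # max_samples: local descriptors per image -> 196

raw_vlad_len = K * D        # 32768-d VLAD vector per image
projected_len = 4096        # output_dim: reduced by NetVLAD's final projection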
Example #5
def LPN_AWARE_HEATMAP(x_sat, x_grd, keep_prob, dimension, trainable,
                      multi_loss):
    vgg_grd = VGG16()
    grd_local = vgg_grd.VGG16_conv(x_grd, keep_prob, trainable, 'VGG_grd')
    grd_local = tf.nn.max_pool(grd_local, [1, 2, 2, 1], [1, 2, 2, 1],
                               padding='SAME')  #b*4*20*512
    grd_local_v = grd_local
    grd_local = get_block_feature(grd_local, dimension)  #batch*4*6*512*8
    batch, g_height, g_width, channel, dimension = grd_local.get_shape(
    ).as_list()
    # print(grd_local)
    grd_w = block_spatial_aware(grd_local,
                                dimension,
                                trainable,
                                name='b_spatial_grd')  #b*24*8
    grd_local = tf.reshape(grd_local,
                           [-1, g_height * g_width, channel, dimension])

    grd_global_ = tf.einsum('bicd, bid -> bdc', grd_local, grd_w)  #b*8*512
    grd_global = tf.reshape(grd_global_, [-1, dimension * channel])
    # print('grd_block: ', grd_block.shape)

    vgg_sat = VGG16()
    sat_local = vgg_sat.VGG16_conv(x_sat, keep_prob, trainable, 'VGG_sat')
    sat_local = tf.nn.max_pool(sat_local, [1, 2, 2, 1], [1, 2, 2, 1],
                               padding='SAME')
    sat_local = get_block_feature(sat_local, dimension)  #batch*4*6*512*8
    batch, g_height, g_width, channel, dimension = sat_local.get_shape(
    ).as_list()

    sat_w = block_spatial_aware(sat_local,
                                dimension,
                                trainable,
                                name='b_spatial_sat')  #b*24*8
    sat_local = tf.reshape(sat_local,
                           [-1, g_height * g_width, channel, dimension])

    sat_global_ = tf.einsum('bicd, bid -> bdc', sat_local, sat_w)  #b*8*512
    sat_global = tf.reshape(sat_global_, [-1, dimension * channel])
    # print('sat_block: ', sat_block.shape)
    if multi_loss:
        return grd_local_v
    else:
        return tf.nn.l2_normalize(sat_global,
                                  dim=1), tf.nn.l2_normalize(grd_global, dim=1)
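Same aggregation as in Example #1, but done per column block: the extra d index in the einsum carries the 8 blocks produced by get_block_feature. A NumPy shape check with the sizes from the comments above (batch*4*6*512*8):

import numpy as np

blocks = np.random.rand(2, 4 * 6, 512, 8)   # grd_local after reshape: [batch, H*W, C, blocks]
w = np.random.rand(2, 4 * 6, 8)             # grd_w from block_spatial_aware: [batch, H*W, blocks]

agg = np.einsum('bicd, bid -> bdc', blocks, w)   # per-block weighted sum over spatial index i
print(agg.shape)                 # (2, 8, 512)
print(agg.reshape(2, -1).shape)  # (2, 4096) = dimension * channel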
Example #6
def main():
    if args.model == 'vgg':
        model = VGG16(enable_lat=args.enable_lat,
                      epsilon=args.anp_epsilon,
                      pro_num=args.anp_pronum,
                      batch_size=args.model_batchsize,
                      num_classes=10,
                      if_dropout=args.dropout)
    model.cuda()
    model.load_state_dict(torch.load(args.modelpath))
    # if cifar then normalize epsilon from [0,255] to [0,1]

    if args.dataset == 'mnist':
        eps = args.attack_epsilon
    else:
        eps = args.attack_epsilon / 255.0

    #eps = args.attack_epsilon

    attack = Attack(dataroot=args.dataroot,
                    dataset=args.dataset,
                    batch_size=args.attack_batchsize,
                    target_model=model,
                    criterion=nn.CrossEntropyLoss(),
                    epsilon=eps,
                    alpha=args.attack_alpha,
                    iteration=args.attack_iter)
    if args.attack == 'fgsm':
        test_data_cln, test_data_adv, test_label, test_label_adv = attack.fgsm()
    elif args.attack == 'ifgsm':
        test_data_cln, test_data_adv, test_label, test_label_adv = attack.i_fgsm()
    elif args.attack == 'stepll':
        test_data_cln, test_data_adv, test_label, test_label_adv = attack.step_ll()
    elif args.attack == 'pgd':
        test_data_cln, test_data_adv, test_label, test_label_adv = attack.PGD()
    elif args.attack == 'momentum_ifgsm':
        test_data_cln, test_data_adv, test_label, test_label_adv = attack.momentum_ifgsm()
    print(test_data_adv.size(), test_label.size(), type(test_data_adv))
    #test_data, test_label, size = read_data_label('./test_data_cln.p','./test_label.p')
    #test_data_adv, test_label_adv, size = read_data_label('./test_data_cln.p','./test_label.p')
    '''
    test_loader = attack.return_data()
    dataiter = iter(test_loader)
    images,labels = dataiter.next()
    print(images[0])
    '''
    #test_data_cln, test_data_adv, test_label, test_label_adv = attack.i_fgsm()
    #display(test_data_cln, test_data_adv, test_label, test_label_adv)
    if args.generate:
        save_data_label(args.savepath, test_data_cln, test_data_adv,
                        test_label, test_label_adv)
Example #7
def joint_feat_learning(x_sat, x_grd, x_grd_gan, keep_prob, trainable):
    with tf.device('/gpu:1'):
        vgg_grd = VGG16()
        grd_local = vgg_grd.eight_layer_conv_multiscale(
            x_grd, keep_prob, trainable, 'VGG_grd')

        vgg_sat = VGG16()
        sat_local = vgg_sat.eight_layer_conv_multiscale(
            x_sat, keep_prob, trainable, 'VGG_sat')

        vgg_grd_gan = VGG16()
        grd_local_gan = vgg_grd_gan.eight_layer_conv_multiscale(
            x_grd_gan, keep_prob, trainable, 'VGG_grd_seg')

    with tf.device('/gpu:0'):
        fc = Siamese_FC()

        sat_global, grd_global, grd_global_gan = fc.three_stream_joint_feat_learning(
            sat_local, grd_local, grd_local_gan, trainable, 'dim_reduction')
    return sat_global, grd_global, grd_global_gan
Example #8
def VGG_gp(x_sat, x_grd, keep_prob, trainable):

    ############## VGG module #################

    vgg_grd = VGG16()
    grd_vgg = vgg_grd.VGG16_conv(x_grd, keep_prob, trainable, 'VGG_grd')

    vgg_sat = VGG16()
    sat_vgg = vgg_sat.VGG16_conv(x_sat, keep_prob, trainable, 'VGG_sat')

    grd_height, grd_width, grd_channel = grd_vgg.get_shape().as_list()[1:]
    grd_global = tf.nn.max_pool(grd_vgg, [1, grd_height, grd_width, 1], [1, 1, 1, 1], padding='VALID')
    grd_global = tf.reshape(grd_global, [-1, grd_channel])

    sat_height, sat_width, sat_channel = sat_vgg.get_shape().as_list()[1:]
    sat_global = tf.nn.max_pool(sat_vgg, [1, sat_height, sat_width, 1], [1, 1, 1, 1], padding='VALID')
    sat_global = tf.reshape(sat_global, [-1, sat_channel])


    return tf.nn.l2_normalize(sat_global, dim=1), tf.nn.l2_normalize(grd_global, dim=1)
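The VALID max_pool with a kernel spanning the full H x W map is simply global max pooling over the spatial dimensions. An equivalent, arguably clearer, formulation of the two pooling-plus-reshape steps above, assuming the same [batch, H, W, C] tensors from this function:

# equivalent to the max_pool + reshape above
grd_global = tf.reduce_max(grd_vgg, axis=[1, 2])   # [batch, grd_channel]
sat_global = tf.reduce_max(sat_vgg, axis=[1, 2])   # [batch, sat_channel]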
Example #9
def VGG_13_conv_v2_cir(x_sat, x_grd, keep_prob, trainable):
    vgg_grd = VGG16(x_grd, keep_prob, trainable, 'VGG_grd')
    grd_layer13 = vgg_grd.layer13_output
    grd_vgg = vgg_grd.conv2(grd_layer13, 'grd', dimension=16)
    grd_vgg = tf.nn.l2_normalize(grd_vgg, axis=[1, 2, 3])

    vgg_sat = VGG16_cir(x_sat, keep_prob, trainable, 'VGG_sat')
    sat_layer13 = vgg_sat.layer13_output
    sat_vgg = vgg_sat.conv2(sat_layer13, 'sat', dimension=16)

    sat_matrix, distance, pred_orien = corr_crop_distance(sat_vgg, grd_vgg)

    return sat_vgg, grd_vgg, distance, pred_orien
Example #10
def cvm_net_II(x_sat, x_grd, keep_prob, trainable):
    with tf.device('/gpu:1'):
        vgg_grd = VGG16()
        grd_local = vgg_grd.VGG16_conv(x_grd, keep_prob, trainable, 'VGG_grd')

        vgg_sat = VGG16()
        sat_local = vgg_sat.VGG16_conv(x_sat, keep_prob, trainable, 'VGG_sat')

        transnet = TransNet()
        trans_sat, trans_grd = transnet.transform(sat_local, grd_local,
                                                  keep_prob, trainable,
                                                  'transformation')

        with tf.variable_scope('netvlad') as scope:
            netvlad_sat = lp.NetVLAD(feature_size=512,
                                     max_samples=tf.shape(trans_sat)[1] *
                                     tf.shape(trans_sat)[2],
                                     cluster_size=64,
                                     output_dim=4096,
                                     gating=True,
                                     add_batch_norm=False,
                                     is_training=trainable)
            sat_global = netvlad_sat.forward(trans_sat, True)

            scope.reuse_variables()

            netvlad_grd = lp.NetVLAD(feature_size=512,
                                     max_samples=tf.shape(trans_grd)[1] *
                                     tf.shape(trans_grd)[2],
                                     cluster_size=64,
                                     output_dim=4096,
                                     gating=True,
                                     add_batch_norm=False,
                                     is_training=trainable)
            grd_global = netvlad_grd.forward(trans_grd, True)

    return sat_global, grd_global
Example #11
def noise():
    distortion_name = [
        'gaussian_noise', 'shot_noise', 'impulse_noise', 'speckle_noise',
        'gaussian_blur', 'defocus_blur', 'glass_blur', 'snow', 'frost', 'fog',
        'brightness', 'contrast', 'elastic_transform', 'motion_blur',
        'zoom_blur', 'pixelate', 'jpeg_compression', 'spatter', 'saturate'
    ]
    label_name = 'labels.npy'

    #load model
    net = VGG16(enable_lat=False,
                epsilon=0,
                pro_num=1,
                batch_size=args.batchsize_noise,
                if_dropout=True)
    net.cuda()
    net.load_state_dict(torch.load(args.model_path_noise))

    label_root = args.distortion_root + label_name
    y = np.load(label_root)
    clean_loader = get_clean_loader(y)

    #error_rates = []
    for i in range(len(distortion_name)):
        data_root = args.distortion_root + distortion_name[i] + '.npy'
        label_root = args.distortion_root + label_name
        #load data
        x = np.load(data_root)
        #print(x.shape)
        #for j in range(0,10):
        #    plt.imsave('./cifar-c/{}_{}.png'.format(distortion_name[i],j), x[j])
        x = x.transpose((0, 3, 1, 2))
        x = x / 255.0
        y = np.load(label_root)
        #data_loader
        test_loader = get_test_loader(x, y)
        test_model(net, clean_loader, test_loader, distortion_name[i],
                   args.model_name)
Example #12
def CVFT(x_sat, x_grd, keep_prob, trainable):
    def conv_layer(x,
                   kernel_dim,
                   input_dim,
                   output_dim,
                   stride,
                   trainable,
                   activated,
                   name='ot_conv',
                   activation_function=tf.nn.relu):
        with tf.variable_scope(name,
                               reuse=tf.AUTO_REUSE):  # reuse=tf.AUTO_REUSE
            weight = tf.get_variable(
                name='weights',
                shape=[kernel_dim, kernel_dim, input_dim, output_dim],
                trainable=trainable,
                initializer=tf.contrib.layers.xavier_initializer())
            bias = tf.get_variable(
                name='biases',
                shape=[output_dim],
                trainable=trainable,
                initializer=tf.contrib.layers.xavier_initializer())

            out = tf.nn.conv2d(
                x, weight, strides=[1, stride, stride, 1
                                    ], padding='SAME') + bias

            if activated:
                out = activation_function(out)

            return out

    def fc_layer(x, trainable, name='ot_fc'):
        height, width, channel = x.get_shape().as_list()[1:]
        assert channel == 1
        in_dimension = height * width
        out_dimension = in_dimension**2

        input_feature = tf.reshape(x, [-1, height * width])

        with tf.variable_scope(name):
            weight = tf.get_variable(
                name='weights',
                shape=[in_dimension, out_dimension],
                trainable=trainable,
                initializer=tf.truncated_normal_initializer(mean=0.0,
                                                            stddev=0.005),
                regularizer=tf.contrib.layers.l2_regularizer(0.01))
            bias = tf.get_variable(name='biases',
                                   shape=[out_dimension],
                                   trainable=trainable,
                                   initializer=tf.constant_initializer(
                                       np.eye(in_dimension).reshape(
                                           in_dimension**2)))
            out = tf.matmul(input_feature, weight) + bias

            out = tf.reshape(out, [-1, in_dimension, in_dimension])

        return out

    def ot(input_feature, trainable, name='ot'):
        height, width, channel = input_feature.get_shape().as_list()[1:]
        conv_feature = conv_layer(input_feature,
                                  kernel_dim=1,
                                  input_dim=channel,
                                  output_dim=1,
                                  stride=1,
                                  trainable=trainable,
                                  activated=True,
                                  name=name + 'ot_conv')
        fc_feature = fc_layer(conv_feature, trainable, name=name + 'ot_fc')
        ot_matrix = sinkhorn(fc_feature * (-100.))

        return ot_matrix

    def apply_ot(input_feature, ot_matrix):

        height, width, channel = input_feature.get_shape().as_list()[1:]
        in_dimension = ot_matrix.get_shape().as_list()[1]

        reshape_input = tf.transpose(
            tf.reshape(input_feature, [-1, in_dimension, channel]), [0, 2, 1])
        # shape = [batch, channel, in_dimension]

        out = tf.einsum('bci, bio -> bco', reshape_input, ot_matrix)
        output_feature = tf.reshape(tf.transpose(out, [0, 2, 1]),
                                    [-1, height, width, channel])

        return output_feature

    ############## VGG module #################

    vgg_grd = VGG16()
    grd_vgg = vgg_grd.VGG16_conv(x_grd, keep_prob, trainable, 'VGG_grd')
    grd_vgg = conv_layer(grd_vgg,
                         kernel_dim=3,
                         input_dim=512,
                         output_dim=64,
                         stride=2,
                         trainable=trainable,
                         activated=True,
                         name='grd_conv')

    vgg_sat = VGG16()
    sat_vgg = vgg_sat.VGG16_conv(x_sat, keep_prob, trainable, 'VGG_sat')
    sat_vgg = conv_layer(sat_vgg,
                         kernel_dim=3,
                         input_dim=512,
                         output_dim=64,
                         stride=2,
                         trainable=trainable,
                         activated=True,
                         name='sat_conv')

    ############## resize #################
    height, width, channel = sat_vgg.get_shape().as_list()[1:]

    grd_vgg = tf.image.resize_bilinear(grd_vgg, [height, width])

    ############## OT module ######################

    ot_matrix_grd_branch = ot(grd_vgg, trainable, name='ot_grd_branch')
    grd_ot = apply_ot(grd_vgg, ot_matrix_grd_branch)

    sat_ot = sat_vgg

    ################# reshape ###################

    grd_height, grd_width, grd_channel = grd_ot.get_shape().as_list()[1:]
    grd_global = tf.reshape(grd_ot, [-1, grd_height * grd_width * grd_channel])

    sat_height, sat_width, sat_channel = sat_ot.get_shape().as_list()[1:]
    sat_global = tf.reshape(sat_ot, [-1, sat_height * sat_width * sat_channel])

    return tf.nn.l2_normalize(sat_global,
                              dim=1), tf.nn.l2_normalize(grd_global, dim=1)
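The sinkhorn helper called above is not defined in this snippet; it turns the scaled FC output into a (near) doubly-stochastic transport matrix. A minimal log-domain sketch of what such a helper might look like, offered only as an assumption (the iteration count and exact normalization of the original code may differ):

def sinkhorn(log_alpha, n_iters=20):
    # log_alpha: [batch, n, n] log-scores (here fc_feature * (-100.))
    # alternate row / column normalization in log space, then exponentiate
    for _ in range(n_iters):
        log_alpha -= tf.reduce_logsumexp(log_alpha, axis=2, keepdims=True)  # rows sum to 1
        log_alpha -= tf.reduce_logsumexp(log_alpha, axis=1, keepdims=True)  # columns sum to 1
    return tf.exp(log_alpha)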
Example #13
def CVFT_LPN(x_sat, x_grd, keep_prob, trainable, block, multi_loss):
    def conv_layer(x,
                   kernel_dim,
                   input_dim,
                   output_dim,
                   stride,
                   trainable,
                   activated,
                   name='ot_conv',
                   activation_function=tf.nn.relu):
        with tf.variable_scope(name,
                               reuse=tf.AUTO_REUSE):  # reuse=tf.AUTO_REUSE
            weight = tf.get_variable(
                name='weights',
                shape=[kernel_dim, kernel_dim, input_dim, output_dim],
                trainable=trainable,
                initializer=tf.contrib.layers.xavier_initializer())
            bias = tf.get_variable(
                name='biases',
                shape=[output_dim],
                trainable=trainable,
                initializer=tf.contrib.layers.xavier_initializer())

            out = tf.nn.conv2d(
                x, weight, strides=[1, stride, stride, 1
                                    ], padding='SAME') + bias

            if activated:
                out = activation_function(out)

            return out

    def fc_layer(x, trainable, name='ot_fc'):
        height, width, channel = x.get_shape().as_list()[1:]
        assert channel == 1
        in_dimension = height * width
        out_dimension = in_dimension**2

        input_feature = tf.reshape(x, [-1, height * width])

        with tf.variable_scope(name):
            weight = tf.get_variable(
                name='weights',
                shape=[in_dimension, out_dimension],
                trainable=trainable,
                initializer=tf.truncated_normal_initializer(mean=0.0,
                                                            stddev=0.005),
                regularizer=tf.contrib.layers.l2_regularizer(0.01))
            bias = tf.get_variable(name='biases',
                                   shape=[out_dimension],
                                   trainable=trainable,
                                   initializer=tf.constant_initializer(
                                       np.eye(in_dimension).reshape(
                                           in_dimension**2)))
            out = tf.matmul(input_feature, weight) + bias

            out = tf.reshape(out, [-1, in_dimension, in_dimension])

        return out

    def ot(input_feature, trainable, name='ot'):
        height, width, channel = input_feature.get_shape().as_list()[1:]
        conv_feature = conv_layer(input_feature,
                                  kernel_dim=1,
                                  input_dim=channel,
                                  output_dim=1,
                                  stride=1,
                                  trainable=trainable,
                                  activated=True,
                                  name=name + 'ot_conv')
        fc_feature = fc_layer(conv_feature, trainable, name=name + 'ot_fc')
        ot_matrix = sinkhorn(fc_feature * (-100.))

        return ot_matrix

    def apply_ot(input_feature, ot_matrix):

        height, width, channel = input_feature.get_shape().as_list()[1:]
        in_dimension = ot_matrix.get_shape().as_list()[1]

        reshape_input = tf.transpose(
            tf.reshape(input_feature, [-1, in_dimension, channel]), [0, 2, 1])
        # shape = [batch, channel, in_dimension]

        out = tf.einsum('bci, bio -> bco', reshape_input, ot_matrix)
        output_feature = tf.reshape(tf.transpose(out, [0, 2, 1]),
                                    [-1, height, width, channel])

        return output_feature

    # column partition
    def get_block_feature(input_feature, block):
        batch, height, width, channel = input_feature.get_shape().as_list()
        dim = height * width * channel
        sw = np.floor(width / block).astype(np.int32)  #stride
        kw = width - (block - 1) * sw  #kernel
        f_bs = []
        for i in range(block):
            f_b = input_feature[:, :, i * sw:i * sw + kw, :]
            f_bs.append(f_b)
        block_feature = tf.stack(f_bs, axis=4)  #batch*8*1*64*8
        h, w, c, b = block_feature.get_shape().as_list()[1:]
        block_feature_ = tf.reshape(block_feature, [-1, h * w * c, block])
        # print('block_feature_shape', block_feature_)
        return block_feature_

    # row partition
    # def get_block_feature(input_feature, block):
    #     batch, height, width, channel = input_feature.get_shape().as_list()
    #     dim = height * width * channel
    #     sh = np.floor(height / block).astype(np.int32) #stride
    #     kh = height - (block - 1) * sh #kernel
    #     f_bs = []
    #     for i in range(block):
    #         f_b = input_feature[:,i*sh:i*sh+kh,:,:]
    #         f_bs.append(f_b)
    #     block_feature = tf.stack(f_bs,axis=4)   #batch*1*8*64*8
    #     h, w, c, b = block_feature.get_shape().as_list()[1:]
    #     block_feature_ = tf.reshape(block_feature, [-1, h*w*c, block])
    #     # print('block_feature_shape', block_feature_)
    #     return block_feature_
    ############## VGG module #################

    vgg_grd = VGG16()
    grd_vgg = vgg_grd.VGG16_conv(x_grd, keep_prob, trainable, 'VGG_grd')
    grd_vgg = conv_layer(grd_vgg,
                         kernel_dim=3,
                         input_dim=512,
                         output_dim=64,
                         stride=2,
                         trainable=trainable,
                         activated=True,
                         name='grd_conv')

    vgg_sat = VGG16()
    sat_vgg = vgg_sat.VGG16_conv(x_sat, keep_prob, trainable, 'VGG_sat')
    sat_vgg = conv_layer(sat_vgg,
                         kernel_dim=3,
                         input_dim=512,
                         output_dim=64,
                         stride=2,
                         trainable=trainable,
                         activated=True,
                         name='sat_conv')

    ############## resize #################
    height, width, channel = sat_vgg.get_shape().as_list()[1:]

    grd_vgg = tf.image.resize_bilinear(grd_vgg, [height, width])

    ############## OT module ######################

    ot_matrix_grd_branch = ot(grd_vgg, trainable, name='ot_grd_branch')
    grd_ot = apply_ot(grd_vgg, ot_matrix_grd_branch)

    sat_ot = sat_vgg

    ################# reshape ###################
    if multi_loss:
        sat_global_ = get_block_feature(sat_ot, block)
        grd_global_ = get_block_feature(grd_ot, block)
    else:
        grd_height, grd_width, grd_channel = grd_ot.get_shape().as_list()[1:]
        grd_global = tf.reshape(grd_ot,
                                [-1, grd_height * grd_width * grd_channel])

        sat_height, sat_width, sat_channel = sat_ot.get_shape().as_list()[1:]
        sat_global = tf.reshape(sat_ot,
                                [-1, sat_height * sat_width * sat_channel])

    if multi_loss:
        return tf.nn.l2_normalize(sat_global_,
                                  dim=1), tf.nn.l2_normalize(grd_global_,
                                                             dim=1)
    else:
        return tf.nn.l2_normalize(sat_global,
                                  dim=1), tf.nn.l2_normalize(grd_global, dim=1)
Example #14
def VGG_conv(x_sat, x_grd, keep_prob, trainable):
    def conv_layer(x,
                   kernel_dim,
                   input_dim,
                   output_dim,
                   stride,
                   trainable,
                   activated,
                   name='ot_conv',
                   activation_function=tf.nn.relu):
        with tf.variable_scope(name,
                               reuse=tf.AUTO_REUSE):  # reuse=tf.AUTO_REUSE
            weight = tf.get_variable(
                name='weights',
                shape=[kernel_dim, kernel_dim, input_dim, output_dim],
                trainable=trainable,
                initializer=tf.contrib.layers.xavier_initializer())
            bias = tf.get_variable(
                name='biases',
                shape=[output_dim],
                trainable=trainable,
                initializer=tf.contrib.layers.xavier_initializer())

            out = tf.nn.conv2d(
                x, weight, strides=[1, stride, stride, 1
                                    ], padding='SAME') + bias

            if activated:
                out = activation_function(out)

            return out

    ############## VGG module #################

    vgg_grd = VGG16()
    grd_vgg = vgg_grd.VGG16_conv(x_grd, keep_prob, trainable, 'VGG_grd')
    grd_vgg = conv_layer(grd_vgg,
                         kernel_dim=3,
                         input_dim=512,
                         output_dim=64,
                         stride=2,
                         trainable=trainable,
                         activated=True,
                         name='grd_conv')

    vgg_sat = VGG16()
    sat_vgg = vgg_sat.VGG16_conv(x_sat, keep_prob, trainable, 'VGG_sat')
    sat_vgg = conv_layer(sat_vgg,
                         kernel_dim=3,
                         input_dim=512,
                         output_dim=64,
                         stride=2,
                         trainable=trainable,
                         activated=True,
                         name='sat_conv')

    ############## resize #################
    height, width, channel = sat_vgg.get_shape().as_list()[1:]

    grd_vgg = tf.image.resize_bilinear(grd_vgg, [height, width])

    ############## reshape #################
    grd_height, grd_width, grd_channel = grd_vgg.get_shape().as_list()[1:]
    grd_global = tf.reshape(grd_vgg,
                            [-1, grd_height * grd_width * grd_channel])

    sat_height, sat_width, sat_channel = sat_vgg.get_shape().as_list()[1:]
    sat_global = tf.reshape(sat_vgg,
                            [-1, sat_height * sat_width * sat_channel])

    return tf.nn.l2_normalize(sat_global,
                              dim=1), tf.nn.l2_normalize(grd_global, dim=1)
Example #15
if __name__ == "__main__":
    if args.enable_lat:
        real_model_path = args.model_path + "lat_param.pkl"
        print('loading the ANP model')
    else:
        real_model_path = args.model_path + "naive_param.pkl"
        print('loading the naive model')

    if args.test_flag:
        args.enable_lat = False

    if args.model == 'vgg':
        cnn = VGG16(enable_lat=args.enable_lat,
                    epsilon=args.epsilon,
                    pro_num=args.pro_num,
                    batch_size=args.batchsize,
                    if_dropout=args.dropout)
    cnn.cuda()

    if os.path.exists(real_model_path):
        cnn.load_state_dict(torch.load(real_model_path))
        print('load model.')
    else:
        print("load failed.")

    if args.test_flag:
        test_op(cnn)
    else:
        train_op(cnn)
Example #16
import numpy as np
import tensorflow as tf

from sklearn.metrics import accuracy_score, log_loss

xTest, yTest = Cifar100(train=False)
#xTest, yTest = Cifar10(train=False)
#xTest, yTest = mnist(train=False)
_,w,h,nChannels = xTest.shape
nSamples, nClasses = yTest.shape

sess = tf.Session()

xInput = tf.placeholder(tf.float32, [None, w, h, nChannels])
yInput = tf.placeholder(tf.float32, [None, nClasses])

vgg = VGG16(xInput,0,nClasses, train=False)
logits = vgg.fc8

parameters = np.load("SGD.npz")
vgg.loadW(parameters, sess)

crossEntropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
	labels=yInput, logits=logits))
predictions = tf.nn.softmax(logits)
acc = tf.equal(tf.argmax(predictions,1), tf.argmax(yInput,1))
accuracy = tf.reduce_mean(tf.cast(acc, tf.float32))

y = np.zeros((nSamples, nClasses))
batchSize = 128

step = np.ceil(nSamples/batchSize)
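The snippet stops before the actual evaluation loop. A minimal sketch of how it might continue; the loop itself is an assumption, and only names already defined above (xTest, yTest, y, step, batchSize, sess, xInput, predictions and the sklearn metrics) are reused:

# assumed continuation: batched inference over the test set
for i in range(int(step)):
    lo, hi = i * batchSize, min((i + 1) * batchSize, nSamples)
    y[lo:hi] = sess.run(predictions, feed_dict={xInput: xTest[lo:hi]})

print('test accuracy:', accuracy_score(yTest.argmax(1), y.argmax(1)))
print('test log loss:', log_loss(yTest, y))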
Example #17
def cal_lip(model_path, data_path):
    cleanpath = "/media/dsg3/dsgprivate/lat/test_lip/" + TYPE + "/test_data_cln.p"
    labelpath = "/media/dsg3/dsgprivate/lat/test_lip/" + TYPE + "/test_label.p"
    model = VGG16(enable_lat=ENABLE_LAT,
                  epsilon=EPS,
                  pro_num=PROG,
                  batch_size=BATCH_SIZE,
                  if_dropout=IF_DROP)
    model.cuda()

    if os.path.exists(model_path):
        model.load_state_dict(torch.load(model_path))
        #print('load model successfully.')
    else:
        print("load failed.")

    # get test_data , test_label from .p file
    clean_data, test_label, size = read_data_label(cleanpath, labelpath)
    test_data, test_label, size = read_data_label(data_path, labelpath)

    if size == 0:
        print("reading data failed.")
        return

    sel_clean = torch.Tensor(LEN, 3, 32, 32)
    sel_test = torch.Tensor(LEN, 3, 32, 32)
    sel_clean_label = torch.LongTensor(LEN)
    sel_test_label = torch.LongTensor(LEN)
    j = 0
    for i in range(START, test_label.size(0)):
        if test_label[i] == 3:
            sel_clean[j] = clean_data[i]
            sel_test[j] = test_data[i]
            sel_clean_label[j] = 3
            sel_test_label[j] = 3
            j += 1
        if j == LEN:
            break
    '''
    sel_clean = clean_data[START:START+LEN]
    sel_test = test_data[START:START+LEN]
    sel_clean_label = test_label[START:START+LEN]
    sel_test_label = test_label[START:START+LEN]
    '''

    # create dataset
    clean_set = Data.TensorDataset(sel_clean, sel_clean_label)
    test_set = Data.TensorDataset(sel_test, sel_test_label)

    clean_loader = Data.DataLoader(
        dataset=clean_set,
        batch_size=sel_clean.size(0),  # LEN
        shuffle=False)

    test_loader = Data.DataLoader(
        dataset=test_set,
        batch_size=sel_test.size(0),  # LEN
        shuffle=False)
    c_lip = 0
    criterion = nn.CrossEntropyLoss()
    # Test the model
    model.eval()
    x_cln = 0
    loss_cln = 0
    for x, y in clean_loader:
        x = x.cuda()
        x_cln = x.view(sel_clean.size(0), -1)
        y = y.cuda()
        with torch.no_grad():
            h = model(x)
        loss = criterion(h, y)
        loss_cln = loss.item()
    model.train()
    model.eval()
    x_tst = 0
    loss_tst = 0
    for x, y in test_loader:
        x = x.cuda()
        y = y.cuda()
        x_tst = x.view(sel_test.size(0), -1)
        with torch.no_grad():
            h = model(x)
        loss = criterion(h, y)
        loss_tst = loss.item()
    model.train()

    dist = 0
    for j in range(sel_test.size(0)):
        dist += torch.max(abs(x_cln[j] - x_tst[j]))  # norm p = inf
    dist = dist / LEN
    c_lip = abs(loss_cln - loss_tst) / dist

    return c_lip
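What cal_lip estimates is the loss change between the clean and perturbed batches divided by the average L-infinity distance between paired inputs. A tiny NumPy sketch of that ratio on random data, just to make the computation concrete (the losses are made up):

import numpy as np

x_cln = np.random.rand(8, 3 * 32 * 32)                                       # flattened clean images
x_tst = np.clip(x_cln + np.random.uniform(-0.03, 0.03, x_cln.shape), 0, 1)   # perturbed copies
loss_cln, loss_tst = 0.41, 0.87                                              # made-up batch losses

dist = np.abs(x_cln - x_tst).max(axis=1).mean()   # mean per-sample L-inf distance
c_lip = abs(loss_cln - loss_tst) / dist
print(c_lip)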
Example #18
def choose_model():
    # switch models
    print(args.model)
    print(args.layerlist)
    if args.model == 'lenet':
        cnn = LeNet(enable_lat=args.enable_lat,
                    layerlist=args.layerlist,
                    epsilon=args.epsilon,
                    alpha=args.alpha,
                    pro_num=args.pro_num,
                    batch_size=args.batchsize,
                    batch_norm=args.batchnorm,
                    if_dropout=args.dropout)
    elif args.model == 'resnet':
        cnn = ResNet50(enable_lat=args.enable_lat,
                       layerlist=args.layerlist,
                       epsilon=args.epsilon,
                       alpha=args.alpha,
                       pro_num=args.pro_num,
                       batch_size=args.batchsize,
                       if_dropout=args.dropout)
    elif args.model == 'resnet18':
        cnn = ResNet18(enable_lat=args.enable_lat,
                       layerlist=args.layerlist,
                       epsilon=args.epsilon,
                       alpha=args.alpha,
                       pro_num=args.pro_num,
                       batch_size=args.batchsize,
                       if_dropout=args.dropout)
    elif args.model == 'vgg16':
        cnn = VGG16(enable_lat=args.enable_lat,
                    layerlist=args.layerlist,
                    epsilon=args.epsilon,
                    alpha=args.alpha,
                    pro_num=args.pro_num,
                    batch_size=args.batchsize,
                    if_dropout=args.dropout)
    elif args.model == 'vgg11':
        cnn = VGG11(enable_lat=args.enable_lat,
                    layerlist=args.layerlist,
                    epsilon=args.epsilon,
                    alpha=args.alpha,
                    pro_num=args.pro_num,
                    batch_size=args.batchsize,
                    if_dropout=args.dropout)
    elif args.model == 'vgg13':
        cnn = VGG13(enable_lat=args.enable_lat,
                    layerlist=args.layerlist,
                    epsilon=args.epsilon,
                    alpha=args.alpha,
                    pro_num=args.pro_num,
                    batch_size=args.batchsize,
                    if_dropout=args.dropout)
    elif args.model == 'vgg19':
        cnn = VGG19(enable_lat=args.enable_lat,
                    layerlist=args.layerlist,
                    epsilon=args.epsilon,
                    alpha=args.alpha,
                    pro_num=args.pro_num,
                    batch_size=args.batchsize,
                    if_dropout=args.dropout)
    elif args.model == 'densenet':
        cnn = DenseNet()

    cnn.cuda()
    if args.enable_lat:
        cnn.choose_layer()

    return cnn
Example #19
def cal_eni(model, data):
    cnn = VGG16(enable_lat=args.enable_lat,
                epsilon=args.epsilon,
                pro_num=args.pro_num,
                batch_size=args.batchsize,
                if_dropout=args.dropout)
    cnn.cuda()
    model_path = model_list[model]
    if os.path.exists(model_path):
        cnn.load_state_dict(torch.load(model_path))
        print('load model successfully.')
    else:
        print("load failed.")
    model = cnn
    # get test_data , test_label from .p file
    clean_data, test_label, size = read_data_label(args.clean_path,
                                                   args.label_path)
    test_data, test_label, size = read_data_label(data_list[data],
                                                  args.label_path)
    if size == 0:
        print("reading data failed.")
        return
    if data == 'clean':
        sel_clean = clean_data[args.start_idx:args.start_idx + args.length]
        sel_test = test_data[args.start_idx + args.length:args.start_idx +
                             2 * args.length]
        sel_clean_label = test_label[args.start_idx:args.start_idx +
                                     args.length]
        sel_test_label = test_label[args.start_idx +
                                    args.length:args.start_idx +
                                    2 * args.length]
    else:
        sel_clean = clean_data[args.start_idx:args.start_idx + args.length]
        sel_test = test_data[args.start_idx:args.start_idx + args.length]
        sel_clean_label = test_label[args.start_idx:args.start_idx +
                                     args.length]
        sel_test_label = test_label[args.start_idx:args.start_idx +
                                    args.length]

    # create dataset
    clean_set = Data.TensorDataset(sel_clean, sel_clean_label)
    test_set = Data.TensorDataset(sel_test, sel_test_label)

    clean_loader = Data.DataLoader(dataset=clean_set,
                                   batch_size=args.length,
                                   shuffle=False)

    test_loader = Data.DataLoader(dataset=test_set,
                                  batch_size=args.length,
                                  shuffle=False)
    c_eni = 0
    criterion = nn.CrossEntropyLoss()
    # Test the model
    model.eval()
    x_cln = 0
    loss_cln = 0
    for x, y in clean_loader:
        x = x.cuda()
        x_cln = x.view(sel_clean.size(0), -1)
        y = y.cuda()
        #print(y)
        with torch.no_grad():
            h = model(x)
        loss = criterion(h, y)
        loss_cln = loss.item()
    model.train()
    model.eval()
    x_tst = 0
    loss_tst = 0
    for x, y in test_loader:
        x = x.cuda()
        y = y.cuda()
        x_tst = x.view(sel_test.size(0), -1)
        with torch.no_grad():
            h = model(x)
        loss = criterion(h, y)
        loss_tst = loss.item()
    model.train()

    dist = 0
    for j in range(sel_test.size(0)):
        dist += torch.max(abs(x_cln[j] - x_tst[j]))  # norm p = inf
    dist = dist / args.length
    c_eni = abs(loss_cln - loss_tst) / dist

    return c_eni
Example #20
xVal = X
yVal = Y

_, w, h, nChannels = xTrain.shape
nSamples, nClasses = yTrain.shape
print('Number of training samples: ', nSamples, ' Number of classes: ',
      nClasses)
print('Number of validation samples: ', xVal.shape[0])

sess = tf.Session(config=config)

xInput = tf.placeholder(tf.float32, [None, w, h, nChannels])
yInput = tf.placeholder(tf.float32, [None, nClasses])
learningRate = tf.placeholder(tf.float32, shape=[])

vgg = VGG16(xInput, 0.5, nClasses)
logits = vgg.fc8

#Metrics
predictions = tf.nn.softmax(logits)
acc = tf.equal(tf.argmax(predictions, 1), tf.argmax(yInput, 1))
accuracy = tf.reduce_mean(tf.cast(acc, tf.float32))
validationL, validationL2, validationA = [], [], []
trainL, trainL2, trainA = [], [], []

#Training parameters
epochs = 200
#lrInit = 0.005 (for Mnist)
lrInit = 0.05
batchSize = 128
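This training script is cut off before the loss and optimizer are defined. A minimal sketch of the missing pieces, consistent with the placeholders above but purely an assumption (the decay schedule in particular is invented; the SGD.npz filename in Example #16 only suggests plain SGD was used):

# assumed continuation: loss + SGD driven by the learningRate placeholder
crossEntropy = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits_v2(
    labels=yInput, logits=logits))
trainStep = tf.train.GradientDescentOptimizer(learningRate).minimize(crossEntropy)

sess.run(tf.global_variables_initializer())
for epoch in range(epochs):
    lr = lrInit * (0.1 ** (epoch // 80))   # assumed step decay
    for i in range(0, nSamples, batchSize):
        sess.run(trainStep, feed_dict={xInput: xTrain[i:i + batchSize],
                                       yInput: yTrain[i:i + batchSize],
                                       learningRate: lr})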
Example #21
            data_list = adv_data_resnet
        elif target == 'densenet':
            data_list = adv_data_densenet
        elif target == 'inception':
            data_list = adv_data_inception

        for data_cat in data_list:
            data_path = data_list[data_cat]
            test_one(model,data_cat,data_path)

if __name__ == "__main__":

    if args.model == 'vgg':
        model = VGG16(enable_lat=False,
                     epsilon=0.6,
                     pro_num=5,
                     batch_size=args.batch_size,
                     if_dropout=True)
    elif args.model == 'resnet':
        model = ResNet50(enable_lat=False,
                     epsilon=0.6,
                     pro_num=5,
                     batch_size=args.batch_size,
                     if_dropout=True)    
    model.cuda()

    if os.path.exists(args.model_path):
        model.load_state_dict(torch.load(args.model_path))
        print('load model.')
    else:
        print("load failed.")
Example #22
import os
import time

import numpy as np
import torch
from torch.utils.data.sampler import SubsetRandomSampler

# trainloader=Data.DataLoader(md,batch_size=16,shuffle=True, num_workers=12)
torch.cuda.set_device(1)
total_epochs = 640
print_inter = 10
vali_inter = 200
validation_split = 0.2
shuffle_dataset = True
stud_names = ['Nut_stud', 'panel_stud', 'stud', 'T_stud', 'ball_stud']
# stud_names = ['Nut_stud', 'panel_stud', 'stud', 'T_stud', 'ball_stud']

for name in stud_names:
    net = VGG()
    save_path = './checkpoints/' + name + '/RI1_VGG'
    if not os.path.exists(save_path):
        os.mkdir(save_path)
    md_train = myDataset('./mat/' + name + '/stud_data_RI_train.mat', aug=True, inch=3)
    md_test = myDataset('./mat/' + name + '/stud_data_RI_test.mat', aug=False, inch=3)
    # dataset_size = len(md)
    # indices = list(range(dataset_size))
    # split = int(np.floor(validation_split * dataset_size))
    # if shuffle_dataset :
    #     np.random.shuffle(indices)
    # train_indices, val_indices = indices[split:], indices[:split]
    #
    # # Creating PT data samplers and loaders:
    # train_sampler = SubsetRandomSampler(train_indices)
    # valid_sampler = SubsetRandomSampler(val_indices)
Example #23
def test_all():

    if args.model == 'resnet':
        model = ResNet50(enable_lat=args.enable_lat,
                         epsilon=args.epsilon,
                         pro_num=args.pro_num,
                         batch_size=args.batchsize,
                         if_dropout=args.dropout)
    elif args.model == 'vgg':
        model = VGG16(enable_lat=args.enable_lat,
                      epsilon=args.epsilon,
                      pro_num=args.pro_num,
                      batch_size=args.batchsize,
                      if_dropout=args.dropout)
    model.cuda()

    resnet_model_list = {
        'naive-resnet':
        "/media/dsg3/dsgprivate/yuhang/model/resnet50/naive/naive_param.pkl",
        'new-AT-resnet':
        "/media/dsg3/dsgprivate/yuhang/model/resnet50/nat/naive_param.pkl",
        'origin-AT-resnet':
        "/media/dsg3/dsgprivate/yuhang/model/resnet50/oat/naive_param.pkl",
        'ensemble-AT-resnet':
        "/media/dsg3/dsgprivate/yuhang/model/resnet50/eat/naive_param.pkl",
        'LAT-aaai-resnet':
        "/media/dsg3/dsgprivate/yuhang/model/resnet50/aaai/naive_param.pkl",
        'DPLAT-resnet50':
        "/media/dsg3/dsgprivate/yuhang/model/resnet50/dplat/lat_param.pkl",
        'DPLAT-resnet18':
        "/media/dsg3/dsgprivate/yuhang/model/resnet50/dplat-18/lat_param.pkl",
    }
    vgg_model_list = {
        'naive-vgg':
        "/media/dsg3/dsgprivate/yuhang/model/vgg16/naive/naive_param.pkl",
        'new-AT-vgg':
        "/media/dsg3/dsgprivate/yuhang/model/vgg16/nat/naive_param.pkl",
        'origin-AT-vgg':
        "/media/dsg3/dsgprivate/yuhang/model/vgg16/oat/naive_param.pkl",
        'ensemble-AT-vgg':
        "/media/dsg3/dsgprivate/yuhang/model/vgg16/eat/naive_param.pkl",
        'LAT-aaai-vgg':
        "/media/dsg3/dsgprivate/yuhang/model/vgg16/aaai/naive_param.pkl",
        'PLAT-vgg':
        "/media/dsg3/dsgprivate/yuhang/model/vgg16/plat/lat_param.pkl",
        'DPLAT-vgg':
        "/media/dsg3/dsgprivate/yuhang/model/vgg16/dplat/lat_param.pkl",
    }

    data_list = {
        'k3c0.03-vgg':
        "/media/dsg3/dsgprivate/lat/test_cw/test_adv(k3c0.030).p",
        'k5c0.05-vgg':
        "/media/dsg3/dsgprivate/lat/test_cw/test_adv(k5c0.050).p",
        'k3c0.03-resnet':
        "/media/dsg3/dsgprivate/lat/test_cw/resnet/test_adv(k3c0.030).p",
        'k5c0.05-resnet':
        "/media/dsg3/dsgprivate/lat/test_cw/resnet/test_adv(k5c0.050).p",
    }

    if args.model == 'vgg':
        model_list = vgg_model_list
    elif args.model == 'resnet':
        model_list = resnet_model_list

    for target in model_list:
        print('------- Now target model is {} ------'.format(target))
        model_path = model_list[target]
        if target == 'DPLAT-resnet18':
            model = ResNet18(enable_lat=args.enable_lat,
                             epsilon=args.epsilon,
                             pro_num=args.pro_num,
                             batch_size=args.batchsize,
                             if_dropout=args.dropout).cuda()
        model.load_state_dict(torch.load(model_path))
        for data in data_list:
            data_path = data_list[data]
            test_one(model, data, data_path)
Example #24
    from ResNet import ResNet50
    from VGG import VGG16
    from denseNet import DenseNet
    from Inception_v2 import Inception_v2
    from utils import read_data_label
    if args.model == 'resnet':
        model = ResNet50(enable_lat=args.enable_lat,
                         epsilon=args.lat_epsilon,
                         pro_num=args.lat_pronum,
                         batch_size=args.model_batchsize,
                         num_classes=10,
                         if_dropout=args.dropout)
    elif args.model == 'vgg':
        model = VGG16(enable_lat=args.enable_lat,
                      epsilon=args.lat_epsilon,
                      pro_num=args.lat_pronum,
                      batch_size=args.model_batchsize,
                      num_classes=10,
                      if_dropout=args.dropout)
    elif args.model == 'densenet':
        model = DenseNet()
    elif args.model == 'inception':
        model = Inception_v2()
    model.cuda()
    model.load_state_dict(torch.load(args.modelpath))
    # if cifar then normalize epsilon from [0,255] to [0,1]

    if args.dataset == 'cifar10':
        eps = args.attack_epsilon / 255.0
    else:
        eps = args.attack_epsilon
Example #25
def main():
    if args.model == 'resnet':
        model = ResNet50(enable_lat=args.enable_lat,
                         epsilon=args.lat_epsilon,
                         pro_num=args.lat_pronum,
                         batch_size=args.model_batchsize,
                         num_classes=10,
                         if_dropout=args.dropout)
    elif args.model == 'vgg':
        model = VGG16(enable_lat=args.enable_lat,
                      epsilon=args.lat_epsilon,
                      pro_num=args.lat_pronum,
                      batch_size=args.model_batchsize,
                      num_classes=10,
                      if_dropout=args.dropout)
    elif args.model == 'resnet18':
        model = ResNet18(enable_lat=args.enable_lat,
                         epsilon=args.lat_epsilon,
                         pro_num=args.lat_pronum,
                         batch_size=args.model_batchsize,
                         num_classes=10,
                         if_dropout=args.dropout)
    elif args.model == 'densenet':
        model = DenseNet()
    elif args.model == 'inception':
        model = Inception_v2()
    model.cuda()
    model.load_state_dict(torch.load(args.modelpath))
    # if cifar then normalize epsilon from [0,255] to [0,1]
    '''
    if args.dataset == 'cifar10':
        eps = args.attack_epsilon / 255.0
    else:
        eps = args.attack_epsilon
    '''
    eps = args.attack_epsilon
    # the last layer of densenet applies F.log_softmax, while CrossEntropyLoss already includes a LogSoftmax
    attack = Attack(dataroot="/media/dsg3/dsgprivate/lat/data/cifar10/",
                    dataset=args.dataset,
                    batch_size=args.attack_batchsize,
                    target_model=model,
                    criterion=nn.CrossEntropyLoss(),
                    epsilon=eps,
                    alpha=args.attack_alpha,
                    iteration=args.attack_iter)
    if args.attack == 'fgsm':
        test_data_cln, test_data_adv, test_label, test_label_adv = attack.fgsm()
    elif args.attack == 'ifgsm':
        test_data_cln, test_data_adv, test_label, test_label_adv = attack.i_fgsm()
    elif args.attack == 'stepll':
        test_data_cln, test_data_adv, test_label, test_label_adv = attack.step_ll()
    elif args.attack == 'pgd':
        test_data_cln, test_data_adv, test_label, test_label_adv = attack.PGD()
    elif args.attack == 'momentum_ifgsm':
        test_data_cln, test_data_adv, test_label, test_label_adv = attack.momentum_ifgsm()
    print(test_data_adv.size(), test_label.size(), type(test_data_adv))
    #test_data, test_label, size = read_data_label('./test_data_cln.p','./test_label.p')
    #test_data_adv, test_label_adv, size = read_data_label('./test_data_cln.p','./test_label.p')
    '''
    test_loader = attack.return_data()
    dataiter = iter(test_loader)
    images,labels = dataiter.next()
    print(images[0])
    '''
    #test_data_cln, test_data_adv, test_label, test_label_adv = attack.i_fgsm()
    #display(test_data_cln, test_data_adv, test_label, test_label_adv)
    if args.generate:
        save_data_label(args.savepath, eps, test_data_cln, test_data_adv, test_label, test_label_adv)