Example #1
from utils.utils import *
from model import *
import sys
import os
import math
import time
import tensorflow as tf
from utils.data_helper import data_loader
from model import xavier_init, he_normal_init

dataset = sys.argv[1]
init_epsilon = float(sys.argv[2])
init_delta = float(sys.argv[3])
model_name = sys.argv[4]
prev_iter = int(sys.argv[5])

mb_size, X_dim, width, height, channels, len_x_train, x_train, len_x_test, x_test = data_loader(
    dataset)

graph = tf.Graph()
with graph.as_default():
    session_conf = tf.ConfigProto(allow_soft_placement=True,
                                  log_device_placement=False)
    sess = tf.Session(config=session_conf)

    with sess.as_default():
        #input placeholder
        input_shape = [None, width, height, channels]
        filter_sizes = [5, 5, 5, 5, 5]
        hidden = 128
        z_dim = 128

        if dataset == 'celebA' or dataset == 'lsun':
Example #2
def classifier_multi():
    pathes = os.listdir("results/generated/")
    original = ["mnist", "fmnist", "cifar10", "svhn"]
    """    
    for dataset in original:
        _, X_dim, width, height, channels,len_x_train, x_train, y_train, len_x_test, x_test, y_test  = data_loader(dataset)
        classifier_one(dataset, "original", x_train, y_train, len_x_train)
        tf.reset_default_graph() 

    for path in pathes:
        info = path.split('_')
        dataset = info[0]
        model_name = path
        _, X_dim, width, height, channels,len_x_train, x_train, y_train, len_x_test, x_test, y_test  = data_loader(dataset)
        data = np.load('results/generated/{}'.format(path))
        x_target = data['x']
        y_target = data['y']
        classifier_one(dataset, model_name, x_target, y_target, len_x_train)
        tf.reset_default_graph()
        
    for path in pathes:
        info = path.split('_')
        dataset = info[0]
        model_name = path
        _, X_dim, width, height, channels,len_x_train, x_train, y_train, len_x_test, x_test, y_test  = data_loader(dataset)
        data = np.load('results/generated/{}'.format(path))
        x_target = data['x']
        y_target = data['y']
        x_target = np.append(x_target,x_train,axis=0)
        y_target = np.append(y_target,y_train,axis=0)
        model_name = model_name+"_augmented"
        classifier_one(dataset, model_name, x_target, y_target, len_x_train)
        tf.reset_default_graph() 
    """
    pathes = os.listdir("results_pure_noise/generated/")
    for path in pathes:
        info = path.split('_')
        dataset = info[0]
        model_name = path
        print(info)
        _, X_dim, width, height, channels, len_x_train, x_train, y_train, len_x_test, x_test, y_test = data_loader(
            dataset)
        data = np.load('results_pure_noise/generated/{}'.format(path))
        x_target = data['x']
        y_target = data['y']
        #classifier_one(dataset, model_name, x_target, y_target, len_x_train)
        tf.reset_default_graph()

    for path in pathes:
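        # second pass: augment each generated set with the original training data before training the classifier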
        info = path.split('_')
        dataset = info[0]
        model_name = path
        _, X_dim, width, height, channels, len_x_train, x_train, y_train, len_x_test, x_test, y_test = data_loader(
            dataset)
        data = np.load('results_pure_noise/generated/{}'.format(path))
        x_target = data['x']
        y_target = data['y']
        x_target = np.append(x_target, x_train, axis=0)
        y_target = np.append(y_target, y_train, axis=0)
        model_name = model_name + "_augmented"
        classifier_one(dataset, model_name, x_target, y_target, len_x_train)
        tf.reset_default_graph()
Example #3
def classifier_one(dataset, model_name, x_target, y_target, len_x_target):

    NUM_CLASSES = 10
    fp = open("classifier_result.txt", 'a')
    print("dataset: {}; model name: {} Evaluation start.".format(
        dataset, model_name))
    mb_size, X_dim, width, height, channels, len_x_train, x_train, y_train, len_x_test, x_test, y_test = data_loader(
        dataset)

    graph = tf.Graph()
    with graph.as_default():
        session_conf = tf.ConfigProto(allow_soft_placement=True,
                                      log_device_placement=False)
        sess = tf.Session(config=session_conf)

        with sess.as_default():
            #input placeholder
            input_shape = [None, width, height, channels]
            filter_sizes = [5, 5, 5, 5, 5]
            hidden = 128

            n_filters = [channels, hidden, hidden * 2, hidden * 4]

            X = tf.placeholder(tf.float32,
                               shape=[None, width, height, channels])
            Y = tf.placeholder(tf.float32, shape=[None, NUM_CLASSES])

            #classifier variables
            W1 = tf.Variable(he_normal_init([5, 5, channels, hidden // 2]))
            W2 = tf.Variable(he_normal_init([5, 5, hidden // 2, hidden]))
            W3 = tf.Variable(he_normal_init([5, 5, hidden, hidden * 2]))
            W4 = tf.Variable(xavier_init([4 * 4 * hidden * 2, NUM_CLASSES]))
            b4 = tf.Variable(tf.zeros(shape=[NUM_CLASSES]))
            var_C = [W1, W2, W3, W4, b4]

            global_step = tf.Variable(0, name="global_step", trainable=False)

            C_real_logits = classifier(X, var_C)

            C_loss = tf.reduce_mean(
                tf.nn.softmax_cross_entropy_with_logits_v2(
                    labels=Y, logits=C_real_logits))

            num_batches_per_epoch = int((len_x_target - 1) / mb_size) + 1

            C_solver = tf.contrib.opt.AdamWOptimizer(
                weight_decay=1e-4, learning_rate=1e-3, beta1=0.5,
                beta2=0.9).minimize(C_loss,
                                    var_list=var_C,
                                    global_step=global_step)

            timestamp = str(int(time.time()))

            sess.run(tf.global_variables_initializer())

            x_temp = np.append(x_test, x_test[:mb_size], axis=0)
            y_temp = np.append(y_test, y_test[:mb_size], axis=0)
            best_accuracy = 0.0
            for it in range(num_batches_per_epoch * 500):
                X_mb, Y_mb = next_batch(mb_size, x_target, y_target)
                _, C_curr = sess.run([C_solver, C_loss],
                                     feed_dict={
                                         X: X_mb,
                                         Y: Y_mb
                                     })
                if it % 100 == 0:
                    print('Iter: {}; C_loss: {:.4};'.format(it, C_curr))
                if it % 1000 == 0:
                    predictions = []
                    for jt in range(len_x_test // mb_size + 1):
                        Xt_mb, Yt_mb = next_test_batch(jt, mb_size, x_temp,
                                                       y_temp)
                        # evaluation only: run the forward pass without the training op
                        C_pred = sess.run(C_real_logits, feed_dict={X: Xt_mb})
                        if len(predictions) == 0:
                            predictions = C_pred
                        else:
                            predictions = np.append(predictions,
                                                    C_pred,
                                                    axis=0)

                    predictions = predictions[:len_x_test]
                    predictions = np.argmax(predictions, axis=1)
                    correct_y = np.argmax(y_test, axis=1)
                    correct_predictions = sum(predictions == correct_y)
                    accuracy = correct_predictions / float(len_x_test)
                    print('Iter: {}; accuracy: {:.4}; best accuracy: {:.4}'.
                          format(it, accuracy, best_accuracy))
                    if accuracy > best_accuracy:
                        best_accuracy = accuracy

            print("dataset: {} model name: {} with best accuracy: {:.4} fin.".
                  format(dataset, model_name, best_accuracy))
            print("dataset: {} model name: {} with best accuracy: {:.4} fin.".
                  format(dataset, model_name, best_accuracy),
                  file=fp)
            fp.close()
            sess.close()

            return
Example #4
def classifier_multi():
    pathes = os.listdir("results/generated/")
    original = ["mnist", "fmnist", "cifar10", "svhn"]

    for dataset in original:
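        # train a classifier on several corrupted copies of the training set: blur, additive noise, and smoothing filters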
        _, X_dim, width, height, channels, len_x_train, x_train, y_train, len_x_test, x_test, y_test = data_loader(
            dataset)
        for sigma in range(1, 16):
            x_target = gaussian_filter(x_train, sigma)
            classifier_one(dataset, "gaussian_sigma_{}".format(sigma),
                           x_target, y_train, len_x_train)
            tf.reset_default_graph()
        for sigma in range(1, 16):
            x_target = gaussian_laplace(x_train, sigma)
            classifier_one(dataset, "laplace_sigma_{}".format(sigma), x_target,
                           y_train, len_x_train)
            tf.reset_default_graph()

        for sigma in range(1, 16):
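            # additive Gaussian noise with standard deviation sigma / 10 (0.1 to 1.5)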
            sigma = sigma / 10.0
            noise = np.random.normal(0.0, sigma, (x_train.shape))
            noise = noise.reshape(x_train.shape)
            x_target = x_train + noise
            classifier_one(dataset, "gaussian_noise_sigma_{}".format(sigma),
                           x_target, y_train, len_x_train)
            tf.reset_default_graph()
        for sigma in range(1, 16):
            sigma = sigma / 10.0
            noise = np.random.laplace(0.0, sigma, (x_train.shape))
            noise = noise.reshape(x_train.shape)
            x_target = x_train + noise
            classifier_one(dataset, "laplace_noise_scale_{}".format(sigma),
                           x_target, y_train, len_x_train)
            tf.reset_default_graph()
        for sigma in [2, 3, 5, 7, 9]:
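            # uniform (box) filter with the given window size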
            x_target = uniform_filter(x_train, sigma)
            classifier_one(dataset, "uniform_filter_size_{}".format(sigma),
                           x_target, y_train, len_x_train)
            tf.reset_default_graph()
        for sigma in [2, 3, 5, 7, 9]:
            x_target = median_filter(x_train, size=sigma)
            classifier_one(dataset, "median_filter_size_{}".format(sigma),
                           x_target, y_train, len_x_train)
            tf.reset_default_graph()
Example #5
    config_gpu()

    params = get_params()
    if params['model'] == 'TextCNN':
        model_params = TextCNN_params.get_params()
    elif params['model'] == 'FastText':
        model_params = FastText_params.get_params()
    elif params['model'] == 'transformer':
        model_params = transformer_params.get_params()
    elif params['model'] == 'gcn':
        model_params = gcn_params.get_params()
        gcn_train.train_model(model_params)
    else:
        pass

    x_train, x_test, y_train, y_test, vocab, mlb = data_loader(
        params, is_rebuild_dataset=False)

    if params['model'] == 'FastText':
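        # keep only non-zero (non-padding) token ids, append n-gram features, then re-pad to a fixed length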
        x_train = [[i for i in x if i > 0] for x in x_train.tolist()]
        x_test = [[i for i in x if i > 0] for x in x_test.tolist()]
        x_train, x_test, vocab_size = add_ngram_features(
            model_params['ngram_range'], x_train, x_test,
            model_params['vocab_size'])
        x_train = tf.keras.preprocessing.sequence.pad_sequences(
            x_train,
            maxlen=model_params['maxlen'],
            padding='post',
            truncating='post')
        x_test = tf.keras.preprocessing.sequence.pad_sequences(
            x_test,
            maxlen=model_params['maxlen'],
Example #6
from flask import Flask, request, jsonify
import numpy as np
import tensorflow as tf

from utils.params import get_params
import model.FastText.FastText_params as FastText_params
import model.TextCNN.TextCNN_params as TextCNN_params

app = Flask(__name__)
app.config['JSON_AS_ASCII'] = False

err_result = {"errCode": "", "errMsg": "", "status": False}

FastText_model = tf.keras.models.load_model(
    './results/FastText/2020-03-27-15-01')
# FastText_model = tf.keras.models.load_model('./results/FastText/model.h5')
TextCNN_model = tf.keras.models.load_model(
    './results/TextCNN/2020-03-27-21-41')

_, _, _, _, vocab, mlb = data_loader(get_params())
labels = np.array(mlb.classes_)

fastText_params = FastText_params.get_params()
textCNN_params = TextCNN_params.get_params()


@app.route("/FastText_service/", methods=['GET', 'POST'])
def FastText_service():
    try:
        text_list = request.json
        predict_data = convert(text_list, fastText_params)
    except Exception as e:
        return jsonify(err_result)
    else:
        preds = FastText_model.predict(predict_data)
Example #7
def generate_one(dataset, model_name, z_dim, USE_DELTA):
    NUM_CLASSES = 10

    mb_size, X_dim, width, height, channels, len_x_train, x_train, y_train, len_x_test, x_test, y_test = data_loader(
        dataset)


    graph = tf.Graph()
    with graph.as_default():
        session_conf = tf.ConfigProto(allow_soft_placement=True, log_device_placement=False)
        sess = tf.Session(config=session_conf)

        with sess.as_default():
            #input placeholder
            input_shape=[None, width, height, channels]
            filter_sizes=[5, 5, 5, 5, 5]        
            hidden = 128         

            n_filters=[channels, hidden, hidden*2, hidden*4]

            X = tf.placeholder(tf.float32, shape=[None, width, height,channels])  
            Z_S = tf.placeholder(tf.float32, shape=[None, z_dim]) 
            Z_noise = tf.placeholder(tf.float32, shape=[None,  z_dim])
            Y = tf.placeholder(tf.float32, shape=[None,  NUM_CLASSES])
            A_true_flat = X        
            #generator variables
            var_G = []
            #discriminator variables
            W1 = tf.Variable(he_normal_init([5,5,channels, hidden//2]))
            W2 = tf.Variable(he_normal_init([5,5, hidden//2,hidden]))
            W3 = tf.Variable(he_normal_init([5,5,hidden,hidden*2]))
            W4 = tf.Variable(xavier_init([4*4*hidden*2, 1]))
            b4 = tf.Variable(tf.zeros(shape=[1]))        
            var_D = [W1,W2,W3,W4,b4] 

            #classifier variables
            W4_c = tf.Variable(xavier_init([4*4*hidden*2, NUM_CLASSES])) 
            b4_c = tf.Variable(tf.zeros(shape=[NUM_CLASSES]))        
            var_C = [W1,W2,W3,W4_c,b4_c] 

            var_D_C = [W1,W2,W3,W4,b4,W4_c,b4_c]
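            #the discriminator and classifier share the convolutional weights W1-W3; var_D_C collects both heads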

            global_step = tf.Variable(0, name="global_step", trainable=False)        

            G_sample, G_zero, z_original, z_noise, z_noised = generator(input_shape, 
                                                                       n_filters, 
                                                                       filter_sizes, 
                                                                       X, 
                                                                       Z_noise, 
                                                                       var_G,
                                                                       z_dim,
                                                                       Z_S,
                                                                       USE_DELTA)  

            D_real, D_real_logits = discriminator(X, var_D)
            D_fake, D_fake_logits = discriminator(G_sample, var_D)
            C_real_logits = classifier(X, var_C)
            C_fake_logits = classifier(G_sample, var_C)            
            D_real_loss = tf.nn.sigmoid_cross_entropy_with_logits(
                                logits=D_real_logits, labels=tf.ones_like(D_real))
            D_fake_loss = tf.nn.sigmoid_cross_entropy_with_logits(
                                   logits=D_fake_logits, labels=tf.zeros_like(D_fake))

            D_S_loss = tf.reduce_mean(D_real_loss) + tf.reduce_mean(D_fake_loss)
            D_C_loss = tf.reduce_mean(
                           tf.nn.softmax_cross_entropy_with_logits_v2(labels = Y, 
                                                                      logits = C_real_logits))                   
            G_zero_loss = tf.reduce_mean(tf.pow(X - G_zero,2))         
            G_S_loss = tf.reduce_mean(
                           tf.nn.sigmoid_cross_entropy_with_logits(logits=D_fake_logits, 
                                                                   labels=tf.ones_like(D_fake)))
            G_C_loss = tf.reduce_mean(
                           tf.nn.softmax_cross_entropy_with_logits_v2(labels = Y, 
                                                                      logits = C_fake_logits))

            D_loss = D_S_loss + D_C_loss
            G_loss = G_S_loss + G_C_loss + G_zero_loss


            #sensitivity estimation
            latent_max = tf.reduce_max(z_original, axis = 0)
            latent_min = tf.reduce_min(z_original, axis = 0)          

            tf.summary.image('Original',X)
            tf.summary.image('fake',G_sample) 
            tf.summary.image('fake_zero', G_zero)

            tf.summary.scalar('D_loss', D_loss)  
            tf.summary.scalar('D_S_loss',D_S_loss) 
            tf.summary.scalar('D_C_loss',D_C_loss)
            tf.summary.scalar('G_zero_loss',G_zero_loss)        
            tf.summary.scalar('G_S_loss',G_S_loss)
            tf.summary.scalar('G_C_loss',G_C_loss)        
            tf.summary.scalar('G_loss',G_loss) 

            tf.summary.histogram('z_original',z_original) 
            tf.summary.histogram('z_noise',z_noise) 
            tf.summary.histogram('z_noised',z_noised)

            merged = tf.summary.merge_all()

            num_batches_per_epoch = int((len_x_train-1)/mb_size) + 1


            A_solver = tf.contrib.opt.AdamWOptimizer(
                weight_decay=1e-4, learning_rate=1e-4, beta1=0.5,
                beta2=0.9).minimize(G_zero_loss,
                                    var_list=var_G,
                                    global_step=global_step)
            D_solver = tf.contrib.opt.AdamWOptimizer(
                weight_decay=1e-4, learning_rate=1e-4, beta1=0.5,
                beta2=0.9).minimize(D_loss,
                                    var_list=var_D_C,
                                    global_step=global_step)
            G_solver = tf.contrib.opt.AdamWOptimizer(
                weight_decay=1e-4, learning_rate=1e-4, beta1=0.5,
                beta2=0.9).minimize(G_loss,
                                    var_list=var_G,
                                    global_step=global_step)

            timestamp = str(int(time.time()))    
            out_dir = os.path.abspath(os.path.join(os.path.curdir, 
                                                   "results/models/"+ model_name))
            checkpoint_dir = os.path.abspath(os.path.join(out_dir, "checkpoints"))
            checkpoint_prefix = os.path.join(checkpoint_dir, "model")
            if not os.path.exists("results/generated/"):
                os.makedirs("results/generated/")           

            saver = tf.train.Saver(tf.global_variables())
            sess.run(tf.global_variables_initializer())

            saver.restore(sess,tf.train.latest_checkpoint(checkpoint_dir))        

            #calculate approximated global sensitivity            
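            #each latent dimension's sensitivity is approximated by its max-min range over the training data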
            for idx in range(num_batches_per_epoch):
                X_mb, Y_mb = next_batch(mb_size, x_train, y_train)
                enc_zero = np.zeros([mb_size,z_dim]).astype(np.float32) 
                if USE_DELTA:
                    enc_noise = np.random.normal(0.0,1.0,[mb_size,z_dim]).astype(np.float32)  
                else:
                    enc_noise = np.random.laplace(0.0,1.0,[mb_size,z_dim]).astype(np.float32)                  
                max_curr, min_curr = sess.run([latent_max,latent_min], feed_dict={
                                                                       X: X_mb, 
                                                                       Y: Y_mb, 
                                                                       Z_noise: enc_zero, 
                                                                       Z_S: enc_zero}) 
                if idx == 0:
                    z_max = max_curr
                    z_min = min_curr
                else:
                    z_max = np.maximum(z_max,max_curr)
                    z_min = np.minimum(z_min,min_curr)
            z_sensitivity = np.abs(np.subtract(z_max,z_min))
            #print("Approximated Global Sensitivity:") 
            #print(z_sensitivity)        
            z_sensitivity = np.tile(z_sensitivity,(mb_size,1)) 

            x_train = np.append(x_train, X_mb, axis=0)
            y_train = np.append(y_train, Y_mb, axis=0)
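            # the extra batch pads the data so every generation batch below is full; the output is trimmed to len_x_train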
            for i in range(num_batches_per_epoch):
                X_mb, Y_mb = next_test_batch(i, mb_size, x_train, y_train)
                enc_zero = np.zeros([mb_size,z_dim]).astype(np.float32)  
                if USE_DELTA:
                    enc_noise = np.random.normal(0.0,1.0,[mb_size,z_dim]).astype(np.float32)  
                else:
                    enc_noise = np.random.laplace(0.0,1.0,[mb_size,z_dim]).astype(np.float32) 
                G_sample_curr = sess.run(G_sample,
                                                  feed_dict={X: X_mb, 
                                                  Y: Y_mb, 
                                                  Z_noise: enc_noise, 
                                                  Z_S: z_sensitivity})                
                # reshape on the host; avoids adding new graph ops on every iteration
                samples_flat = np.reshape(G_sample_curr, [mb_size, width, height, channels])
                if i == 0:
                    img_set = samples_flat
                    label_set = Y_mb
                else:
                    img_set = np.append(img_set, samples_flat, axis=0)
                    label_set = np.append(label_set, Y_mb, axis=0) 
            x_generated = img_set[:len_x_train]
            y_generated = label_set[:len_x_train]
            outfile = "results/generated/{}".format(model_name)
            np.savez(outfile, x=x_generated, y=y_generated)    
            print("dataset: {} model name: {} fin.".format(dataset, model_name))
            print("dataset: {} model name: {} fin.".format(dataset, model_name), file = fp)
            sess.close()
            
            return x_train, img_set
Example #8
def fid_is_eval():
    pathes = os.listdir("results_pure_noise/generated/")
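    # skip any archives already listed in finished (empty here, so all paths are kept)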
    finished = []
    tmp_path = []
    for path in pathes:
        if path not in finished:
            tmp_path.append(path)
    pathes = tmp_path
    for path in pathes:
        fp = open("FID_IS_result.txt", 'a')
        info = path.split('_')
        dataset = info[0]
        model_name = path
        if dataset == "cifar10":
            data = np.load('results_pure_noise/generated/{}'.format(path))
            mb_size, X_dim, width, height, channels, len_x_train, x_train, y_train, len_x_test, x_test, y_test = data_loader(
                dataset)
            real_set = x_train
            img_set = data['x']
            print("Calculating Fréchet Inception Distance for {}".format(
                model_name))
            print("Calculating Fréchet Inception Distance for {}".format(
                model_name),
                  file=fp)
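            # rescale images from [0, 1] to uint8 [0, 255] and convert NHWC to NCHW for the FID computation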
            fid_set_r = real_set * 255.0
            fid_set_r = fid_set_r.astype(np.uint8)
            fid_set_r = np.transpose(fid_set_r, (0, 3, 1, 2))
            fid_set_i = img_set * 255.0
            fid_set_i = fid_set_i.astype(np.uint8)
            fid_set_i = np.transpose(fid_set_i, (0, 3, 1, 2))
            #fid_set_i = fid_set_i[:256]
            #fid_set_r = fid_set_r[:256]
            fid_score = get_fid(fid_set_r, fid_set_i)
            print("FID: {}".format(fid_score))
            print("FID: {}".format(fid_score), file=fp)
            tf.reset_default_graph()
            fp.close()
            fp = open("FID_IS_result.txt", 'a')

            print("Calculating inception score for {}".format(model_name))
            print("Calculating inception score for {}".format(model_name),
                  file=fp)
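            # rescale images to [-1, 1] and convert to NCHW for the inception score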
            is_set = img_set * 2.0 - 1.0
            is_set = np.transpose(is_set, (0, 3, 1, 2))
            #is_set = is_set[:256]
            mean, std = get_inception_score(is_set)
            print("mean: {} std: {}".format(mean, std))
            print("mean: {} std: {}".format(mean, std), file=fp)
            tf.reset_default_graph()
            fp.close()