Example #1
    def __init__(self, trial, step, size, batch_size, learning_rate, max_epoch,
                 tfrecord_path, checkpoint_dir, scale, num_of_data, conf,
                 model_num):

        print('Initialize Training')
        self.trial = trial
        self.step = step
        self.HEIGHT = size[0]
        self.WIDTH = size[1]
        self.CHANNEL = size[2]
        self.BATCH_SIZE = batch_size
        self.learning_rate = learning_rate
        self.EPOCH = max_epoch
        self.tfrecord_path = tfrecord_path
        self.checkpoint_dir = checkpoint_dir
        self.scale = scale
        self.num_of_data = num_of_data
        self.conf = conf
        self.model_num = model_num

        self.input = tf.placeholder(dtype=tf.float32,
                                    shape=[None, None, None, self.CHANNEL])
        self.label = tf.placeholder(dtype=tf.float32,
                                    shape=[None, None, None, self.CHANNEL])

        self.MODEL = model.MODEL('MODEL')
        self.PARAM = model.Weights('MODEL')
        self.MODEL.forward(self.input, self.PARAM.weights)
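A minimal usage sketch for the fragment above (TF1-style; `trainer` and `lr_batch` are assumed names, and MODEL.forward is taken to populate MODEL.output, as Example #7 below suggests):

# Hedged sketch: run the forward pass by feeding the input placeholder.
# `trainer` is an instance of the class above; `lr_batch` is a float32
# NumPy array shaped [batch, height, width, CHANNEL].
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    output = sess.run(trainer.MODEL.output,
                      feed_dict={trainer.input: lr_batch})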
Example #2
    def Finalsummary(Num, JueLv, Keyword):
        # characters = input("Please input characters:")
        characters = Keyword
        # Return the class label for the keyword
        label = word2vec_demo.Word2vec_similar.class_tags(characters)
        print(label)
        Imbalance_words = word2vec_demo.Word2vec_similar.similar_6words(
            characters, label)
        if '边塞征战' == label:  # frontier / war poetry
            class_tag = 'biansai'
        elif '写景咏物' == label:  # scenery and object-chanting poetry
            class_tag = 'jingwu'
        elif '山水田园' == label:  # landscape and pastoral poetry
            class_tag = 'shanshui'
        elif '思乡羁旅' == label:  # homesickness and travel poetry
            class_tag = 'sixiang'
        else:
            class_tag = 'poetrySong'

        checkpointsPath = "E:\Desk\MyProjects\Python/NLP_Demo1\File_jar\generate_poem/" + class_tag  # checkpoints location
        trainPoems = "E:\Desk\MyProjects\Python/NLP_Demo1\File_jar\generate_poem\Poetry_class/" + class_tag + ".txt"  # training file location
        # 训练数据时用,依次更改诗的种类,路径
        # trainPoems = "E:\Desk\MyProjects\Python/NLP_Demo1\File_jar\generate_poem\Poetry_class/yongshi.txt"
        # checkpointsPath = "E:\Desk\MyProjects\Python/NLP_Demo1\File_jar\generate_poem/yongshi"
        trainData = data.POEMS(trainPoems)
        MCPangHu = model.MODEL(trainData)  # initialized with the training data
        # ***** train the 5 class-specific models separately
        # MCPangHu.train(checkpointsPath)
        poems = MCPangHu.testHead(characters, Imbalance_words, checkpointsPath,
                                  Num, JueLv)
        return poems
Example #3
def create_model(name,
                 batch_size,
                 learning_rate=0.0001,
                 wd=0.00001,
                 concat=False,
                 l2_loss=False,
                 penalty=False,
                 coef=0.4,
                 verbosity=0):
    """
  Create a model from model.py with the given configuration
  
  Args:
    name             : name of the model (used to create a specific folder to save/load parameters)
    batch_size       : batch size
    learning_rate    : learning_rate (cross entropy is arround 100* bigger than l2)
    wd               : weight decay factor
    concat           : does this model include direct connections?
    l2_loss          : does this model use l2 loss (if not then cross entropy)
    penalty          : whether to use the edge contrast penalty
    coef             : coef for the edge contrast penalty
    verbosity        : level of details to display
    
  Returns:
    my_model         : created model
  """

    my_model = model.MODEL(name, batch_size, learning_rate, wd, concat,
                           l2_loss, penalty, coef)
    my_model.display_info(verbosity)
    return my_model
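A call might look like the following sketch (the model name and settings are illustrative, not from the source):

# Hypothetical invocation of create_model defined above
my_model = create_model('edge_model', batch_size=32,
                        learning_rate=0.0001, l2_loss=True, verbosity=1)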
Example #4
    def __init__(self, trial, step, size, scale_list, meta_batch_size, meta_lr,
                 meta_iter, task_batch_size, task_lr, task_iter,
                 data_generator, checkpoint_dir, conf):
        print('[*] Initialize Training')
        self.trial = trial
        self.step = step
        self.HEIGHT = size[0]
        self.WIDTH = size[1]
        self.CHANNEL = size[2]
        self.scale_list = scale_list

        self.META_BATCH_SIZE = meta_batch_size
        self.META_LR = meta_lr
        self.META_ITER = meta_iter

        self.TASK_BATCH_SIZE = task_batch_size
        self.TASK_LR = task_lr
        self.TASK_ITER = task_iter

        self.data_generator = data_generator
        self.checkpoint_dir = checkpoint_dir
        self.conf = conf
        '''placeholders'''
        self.inputa = tf.placeholder(dtype=tf.float32,
                                     shape=[
                                         self.META_BATCH_SIZE,
                                         self.TASK_BATCH_SIZE, self.HEIGHT,
                                         self.WIDTH, self.CHANNEL
                                     ])
        self.inputb = tf.placeholder(dtype=tf.float32,
                                     shape=[
                                         self.META_BATCH_SIZE,
                                         self.TASK_BATCH_SIZE, self.HEIGHT,
                                         self.WIDTH, self.CHANNEL
                                     ])

        self.labela = tf.placeholder(dtype=tf.float32,
                                     shape=[
                                         self.META_BATCH_SIZE,
                                         self.TASK_BATCH_SIZE, self.HEIGHT,
                                         self.WIDTH, self.CHANNEL
                                     ])
        self.labelb = tf.placeholder(dtype=tf.float32,
                                     shape=[
                                         self.META_BATCH_SIZE,
                                         self.TASK_BATCH_SIZE, self.HEIGHT,
                                         self.WIDTH, self.CHANNEL
                                     ])
        '''model'''
        self.PARAM = model.Weights(scope='MODEL')
        self.weights = self.PARAM.weights

        self.MODEL = model.MODEL(name='MODEL')
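Note the 5-D placeholder layout: meta-learning tasks are stacked along the first axis and per-task examples along the second. A hedged feed sketch, with illustrative shapes and an assumed instance name `trainer`:

import numpy as np
# e.g. META_BATCH_SIZE=8 tasks, TASK_BATCH_SIZE=4 examples per task, 64x64 RGB
task_batch = np.zeros((8, 4, 64, 64, 3), dtype=np.float32)
feed_dict = {trainer.inputa: task_batch, trainer.labela: task_batch,
             trainer.inputb: task_batch, trainer.labelb: task_batch}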
Example #5
def main(trainPoems, checkpointsPath):
    global Str
    args = defineArgs()
    trainData = data.POEMS(trainPoems)
    MCPangHu = model.MODEL(trainData)
    if args.mode == "train":
        MCPangHu.train()
    else:
        if args.mode == "test":
            poems = MCPangHu.test(checkpointsPath)
            Str = MCPangHu.Get_Str()
        else:
            characters = input("please input chinese character:")
            poems = MCPangHu.testHead(characters)
Example #6
def blackimage():
    if request.method == "POST":
        f = request.files['file']
        print("got file. filename = %s" % f.filename)
        file_path = os.path.join(app.config['UPLOAD_FOLDER'], f.filename)
        f.save(file_path)
        print("saved. file path = %s" % file_path)
        output = model.MODEL(file_path).Make()
        #with open(os.path.join(app.config["UPLOAD_FOLDER"], output)) as f:
        #    f.read()
        print("output -------> ", output)
        #output = file_path
        return redirect(url_for('uploaded_file', filename=output))
        #return render_template("return_image.html", imgpath = output )
    return render_template("black_image.html")
Example #7
    def build_network(self, conf):
        tf.reset_default_graph()

        self.lr_decay = tf.placeholder(tf.float32,
                                       shape=[],
                                       name='learning_rate')

        # Input image
        self.input = tf.placeholder(tf.float32,
                                    shape=[None, None, None, 3],
                                    name='input')
        # Ground truth
        self.label = tf.placeholder(tf.float32,
                                    shape=[None, None, None, 3],
                                    name='label')

        # parameter variables
        self.PARAM = model.Weights(scope='MODEL')
        # model class (without feedforward graph)
        self.MODEL = model.MODEL(name='MODEL')
        # Build the forward graph
        self.MODEL.forward(self.input, self.PARAM.weights)
        self.output = self.MODEL.output

        self.loss_t = tf.losses.absolute_difference(self.label, self.output)

        # Optimizer
        self.opt = tf.train.GradientDescentOptimizer(
            learning_rate=self.lr_decay).minimize(self.loss_t)
        self.init = tf.global_variables_initializer()

        # Variable lists
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                          scope='MODEL')

        self.loader = tf.train.Saver(var_list=self.var_list)

        self.sess = tf.Session(config=conf)
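With the graph built, a single training step could look like the following sketch (assuming `net` is the object whose build_network(conf) ran above, and `lr_img`/`hr_img` are float32 arrays shaped [batch, H, W, 3]):

# Hedged sketch: initialize variables, then run one SGD step on the L1 loss.
net.sess.run(net.init)
_, l1_loss = net.sess.run([net.opt, net.loss_t],
                          feed_dict={net.input: lr_img,
                                     net.label: hr_img,
                                     net.lr_decay: 1e-4})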
Example #8

if __name__ == "__main__":
    train_file = "dataset/poetryTang/poetryTang.txt"
    split_poemTxt(train_file)
    print("按照诗人将数据拆分完成!")
    args = defineArgs()
    rootdir = os.path.dirname(os.path.realpath(__file__)) + "/"
    path = rootdir + "dataset/poetryByPoet"
    f_list = os.listdir(path)
    # print(f_list)
    for i in f_list:
        fn, ex = os.path.splitext(i)

        # os.path.splitext(): split the filename from its extension
        if ex != '.txt':
            continue
        if fn.find("完成_") != -1:
            continue

        type = fn
        trainData = dataUtils.POEMS(type)
        MCPangHu = model.MODEL(trainData, type)
        if args.mode == "train":
            MCPangHu.train()
        else:
            if args.mode == "test":
                poems = MCPangHu.test()
            else:
                characters = input("please input chinese character:")
                poems = MCPangHu.testHead(characters)
Example #9
def main(_):
    print('reading npy...')
    np.random.seed(19940423)  # set the random seed of numpy
    data = np.load(FLAGS.data_dir)  #load data from the data_dir
    train_idx = np.load(FLAGS.train_idx)  #load the indices of the training set
    valid_idx = np.load(
        FLAGS.valid_idx)  #load the indices of the validation set
    test_idx = np.load(FLAGS.test_idx)
    labels = get_data.get_label(
        data, train_idx)  #load the labels of the training set

    print("min:", np.amin(labels))
    print("max:", np.amax(labels))

    print("positive label rate:", np.mean(
        labels))  #print the rate of the positive labels in the training set
    param_setting = "lr-{}_lr-decay_{:.2f}_lr-times_{:.1f}_nll-{:.2f}_l2-{:.2f}_c-{:.2f}".format(
        FLAGS.learning_rate, FLAGS.lr_decay_ratio, FLAGS.lr_decay_times,
        FLAGS.nll_coeff, FLAGS.l2_coeff, FLAGS.c_coeff)
    build_path(FLAGS.summary_dir + param_setting)
    build_path('model/model_{}/{}'.format(FLAGS.dataname, param_setting))

    one_epoch_iter = len(
        train_idx
    ) / FLAGS.batch_size  # compute the number of iterations in each epoch
    print("one_epoch_iter:", one_epoch_iter)

    print('reading completed')
    # configure TensorFlow
    session_config = tf.compat.v1.ConfigProto()
    session_config.gpu_options.allow_growth = True
    sess = tf.compat.v1.Session(config=session_config)

    print('showing the parameters...\n')

    # print all the hyper-parameters in the current training
    for key in FLAGS:
        print("%s\t%s" % (key, FLAGS[key].value))
    print()

    print('building network...')

    #building the model
    hg = model.MODEL(is_training=True)
    global_step = tf.Variable(0, name='global_step', trainable=False)

    learning_rate = tf.compat.v1.train.exponential_decay(
        FLAGS.learning_rate,
        global_step,
        one_epoch_iter * (FLAGS.max_epoch / FLAGS.lr_decay_times),
        FLAGS.lr_decay_ratio,
        staircase=True)
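    # i.e. the rate is multiplied by FLAGS.lr_decay_ratio once every
    # one_epoch_iter * (max_epoch / lr_decay_times) steps, so it decays
    # exactly FLAGS.lr_decay_times times over the full run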

    #log the learning rate
    tf.compat.v1.summary.scalar('learning_rate', learning_rate)

    #use the Adam optimizer
    optimizer = tf.compat.v1.train.AdamOptimizer(learning_rate)
    reset_optimizer_op = tf.compat.v1.variables_initializer(
        optimizer.variables())

    #set training update ops/backpropagation
    var_x_encoder = tf.compat.v1.trainable_variables('feat_encoder')
    update_ops = tf.compat.v1.get_collection(tf.compat.v1.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        if FLAGS.resume:
            train_op = optimizer.minimize(hg.total_loss,
                                          var_list=var_x_encoder,
                                          global_step=global_step)
        else:
            train_op = optimizer.minimize(hg.total_loss,
                                          global_step=global_step)

    merged_summary = tf.compat.v1.summary.merge_all(
    )  # gather all summary nodes together
    summary_writer = tf.compat.v1.summary.FileWriter(
        FLAGS.summary_dir + param_setting + "/",
        sess.graph)  #initialize the summary writer

    sess.run(tf.compat.v1.global_variables_initializer()
             )  # initialize the global variables in tensorflow
    saver = tf.compat.v1.train.Saver(
        max_to_keep=FLAGS.max_keep)  # initialize the model saver

    if FLAGS.saved_ckpt != "":
        saver.restore(sess, FLAGS.saved_ckpt)

    print('building finished')

    # initialize bookkeeping for best-checkpoint tracking
    best_loss = 1e10
    best_iter = 0
    best_macro_f1 = 0.
    best_micro_f1 = 0.

    # "smooth" means averaged: these accumulate per-batch values, which are averaged every check_freq steps
    smooth_nll_loss = 0.0  # label encoder decoder cross entropy loss
    smooth_nll_loss_x = 0.0  # feature encoder decoder cross entropy loss
    smooth_l2_loss = 0.0  # weights regularization
    smooth_c_loss = 0.0  # label encoder decoder ranking loss
    smooth_c_loss_x = 0.0  # feature encoder decoder ranking loss
    smooth_kl_loss = 0.0  # kl divergence
    smooth_total_loss = 0.0  # total loss
    smooth_macro_f1 = 0.0  # macro_f1 score
    smooth_micro_f1 = 0.0  # micro_f1 score

    best_macro_f1 = 0.0  # best macro f1 for ckpt selection in validation
    best_micro_f1 = 0.0  # best micro f1 for ckpt selection in validation
    best_acc = 0.0  # best subset acc for ckpt selection in validation

    temp_label = []
    temp_indiv_prob = []

    best_test_metrics = None

    # training the model
    for one_epoch in range(FLAGS.max_epoch):
        print('epoch ' + str(one_epoch + 1) + ' starts!')
        np.random.shuffle(train_idx)  # randomly shuffle the training indices

        for i in range(int(len(train_idx) / float(FLAGS.batch_size))):
            start = i * FLAGS.batch_size
            end = (i + 1) * FLAGS.batch_size
            input_feat = get_data.get_feat(
                data, train_idx[start:end])  # get the NLCD features
            input_label = get_data.get_label(
                data, train_idx[start:end])  # get the prediction labels

            #train the model for one step and log the training loss
            indiv_prob, nll_loss, nll_loss_x, l2_loss, c_loss, c_loss_x, kl_loss, total_loss, macro_f1, micro_f1 = train_step(
                sess, hg, merged_summary, summary_writer, input_label,
                input_feat, train_op, global_step)

            smooth_nll_loss += nll_loss
            smooth_nll_loss_x += nll_loss_x
            smooth_l2_loss += l2_loss
            smooth_c_loss += c_loss
            smooth_c_loss_x += c_loss_x
            smooth_kl_loss += kl_loss
            smooth_total_loss += total_loss
            smooth_macro_f1 += macro_f1
            smooth_micro_f1 += micro_f1

            temp_label.append(input_label)  #log the labels
            temp_indiv_prob.append(
                indiv_prob)  # log the predicted probability of each label

            current_step = sess.run(global_step)  #get the value of global_step
            lr = sess.run(learning_rate)
            summary_writer.add_summary(MakeSummary('learning_rate', lr),
                                       current_step)

            if current_step % FLAGS.check_freq == 0:  # summarize the current training status and print it out
                nll_loss = smooth_nll_loss / float(FLAGS.check_freq)
                nll_loss_x = smooth_nll_loss_x / float(FLAGS.check_freq)
                l2_loss = smooth_l2_loss / float(FLAGS.check_freq)
                c_loss = smooth_c_loss / float(FLAGS.check_freq)
                c_loss_x = smooth_c_loss_x / float(FLAGS.check_freq)
                kl_loss = smooth_kl_loss / float(FLAGS.check_freq)
                total_loss = smooth_total_loss / float(FLAGS.check_freq)
                macro_f1 = smooth_macro_f1 / float(FLAGS.check_freq)
                micro_f1 = smooth_micro_f1 / float(FLAGS.check_freq)

                temp_indiv_prob = np.reshape(np.array(temp_indiv_prob),
                                             (-1, FLAGS.label_dim))
                temp_label = np.reshape(np.array(temp_label),
                                        (-1, FLAGS.label_dim))

                time_str = datetime.datetime.now().isoformat()
                print(
                    "step=%d  %s\nlr=%.6f\nmacro_f1=%.6f, micro_f1=%.6f\nnll_loss=%.6f\tnll_loss_x=%.6f\tl2_loss=%.6f\nc_loss=%.6f\tc_loss_x=%.6f\tkl_loss=%.6f\ntotal_loss=%.6f\n"
                    %
                    (current_step, time_str, lr, macro_f1, micro_f1,
                     nll_loss * FLAGS.nll_coeff, nll_loss_x * FLAGS.nll_coeff,
                     l2_loss * FLAGS.l2_coeff, c_loss * FLAGS.c_coeff,
                     c_loss_x * FLAGS.c_coeff, kl_loss, total_loss))

                temp_indiv_prob = []
                temp_label = []

                smooth_nll_loss = 0
                smooth_nll_loss_x = 0
                smooth_l2_loss = 0
                smooth_c_loss = 0
                smooth_c_loss_x = 0
                smooth_kl_loss = 0
                smooth_total_loss = 0
                smooth_macro_f1 = 0
                smooth_micro_f1 = 0

            if current_step % int(
                    one_epoch_iter * FLAGS.save_epoch
            ) == 0:  # evaluate the model on the validation set
                print("--------------------------------")
                current_loss, val_metrics = validation_step(
                    sess, hg, data, merged_summary, summary_writer, valid_idx,
                    global_step, 'val')
                macro_f1, micro_f1 = val_metrics['maF1'], val_metrics['miF1']

                # select the best checkpoint based on some metric on the validation set
                # here we use macro F1 as the selection metric but one can use others
                if val_metrics['maF1'] > best_macro_f1:
                    print(
                        'macro_f1:%.6f, micro_f1:%.6f, nll_loss:%.6f, which is better than the previous best one!!!'
                        % (macro_f1, micro_f1, current_loss))

                    best_loss = current_loss
                    best_iter = current_step

                    print('saving model')
                    saved_model_path = saver.save(sess,
                                                  FLAGS.model_dir +
                                                  param_setting + '/model',
                                                  global_step=current_step)
                    print('have saved model to ', saved_model_path)
                    print()

                    if FLAGS.write_to_test_sh:
                        ckptFile = open(
                            FLAGS.test_sh_path.replace('ebird',
                                                       FLAGS.dataname), "r")
                        command = []
                        for line in ckptFile:
                            arg_lst = line.strip().split(' ')
                            for arg in arg_lst:
                                if 'model/model_{}/lr-'.format(
                                        FLAGS.dataname) in arg:
                                    command.append(
                                        'model/model_{}/{}/model-{}'.format(
                                            FLAGS.dataname, param_setting,
                                            best_iter))
                                else:
                                    command.append(arg)
                        ckptFile.close()

                        ckptFile = open(
                            FLAGS.test_sh_path.replace('ebird',
                                                       FLAGS.dataname), "w")
                        ckptFile.write(" ".join(command) + "\n")
                        ckptFile.close()
                best_macro_f1 = max(best_macro_f1, val_metrics['maF1'])
                best_micro_f1 = max(best_micro_f1, val_metrics['miF1'])
                best_acc = max(best_acc, val_metrics['ACC'])

                print("--------------------------------")
Example #10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 26 17:56:05 2018

@author: rahul
"""

import data
import model
import config
import utilities


if __name__ == "__main__":
    # READ DATA
    train_data = data.DATA()
    train_data.build_dataset(config.TRAIN_FILENAME)
    # BUILD MODEL
    net = model.MODEL()
    net.build()
    # TRAIN MODEL
    net.train(train_data)
    # PLOT EMBEDDINGS
    utilities.plot_with_labels(net.embeddings, train_data)
Example #11
        gen_config = config.SmallGenConfig()
        eval_config.batch_size = 1
        eval_config.num_steps = 1
        # READ DATA
        train_data = data.PTB_DATA()
        train_data.load_data(config.TRAIN_FILENAME, model_config.batch_size)
#        valid_data = data.PTB_DATA()
#        valid_data.load_data(config.VALIDATION_FILENAME, model_config.batch_size)
        test_data = data.PTB_DATA()
        test_data.load_data(config.TEST_FILENAME, eval_config.batch_size)
        # BUILD MODEL
        initializer = tf.random_uniform_initializer(-model_config.init_scale,
                                                    model_config.init_scale)
        with tf.name_scope("Train"):
            with tf.variable_scope("Model", reuse=None, initializer=initializer):
                train_model = model.MODEL(model_config, training=True)
                train_model.build()
#        with tf.name_scope("Validate"):
#            with tf.variable_scope("Model", reuse=True, initializer=initializer):
#                valid_model = model.MODEL(model_config, training=False)
#                valid_model.build()
        with tf.name_scope("Test"):
            with tf.variable_scope("Model", reuse=True, initializer=initializer):
                test_model = model.MODEL(eval_config, training=False)
                test_model.build()
        with tf.name_scope("Generate"):
            with tf.variable_scope("Model", reuse=True, initializer=initializer):
                gen_model = model.MODEL(gen_config, training=False)
                gen_model.build()
        # TRAIN MODEL
        model_name = os.path.join(
            config.MODEL_DIR,
            "model" + str(model_config.batch_size) + "_" +
            str(model_config.max_max_epoch) + ".ckpt")
Example #12
    train_config = config.TrainConfig()
    test_config = config.TestConfig()
    # LOAD DATA
    train_data = data.DATA(train_config)
    train_data.read_file(config.TRAIN_PATH, word_to_index)
    print("Train data Loaded")
    test_data = data.DATA(test_config)
    test_data.read_file(config.TEST_PATH, word_to_index)
    print("Test data Loaded")

    # BUILD MODEL
    #initializer = tf.random_uniform_initializer(train_config.init_scale, train_config.init_scale)
    with tf.name_scope("Train"):
        with tf.variable_scope("Model", reuse=None):
            train_model = model.MODEL(train_config,
                                      len(word_to_index),
                                      training=True)
            train_model.build()

    with tf.name_scope("Test"):
        with tf.variable_scope("Model", reuse=True):
            test_model = model.MODEL(test_config,
                                     len(word_to_index),
                                     training=False)
            test_model.build()
    print("Model Built")

    model_name = os.path.join(
        config.MODEL_DIR, "model" + str(train_config.BATCH_SIZE) + "_" +
        str(train_config.NUM_EPOCHS) + ".ckpt")
    #TRAIN MODEL
Example #13
    idx_map = 0
    for i, gate in enumerate(list_data['gates']):
        if gate['type'] == 'NOT':
            gate['mapping'] = best_circuit[idx_map]
            idx_map += 1

    # we save the score along with the name of the run
    score_str = str(round(best, 2))
    with open(f'{name}_{score_str}.pickle', 'wb') as handle:
        pickle.dump(list_data, handle, protocol=pickle.HIGHEST_PROTOCOL)


# create the circuit_mapping object
circuit_mapping = CircuitMapping(library_data)
# create the neural net
net = m.MODEL(20, library_data, args.path_json)

# run the evolution strategy (ES)
# note: you can experiment with the parameters;
# these values seem to give reasonable results
es = EvolutionStrategy(
    net.model.get_weights(),
    get_reward,
    population_size=5,
    sigma=0.01,  # noise std deviation
    learning_rate=0.001,
    decay=0.995,
    num_threads=1)

es.run(args.n_epoch)
save_dict(args.path_json, args.name, verbose=True)
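The get_reward callback handed to EvolutionStrategy is not shown in this snippet; in the usual ES setup it takes a candidate weight list and returns a scalar score. A hypothetical sketch (`evaluate` is an assumed helper, not from the source):

def get_reward(weights):
    # load the perturbed weights into the Keras model and score the result
    net.model.set_weights(weights)
    return evaluate(net)  # assumed to return a scalar; higher is better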
Example #14
import model as m
import tensorflow as tf
import numpy as np

train_images = np.load('Data_pendulum.npy')
train_images = train_images.reshape(train_images.shape[0], 29, 29,
                                    train_images.shape[3], 1).astype('float32')

TRAIN_BUF = 16
BATCH_SIZE = 16
train_dataset = tf.data.Dataset.from_tensor_slices(train_images).shuffle(
    TRAIN_BUF).batch(BATCH_SIZE)

epochs = 10000
model = m.MODEL()
optimizer = tf.keras.optimizers.Adam(5e-4)

for epoch in range(1, epochs + 1):
    for train_x in train_dataset:
        m.compute_apply_gradients(
            model, train_x, optimizer,
            tf.constant(1. - 0.95 * np.exp(-epoch / 1000), dtype=tf.float32))

    if epoch % 500 == 0:
        loss = tf.keras.metrics.Mean()
        for test_x in train_dataset:
            loss(
                m.compute_loss(model, test_x, tf.constant(1.0,
                                                          dtype=tf.float32)))
        elbo = -loss.result()
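Note the weight passed to compute_apply_gradients: 1 - 0.95 * exp(-epoch / 1000) anneals a loss coefficient (presumably the KL term of the ELBO) from about 0.05 at the first epoch toward 1.0, while the evaluation pass above keeps the coefficient fixed at 1.0.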
Example #15
File: test.py Project: sk2299/MPVAE
def main(_):
    tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR)

    print('reading npy...')

    data = np.load(FLAGS.data_dir)
    test_idx = np.load(FLAGS.test_idx)

    print('reading completed')

    session_config = tf.compat.v1.ConfigProto()
    session_config.gpu_options.allow_growth = True
    sess = tf.compat.v1.Session(config=session_config)

    print('building network...')

    classifier = model.MODEL(is_training=False)
    global_step = tf.Variable(0, name='global_step', trainable=False)

    merged_summary = tf.summary.merge_all()
    summary_writer = tf.summary.FileWriter(FLAGS.summary_dir, sess.graph)

    saver = tf.train.Saver(max_to_keep=None)
    saver.restore(sess, FLAGS.checkpoint_path)
    model_id = FLAGS.checkpoint_path.split("-")[-1]

    print('restoring from ' + FLAGS.checkpoint_path)

    def test_step(test_idx, name="Test"):
        print('{}...'.format(name))
        all_nll_loss = 0
        all_l2_loss = 0
        all_c_loss = 0
        all_total_loss = 0

        all_indiv_prob = []
        all_label = []
        all_indiv_max = []

        sigma = []
        real_batch_size = min(FLAGS.testing_size, len(test_idx))

        N_test_batch = int((len(test_idx) - 1) / real_batch_size) + 1

        for i in range(N_test_batch):
            if i % 20 == 0:
                print("%.1f%% completed" % (i * 100.0 / N_test_batch))

            start = real_batch_size * i
            end = min(real_batch_size * (i + 1), len(test_idx))

            input_feat = get_data.get_feat(data, test_idx[start:end])
            input_label = get_data.get_label(data, test_idx[start:end])

            feed_dict = {}
            feed_dict[classifier.input_feat] = input_feat
            feed_dict[classifier.input_label] = input_label
            feed_dict[classifier.keep_prob] = 1.0

            nll_loss, l2_loss, c_loss, total_loss, indiv_prob, covariance = sess.run([classifier.nll_loss, classifier.l2_loss, classifier.c_loss, \
                classifier.total_loss, classifier.indiv_prob, classifier.covariance], feed_dict)

            all_nll_loss += nll_loss * (end - start)
            all_l2_loss += l2_loss * (end - start)
            all_c_loss += c_loss * (end - start)
            all_total_loss += total_loss * (end - start)

            if len(all_indiv_prob) == 0:
                all_indiv_prob = indiv_prob
            else:
                all_indiv_prob = np.concatenate((all_indiv_prob, indiv_prob))

            if len(all_label) == 0:
                all_label = input_label
            else:
                all_label = np.concatenate((all_label, input_label))

        nll_loss = all_nll_loss / len(test_idx)
        l2_loss = all_l2_loss / len(test_idx)
        c_loss = all_c_loss / len(test_idx)
        total_loss = all_total_loss / len(test_idx)
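        # note: these averaged losses are computed but not returned by test_step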
        return all_indiv_prob, all_label

    indiv_prob, input_label = test_step(test_idx, "Test")
    n_label = indiv_prob.shape[1]

    best_test_metrics = None
    for threshold in THRESHOLDS:
        test_metrics = evals.compute_metrics(indiv_prob,
                                             input_label,
                                             threshold,
                                             all_metrics=True)
        if best_test_metrics is None:
            best_test_metrics = {}
            for metric in METRICS:
                best_test_metrics[metric] = test_metrics[metric]
        else:
            for metric in METRICS:
                if 'FDR' in metric:
                    best_test_metrics[metric] = min(best_test_metrics[metric],
                                                    test_metrics[metric])
                else:
                    best_test_metrics[metric] = max(best_test_metrics[metric],
                                                    test_metrics[metric])

    print("****************")
    for metric in METRICS:
        print(metric, ":", best_test_metrics[metric])
    print("****************")
Example #16
from config import *
import data
import model


def defineArgs():
    """define args"""
    parser = argparse.ArgumentParser(description="Chinese_poem_generator.")
    parser.add_argument("-m",
                        "--mode",
                        help="select mode by 'train' or test or head",
                        choices=["train", "test", "head"],
                        default="test")
    return parser.parse_args()


if __name__ == "__main__":
    args = defineArgs()
    trainData = data.POEMS(trainPoems)
    SmartWriter = model.MODEL(trainData)
    if args.mode == "train":
        SmartWriter.train()
    else:
        if args.mode == "test":
            poems = SmartWriter.test()
        else:
            characters = input("please input chinese character:")
            poems = SmartWriter.testHead(characters)
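With the parser above, the script is driven by the -m/--mode flag, e.g. python main.py --mode head (the script filename is assumed); "head" mode then prompts for Chinese characters via input().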
Example #17
"""

import os
os.environ["CUDA_VISIBLE_DEVICES"]="1"
import data
import model
import config
import datetime

if __name__ == "__main__":
    log_name = str(datetime.datetime.now().strftime("%Y%m%d")) + "_" + str(
        config.BATCH_SIZE) + "_" + str(config.NUM_EPOCHS) + ".txt"
    with open(os.path.join(config.LOG_DIR, log_name), "w") as log:
        log.write(str(datetime.datetime.now()) + "\n")
        log.write("Use Pretrained Weights: " + str(config.USE_PRETRAINED) + "\n")
        log.write("Pretrained Model: " + config.PRETRAINED + "\n")
        # READ DATA
        train_data = data.DATA(config.TRAIN_DIR)
        print("Train Data Loaded")
        # BUILD MODEL
        model = model.MODEL()
        print("Model Initialized")
        model.build()
        print("Model Built")
        # TRAIN MODEL
        model.train(train_data, log)
        print("Model Trained")
        # TEST MODEL
        test_data = data.DATA(config.TEST_DIR)
        print("Test Data Loaded")
        model.test(test_data, log)
        print("Image Reconstruction Done")
Example #18
from config import *
import data
import model

def defineArgs():
    """define args"""
    parser = argparse.ArgumentParser(description="Chinese_poem_generator.")
    parser.add_argument("-m", "--mode",
                        help="select mode: 'train', 'test', or 'head'",
                        choices=["train", "test", "head"], default="test")
    return parser.parse_args()

if __name__ == "__main__":
    args = defineArgs()
    trainData = data.POEMS(trainPoems)
    model = model.MODEL(trainData)  # note: rebinding 'model' shadows the imported module
    if args.mode == "train":
        model.train()
    else:
        if args.mode == "test":
            poems = model.test()
        else:
            characters = input("please input chinese character:")
            poems = model.testHead(characters)