Example #1
def train(model, epochs, train_x, train_y, all_len, test_x, test_y):
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())

    for epoch in range(epochs):
        step = 0
        # evaluate on the held-out set at the start of every epoch
        test(sess, model, test_x, test_y)
        batches = data_helper.get_batch(64, train_x, train_y, all_len)
        for batch_x, batch_y, batch_len in batches:
            # one optimization step with dropout enabled
            sess.run(model.train_op,
                     feed_dict={
                         model.inputs: batch_x,
                         model.y: batch_y,
                         model.keep_prob: 0.8
                     })
            # re-run the same batch with dropout disabled to read clean metrics
            y_pred, train_acc, train_loss = sess.run(
                [model.pred, model.acc, model.loss],
                feed_dict={
                    model.inputs: batch_x,
                    model.y: batch_y,
                    model.keep_prob: 1.0
                })
            if step % 100 == 0:
                print("epoch {0} step {1}: train acc {2}, train loss {3}".format(
                    epoch, step, train_acc, train_loss))
                if step % 200 == 0:
                    print("pred value:", y_pred)
            step += 1
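
Every example here iterates over data_helper.get_batch(batch_size, x, y, lengths), which is never shown. For reference, a minimal sketch of a generator with the interface these loops expect; the per-pass reshuffling is an assumption, not the original helper:

import numpy as np

def get_batch(batch_size, x, y, lengths):
    # yields (batch_x, batch_y, batch_len) tuples, matching the loops above;
    # reshuffling on every pass is an assumed detail
    idx = np.random.permutation(len(x))
    for start in range(0, len(idx), batch_size):
        sel = idx[start:start + batch_size]
        yield ([x[i] for i in sel],
               [y[i] for i in sel],
               [lengths[i] for i in sel])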
Example #2
def train():
    # 10000 and 35 were passed as bare literals in the original; named here
    # because later lines refer to them as voc_size and seqlen
    voc_size = 10000
    seqlen = 35

    train_x, train_y, words_dict, labels_dict, seqlen_all = data_helper.load("train.txt", voc_size, seqlen)
    test_x, test_y, seqlen_test = data_helper.load_test_data("test_filter_2.txt", seqlen, words_dict, labels_dict)
    # batch_size, n_class, embedding_size and learn_rate come from module-level config
    model = bilstm_text(voc_size, batch_size, seqlen, n_class, embedding_size, learn_rate)
    op_pred = model.pred
    op_loss = model.loss
    op_train = model.train_op
    op_acc = model.acc
    sess = tf.Session()
    # tf.initialize_all_variables() is deprecated in favor of this
    sess.run(tf.global_variables_initializer())
    epochs = 50
    cnt = 0

    for epoch in range(epochs):
        batches = data_helper.get_batch(64, train_x, train_y, seqlen_all)
        for batch_x, batch_y, batch_len in batches:
            _, train_acc = sess.run(
                [op_train, op_acc],
                feed_dict={model.inputs: batch_x, model.outputs: batch_y, model.seqlen_hdr: batch_len})
            print("epoch {0} iter {1}: acc = {2}".format(epoch, cnt, train_acc))
            if cnt % 50 == 0:
                tmp_pred = sess.run(
                    op_pred,
                    feed_dict={model.inputs: batch_x, model.outputs: batch_y, model.seqlen_hdr: batch_len})
                print(tmp_pred)
                test(model, test_x, test_y, seqlen_test)
            cnt += 1
        print("---------test----------------")
        test(model, test_x, test_y, seqlen_test)
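
Example #2 calls test(model, test_x, test_y, seqlen_test) without passing the session, so the original presumably reached a module-level sess. A minimal sketch under that assumption, with feed keys matching the training loop:

def test(model, test_x, test_y, seqlen_test):
    # assumes a module-level `sess`; reports accuracy over the full test set
    acc_test = sess.run(model.acc,
                        feed_dict={model.inputs: test_x,
                                   model.outputs: test_y,
                                   model.seqlen_hdr: seqlen_test})
    print("test acc:", acc_test)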
Example #3
def main(_):
    # build model
    model = Model("train")
    model.build()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())

        saver = tf.train.Saver(max_to_keep=cfg.max_checkpoints_to_keep)

        if os.path.exists(os.path.join(cfg.model_dir, model.nickname, "checkpoint")):
            model_file = tf.train.latest_checkpoint(os.path.join(cfg.model_dir, model.nickname))
            saver.restore(sess, model_file)
        else:
            if not os.path.exists(os.path.join(cfg.model_dir, model.nickname)):
                os.makedirs(os.path.join(cfg.model_dir, model.nickname))
        # training loop
        for epoch in range(cfg.epochs):
            print("epoch {}/{}".format(epoch + 1, cfg.epochs))
            for i, samples in enumerate(get_batch(os.path.join(cfg.train_dir, cfg.data_filename), cfg.batch_size, True)):
                batch_syn, batch_bg = samples
                step = tf.train.global_step(sess, model.global_step)
                batch_syn = np.asarray(batch_syn, "float32")
                batch_bg = np.asarray(batch_bg, "float32")
                feed_dict = {model.bg_img: batch_bg, model.syn_img: batch_syn}

                if step % cfg.num_steps_per_display == 0:
                    _, lr, total_loss, mse, ssim, psnr = sess.run(
                        [model.train_op, model.lr, model.total_loss, model.mse,
                         model.ssim, model.psnr],
                        feed_dict=feed_dict)
                    print("[{}/{}] lr: {:.8f}, total_loss: {:.6f}, mse: {:.6f}, ssim: {:.4f}, "
                          "psnr: {:.4f}".format(epoch, step, lr, total_loss, mse, ssim, psnr))
                else:
                    sess.run(model.train_op, feed_dict=feed_dict)
            saver.save(sess, os.path.join(cfg.model_dir, model.nickname, 'model.epoch-{}'.format(epoch)))
        saver.save(sess, os.path.join(cfg.model_dir, model.nickname, 'model.final-{}'.format(cfg.epochs)))
        print(" ------ Arriving at the end of data ------ ")
Example #4
def main(_):
    # build model
    model = Model("eval")
    model.build()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())

        saver = tf.train.Saver(max_to_keep=cfg.max_checkpoints_to_keep)

        if os.path.exists(
                os.path.join(cfg.model_dir, model.nickname, "checkpoint")):
            model_file = tf.train.latest_checkpoint(
                os.path.join(cfg.model_dir, model.nickname))
            saver.restore(sess, model_file)
        else:
            # nothing to evaluate without a trained checkpoint
            print("No checkpoint found under {}; aborting evaluation.".format(
                os.path.join(cfg.model_dir, model.nickname)))
            return

        ssim_list = list()
        psnr_list = list()
        mse_list = list()
        time_list = list()
        for batch_syn, batch_bg in tqdm(
                get_batch(os.path.join(cfg.test_dir, cfg.data_filename),
                          cfg.batch_size)):
            batch_syn = np.asarray(batch_syn, "float32")
            batch_bg = np.asarray(batch_bg, "float32")
            feed_dict = {model.bg_img: batch_bg, model.syn_img: batch_syn}

            start = time()
            mse, ssim, psnr = sess.run([model.mse, model.ssim, model.psnr],
                                       feed_dict=feed_dict)
            end = time()

            ssim_list.append(ssim)
            psnr_list.append(psnr)
            mse_list.append(mse)
            time_list.append(end - start)

        avg_ssim = np.mean(ssim_list)
        avg_psnr = np.mean(psnr_list)
        avg_mse = np.mean(mse_list)
        avg_time = np.mean(time_list) / cfg.batch_size

        if not os.path.exists(cfg.metric_dir):
            os.makedirs(cfg.metric_dir)

        with open(os.path.join(cfg.metric_dir, 'metrics.txt'), 'a') as f:
            f.write("os:\t{}\t\t\tdate:\t{}\n".format(platform.system(),
                                                      datetime.now()))
            f.write("model:\t{}\t\timage_size:\t{}\n".format(
                model.nickname, cfg.crop_size))
            f.write("data:\t{}\t\tgpu_id:\t{}\n".format(
                cfg.data_filename, cfg.gpu_id))
            f.write("speed:\t{:.8f} s/item\tmse:\t{:.8f}\n".format(
                avg_time, avg_mse))
            f.write("ssim:\t{:.8f}\t\tpsnr:\t{:.8f}\n\n".format(
                avg_ssim, avg_psnr))

        print(" ------ Arriving at the end of data ------ ")
Example #5
# keep the raw logits: softmax_cross_entropy_with_logits applies softmax
# internally, so feeding it softmax probabilities would apply softmax twice
logits = tf.matmul(h_drop, full_W) + full_B
outputs = tf.nn.softmax(logits)
pred = tf.argmax(outputs, 1)
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
acc = tf.reduce_mean(tf.cast(tf.equal(pred, tf.argmax(labels, 1)), tf.float32))
train_op = tf.train.AdamOptimizer(learning_rate).minimize(loss)
sess = tf.Session()
sess.run(tf.global_variables_initializer())

train_x, train_y, words_dict, labels_dict, all_len = data_helper.load("data/train.txt", 1000, s_limit_len)
test_x, test_y, testlen = data_helper.load_test_data("data/test_filter_2.txt", s_limit_len, words_dict, labels_dict)

def test(sess, acc, pred, test_x, test_y):
    # inputs, labels and keep_prob are the placeholders defined earlier in the script
    y_pred, acc_test = sess.run([pred, acc], feed_dict={inputs: test_x, labels: test_y, keep_prob: 1.0})
    y_true = np.argmax(test_y, 1)  # numpy argmax avoids adding a new graph op per call
    print(metrics.classification_report(y_true, y_pred))


for epoch in range(1000):
    step = 0
    test(sess, acc, pred, test_x, test_y)
    batches = data_helper.get_batch(64, train_x, train_y, all_len)
    for batch_x, batch_y, batch_len in batches:
        _, loss_, acc_, pred_list = sess.run(
            [train_op, loss, acc, pred],
            feed_dict={inputs: batch_x, labels: batch_y, keep_prob: 0.5})
        if step % 50 == 0:
            print(pred_list[:15])
            print("epoch-{0} iter-{1} loss: {2} acc: {3}".format(epoch, step, loss_, acc_))
        step += 1
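
The original version of this snippet built the loss from the softmax outputs, applying softmax twice. A quick standalone check of how the two constructions differ (TF1 style, matching the examples here):

import tensorflow as tf

logits = tf.constant([[2.0, 0.5, -1.0]])
labels = tf.constant([[1.0, 0.0, 0.0]])
# correct: pass raw logits
loss_ok = tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels)
# bug: probabilities passed where logits are expected
loss_bug = tf.nn.softmax_cross_entropy_with_logits(logits=tf.nn.softmax(logits), labels=labels)

with tf.Session() as sess:
    print(sess.run([loss_ok, loss_bug]))  # the two values differ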
Example #6
## now test if it works at all, try one step

with tf.Session() as sess:
    saver = tf.train.Saver(max_to_keep=5)
    sess.run(tf.global_variables_initializer())
    latest_ckpt = tf.train.latest_checkpoint(config.CPT_PATH)
    if latest_ckpt is not None:
        saver.restore(sess, latest_ckpt)
        print("Model restored.")

    losses = list()
    for e in range(1, config.epochs + 1):
        #shuffle(batches)  # for debugging purposes, don't randomize batches for now
        for idx, ids in enumerate(batches, 1):
            #ids = [18948, 18949, 18950, 18953, 18954, 18957, 18958, 18959]
            pad_encoder_batch, pad_decoder_batch, source_lengths, target_lengths, hrnn_lengths = helper.get_batch(
                train_enc_tokens, train_dec_tokens, vocab_to_int, ids)
            ## for debugging
            #pad_encoder_batch,pad_decoder_batch,source_lengths,target_lengths,hrnn_lengths,max_length = pickle.load(open('debug.p','rb'))
            if target_lengths[0] > config.max_target_sentence_length:
                continue
            try:
                _, loss = sess.run(
                    [train_op, cost], {
                        input_data: pad_encoder_batch,
                        targets: pad_decoder_batch,
                        lr: config.learning_rate,
                        target_sequence_length: target_lengths,
                        source_sequence_length: source_lengths,
                        keep_prob: config.keep_probability,
                        hrnn_sequence_length: hrnn_lengths
                    })
                losses.append(loss)
            except tf.errors.OpError as err:
                # skip a batch the graph cannot run instead of aborting training
                print("skipping batch {}: {}".format(idx, err))
Example #7
init = tf.global_variables_initializer()
sess.run(init)


def test(sess, acc, test_x, test_y, test_len):
    acc_test = sess.run(acc,
                        feed_dict={
                            inputs: test_x,
                            outputs: test_y,
                            seqlen_hdr: test_len
                        })
    print("test acc:", acc_test)


for epoch in range(200):
    batches = data_helper.get_batch(64, train_x, train_y, seqlen_all)
    cnt = 0
    for batch_x, batch_y, batch_len in batches:
        # labels arrive already one-hot encoded; otherwise convert with
        # tf.one_hot(batch_y, n_class) before feeding
        _, train_loss, train_acc = sess.run([train_op, loss, acc],
                                            feed_dict={
                                                inputs: batch_x,
                                                outputs: batch_y,
                                                seqlen_hdr: batch_len
                                            })
        print("iter {0}: train loss {1}, train acc {2}".format(
            cnt, train_loss, train_acc))
        cnt += 1