Example No. 1
def evaluate():
    with tf.Graph().as_default():

        #        log_dir = 'C://Users//kevin//Documents//tensorflow//VGG//logsvgg//train//'
        log_dir = './logs/train/'
        test_dir = '.'

        data, labels = input_data.read_data(data_dir=test_dir,
                                            is_train=False,
                                            batch_size=BATCH_SIZE,
                                            shuffle=False,
                                            n_test=n_test)

        logits = FNET.FNET(data,
                           N_CLASSES,
                           IS_PRETRAIN,
                           train=False,
                           droprate=1)
        correct = tools.num_correct_prediction(logits, labels)
        saver = tf.train.Saver(tf.global_variables())

        with tf.Session() as sess:

            print("Reading checkpoints...")
            ckpt = tf.train.get_checkpoint_state(log_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split(
                    '-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('Loading success, global_step is %s' % global_step)
            else:
                print('No checkpoint file found')
                return

            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(sess=sess, coord=coord)

            try:
                print('\nEvaluating......')
                num_step = int(math.floor(n_test / BATCH_SIZE))
                num_sample = num_step * BATCH_SIZE
                step = 0
                total_correct = 0
                while step < num_step and not coord.should_stop():
                    batch_correct = sess.run(correct)
                    total_correct += np.sum(batch_correct)
                    step += 1
                print('Total testing samples: %d' % num_sample)
                print('Total correct predictions: %d' % total_correct)
                print('Average accuracy: %.2f%%' %
                      (100 * total_correct / num_sample))
            except Exception as e:
                coord.request_stop(e)
            finally:
                coord.request_stop()
                coord.join(threads)
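
The helpers FNET.FNET and tools.num_correct_prediction belong to the project and are not shown above. A minimal sketch of what such a counting helper often looks like in TF 1.x, assuming integer class labels (hypothetical code, not the project's):

def num_correct_prediction(logits, labels):
    # Count how many argmax predictions in the batch match the integer labels.
    correct = tf.equal(tf.argmax(logits, 1), tf.cast(labels, tf.int64))
    return tf.reduce_sum(tf.cast(correct, tf.int32))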
Example No. 2
def main(_):
    count = [] # List of (word, count) for all the data
    word2idx = {} # Dict (word, ID) for all the data

    if not os.path.exists(FLAGS.checkpoint_dir):
        os.makedirs(FLAGS.checkpoint_dir)

    # Lists of word IDs
    if FLAGS.preloaded_data:
        with open('preloaded_telenor/train.pickle', 'rb') as f:
            train_data = pickle.load(f)
        with open('preloaded_telenor/val.pickle', 'rb') as f:
            valid_data = pickle.load(f)
            word2idx = pickle.load(f)
    else:
        train_data = read_data('%s/train.pickle' % FLAGS.data_dir, count, word2idx)
        valid_data = read_data('%s/val.pickle' % FLAGS.data_dir, count, word2idx)
        if FLAGS.is_test:
            test_data = read_data('%s/test.pickle' % FLAGS.data_dir, count, word2idx)

    idx2word = dict(zip(word2idx.values(), word2idx.keys()))
    FLAGS.nwords = len(word2idx)

    pp.pprint(flags.FLAGS.__flags)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:

        # Build the Memory Network
        model = MemN2N(FLAGS, sess)
        model.build_model()

        if len(FLAGS.infere) > 0:
            print('Make sure the training and validation data supplied are the same as during the training of the model (idx2word)')
            question = convert_question(FLAGS.infere, word2idx)
            model.infere(question, idx2word) # Prediction
        elif FLAGS.is_test:
            model.run(valid_data, test_data, idx2word) # Testing
        else:
            model.run(train_data, valid_data, idx2word) # Training
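
convert_question is project code that is not reproduced here. A plausible sketch, assuming whitespace tokenization and index 0 as the unknown-word fallback (both are assumptions, not confirmed by the source):

def convert_question(question, word2idx):
    # Map each whitespace-separated token to its vocabulary ID; unknown words map to 0.
    return [word2idx.get(w, 0) for w in question.lower().split()]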
Example No. 3
def main():

    np.random.seed(42)

    with tf.Session() as sess:
        cond_var_auto_enc = cvae.ConditionalVariationalAutoencoder(
            sess, S_DIM, C_DIM, HIDDEN_DIM)
        saver = tf.train.Saver()
        saver.restore(sess, MODEL)

        all_cond_inputs, all_inputs = input_data.read_data()
        all_idx = range(len(all_inputs))
        np.random.shuffle(all_idx)

        test_num = int(0.2 * len(all_idx))
        test_cond_inputs = all_cond_inputs[all_idx[-test_num:], :]
        test_inputs = all_inputs[all_idx[-test_num:], :]
        test_idx = range(len(test_inputs))

        all_inputs_mean = []
        all_outputs_mean = []
        all_gen_mean = []

        # plot generating examples
        plot_ex = 6
        while True:
            np.random.shuffle(test_idx)
            fig = plt.figure(figsize=(14, 6))

            gs = gridspec.GridSpec(plot_ex, plot_ex)
            for i in xrange(plot_ex):
                for j in xrange(plot_ex):
                    idx = i * plot_ex + j

                    cond_inputs = test_cond_inputs[
                        test_idx[idx]:test_idx[idx] + 1, :]
                    inputs = test_inputs[test_idx[idx]:test_idx[idx] + 1, :]

                    ax = plt.subplot(gs[i:i + 1, j:j + 1])
                    ax.plot(range(C_DIM), cond_inputs[0], 'b')

                    for _ in xrange(20):
                        gen = cond_var_auto_enc.generate(cond_inputs)
                        ax.plot(range(C_DIM, C_DIM + S_DIM),
                                gen[0],
                                'r',
                                alpha=0.5)

                    ax.plot(range(C_DIM, C_DIM + S_DIM), inputs[0], 'b')

            plt.show()
Example No. 4
def main():

    data = read_data()
    img_data = data['train_images']
    img_labels = data['train_labels']

    knn = KNN(img_data, img_labels)

    test_img_data = random.choice(data['t10k_images'])
    show_img(test_img_data)
    result = knn.L2Classifier(test_img_data)
    print(result)
    pre_label = sorted(result, key=lambda x: result[x])[-1]
    print(pre_label)
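
Assuming result maps candidate labels to scores where larger means better (which is what the sort-and-take-last above implies), the same prediction can be read more directly, up to tie-breaking:

pre_label = max(result, key=result.get)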
Example No. 5
def main():

    data = read_data()
    learning_rate = 0.001
    max_steps = 10000
    height = 28
    width = 28

    X = tf.placeholder(tf.float32, [None, height * width])
    Y = tf.placeholder(tf.float32, [None, 10])

    w = tf.Variable(tf.random_normal([height * width, 10]))
    b = tf.Variable(tf.random_normal([10]))

    out = tf.add(tf.matmul(X, w), b)
    norm = tf.contrib.slim.batch_norm(out, is_training=True)
    Y_out = tf.nn.softmax(norm)

    cross_entropy = -tf.reduce_sum(
        Y * tf.log(tf.clip_by_value(Y_out, 1e-8, tf.reduce_max(Y_out))))
    train_op = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)

    correct_prediction = tf.equal(tf.argmax(Y, 1), tf.argmax(Y_out, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, "float"))

    init = tf.global_variables_initializer()

    with tf.Session() as sess:

        sess.run(init)

        for i in range(1, max_steps):
            x, y = data['train_images'], data['train_labels']
            loss, _ = sess.run([cross_entropy, train_op],
                               feed_dict={
                                   X: x,
                                   Y: y
                               })

            if i % 10 == 0:
                print('step:{} cross_entropy:{}'.format(i, loss))

            if i % 100 == 0:
                x, y = data['t10k_images'], data['t10k_labels']
                acc = sess.run(accuracy, feed_dict={X: x, Y: y})
                print('accuracy:{}'.format(acc))
                if acc > 0.9:
                    break
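
The hand-rolled, clipped cross-entropy above is easy to get numerically wrong. A common TF 1.x alternative (a sketch, not part of the original example, available from roughly TF 1.5 onward) computes the loss from the pre-softmax activations instead:

# Same placeholders X and Y as above; `norm` is the batch-normalized pre-softmax output.
cross_entropy = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(labels=Y, logits=norm))
train_op = tf.train.AdamOptimizer(learning_rate).minimize(cross_entropy)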
Example No. 6
def run_training():
    """train model for a number of steps"""
    print(time.strftime("%Y-%m-%d %H:%M:%S") + "  start reading data")
    data_sets = input_data.read_data("invited_info_trainoutput.txt")
    print(time.strftime("%Y-%m-%d %H:%M:%S") + "  end reading data")
    with tf.Graph().as_default():
        docs_placeholder, labels_placeholder, keep_prob_placeholder = placeholder_inputs(
            FLAGS.batch_size)
        logits = model.inference(docs_placeholder, FLAGS.hidden1,
                                 FLAGS.hidden2, keep_prob_placeholder)
        loss = model.loss(logits, labels_placeholder)
        train_op = model.training(loss, FLAGS.learning_rate)
        eval_correct = model.evaluation(logits, labels_placeholder)
        summary_op = tf.merge_all_summaries()
        init = tf.initialize_all_variables()
        saver = tf.train.Saver()
        sess = tf.Session()
        summary_writer = tf.train.SummaryWriter(FLAGS.train_dir, sess.graph)
        sess.run(init)
        for step in range(FLAGS.max_steps):
            start_time = time.time()
            feed_dict = fill_feed_dict(data_sets.train, docs_placeholder,
                                       labels_placeholder,
                                       keep_prob_placeholder, 0.5)
            _, loss_value = sess.run([train_op, loss], feed_dict)
            duration = time.time() - start_time

            if step % 100 == 0:
                print('Step %d: loss = %.2f (%.3f sec)' %
                      (step, loss_value, duration))
                # Update the events file.
                summary_str = sess.run(summary_op, feed_dict=feed_dict)
                summary_writer.add_summary(summary_str, step)
                summary_writer.flush()
            if (step + 1) % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_file = os.path.join(FLAGS.train_dir, 'checkpoint')
                saver.save(sess, checkpoint_file, global_step=step)
                # Evaluate against the training set.
                print('Training Data Eval:')
                do_eval(sess, eval_correct, docs_placeholder,
                        labels_placeholder, keep_prob_placeholder,
                        data_sets.train)
                # Evaluate against the validation set.
                print('Validation Data Eval:')
                do_eval(sess, eval_correct, docs_placeholder,
                        labels_placeholder, keep_prob_placeholder,
                        data_sets.validation)
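
fill_feed_dict and do_eval are project helpers that are not reproduced here. A minimal sketch of what fill_feed_dict might look like, assuming the data set object exposes a next_batch method (an assumption, not confirmed by the source):

def fill_feed_dict(data_set, docs_pl, labels_pl, keep_prob_pl, keep_prob):
    # Pull one batch and bind it to the graph placeholders.
    docs_feed, labels_feed = data_set.next_batch(FLAGS.batch_size)
    return {docs_pl: docs_feed, labels_pl: labels_feed, keep_prob_pl: keep_prob}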
Example No. 7
File: train.py  Project: rajat95/vae
def main():

    np.random.seed(42)

    with tf.Session() as sess:
        cond_var_auto_enc = cvae.ConditionalVariationalAutoencoder(
            sess, S_DIM, C_DIM, HIDDEN_DIM)
        saver = tf.train.Saver()

        all_cond_inputs, all_inputs = input_data.read_data()
        all_idx = range(len(all_inputs))
        np.random.shuffle(all_idx)

        train_num = int(0.8 * len(all_idx))
        train_cond_inputs = all_cond_inputs[all_idx[:train_num], :]
        train_inputs = all_inputs[all_idx[:train_num], :]
        train_idx = range(len(train_inputs))

        test_num = int(0.2 * len(all_idx))
        test_cond_inputs = all_cond_inputs[all_idx[-test_num:], :]
        test_inputs = all_inputs[all_idx[-test_num:], :]

        for ep in xrange(TRAIN_EPOCHS):

            np.random.shuffle(train_idx)
            steps = int(len(train_idx) / BATCH_SIZE)
            train_loss = 0

            for i in xrange(steps):
                cond_inputs = train_cond_inputs[train_idx[BATCH_SIZE *
                                                          i:BATCH_SIZE *
                                                          (i + 1)], :]
                inputs = train_inputs[train_idx[BATCH_SIZE * i:BATCH_SIZE *
                                                (i + 1)], :]
                train_loss += cond_var_auto_enc.train(inputs, cond_inputs)

            train_loss /= float(steps)

            _, test_loss = cond_var_auto_enc.reconstruct(
                test_inputs, test_cond_inputs)

            print 'epoch %d train_loss %0.3f test_loss %0.3f' % \
                (ep, train_loss, test_loss)

            save_path = saver.save(
                sess, MODEL_SAVE_PATH + "nn_model_" + str(ep) + ".ckpt")
Example No. 8
def get_pre_acc(num=100):

    data = read_data()
    img_data = data['train_images']
    img_labels = data['train_labels']

    knn = KNN(img_data, img_labels)

    acc = 0
    for i in range(num):
        a = random.randint(0, len(data['t10k_labels']) - 1)
        test_img_data = data['t10k_images'][a]
        test_img_label = data['t10k_labels'][a]
        result = knn.L2Classifier(test_img_data)
        pre_label = sorted(result, key=lambda x: result[x])[-1]
        test_label = get_label(test_img_label)
        acc += (pre_label == test_label)
    print(acc / num)
Example No. 9
# MILP
time_limit_mip = 3600         # time limit in seconds for mip_LB
opt_tol_mip = 0.005

# grid
dist_min = 0.2                    # arbitrary
p_x = 2                         # start grid with p_x*p_y partitions
p_y = 2
n_x = 2                         # adds p_x + n_x and p_y + n_y in each iteration of the bilevel decomposition
n_y = 2

# ################################################################################################################

# Input data
data = read_data(datafolder)

# Create minlp model
minlp = create_multiperiod_minlp(data, dist_min, [])
# nlpsolver = SolverFactory('gams')
# nlpsolver.solve(minlp,
#                 tee=True,
#                 add_options=['option reslim=3600; option optcr = 0.02;'],
#                 # keepfiles=True,
#                 solver='antigone',
#                 load_solutions=True
#                 )
# minlp.w.pprint()
# minlp.fac_x.pprint()
# minlp.fac_y.pprint()
Example No. 10
import os

#train_dir = 'Lhand'
# define some constants
LR = 0.001  # define learning rate
batch_size = 16  # define batch size
in_size = 200 * 200  # define input size
out_size = 24  # define output size
dropout = 0.5  # the probability to dropout
epoch = 50
train_data_dir = '/home/viplab/Desktop/petersci/CEDL_HW1/data/frames/train/'
label_dir = '/home/viplab/Desktop/petersci/CEDL_HW1/data/labels/'
test_data_dir = '/home/viplab/Desktop/petersci/CEDL_HW1/data/frames/test/'
save_dir = '/home/viplab/Desktop/petersci/CEDL_HW1/code/checkpoints/'
image_list, label_list = input_data.read_data(train_data_dir,
                                              label_dir,
                                              train=True)
test_image, test_label = input_data.read_data(test_data_dir,
                                              label_dir,
                                              train=False)
N_sample = len(image_list)

batch_xs, batch_ys = input_data.batch_generate(image_list, label_list, 200,
                                               200, batch_size, 3000)
test_batch_xs, test_batch_ys = input_data.batch_generate(
    test_image, test_label, 200, 200, batch_size, 3000)
# define placeholder for inputs to network
xs = tf.placeholder(tf.float32, [None, 200, 200, 3])
ys = tf.placeholder(tf.float32, [None, out_size])
keep_prob = tf.placeholder(tf.float32)
# rgb_logits and flow_logits come from model code that is not included in this excerpt
logits = rgb_logits + flow_logits

y = tf.nn.softmax(logits)
y_ = tf.placeholder(dtype=tf.float32, shape=[None, 101])

cross_entropy = -tf.reduce_sum(y_ * tf.log(tf.clip_by_value(y, 1e-7, 1)))
train_step = tf.train.AdamOptimizer(1e-3).minimize(cross_entropy)

correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

sess = tf.Session()
sess.run(tf.global_variables_initializer())
ckpt_util.restore(sess)
one_element = input_data.read_data(sess)
for i in range(3000):
    element = sess.run(one_element)
    rgb_datas = element[0]
    flow_datas = element[1]
    labels = input_data.one_hot([b.decode() for b in element[-1].tolist()])
    sess.run(train_step,
             feed_dict={
                 rgb_input: rgb_datas,
                 flow_input: flow_datas,
                 y_: labels,
                 keep_prob: 0.6
             })
    if i % 50 == 0:
        element = sess.run(one_element)
        rgb_datas = element[0]
Example No. 12
# https://qiita.com/taigamikami/items/6c69fc813940f838e96c

import tensorflow as tf
# NumPy is often used for loading, manipulating, and preprocessing data.
import numpy as np
import matplotlib.pyplot as plt
import input_data

# ====================================
# Training data
# ====================================
data = input_data.read_data("train")
x_train = data.T[0]
y_train = data.T[1]

#
batch_size = len(x_train)
print("batch_size: %d" % batch_size)

input_fn = tf.estimator.inputs.numpy_input_fn({"x": x_train},
                                              y_train,
                                              batch_size=batch_size,
                                              num_epochs=None,
                                              shuffle=True)

train_input_fn = tf.estimator.inputs.numpy_input_fn({"x": x_train},
                                                    y_train,
                                                    batch_size=batch_size,
                                                    num_epochs=1000,
                                                    shuffle=False)
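
The snippet stops after building the two input functions. A minimal sketch of how they could be consumed with a simple linear regressor (nothing below is from the original code; the feature key "x" must match the one used in numpy_input_fn):

feature_columns = [tf.feature_column.numeric_column("x", shape=[1])]
estimator = tf.estimator.LinearRegressor(feature_columns=feature_columns)
estimator.train(input_fn=input_fn, steps=1000)        # train on the shuffled input_fn
print(estimator.evaluate(input_fn=train_input_fn))    # one evaluation pass over the data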
Example No. 13
from input_data import read_data
from Point import *
from algorithms import *

files = [
    "./data/instances/mona_1000.txt", "./data/instances/lu980.txt",
    "./data/instances/ja_1000.txt", "./data/instances/random_1.txt",
    "./data/instances/random_2.txt", "./data/instances/random_3.txt"
]
for file in files:
    data = read_data(file)
    matrix_distance = [[Distance(point1, point2) for point2 in data]
                       for point1 in data]

    solution = iterated_local_search(greedy(matrix_distance), amount_iter=20)

    cost = solution.get_cost() + Distance.distance(solution.get_path()[-1],
                                                   solution.get_path()[0])
    print(f"{file}: {solution}")
    print(f"{file}: {cost}")
Example No. 14
#coding=utf-8

import tensorflow as tf
from input_data import read_data
# Data folder
data_dir = "G:/org_img/change_img"

# Train or test
train = True
#train = None
# Model file path
model_path = "G:/model/child_image_model"

# Read and preprocess the data from the folder
fpaths, datas, labels = read_data(data_dir)

# Count how many image classes there are; this becomes the output size (dimension)
num_classes = len(set(labels))

# Define placeholders for the inputs and labels
# Input images are 128*128 RGB
input_datas = tf.placeholder(tf.float32, [None, 128, 128, 3])
input_labels = tf.placeholder(tf.int32, [None])
# Container for the dropout rate: 0.25 during training, 0 during testing
# Dropout limits the capacity of the CNN and helps prevent overfitting
dropout_placeholdr = tf.placeholder(tf.float32)


def create_conv(input_placeholder):

    # Define conv layer 0: 20 kernels of size 5, activated with ReLU
Example No. 15
def training(train_size, test_size, cv_size, training_batch_size, training_rate):
	print train_size, test_size, cv_size, training_batch_size, training_rate
	import input_data
	gomoku = input_data.read_data(train_size, cv_size, test_size, 2);
	if tf.gfile.Exists('tmp/pSigma_white'):
		tf.gfile.DeleteRecursively('tmp/pSigma_white')
	tf.gfile.MakeDirs('tmp/pSigma_white')

	# placehold and input
	with tf.name_scope('input'):
		x = tf.placeholder(tf.float32, [None, 15, 15, 5], 'input_layer')
		y_ = tf.placeholder(tf.float32, [None, 15 * 15], 'y_label')

	batch_size = training_batch_size
	# print type(t)
	# print batch_size
	# neural network
	hidden_conv_layer1 = nn_layer(batch_size, x, [5, 5, 5, 384], 384, 'conv_layer_1')
	hidden_conv_layer2 = nn_layer(batch_size, hidden_conv_layer1, [3, 3, 384, 48], 48, 'conv_layer_2')
	hidden_conv_layer3 = nn_layer(batch_size, hidden_conv_layer2, [3, 3, 48, 48], 48, 'conv_layer_3')
	hidden_conv_layer4 = nn_layer(batch_size, hidden_conv_layer3, [3, 3, 48, 48], 48, 'conv_layer_4')
	hidden_conv_layer5 = nn_layer(batch_size, hidden_conv_layer4, [3, 3, 48, 48], 48, 'conv_layer_5')
	hidden_conv_layer6 = nn_layer(batch_size, hidden_conv_layer5, [3, 3, 48, 48], 48, 'conv_layer_6')
	y = nn_layer(batch_size, hidden_conv_layer6, [1, 1, 48, 1], 1, 'output_layer', tf.nn.softmax, True)

	# results
	with tf.name_scope('results'):
		loss = tf.reduce_mean(-tf.reduce_mean(y_ * tf.log(tf.clip_by_value(y, 1e-10, 1.0)), reduction_indices = 1))
		# loss = tf.reduce_mean(-tf.reduce_mean(y_ * tf.log(y), reduction_indices = 1));
		tf.scalar_summary('loss', loss)

	with tf.name_scope('evaluation'):
		with tf.name_scope('correct_prediction'):
			correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
		with tf.name_scope('accuracy'):
			accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
		tf.scalar_summary('accuracy', accuracy)

	# initialize
	order = 'o'
	'''while ((order != 'y') & (order != 'N')):
		order = raw_input('Continue the train last time? [y/N]')'''
	order = 'N'

	rec = []
	for k in range(10):
		training_rate = training_rate / 2.0;
		saver = tf.train.Saver(variable_set)
		train_step = tf.train.AdamOptimizer(training_rate).minimize(loss)
		init = tf.initialize_all_variables()
		merged = tf.merge_all_summaries()
		
		max_acc = 0

		# train
		with tf.Session() as sess:
			train_writer = tf.train.SummaryWriter('tmp/pSigma_white/train', sess.graph)
			test_writer = tf.train.SummaryWriter('tmp/pSigma_white/test', sess.graph)

			sess.run(init)
			# if (order == 'y'):
			if (k > 0):
				saver.restore(sess, 'tmp/pSigma_white.ckpt')

			for i in range(40001):
				if (i % 100 == 0):
					sacc = 0
					slos = 0

					for j in range(cv_size / training_batch_size):
						batch_xs, batch_ys = gomoku.crossvalidation.next_batch(training_batch_size)
						summary, acc, los = sess.run([merged, accuracy, loss], feed_dict={x: batch_xs, y_: batch_ys})
						sacc = sacc + acc
						slos = slos + los

					av_acc = sacc / (cv_size / training_batch_size)
					av_los = slos / (cv_size / training_batch_size)

					rec.append([av_acc, av_los])

					test_writer.add_summary(summary, i)
					#if ((i % 250 == 0) | ((i % 50 == 0) & (i < 250))):
					print 'Step %s: Accuracy(%s), Loss(%s)' % (i + k * 40000, av_acc, av_los)
					if (sacc > max_acc):
						max_acc = acc
					save_path = saver.save(sess, 'tmp/pSigma_white.ckpt')
					# print 'successfully saved in path', save_path

					file_ob = open('traing.txt', 'w+')
					for j in range(len(rec)):
						file_ob.write('Step %s: Accuracy(%s), Loss(%s)\n' % (j * 100, rec[j][0], rec[j][1]))
					file_ob.close()
				else:
					batch_xs, batch_ys = gomoku.train.next_batch(training_batch_size)
					summary, _, yt, lossv  = sess.run([merged, train_step, y, loss], feed_dict={x: batch_xs, y_: batch_ys})
					train_writer.add_summary(summary, i)
Example No. 16
def train():

    data_dir = '.'
    train_log_dir = './logs/train/'
    val_log_dir = './logs/val/'

    with tf.name_scope('input'):
        tra_data_batch, tra_label_batch = input_data.read_data(
            data_dir=data_dir,
            is_train=True,
            batch_size=BATCH_SIZE,
            shuffle=True)
        val_data_batch, val_label_batch = input_data.read_data(
            data_dir=data_dir,
            is_train=False,
            batch_size=BATCH_SIZE,
            shuffle=False)

    x = tf.placeholder(tf.float32, shape=[BATCH_SIZE, 30])
    y_ = tf.placeholder(tf.int16, shape=[BATCH_SIZE, N_CLASSES])

    logits = FNET.FNET(x, N_CLASSES, IS_PRETRAIN, train=True, droprate=0.6)
    loss = tools.loss(logits, y_)
    accuracy = tools.accuracy(logits, y_)

    my_global_step = tf.Variable(0, name='global_step', trainable=False)
    train_op = tools.optimize(loss, learning_rate, my_global_step)

    saver = tf.train.Saver(tf.global_variables())
    #summary_op = tf.summary.merge_all()

    init = tf.global_variables_initializer()
    sess = tf.Session()
    sess.run(init)

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    #tra_summary_writer = tf.summary.FileWriter(train_log_dir, sess.graph)
    #val_summary_writer = tf.summary.FileWriter(val_log_dir, sess.graph)
    numk = 3000 / 100
    numk = int(numk)
    bestaka = 0
    try:
        for step in np.arange(MAX_STEP):
            if coord.should_stop():
                break

            tra_images, tra_labels = sess.run(
                [tra_data_batch, tra_label_batch])
            _, tra_loss, tra_acc, llg = sess.run(
                [train_op, loss, accuracy, logits],
                feed_dict={
                    x: tra_images,
                    y_: tra_labels
                })
            if step % 20 == 0 or (step + 1) == MAX_STEP:
                print('Step: %d, loss: %.4f, accuracy: %.4f%%' %
                      (step, tra_loss, tra_acc))
                #summary_str = sess.run(summary_op)
                #tra_summary_writer.add_summary(summary_str, step)

            if step % 400 == 0 or (step + 1) == MAX_STEP:
                val_images, val_labels = sess.run(
                    [val_data_batch, val_label_batch])
                val_loss, val_acc = sess.run([loss, accuracy],
                                             feed_dict={
                                                 x: val_images,
                                                 y_: val_labels
                                             })
                print(
                    '**  Step %d, val loss = %.2f, val accuracy = %.2f%%  **' %
                    (step, val_loss, val_acc))

                #summary_str = sess.run(summary_op)
                #val_summary_writer.add_summary(summary_str, step)

            if step % 400 == 0:
                for i in llg:
                    print(i)

            if step % 800 == 0 or (step + 1) == MAX_STEP and step != 0:
                checkpoint_path = os.path.join(train_log_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
            if step % 600 == 0 and step != 0:
                aka = 0
                for ii in range(numk):
                    val_images, val_labels = sess.run(
                        [val_data_batch, val_label_batch])
                    val_loss, val_acc = sess.run([loss, accuracy],
                                                 feed_dict={
                                                     x: val_images,
                                                     y_: val_labels
                                                 })
                    aka += val_acc
                aka = aka / numk
                print('*****test accuracy = %.3f%% ***' % (aka))
                if (aka > bestaka):
                    bestaka = aka
                    checkpoint_path = os.path.join("./logs/train_best",
                                                   'model.ckpt')
                    saver.save(sess, checkpoint_path, global_step=step)

            if step == int(0.08 * MAX_STEP):
                train_op = tools.optimize(loss, 0.002, my_global_step)
            if step == int(0.24 * MAX_STEP):
                train_op = tools.optimize(loss, 0.0004, my_global_step)
            if step == int(0.4 * MAX_STEP):
                train_op = tools.optimize(loss, 0.0001, my_global_step)
            if step == int(0.6 * MAX_STEP):
                train_op = tools.optimize(loss, 0.00001, my_global_step)
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()

    coord.join(threads)
    sess.close()
Example No. 17
import tensorflow as tf
from setting import BATCH_SIZE, CORPUS_FILENAME
from input_data import read_data, tokenize
from sklearn.model_selection import train_test_split

# read dataset
en, kr = read_data(CORPUS_FILENAME)
en_seq, en_tok, en_wc = tokenize(en)
kr_seq, kr_tok, kr_wc = tokenize(kr)
en_seq_train, en_seq_val, kr_seq_train, kr_seq_val = train_test_split(en_seq, kr_seq, test_size=0.1)

# make tf.dataset
BUFFER_SIZE = len(en_seq_train)
dataset = tf.data.Dataset.from_tensor_slices((en_seq_train, kr_seq_train)).shuffle(BUFFER_SIZE)
dataset = dataset.batch(BATCH_SIZE, drop_remainder=True)
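
A short sketch (not part of the original snippet) of pulling one batch from this pipeline with a TF 1.x one-shot iterator:

iterator = dataset.make_one_shot_iterator()
en_batch, kr_batch = iterator.get_next()
with tf.Session() as sess:
    e, k = sess.run([en_batch, kr_batch])  # one batch of source and target ID sequences
    print(e.shape, k.shape)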