Example 1
def main(*args):
	# Hyperparameters
	learning_rate = 0.001
	training_steps = 10000
	valid_step = 50  # run validation every `valid_step` training steps
	cell_size = 256
	num_rnn_layers = 1

	dataset = DataSet(FLAGS.dataset)
	model = Model(dataset.samples_shape[1],
				  dataset.labels_shape[1],
				  dataset.labels_shape[2],
				  cell_size,
				  num_rnn_layers,
				  learning_rate,
				  cell_type='lstm')

	with tf.Session() as sess:
		tf.global_variables_initializer().run()
		loss = []
		for step in range(training_steps):
			train_samples, train_labels, train_weights = dataset.get_batch(FLAGS.batch_size, 'train')
			# transpose labels from batch-major to time-major (time, batch, features)
			train_labels_T = np.transpose(train_labels, (1, 0, 2))
			_loss, prediction = model.step(train_samples, train_labels_T, train_weights, sess)
			loss.append(_loss)
			if (step % valid_step) == 0:
				print("Average training loss: %s" % np.mean(loss))
				loss = []
				valid_samples, valid_labels, valid_weights = dataset.get_batch(FLAGS.batch_size, 'valid')
				valid_labels_T = np.transpose(valid_labels, (1, 0, 2))
				v_loss, v_prediction = model.step(valid_samples, valid_labels_T, valid_weights, sess, valid=True)
				print("Valid loss @ step %s: %s" % (step,v_loss))
				for p in v_prediction:
					pred = decode_ohe(p)
					cleaned_pred = clean_prediction(pred)
					print(cleaned_pred)
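The helpers `decode_ohe` and `clean_prediction` are defined elsewhere in this snippet's repository. Below is a minimal sketch of what they plausibly do, assuming the model emits one-hot (or softmax) character predictions over a fixed vocabulary; the vocabulary and padding token are illustrative assumptions, not the original implementation:

	import numpy as np

	# hypothetical character vocabulary; the real one lives in the snippet's repo
	VOCAB = list("abcdefghijklmnopqrstuvwxyz ") + ["<pad>"]

	def decode_ohe(one_hot_seq):
		"""Map a (time, vocab) one-hot/probability matrix to a list of symbols."""
		indices = np.argmax(one_hot_seq, axis=-1)
		return [VOCAB[i] for i in indices]

	def clean_prediction(symbols):
		"""Drop padding symbols and join the rest into a readable string."""
		return "".join(s for s in symbols if s != "<pad>").strip()

Under these assumptions, `decode_ohe(p)` turns each per-sample prediction into symbols and `clean_prediction` strips the padding, matching how the validation loop above prints its output.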
Example 2
def do_training_step(model, iteration, task_iterations, batch_size,
                     train_images, train_labels):
    import os
    import sys
    from time import time

    import numpy as np

    from utils import DataSet

    # convert the task index into a global iteration offset
    iteration *= task_iterations
    start = time()

    train = DataSet(train_images, train_labels)

    # the learning rate is stored as a (rate, factor) pair and reconstructed here
    rate, factor = model['rate_factor']
    learning_rate = rate / factor
    neurons = model['neuron_number']

    # MNIST inputs: 28x28 images flattened to 784 values, 10 output classes
    x = tf.placeholder(tf.float32, shape=[None, 784])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])

    # First Convolutional layer
    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])

    x_image = tf.reshape(x, [-1, 28, 28, 1])

    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)

    # Second Convolutional layer
    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])

    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)

    # Densely Connected layer
    W_fc1 = weight_variable([7 * 7 * 64, neurons])
    b_fc1 = bias_variable([neurons])

    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

    # Dropout
    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    # Readout
    W_fc2 = weight_variable([neurons, 10])
    b_fc2 = bias_variable([10])

    y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

    # note: softmax followed by an explicit log is numerically unstable;
    # tf.nn.softmax_cross_entropy_with_logits is the safer formulation
    cross_entropy = tf.reduce_mean(
        -tf.reduce_sum(y_ * tf.log(y_conv), axis=[1]))
    train_step = tf.train.AdamOptimizer(
        learning_rate=learning_rate).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    saver = tf.train.Saver()  # defaults to saving all variables

    sess = tf.Session()

    if iteration == 0:
        init_op = tf.global_variables_initializer()
        sess.run(init_op)
    else:
        try:
            saver.restore(sess, model['path'])
            os.remove(model['path'])
            os.remove("%s.meta" % model['path'])
            print("Model %s loaded correctly at iteration %s" %
                  (model['path'], iteration))
        except Exception as e:
            print("No checkpoint for model %s found at iteration %s\n%s" %
                  (model['path'], iteration, e))
            sys.exit(1)

    t1 = time()
    step_times = np.array([])
    for i in range(iteration, iteration + task_iterations):
        ls = time()
        batch_offset = i * batch_size
        batch = train.get_batch(batch_offset, batch_size)

        train_step.run(session=sess,
                       feed_dict={
                           x: batch[0],
                           y_: batch[1],
                           keep_prob: 0.5
                       })
        # print("Iteration %s - %s s " % (i, time()-ls))
        step_times = np.append(step_times, (time() - ls))
    print("Loop\n Avg: %s\n Max: %s\n Min: %s" %
          (step_times.mean(), step_times.max(), step_times.min()))
    print("Loop time %s" % (time() - t1))
    save_path = saver.save(sess, "%s_%s" % (model['base_path'], iteration))

    model['path'] = save_path

    # evaluated on the last training batch, not on a held-out set
    training_accuracy = accuracy.eval(session=sess,
                                      feed_dict={
                                          x: batch[0],
                                          y_: batch[1],
                                          keep_prob: 1.0
                                      })
    model['train_accuracy'] = training_accuracy

    end = time()
    print(
        "Training stats:\n - Number of neurons: %s\n - Learning rate: %s\n - Model: %s\n - Time: %s\n" %
        (neurons, learning_rate, save_path, (end - start)))

    return model
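The helpers `weight_variable`, `bias_variable`, `conv2d`, and `max_pool_2x2` are not defined in the snippet. The network mirrors the classic TensorFlow "Deep MNIST for Experts" tutorial, so the definitions below are a reasonable reconstruction rather than the repository's verbatim code:

    import tensorflow as tf

    def weight_variable(shape):
        """Weights initialized from a small truncated normal to break symmetry."""
        return tf.Variable(tf.truncated_normal(shape, stddev=0.1))

    def bias_variable(shape):
        """Small positive bias to keep ReLU units active at the start."""
        return tf.Variable(tf.constant(0.1, shape=shape))

    def conv2d(x, W):
        """2D convolution with stride 1 and zero padding that preserves size."""
        return tf.nn.conv2d(x, W, strides=[1, 1, 1, 1], padding='SAME')

    def max_pool_2x2(x):
        """2x2 max pooling that halves each spatial dimension."""
        return tf.nn.max_pool(x, ksize=[1, 2, 2, 1],
                              strides=[1, 2, 2, 1], padding='SAME')

With these definitions, the two `max_pool_2x2` calls reduce the 28x28 input to 7x7, which is why the dense layer reshapes to `7 * 7 * 64`.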