Code Example #1
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

tf.summary.scalar('loss', cost)
tf.summary.scalar('accuracy', accuracy)

saver = tf.train.Saver(tf.global_variables())

# """
# # delete?
# config = tf.ConfigProto()
# config.gpu_options.allow_growth = True
# session = tf.Session(config=config)
# """
print("start read")
train_image, train_label, train_label_mul = read_data.next_batch(
    train_filename, batch_size)
test_image, test_label, test_label_mul = read_data.next_batch(
    test_filename, batch_size)
print("end read")

with tf.Session() as sess:

    ckpt = tf.train.get_checkpoint_state(
        '/DB/rhome/qyzheng/Desktop/qyzheng/Tensorflow/model1')
    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        saver.restore(sess, ckpt.model_checkpoint_path)
    else:
        sess.run(tf.local_variables_initializer())
        sess.run(tf.global_variables_initializer())

    coord = tf.train.Coordinator()
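The Coordinator created above is normally paired with tf.train.start_queue_runners and a matching shutdown. A minimal sketch of that TF1 queue-runner lifecycle, assuming a session sess and a graph whose input pipeline was built with queue runners (train_op is an illustrative name for the training op):

import tensorflow as tf

coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
    while not coord.should_stop():
        sess.run(train_op)  # one training step per loop iteration
except tf.errors.OutOfRangeError:
    print('input queues exhausted')
finally:
    # Ask the queue threads to stop and wait for them to finish.
    coord.request_stop()
    coord.join(threads)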
Code Example #2
File: Densenet.py  Project: Zopek/bladder_old
    merged = tf.summary.merge_all()
    writer = tf.summary.FileWriter('./logs', sess.graph)

    global_step = 0
    epoch_learning_rate = init_learning_rate
    for epoch in range(total_epochs):
        # Note: epoch is an int, so these equality checks only fire when
        # total_epochs is a multiple of 4 (see the schedule sketch after
        # this example).
        if epoch == (total_epochs * 0.5) or epoch == (total_epochs * 0.75):
            epoch_learning_rate = epoch_learning_rate / 10

        # total_batch = int(mnist.train.num_examples / batch_size)
        random.shuffle(train)
        random.shuffle(test)

        for step in range(train_epoch):
            # start_time = time.time()
            batch_x, batch_y = read_data.next_batch(train, batch_size, height, width)
            # batch_x, batch_y = mnist.train.next_batch(batch_size)
            # print('load time is : ', time.time() - start_time)
            train_feed_dict = {
                x: batch_x,
                label: batch_y,
                learning_rate: epoch_learning_rate,
                training_flag: True
            }

            # NOTE: `train` above is the shuffled list of training records, so
            # the op run here is assumed to be a separately named training op
            # (e.g. train_op = optimizer.minimize(cost) defined earlier).
            _, loss = sess.run([train_op, cost], feed_dict=train_feed_dict)

            if step % 50 == 0:
                global_step += 50
                train_summary, train_accuracy = sess.run([merged, accuracy], feed_dict=train_feed_dict)
                # accuracy.eval(feed_dict=feed_dict)
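The schedule above matches the one from the DenseNet paper: the learning rate is divided by 10 at the 50% and 75% marks of training. The same schedule as a standalone function (a plain-Python sketch; init_learning_rate and total_epochs stand in for the values defined elsewhere in the file):

def learning_rate_at(epoch, total_epochs, init_learning_rate):
    # Piecewise-constant schedule with 10x drops at 50% and 75% of training.
    lr = init_learning_rate
    if epoch >= total_epochs * 0.5:
        lr /= 10
    if epoch >= total_epochs * 0.75:
        lr /= 10
    return lr

# e.g. with init_learning_rate=0.1 and total_epochs=100:
# epochs 0-49 -> 0.1, epochs 50-74 -> 0.01, epochs 75-99 -> 0.001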
Code Example #3
#     if correct_prediction[i] == False:
#         err[lab_loc[i]] += 1
# err_label = tf.constant(err)
# """
# correct_prediction_two = tf.equal(tf.argmax(logits[:, 4:7], 1), tf.argmax(label[:, 4:7], 1))
# correct_prediction_thr = tf.equal(tf.argmax(logits[:, 7:11], 1), tf.argmax(label[:, 7:11], 1))
# correct_prediction_fou = tf.equal(tf.argmax(logits[:, 11:14], 1), tf.argmax(label[:, 11:14], 1))
# """
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

tf.summary.scalar('loss', cost)
tf.summary.scalar('accuracy', accuracy)

saver = tf.train.Saver(tf.global_variables())

test_image, test_label, test_label_mul = read_data.next_batch(test_filename, batch_size, total_test_batch)

# """
# # delete?
# config = tf.ConfigProto()
# config.gpu_options.allow_growth = True
# session = tf.Session(config=config)
# """
with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())
    # Assumes a trained checkpoint already exists at this path.
    ckpt = tf.train.get_checkpoint_state('/DATA/data/qyzheng/Tensorflow/model1')
    saver.restore(sess, ckpt.model_checkpoint_path)

    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
Code Example #4

def train_neural_network(inputs):

    # Relies on module-level names defined elsewhere in the file:
    # recurrent_neural_network, weight, bias, targets, learning_rate,
    # batch_size, training_X/Y, dev_X/Y, testing_X/Y, read_data, np, plt.
    prediction = recurrent_neural_network(inputs, weight, bias)
    #print(prediction.shape)
    #print(tf.reduce_sum(prediction - targets, 0).shape)
    cost = tf.reduce_sum(
        tf.square(tf.norm(prediction - targets, ord='euclidean', axis=1)))
    #cost = tf.square(tf.norm(tf.reduce_sum(prediction - targets, 0)))      # prediction: (len,2)
    optimizer = tf.train.AdamOptimizer(learning_rate).minimize(cost)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        # Seed the loop so |train_epoch_loss - prev_train_loss| > 1e-5 on entry.
        train_epoch_loss = 1.0
        prev_train_loss = 0.0
        iteration = 0
        train_cost_list = []
        dev_cost_list = []
        while (abs(train_epoch_loss - prev_train_loss) > 1e-5):
            iteration += 1
            prev_train_loss = train_epoch_loss
            #train_epoch_loss = 0

            # The remainder is dropped when len(training_X) is not divisible
            # by batch_size.
            for batch in range(int(len(training_X) / batch_size)):
                x_batch, y_batch = read_data.next_batch(
                    batch, batch_size, training_X, training_Y)
                data_feed = {inputs: x_batch, targets: y_batch}
                _, c = sess.run([optimizer, cost], data_feed)
                #print('train: ', c)
                #train_epoch_loss += c/batch_size
            '''
            # training cost
            data_feed = {inputs: training_X, targets: training_Y}
            _, train_c = sess.run([optimizer, cost], data_feed)
            train_epoch_loss = train_c/len(training_X)
            '''
            # train_epoch_loss = train_epoch_loss / int(len(training_X) / batch_size)
            # (same expression as above, so the dropped remainder is not counted)

            dev_epoch_loss = 0
            for batch in range(int(len(dev_X) / batch_size)):
                x_batch, y_batch = read_data.next_batch(
                    batch, batch_size, dev_X, dev_Y)
                data_feed = {inputs: x_batch, targets: y_batch}
                c = sess.run(cost, data_feed)
                #print('dev: ', c)
                dev_epoch_loss += c / batch_size
            dev_epoch_loss = dev_epoch_loss / int(len(dev_X) / batch_size)
            # training cost
            train_epoch_loss = 0
            for batch in range(int(len(training_X) / batch_size)):
                x_batch, y_batch = read_data.next_batch(
                    batch, batch_size, training_X, training_Y)
                data_feed = {inputs: x_batch, targets: y_batch}
                c = sess.run(cost, data_feed)
                #print('train: ', c)
                train_epoch_loss += c / batch_size
            train_epoch_loss = train_epoch_loss / int(
                len(training_X) / batch_size)

            # dev cost
            '''
            data_feed = {inputs: dev_X, targets: dev_Y}
            _, dev_c = sess.run([prediction, cost], data_feed)
            dev_epoch_loss = dev_c/len(dev_X)
            '''
            train_cost_list.append(train_epoch_loss)
            dev_cost_list.append(dev_epoch_loss)
            print('Train iteration', iteration, 'train loss:',
                  train_epoch_loss)
            print('Train iteration', iteration, 'dev loss:', dev_epoch_loss)
        iter_list = range(1, iteration + 1)
        plt.figure(1)
        plt.plot(iter_list, train_cost_list)
        plt.plot(iter_list, dev_cost_list)
        plt.title('iteration vs. epoch cost, university')
        plt.show()

        # After the training, print out the trained parameters
        trained_w = sess.run(weight)
        trained_b = sess.run(bias)
        #print('trained_w: ', trained_w, 'trained_b: ', trained_b, 'trained_w shape: ', trained_w.shape)

        # Begin testing
        test_epoch_loss = 0
        '''
        data_feed = {inputs: testing_X, targets: testing_Y}
        pre, test_c = sess.run([prediction, cost], data_feed)
        test_prediction = pre
        test_epoch_loss = test_c/int(len(testing_X))
        '''
        test_prediction = np.empty(
            [int(len(testing_X) / batch_size) * batch_size, 2])
        for batch in range(int(len(testing_X) / batch_size)):
            x_batch, y_batch = read_data.next_batch(batch, batch_size,
                                                    testing_X, testing_Y)
            data_feed = {inputs: x_batch, targets: y_batch}
            pre, c = sess.run([prediction, cost], data_feed)
            pre = np.array(pre)
            test_epoch_loss += c
            test_prediction[batch * batch_size:(batch + 1) *
                            batch_size, :] = pre
        test_epoch_loss = test_epoch_loss / (int(len(testing_X) / batch_size) *
                                             batch_size)

        print('Test loss:', test_epoch_loss)

        # Save predicted data and ground truth data into a .csv file.
        # After the transposes each array is (2, N), so in the stacked file
        # rows 0-1 hold the predictions and rows 2-3 the ground truth.
        test_prediction = np.transpose(test_prediction)
        testing_Y_array = np.transpose(
            np.array(testing_Y)
            [0:int(len(testing_X) / batch_size) * batch_size, :])
        test_prediction_and_real = np.vstack(
            (test_prediction, testing_Y_array))
        np.savetxt("GRU_test_prediction_and_real.csv",
                   test_prediction_and_real,
                   delimiter=",")
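The cost minimized in Example #4 is the sum of squared Euclidean distances between each 2-D prediction and its target, cost = sum_i ||prediction_i - target_i||^2. A quick NumPy check of the same quantity (pred and target are illustrative arrays):

import numpy as np

pred = np.array([[1.0, 2.0], [3.0, 4.0]])
target = np.array([[1.0, 1.0], [2.0, 2.0]])

# Same value as tf.reduce_sum(tf.square(tf.norm(pred - target, ord='euclidean', axis=1)))
cost = np.sum(np.linalg.norm(pred - target, axis=1) ** 2)
print(cost)  # (0 + 1) + (1 + 4) = 6.0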
Code Example #5
BATCH_SIZE = 100
NUM_ITERS = 50000
tau0 = 1.0  # initial temperature
np_temp = tau0
np_lr = 0.001
ANNEAL_RATE = 0.00003
MIN_TEMP = 0.5

dat = []
sess = tf.InteractiveSession()
sess.run(init_op)
for i in range(1, NUM_ITERS):
    np_x, np_y = next_batch(data, BATCH_SIZE)
    _, np_loss = sess.run([train_op, loss], {x: np_x, tau: np_temp, lr: np_lr})
    if i % 100 == 1:
        dat.append([i, np_temp, np_loss])
    if i % 1000 == 1:
        np_temp = np.maximum(tau0 * np.exp(-ANNEAL_RATE * i), MIN_TEMP)
        np_lr *= 0.9
    if i % 5000 == 1:
        print('Step %d, ELBO: %0.3f' % (i, -np_loss))
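The loop above anneals the softmax temperature every 1000 steps while also decaying the learning rate by 10% at the same interval. The temperature schedule in isolation (a sketch reusing the constants defined above):

import numpy as np

def temperature_at(step, tau0=1.0, anneal_rate=0.00003, min_temp=0.5):
    # Exponential decay from tau0, floored at min_temp.
    return max(tau0 * np.exp(-anneal_rate * step), min_temp)

# temperature_at(0) == 1.0; the 0.5 floor is reached once
# step > ln(tau0 / min_temp) / anneal_rate, about 23,105 steps here.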
'''
# ## save to animation

# In[10]:


np_x1, _ = data.next_batch(100)