def _model_loss(images, depths, invalid_depths, mode):
    # Build the inference graph, accumulate the terms in the 'losses'
    # collection, and track a moving average of the total loss.

    # Plain Python booleans: `reuse` and `trainable` are graph-construction
    # arguments, not runtime tensors, so no placeholders are needed.
    if mode == 'train':
        flag_reuse_train_eval = False
        flag_trainable_train_eval = True
    elif mode == 'eval':
        flag_reuse_train_eval = True
        flag_trainable_train_eval = False
    else:
        raise ValueError("mode must be 'train' or 'eval', got %r" % mode)

    with tf.variable_scope(tf.get_variable_scope()):
        logits = inference(images, reuse=flag_reuse_train_eval,
                           trainable=flag_trainable_train_eval)

    # loss() registers its terms in the 'losses' collection as a side effect.
    loss(logits, depths, invalid_depths)

    total_loss = tf.add_n(tf.get_collection('losses'), name='total_loss')

    # Compute the moving average of the total loss.
    loss_averages = tf.train.ExponentialMovingAverage(0.9, name='avg')
    loss_averages_op = loss_averages.apply([total_loss])

    # tf.identity forces the averaging op to run whenever total_loss is
    # evaluated.
    with tf.control_dependencies([loss_averages_op]):
        total_loss = tf.identity(total_loss)

    return total_loss, logits
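The `mode` flag lets one function build both the training and the evaluation graph over a single set of variables. A minimal usage sketch (the input tensors here are hypothetical; `inference()` and `loss()` come from the surrounding module):

# Sketch only: the train graph creates the variables, the eval graph reuses them.
train_total_loss, train_logits = _model_loss(
    train_images, train_depths, train_invalid_depths, mode='train')
eval_total_loss, eval_logits = _model_loss(
    eval_images, eval_depths, eval_invalid_depths, mode='eval')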
Example 2
# -*- coding:utf-8 -*-
'''
this is the entrance of this project
'''

import tensorflow as tf
import os
from model import model
import numpy as np

if __name__ == '__main__':
    batch_size = 32
    learning_rate = 0.01
    keep_prob = 0.7
    path = '/data/LUNA2016/cubic_normalization_npy'
    test_path = '/data/LUNA2016/cubic_normalization_test'

    print(" begin...")
    model = model(learning_rate, keep_prob, batch_size, 40)
    model.inference(path, test_path, 0, True)
Example 3
			# backward G
			model.optimizer_G.zero_grad()
			loss_G.backward()
			model.optimizer_G.step()

			# display input & output and save output images
			if opt.save_fake:
				result_img = model.get_result_img(Variable(data['left_img']), Variable(data['right_img']))
				visualizer.display_current_results(result_img, epoch, total_steps)

		# epoch end time
		iter_end_time = time.time()
		print('End of epoch %d / %d \t Time Taken: %d sec' % (epoch, opt.niter, iter_end_time - epoch_start_time))

		# save model
		print('saving the model at the end of epoch %d, iters %d' % (epoch, total_steps))
		model.save(epoch)

else:
	print('Testing started...')
	if opt.progress:
		for i, data in enumerate(tqdm(test_data)):
			gen_disp = model.inference(Variable(data['test_img']))
			result_img = model.get_test_result(gen_disp)
			visualizer.display_test_results(i, result_img)
	else:
		for i, data in enumerate(test_data):
			gen_disp = model.inference(Variable(data['test_img']))
			result_img = model.get_test_result(gen_disp)
			visualizer.display_test_results(i, result_img)
	print('Testing finished.')
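The two test branches above differ only in whether the loop is wrapped in tqdm. A small refactor sketch that keeps the behavior but removes the duplication:

# Equivalent, de-duplicated form: pick the iterator once.
test_iter = tqdm(test_data) if opt.progress else test_data
for i, data in enumerate(test_iter):
    gen_disp = model.inference(Variable(data['test_img']))
    result_img = model.get_test_result(gen_disp)
    visualizer.display_test_results(i, result_img)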
Example 4
# -*- coding:utf-8 -*-
'''
this is the entrance of this project
'''

import tensorflow as tf
import os
from model import model
import numpy as np

if __name__ == '__main__':
    batch_size = 32
    learning_rate = 0.01
    keep_prob = 0.7
    path = '../../data/cubic_normalization_npy'
    #test_path = '../../data/cubic_normalization_test'
    test_size = 0.1
    seed = 121

    print(" begin...")
    model = model(learning_rate, keep_prob, batch_size, 40)
    model.inference(path, 0, test_size, seed, True)
Example 5
def train():
    """Train datasets for a number of steps."""
    with tf.Graph().as_default():
        global_step = tf.Variable(0, trainable=False)

        # Get images and labels for model.
        images, labels = model.distorted_inputs()

        # Build a Graph that computes the logits predictions from the
        # inference model.
        logits = model.inference(images)

        # Calculate loss.
        loss = model.loss(logits, labels)

        # Build a Graph that trains the model with one batch of examples and
        # updates the model parameters.
        train_op = model.train(loss, global_step)

        # Create a saver.
        saver = tf.train.Saver(tf.global_variables())

        # Build the summary operation based on the TF collection of Summaries.
        summary_op = tf.summary.merge_all()

        # Build an initialization operation to run below.
        init = tf.global_variables_initializer()

        # Start running operations on the Graph.
        sess = tf.Session(config=tf.ConfigProto(
            log_device_placement=FLAGS.log_device_placement
        ))  # log_device_placement=True prints the device each operation runs on
        sess.run(init)

        # Start the queue runners.
        tf.train.start_queue_runners(sess=sess)

        summary_writer = tf.summary.FileWriter(FLAGS.train_dir,
                                               graph=sess.graph)

        for step in range(FLAGS.max_steps):
            start_time = time.time()
            _, loss_value = sess.run([train_op, loss])
            duration = time.time() - start_time

            assert not np.isnan(loss_value), 'Model diverged with loss = NaN'

            if step % 10 == 0:
                num_examples_per_step = FLAGS.batch_size
                examples_per_sec = num_examples_per_step / duration
                sec_per_batch = float(duration)

                format_str = (
                    '%s: step %d, loss = %.2f (%.1f examples/sec; %.3f sec/batch)'
                )
                print(format_str % (datetime.now(), step, loss_value,
                                    examples_per_sec, sec_per_batch))

            if step % 100 == 0:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)

            # Save the model checkpoint periodically.
            if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
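start_queue_runners is invoked above without a tf.train.Coordinator, so the input threads are never stopped cleanly when training ends. A sketch of the standard TF1 coordinator pattern (not part of the original snippet):

# Coordinated queue-runner shutdown (standard TF1 idiom).
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
try:
    for step in range(FLAGS.max_steps):
        _, loss_value = sess.run([train_op, loss])
finally:
    coord.request_stop()   # ask the input threads to stop
    coord.join(threads)    # wait for them to shut down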
Example 6
import os
import sys

from model import model

gpu = '3'
base_path = ''
train_flag = True
keep_prob = 1.0
for i in range(len(sys.argv)):
    if '-g' == sys.argv[i]:
        gpu = sys.argv[i + 1]
    elif '-p' == sys.argv[i]:
        base_path = sys.argv[i + 1]
    elif '-t' == sys.argv[i]:
        train_flag = False
    elif '-k' == sys.argv[i]:
        keep_prob = float(sys.argv[i + 1])

os.environ["CUDA_VISIBLE_DEVICES"] = gpu
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'

if __name__ == '__main__':
    batch_size = 32
    learning_rate = 0.01
    epoch = 80
    # path = '/data0/LUNA/cubic_normalization_npy'

    # test_path = '../../data/cubic_normalization_test'
    test_size = 0.1
    seed = 121

    print(" begin...")
    model = model(learning_rate, keep_prob, batch_size, epoch)
    model.inference(base_path, 0, test_size, seed, train_flag)
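The hand-rolled sys.argv scan above can also be expressed with argparse; a minimal equivalent sketch (same four flags, not in the original):

# argparse equivalent of the manual flag loop.
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-g', dest='gpu', default='3')
parser.add_argument('-p', dest='base_path', default='')
parser.add_argument('-t', dest='train_flag', action='store_false',
                    help='switch to test mode')
parser.add_argument('-k', dest='keep_prob', type=float, default=1.0)
args = parser.parse_args()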
Example 7
# -*- coding:utf-8 -*-
'''
this is the entrance of this project
'''

import tensorflow as tf
import os
from model import model
import numpy as np

from project_config import *

if __name__ == '__main__':
    print(" begin...")
    model = model(learning_rate, keep_prob, batch_size, 40)
    model.inference(normalazation_output_path, test_path, 0, True)
def main():

    with tf.Graph().as_default():
        k = 0
        k2 = 0
        loss_list = []
        tloss_list = []
        x_size, y_size = (64, 64)
        batch_size = 8
        l_r = 0.00008
        train_steps = 10000
        '''
        Labeled Faces in the Wild dataset load:
        '''
        trainx, trainy, testx, testy = data_load(x_size)
        # with tf.InteractiveSession() as sess:
        config = tf.ConfigProto()
        # config.gpu_options.allow_growth = False
        # config.gpu_options.per_process_gpu_memory_fraction = 0.3
        sess = tf.InteractiveSession(config=config)

        images_placeholders = tf.placeholder("float",
                                             shape=[None, x_size, y_size, 3])
        labels_placeholders = tf.placeholder("float",
                                             shape=[None, x_size, y_size, 3])

        out_loss, pred_out, shape = inference(num_classes=3,
                                              images=images_placeholders,
                                              labels=labels_placeholders)

        train_op = training(out_loss, l_r)
        init = tf.global_variables_initializer()
        sess.run(init)
        saver = tf.train.Saver()

        g1 = int(math.ceil(len(trainx) / batch_size))
        g2 = int(math.ceil(len(testx) / batch_size))
        for step in range(train_steps):

            input_images = trainx[0 + k * batch_size:batch_size +
                                  k * batch_size]
            input_labels = trainy[0 + k * batch_size:batch_size +
                                  k * batch_size]

            k = k + 1
            if k == g1:
                k = 0

            feed_dict = {
                images_placeholders: input_images,
                labels_placeholders: input_labels
            }
            _, loss_value, m_shape = sess.run([train_op, out_loss, shape],
                                              feed_dict=feed_dict)

            loss_list.append(loss_value)
            if step % 10 == 0:
                print('Step %d:  loss = %.2f' % (step, loss_value))

            if step % 2000 == 0:

                input_imagest = testx[0 + k2 * batch_size:batch_size +
                                      k2 * batch_size]
                input_labelst = testy[0 + k2 * batch_size:batch_size +
                                      k2 * batch_size]

                k2 = k2 + 1
                if k2 == g2:
                    k2 = 0

                feed_dict2 = {
                    images_placeholders: input_imagest,
                    labels_placeholders: input_labelst
                }
                loss_valuet, out = sess.run([out_loss, pred_out],
                                            feed_dict=feed_dict2)
                tloss_list.append(loss_valuet)
                print('Step %d:            testloss = %.2f' %
                      (step, loss_valuet))
                save_path = saver.save(sess,
                                       Model_path + '/pretrained_lstm.ckpt',
                                       global_step=step)
                print("saved to %s" % save_path)

        tloss_list.append(loss_valuet)
        print('Step %d:            testloss = %.2f' % (step, loss_valuet))
        plt.figure(1)
        plt.plot(loss_list)
        plt.title('training loss')
        plt.show()
        plt.figure(2)
        plt.plot(tloss_list)
        plt.title('testing loss')
        plt.show()
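Most of the bookkeeping in main() is the wrap-around index arithmetic (k, g1, k2, g2). The same sequential batching can be isolated in a small generator; a sketch (hypothetical helper, not in the original):

# Yields consecutive batches forever, wrapping at the end of the data,
# exactly like the k/g1 arithmetic above.
def sequential_batches(data, labels, batch_size):
    i, n = 0, int(math.ceil(len(data) / batch_size))
    while True:
        lo = i * batch_size
        yield data[lo:lo + batch_size], labels[lo:lo + batch_size]
        i = (i + 1) % n

With it, the loop body reduces to batches = sequential_batches(trainx, trainy, batch_size) set up once, then input_images, input_labels = next(batches) at each step.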