# Example #1
0
def load_checkpoint(sess, checkpoint_path):
    """Restore model variables in ``sess`` from the latest checkpoint.

    Args:
        sess: active ``tf.Session`` to restore variables into.
        checkpoint_path: directory containing the checkpoint state file.

    Raises:
        FileNotFoundError: if no checkpoint state is found under
            ``checkpoint_path``.  (Subclass of Exception, so existing
            ``except Exception`` handlers still catch it.)
    """
    ckpt = tf.train.get_checkpoint_state(checkpoint_path)
    if ckpt is None:
        # Was a bare `raise Exception(...)`; a specific exception type is
        # easier for callers to handle and for logs to identify.
        raise FileNotFoundError(
            'Pretrained model not found at %s' % checkpoint_path)
    print('Loading model %s.' % ckpt.model_checkpoint_path)
    # get_init_fn is a project helper; [''] appears to mean "no excluded
    # scopes" here -- TODO confirm against its definition.
    init_op = get_init_fn(checkpoint_path, [''], ckpt.model_checkpoint_path)
    init_op(sess)
# Example #2
0
def load_pretrain(sess, vae_type, enc_type, dataset, basenet, log_root):
    """Load a pretrained model into ``sess`` for shoe/chair datasets.

    A pretrained checkpoint is looked up under
    ``<log_root before 'runs'>/pretrained_model/<shoe|chair>/`` whenever
    ``vae_type`` is 'ps2s'/'sp2s' or ``dataset`` is 'shoesv2'/'chairsv2'.
    Missing checkpoints only produce a warning (best-effort load).

    Args:
        sess: active ``tf.Session`` to restore variables into.
        vae_type: model variant string; 'ps2s'/'sp2s' force pretraining.
        enc_type: unused here; kept for call-site compatibility.
        dataset: dataset name; must contain 'shoe' or 'chair' when a
            pretrained model is required.
        basenet: unused here; kept for call-site compatibility.
        log_root: run directory; the portion before 'runs' is taken as
            the experiment root.

    Raises:
        ValueError: if a pretrained model is required but ``dataset`` is
            neither a shoe nor a chair dataset.
    """
    if vae_type in ['ps2s', 'sp2s'] or dataset in ['shoesv2', 'chairsv2']:
        if 'shoe' in dataset:
            sv_str = 'shoe'
        elif 'chair' in dataset:
            sv_str = 'chair'
        else:
            # Bug fix: sv_str was previously left unbound on this path,
            # crashing below with an opaque NameError. Fail loudly with a
            # message that names the offending input instead.
            raise ValueError(
                'Unsupported dataset %r: expected a shoe or chair dataset '
                'when loading a pretrained model.' % dataset)
        pretrain_dir = log_root.split('runs')[0] + 'pretrained_model/%s/' % sv_str
        ckpt = tf.train.get_checkpoint_state(pretrain_dir)
        if ckpt is not None:
            pretrained_model = ckpt.model_checkpoint_path
            print('Loading model %s.' % pretrained_model)
            checkpoint_exclude_scopes = []
            init_fn = get_init_fn(pretrained_model, checkpoint_exclude_scopes)
            init_fn(sess)
        else:
            # Deliberately best-effort: a missing pretrained model is not
            # fatal, training can proceed from scratch.
            print('Warning: pretrained model not found at %s' % pretrain_dir)
# Example #3
0
					'Filename of the training data')
# Name of the .tfrecords file (inside FLAGS.train_dir -- TODO confirm)
# holding the held-out test examples.
flags.DEFINE_string('test_file', 'harrison_test.tfrecords',
					'Filename of the test data')

if __name__ == '__main__':
	tf.reset_default_graph()
	with tf.Graph().as_default():
		tf.logging.set_verbosity(tf.logging.INFO)

		batch_x, _, batch_y = input.inputs(
			filename=os.path.join(FLAGS.train_dir, FLAGS.train_file),
			num_epochs=FLAGS.num_epochs)
		logits = model.inference(batch_x)
		losses = model.loss(logits, batch_y)
		train_op = model.training(losses)
		init_fn = model.get_init_fn()

		with tf.Session() as sess:
			with slim.queues.QueueRunners(sess):
				
				init_op = tf.group(tf.initialize_all_variables(),
								   tf.initialize_local_variables())
				sess.run(init_op)

				number_of_steps = (57381*FLAGS.num_epochs)//FLAGS.batch_size
				print("Start training with total: %d steps from %d epoch and %d batch"%(number_of_steps, FLAGS.num_epochs, FLAGS.batch_size))

				final_loss = slim.learning.train(
						train_op,
						logdir=os.path.join(FLAGS.train_dir, 'train.log'),
						init_fn=init_fn,