Example #1
def evaluate():
	log = open("test.log", "w", 1)  # line-buffered log file
	cases = os.listdir(FLAGS.testData_dir)
	for c in range(len(cases)):
		cases[c] = FLAGS.testData_dir + "/" + cases[c]

	"""Eval CIFAR-10 for a number of steps."""
	with tf.Graph().as_default() as g:
		# Get images and labels for CIFAR-10.
		eval_data = FLAGS.eval_data == 'test'
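		# p, k, nut and u are presumably the pressure, turbulent kinetic energy, eddy viscosity and velocity fields of the CFD solution.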
		p, k, nut, u, ground_truth = inputs.inputFlow(cases, log, FLAGS.testBatch_size, useType="test")
		# Build a Graph that computes the logits predictions from the
		# inference model.
		with tf.device('/gpu:0'):
			code = network.inference([p, k, nut, u], True, log)
			preds = network.predictFlow(code, log, useType="test")
			# Calculate predictions.
			diffs = tf.subtract(preds, ground_truth)
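			# Mean absolute error between predictions and ground truth over the whole eval batch.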
			mean_diff = tf.reduce_mean(tf.abs(diffs))
		# Restore the moving average version of the learned variables for eval.
		variable_averages = tf.train.ExponentialMovingAverage(network.MOVING_AVERAGE_DECAY)
		variables_to_restore = variable_averages.variables_to_restore()
		# Remap batch-norm population statistics to the names of their moving-average shadow variables.
		for name in list(variables_to_restore.keys()):
			if "popMean" in name or "popVar" in name:
				variables_to_restore[name + "/avg"] = variables_to_restore[name]
				del variables_to_restore[name]
		saver = tf.train.Saver(variables_to_restore)
		print(variables_to_restore, file=log)
		while True:
			eval_once(mean_diff, saver, len(cases))
			if FLAGS.run_once:
				break
			time.sleep(FLAGS.eval_interval_secs)
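The eval_once helper called in the loop above is not included in this snippet. A minimal sketch of what such a helper could look like, assuming a hypothetical FLAGS.checkpoint_dir flag pointing at the training checkpoints and that mean_diff evaluates to one scalar per batch:

def eval_once(mean_diff, saver, num_examples):
	# Hypothetical sketch: restore the latest checkpoint and report the average error.
	with tf.Session() as sess:
		ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)
		if not (ckpt and ckpt.model_checkpoint_path):
			return
		saver.restore(sess, ckpt.model_checkpoint_path)
		coord = tf.train.Coordinator()
		threads = tf.train.start_queue_runners(sess=sess, coord=coord)
		num_iter = (num_examples + FLAGS.testBatch_size - 1) // FLAGS.testBatch_size
		total = 0.0
		for _ in range(num_iter):
			total += sess.run(mean_diff)
		print("mean abs diff: " + str(total / num_iter))
		coord.request_stop()
		coord.join(threads)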
Example #2
def evaluate():
	log = open("test.log", "w", 1)  # line-buffered log file
	cases = os.listdir(FLAGS.testData_dir)
	for c in range(len(cases)):
		cases[c] = FLAGS.testData_dir + "/" + cases[c]

	"""Eval CIFAR-10 for a number of steps."""
	with tf.Graph().as_default() as g:
		# Get images and labels for CIFAR-10.
		eval_data = FLAGS.eval_data == 'test'
		# p, k, nut, u = network.inputForcesAllCrops(cases, log)
		p, k, nut, u, index = inputs.inputReconstructionCrops(cases, log, FLAGS.testBatch_size, [36,56,32,6])
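		# Stack the four fields into a single tensor along the channel axis (axis 4).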
		_unProcessed = tf.concat([p, k, nut, u], 4)
		# Build a Graph that computes the logits predictions from the
		# inference model.
		with tf.device('/gpu:0'):
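			# Split along the first spatial axis: 24 slices of input context and 12 held-out slices as ground truth.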
			unProcessed, gt = tf.split(_unProcessed, [24, 12], 1)
			p, k, nut, u = tf.split(unProcessed, [1,1,1,3], 4)
			code = network.inference([p, k, nut, u], True, log)
			# recon = network.reconstruct(code, True, log)
			recon = network.predictFlow(code, True, log)
			# Calculate predictions.
			diffs = tf.subtract(recon, gt)
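			# Per-example mean absolute error (reduced over spatial and channel axes, keeping the batch axis).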
			mean_diff = tf.reduce_mean(tf.abs(diffs), axis=(1,2,3,4))
		# Restore the moving average version of the learned variables for eval.
		variable_averages = tf.train.ExponentialMovingAverage(network.MOVING_AVERAGE_DECAY)
		variables_to_restore = variable_averages.variables_to_restore()
		# Remap batch-norm population statistics to the names of their moving-average shadow variables.
		for name in list(variables_to_restore.keys()):
			if "popMean" in name or "popVar" in name:
				variables_to_restore[name + "/avg"] = variables_to_restore[name]
				del variables_to_restore[name]
		saver = tf.train.Saver(variables_to_restore)
		print(variables_to_restore, file=log)
		while True:
			eval_once(mean_diff, index, saver, len(cases))
			if FLAGS.run_once:
				break
			time.sleep(FLAGS.eval_interval_secs)
Example #3
testCases = os.listdir(FLAGS.testData_dir)
for c in range(len(testCases)):
    testCases[c] = FLAGS.testData_dir + "/" + testCases[c]

cases += testCases
log = open("test.log", "w", 1)
# Get examples and ground truth.
print("Creating Graph..", file=log)
p, k, nut, u, ground_truth = inputs.inputForces(cases,
                                                log,
                                                FLAGS.testBatch_size,
                                                useType="test")
# pSmall, kSmall, nutSmall, uSmall, ground_truthSmall = inputs.inputFlow(cases, log, 4*FLAGS.trainBatch_size, useType="train")
with tf.device('/gpu:0'):
    # Build a Graph that computes the logits predictions from the inference model.
    code = network.inference([p, k, nut, u], True, log, useType="test")

# with tf.device('/gpu:1'):
# 	codeSmall = network.inference([pSmall, kSmall, nutSmall, uSmall], False, log, useType="train")

variable_averages = tf.train.ExponentialMovingAverage(MOVING_AVERAGE_DECAY)
variables_to_restore = variable_averages.variables_to_restore()

saver = tf.train.Saver(variables_to_restore)

init = tf.global_variables_initializer()
myconfig = tf.ConfigProto()
myconfig.gpu_options.allow_growth = True
sess = tf.Session(config=myconfig)
sess.run(init)
tf.train.start_queue_runners(sess=sess)
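The fragment above stops after starting the queue runners. Restoring a trained checkpoint and evaluating the code tensor would presumably follow; a minimal sketch, assuming the checkpoints live under FLAGS.train_dir:

ckpt = tf.train.get_checkpoint_state(FLAGS.train_dir)
if ckpt and ckpt.model_checkpoint_path:
    # Restore the moving-average weights saved during training.
    saver.restore(sess, ckpt.model_checkpoint_path)
codeValues = sess.run(code)  # one batch of latent codes for the test cases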
Example #4
def train():
    """Train for a number of steps."""
    log = open(FLAGS.train_dir + ".log", "w", 1)
    cases = os.listdir(FLAGS.trainData_dir)
    for c in range(len(cases)):
        cases[c] = FLAGS.trainData_dir + "/" + cases[c]
    random.shuffle(cases)
    print("Num train examples:\t" + str(len(cases)), file=log)
    with tf.Graph().as_default():
        global_step = tf.Variable(0, trainable=False)
        # Get examples and ground truth.
        print("Creating Graph..", file=log)
        p, k, nut, u, ground_truth = inputs.inputForces(cases,
                                                        log,
                                                        FLAGS.trainBatch_size,
                                                        useType="train")
        pSmall, kSmall, nutSmall, uSmall, ground_truthSmall = inputs.inputFlow(
            cases, log, 4 * FLAGS.trainBatch_size, useType="train")
        with tf.device('/gpu:0'):
            # Build a Graph that computes the logits predictions from the inference model.
            code = network.inference([p, k, nut, u],
                                     True,
                                     log,
                                     useType="train")
            forces = network.predictForces(code, log)
            forcesLoss = network.loss(forces, ground_truth, True,
                                      "ForceRegressionLoss")
            reconstruction = network.reconstruct(code,
                                                 True,
                                                 log,
                                                 useType="train")
            unProcessed = tf.concat([p, k, nut, u], 4)
            reconLoss = network.loss(reconstruction, unProcessed, True,
                                     "ReconstructionLoss")

        with tf.device('/gpu:1'):
            codeSmall = network.inference([pSmall, kSmall, nutSmall, uSmall],
                                          False,
                                          log,
                                          useType="train")
            flow = network.predictFlow(codeSmall, log, useType="train")
            flowLoss = network.loss(flow, ground_truthSmall, False, "FlowLoss")
            reconstructionSmall = network.reconstruct(codeSmall,
                                                      False,
                                                      log,
                                                      useType="train")
            unProcessedSmall = tf.concat([pSmall, kSmall, nutSmall, uSmall], 4)
            reconSmallLoss = network.loss(reconstructionSmall,
                                          unProcessedSmall, False,
                                          "Reconstruction2Loss")
            #unProcessed = tf.concat([p, k, nut, u], 4)
            #reconLoss = network.loss(reconstruction, unProcessed, True, "ReconstructionLoss")
            # Calculate loss.
            # Build a Graph that trains the model with one batch of examples and
            # updates the model parameters.

        totalLoss = network.totalLoss()

        with tf.device('/gpu:1'):
            train_op = network.train(totalLoss, global_step)

        losses = tf.get_collection("losses")

        #Keep summaries

        tf.summary.scalar(totalLoss.op.name + ' (raw)', totalLoss)
        for l in losses:
            # Name each loss as '(raw)' and name the moving average version of the loss
            # as the original loss name.
            tf.summary.scalar(l.op.name + ' (raw)', l)
            # tf.summary.scalar(l.op.name, loss_averages.average(l))
        # Create a saver.
        saver = tf.train.Saver(tf.global_variables())

        # Build an initialization operation to run below.
        init = tf.global_variables_initializer()
        #saver2 = tf.train.Saver(tf.all_variables())

        # Start running operations on the Graph.
        #gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
        myconfig = tf.ConfigProto(
            log_device_placement=FLAGS.log_device_placement)
        myconfig.gpu_options.allow_growth = FLAGS.allow_growth
        sess = tf.Session(config=myconfig)
        writer = tf.summary.FileWriter(FLAGS.train_dir, sess.graph)
        sess.run(init)
        # Start the queue runners.
        tf.train.start_queue_runners(sess=sess)
        print("Starting train procedure..", file=log)
        # Build the summary operation based on the TF collection of Summaries.
        _summ = tf.summary.merge_all()
        start_time = time.time()
        for step in range(FLAGS.max_steps):
            fLoss, rLoss, _flowLoss, rSLoss, _losses, loss_value, summ, _ = sess.run(
                [forcesLoss, reconLoss, flowLoss, reconSmallLoss, losses, totalLoss, _summ, train_op])
            assert not numpy.isnan(
                loss_value), 'Model diverged with loss = NaN'
            writer.add_summary(summ, step)
            if step % 200 == 0:
                duration = float(time.time() - start_time) / 200
                examples_per_sec = FLAGS.trainBatch_size / duration
                small_examples_per_sec = 4 * FLAGS.trainBatch_size / duration
                format_str = (
                    '%s: progress %2.2f, loss = %.10f (%.1f examples/sec; %.1f small_examples/sec; %.3f sec/batch)'
                )
                print(format_str %
                      (datetime.now(), step / FLAGS.max_steps, loss_value,
                       examples_per_sec, small_examples_per_sec, duration),
                      file=log)
                print(step / FLAGS.max_steps, end="\r", flush=True)
                start_time = time.time()
            # Save the model checkpoint periodically.
            if step % 1000 == 0 or (step + 1) == FLAGS.max_steps:
                checkpoint_path = os.path.join(FLAGS.train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
def evaluate():
    log = open("evalFC.log", "w", 1)

    # features, labels = network.inputFeatures(cases, batch_size, False)
    flowPlaceholder = tf.placeholder(tf.float32, shape=(80, 56, 32, 6))
    labelsPlaceholder = tf.placeholder(tf.float32, shape=(12))

    p, k, nut, u = tf.split(flowPlaceholder, [1, 1, 1, 3], 3)
    u = network.normalizeTensor(u)
    p = network.normalizeTensor(p)
    k = network.normalizeTensor(k)
    nut = network.normalizeTensor(nut)
    u = tf.reshape(u, (1, 80, 56, 32, 3))
    p = tf.reshape(p, (1, 80, 56, 32, 1))
    k = tf.reshape(k, (1, 80, 56, 32, 1))
    nut = tf.reshape(nut, (1, 80, 56, 32, 1))
    code = network.inference([p, k, nut, u], True, log)
    predictions = network.predictForces(code, log, "test")
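    # Element-wise difference between the 12 predicted force components and the labels.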
    diffs = tf.subtract(tf.reshape(predictions, (12, )), labelsPlaceholder)

    variable_averages = tf.train.ExponentialMovingAverage(
        network.MOVING_AVERAGE_DECAY)
    variables_to_restore = variable_averages.variables_to_restore()
    # Remap batch-norm population statistics to the names of their moving-average shadow variables.
    for key in list(variables_to_restore.keys()):
        if "popMean" in key or "popVar" in key:
            variables_to_restore[key + "/avg"] = variables_to_restore[key]
            del variables_to_restore[key]

    saver = tf.train.Saver(variables_to_restore)

    myconfig = tf.ConfigProto()
    sess = tf.Session(config=myconfig)
    ckpt = tf.train.get_checkpoint_state(train_dir)
    if ckpt and ckpt.model_checkpoint_path:
        # Restores from checkpoint
        saver.restore(sess, ckpt.model_checkpoint_path)

    means = numpy.zeros(shape=(6), dtype=numpy.float32)
    minn = numpy.zeros(shape=(6), dtype=numpy.float32)
    minn[:] = numpy.finfo(numpy.float32).max
    maxx = numpy.zeros(shape=(6), dtype=numpy.float32)
    maxx[:] = numpy.finfo(numpy.float32).min
    cases = os.listdir(testData_dir)
    print "about to get error"
    for c in cases:
        path = testData_dir + "/" + c
        flow = numpy.fromfile(path + "/allInOne.raw", dtype=numpy.float32)
        flow = numpy.reshape(flow, (96, 64, 32, 6))
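        # Crop the 96x64x32 volume to the 80x56x32 region expected by the placeholder.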
        flow = flow[10:90, 3:59, :, :]
        labels = numpy.loadtxt(path + "/forcesLast.dat", dtype=numpy.float32)
        differences = sess.run(diffs,
                               feed_dict={
                                   flowPlaceholder: flow,
                                   labelsPlaceholder: labels
                               })
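        # Keep six of the twelve output components (indices 0-2 and 6-8).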
        differences = numpy.take(differences, [0, 1, 2, 6, 7, 8])
        means += numpy.abs(differences)
        minn = numpy.minimum(minn, differences)
        maxx = numpy.maximum(maxx, differences)

    means /= len(cases)
    rangee = maxx - minn
    means /= rangee
    print "Means: " + str(means)
    print numpy.mean(means)