Example #1
0
if __name__ == '__main__':
    # Reset the training directory so every run starts from a clean slate.
    if tf.gfile.Exists(train_dir):
        tf.gfile.DeleteRecursively(train_dir)
    tf.gfile.MakeDirs(train_dir)

    # Line-buffered (buffering=1) text log written next to the train dir.
    log = open(train_dir + ".txt", "w", 1)
    # Non-trainable scalar bool variable in the graph, initialized True;
    # presumably toggled between training and evaluation phases — confirm downstream.
    is_training = tf.get_variable('is_training',
                                  shape=(),
                                  dtype=tf.bool,
                                  initializer=tf.constant_initializer(
                                      True, dtype=tf.bool),
                                  trainable=False)
    global_step = tf.Variable(0, trainable=False)

    # trainData, testData, numTrainExamples, numTestExamples, testIterator = preprocessing.inputFlows(batch_size)
    # NOTE(review): numTrainExamples is passed as a keyword argument on the
    # same statement that rebinds it — it must already be defined earlier in
    # the file, otherwise this raises NameError. TODO confirm against the
    # full script (not visible in this chunk).
    trainData, testData, numTrainExamples, numTestExamples, testIterator = preprocessing.inputFlows(
        batch_size, numTrainExamples=numTrainExamples)

    # Shard each training tensor along axis 0 (the batch axis), one shard
    # per GPU; perGPUTrainData[g] collects that GPU's slice of every tensor.
    perGPUTrainData = [list([]) for i in range(numGpus)]
    for tD in trainData:
        split = tf.split(tD, numGpus, axis=0)
        for gpu in range(numGpus):
            perGPUTrainData[gpu].append(split[gpu])

    # Same sharding for the test tensors; the final element of testData is
    # deliberately excluded from splitting (testData[:-1]) — presumably
    # metadata that stays whole. TODO confirm against preprocessing.inputFlows.
    perGPUTestData = [list([]) for i in range(numGpus)]
    for tD in testData[:-1]:
        split = tf.split(tD, numGpus, axis=0)
        for gpu in range(numGpus):
            perGPUTestData[gpu].append(split[gpu])

    # Build one model replica ("tower") per GPU. NOTE(review): the body of
    # this with-block is truncated in this view.
    for gpu in range(numGpus):
        with tf.name_scope('tower_%d' % (gpu)) as scope:
Example #2
0
			# NOTE(review): fragment — the enclosing def, loop, and try are
			# above this view; comments below are limited to what is visible.
			count += 1
		except tf.errors.OutOfRangeError:
			# The input iterator is exhausted: stop accumulating.
			break
	# Returns (sqrt(mean/count), std/count) — presumably a per-dataset RMS of
	# the mean accumulator and an averaged std; confirm against the unseen
	# function header and accumulator definitions.
	return numpy.sqrt(mean/count), std/count

if __name__ == '__main__':
	# Reset the training directory so every run starts from a clean slate.
	if tf.gfile.Exists(train_dir):
		tf.gfile.DeleteRecursively(train_dir)
	tf.gfile.MakeDirs(train_dir)

	# Line-buffered (buffering=1) text log written next to the train dir.
	log = open(train_dir+".txt", "w", 1)
	# Non-trainable scalar bool variable, initialized True; presumably toggled
	# between training and evaluation phases — confirm downstream.
	is_training = tf.get_variable('is_training', shape=(), dtype=tf.bool, initializer=tf.constant_initializer(True, dtype=tf.bool), trainable=False)
	global_step = tf.Variable(0, trainable=False)

	# Two input pipelines: a "small" flow-prediction dataset and a "big" flow
	# dataset, each yielding (train tensors, test tensors, counts, iterator).
	trainDataSmall, testDataSmall, numTrainExamplesSmall, numTestExamplesSmall, testIteratorSmall = preprocessing.inputFlowsForFlowPrediction(batch_size)
	trainDataBig, testDataBig, numTrainExamplesBig, numTestExamplesBig, testIteratorBig = preprocessing.inputFlows(batch_size)
	# trainData, testData, numTrainExamples, numTestExamples, testIterator = preprocessing.inputFlows(batch_size, numTrainExamples=8000)
	# numTrainExamples = numTrainExamplesSmall
	# The big dataset's example count drives the shared numTrainExamples;
	# presumably used for epoch/step bookkeeping below this view — confirm.
	numTrainExamples = numTrainExamplesBig

	# Shard each "big" training tensor along axis 0 (the batch axis), one
	# shard per GPU; perGPUTrainDataBig[g] collects GPU g's slice of every tensor.
	perGPUTrainDataBig = [list([]) for i in range(numGpus)]
	for tD in trainDataBig:
		split = tf.split(tD, numGpus, axis=0)
		for gpu in range(numGpus):
			perGPUTrainDataBig[gpu].append(split[gpu])

	# Same sharding for the "big" test tensors; the final element of
	# testDataBig is deliberately left unsplit (testDataBig[:-1]) —
	# presumably non-batch metadata. TODO confirm against preprocessing.inputFlows.
	perGPUTestDataBig = [list([]) for i in range(numGpus)]
	for tD in testDataBig[:-1]:
		split = tf.split(tD, numGpus, axis=0)
		for gpu in range(numGpus):
			perGPUTestDataBig[gpu].append(split[gpu])