        perGPUTestData[gpu].append(split[0])
        perGPUTestLabels[gpu].append(split[1])

# exit()
# Join each GPU's label pieces back together along the last axis.
for gpu in range(numGpus):
    perGPUTestLabels[gpu] = tf.concat(perGPUTestLabels[gpu], axis=-1)

testLabels = tf.concat(perGPUTestLabels, axis=0)
netOut = []
for gpu in range(numGpus):
    with tf.name_scope('tower_%d' % (gpu)) as scope:
        with tf.device('/gpu:%d' % gpu):
            # print(perGPUTestData[gpu][0].get_shape())
            # print(len(perGPUTestData[gpu]))
            valCode = models.inference(perGPUTestData[gpu],
                                       first=(gpu == 0),
                                       useType="test",
                                       modelType=modelType)
            print(valCode.get_shape())
            gpuValPredictions = models.predictFlow(valCode,
                                                   batch_size // numGpus,
                                                   log,
                                                   useType="test",
                                                   first=(gpu == 0))
            diff = tf.subtract(gpuValPredictions, perGPUTestLabels[gpu])
            # netOut.append(gpuValPredictions)
            netOut.append(tf.reduce_mean(tf.square(diff)))

# valPredictions = tf.concat(netOut, axis=0)
# diff = tf.subtract(valPredictions, testLabels)
# valError = tf.reduce_mean(tf.square(diff))
valError = tf.reduce_mean(netOut)  # average of the per-tower mean-squared errors
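
The snippet above follows the usual TF1 data-parallel pattern: split the test batch across GPUs, build one tower per device under tf.name_scope/tf.device, compute a per-tower mean-squared error, and average the tower errors. A minimal self-contained sketch of that pattern follows; the dense layer is only a hypothetical stand-in for models.inference / models.predictFlow, and the shapes are made up.

import tensorflow as tf

numGpus = 2
data = tf.placeholder(tf.float32, [8, 16])    # hypothetical features
labels = tf.placeholder(tf.float32, [8, 2])   # hypothetical targets

perGpuData = tf.split(data, numGpus, axis=0)      # one shard per GPU along the batch axis
perGpuLabels = tf.split(labels, numGpus, axis=0)

towerErrors = []
for gpu in range(numGpus):
    with tf.name_scope('tower_%d' % gpu):
        with tf.device('/gpu:%d' % gpu):
            # Stand-in for models.inference(...) + models.predictFlow(...);
            # reuse=(gpu > 0) shares one set of weights across towers.
            preds = tf.layers.dense(perGpuData[gpu], 2, name='head', reuse=(gpu > 0))
            diff = tf.subtract(preds, perGpuLabels[gpu])
            towerErrors.append(tf.reduce_mean(tf.square(diff)))

valError = tf.reduce_mean(towerErrors)  # average of the per-tower MSEs
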
Example #2
            perGPUTrainData[gpu].append(split[gpu])

    perGPUTestData = [[] for i in range(numGpus)]
    for tD in testData[:-1]:
        split = tf.split(tD, numGpus, axis=0)
        for gpu in range(numGpus):
            perGPUTestData[gpu].append(split[gpu])

    for gpu in range(numGpus):
        with tf.name_scope('tower_%d' % (gpu)) as scope:
            with tf.device('/gpu:%d' % gpu):
                print("Defining tower " + str(gpu))
                print(perGPUTrainData[gpu][0].get_shape())
                print(len(perGPUTrainData[gpu]))
                trainCode = models.inference(perGPUTrainData[gpu][:-1],
                                             first=(gpu == 0),
                                             useType="train",
                                             modelType=modelType)
                print(trainCode.get_shape())
                predictions = models.predictForces(trainCode,
                                                   batch_size // numGpus,
                                                   log,
                                                   useType="train",
                                                   first=(gpu == 0),
                                                   addToCollection=True)
                # l2_loss = tf.nn.l2_loss(predictions - perGPUTrainData[gpu][-1], name="l2_loss_gpu_"+str(gpu))
                # The [1, 2] weight row broadcasts over the batch and makes the
                # first output column count 100x more in the squared loss.
                weights = numpy.array([[10, 1]])
                l2_loss = tf.reduce_sum(
                    tf.squared_difference(predictions * weights,
                                          perGPUTrainData[gpu][-1] * weights),
                    name="l2_loss_gpu_" + str(gpu))
                tf.add_to_collection('l2_losses', l2_loss)
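
The weights = numpy.array([[10, 1]]) line relies on broadcasting: the [1, 2] row scales the two output columns before the squared difference is taken, so the first column's error is weighted 100x relative to the second in the summed loss. A small numeric sketch with made-up values:

import numpy
import tensorflow as tf

weights = numpy.array([[10, 1]], dtype=numpy.float32)  # [1, 2] row, broadcasts over the batch

predictions = tf.constant([[1.0, 1.0],
                           [2.0, 0.0]])   # hypothetical model outputs, shape [2, 2]
targets = tf.constant([[1.5, 0.0],
                       [2.0, 1.0]])       # hypothetical labels, same shape

# Scaling both sides by the same weights multiplies each column's squared
# error by weights**2: the first column contributes 25.0 here instead of 0.25.
l2_loss = tf.reduce_sum(tf.squared_difference(predictions * weights,
                                              targets * weights))

with tf.Session() as sess:
    print(sess.run(l2_loss))  # 25.0 + 1.0 + 0.0 + 1.0 = 27.0
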
Example #3
		gpuSplits = tf.split(tD, numGpus, axis=0)
		for gpu, gpuSplit in enumerate(gpuSplits):
			split = tf.split(gpuSplit, 2, axis=1)
			perGPUTestDataSmall[gpu].append(split[0])
			perGPUTestLabelsSmall[gpu].append(split[1])

	for gpu in range(numGpus):
		perGPUTrainLabelsSmall[gpu] = tf.concat(perGPUTrainLabelsSmall[gpu], axis=-1)
		perGPUTestLabelsSmall[gpu] = tf.concat(perGPUTestLabelsSmall[gpu], axis=-1)

	testLabelsSmall = tf.concat(perGPUTestLabelsSmall, axis=0)
	for gpu in range(numGpus):
		with tf.name_scope('tower_%d' % (gpu)) as scope:
			with tf.device('/gpu:%d' % gpu):
				print("Defining tower "+str(gpu))
				trainCodeBig = models.inference(perGPUTrainDataBig[gpu], first=(gpu==0), useType="train", modelType=modelType)
				trainCodeSmall = models.inference(perGPUTrainDataSmall[gpu], first=False, useType="train", modelType=modelType)
				flowPredictions = models.predictFlow(trainCodeSmall, batch_size//numGpus, log, useType="train", first=(gpu==0))
				forcePredictions = models.predictForces(trainCodeBig, batch_size//numGpus, log, useType="train", first=(gpu==0))
				# flowReconBig = models.reconstruct(trainCodeBig, batch_size//numGpus, log, useType="train", first=(gpu==0))
				# flowReconSmall = models.reconstruct(trainCodeSmall, batch_size//numGpus, log, useType="train", first=(gpu==0))
				# l2_loss = tf.nn.l2_loss(predictions - perGPUTrainData[gpu][-1], name="l2_loss_gpu_"+str(gpu))
				# print(perGPUTrainLabels[gpu].get_shape())
				weights = numpy.array([[10,1]])
				l2_loss_forces = tf.reduce_sum(tf.squared_difference(forcePredictions*weights, perGPUTrainDataBig[gpu][-1]*weights), name="l2_force_loss_gpu_"+str(gpu))
				l2_loss_flow = tf.reduce_sum(tf.squared_difference(flowPredictions, perGPUTrainLabelsSmall[gpu]), name="l2_flow_loss_gpu_"+str(gpu))
				# l2_flow_recon_loss = tf.reduce_sum(tf.squared_difference(flowReconSmall, tf.concat(perGPUTrainDataSmall[gpu], axis=-1)), name="l2_flow_recon_loss_gpu_"+str(gpu))
				# l2_force_recon_loss = tf.reduce_sum(tf.squared_difference(flowReconBig, tf.concat(perGPUTrainDataBig[gpu][:-1], axis=-1)), name="l2_force_recon_loss_gpu_"+str(gpu))
				tf.add_to_collection('l2_losses', l2_loss_forces)
				tf.add_to_collection('l2_losses', l2_loss_flow)
				# tf.add_to_collection('l2_losses', l2_flow_recon_loss)
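
The per-tower losses above are only registered in the 'l2_losses' collection; the snippet does not show how they are combined. The usual follow-up (an assumption, not shown in the source) is to pull the collection and sum it into one training objective, roughly:

import tensorflow as tf

# Hypothetical stand-ins for the per-tower losses collected above.
tf.add_to_collection('l2_losses', tf.constant(1.0, name='l2_force_loss_gpu_0'))
tf.add_to_collection('l2_losses', tf.constant(2.0, name='l2_flow_loss_gpu_0'))

# Sum every loss the towers registered under 'l2_losses'.
total_l2_loss = tf.add_n(tf.get_collection('l2_losses'), name='total_l2_loss')

An optimizer would then minimize total_l2_loss so that all towers contribute to the gradient update.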