Example #1
File: valuenet.py  Project: cgao3/pnnhex
    def model(self, dataNode, kernalSize=(3, 3), kernalDepth=48, numValueUnits=48):
        weightShape = kernalSize + (INPUT_DEPTH, kernalDepth)
        output = self.inputLayer.convolve(dataNode, weight_shape=weightShape, bias_shape=(kernalDepth,))

        weightShape = kernalSize + (kernalDepth, kernalDepth)
        for i in xrange(self.nLayers):
            out = self.convLayers[i].convolve(output, weight_shape=weightShape, bias_shape=(kernalDepth,))
            output = out
        logits = self.convLayers[self.nLayers - 1].move_logits(output, BOARD_SIZE, value_net=True)
        self.valueLayer = Layer("ValueOutputLayer", paddingMethod="VALID")
        value = self.valueLayer.value_estimation(logits, numValueUnits)
        return value
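The method above is the value-head variant of the shared design: it stacks the same convolution tower as the policy networks in Examples #4 and #5, then asks the last conv layer for per-move logits with value_net=True and squashes them into a scalar state-value estimate through a dedicated ValueOutputLayer. The full class, ValueNet2, appears in Example #6.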
Example #2
def test_functionality():
    n_input = 3
    n_output = 4
    shape = (n_output, n_input)
    weights = np.zeros(shape)
    bias = np.zeros(n_output)
    activation_function = LogisticSigmoid()
    layer = Layer(weights, bias, activation_function)

    assert_tuple_equal(layer.get_weights().shape, shape)

    np.random.seed(0)
    random_matrix = np.random.randn(*shape)
    assert_tuple_equal(random_matrix.shape, shape)
    random_vector = np.random.randn(n_output)
    layer.set_weights(random_matrix)
    layer.set_bias(random_vector)
    np.testing.assert_array_equal(layer.get_weights(), random_matrix)
    np.testing.assert_array_equal(layer.get_bias(), random_vector)
    assert_equal(layer.get_activation_function(), activation_function)
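Note that this test pins down only the accessor interface of its Layer class (a different Layer from the pnnhex one above), not the forward computation. For orientation, here is a minimal sketch that would satisfy the test, assuming the conventional dense forward pass y = f(Wx + b); the forward method is hypothetical, since the test never exercises it, and the project's real classes are presumably richer.

import numpy as np

class LogisticSigmoid(object):
    def __call__(self, z):
        # element-wise logistic function 1 / (1 + exp(-z))
        return 1.0 / (1.0 + np.exp(-z))

class Layer(object):
    def __init__(self, weights, bias, activation_function):
        self._weights = weights            # shape: (n_output, n_input)
        self._bias = bias                  # shape: (n_output,)
        self._activation = activation_function

    def get_weights(self):
        return self._weights

    def set_weights(self, weights):
        self._weights = weights

    def get_bias(self):
        return self._bias

    def set_bias(self, bias):
        self._bias = bias

    def get_activation_function(self):
        return self._activation

    def forward(self, x):
        # standard dense forward pass: f(Wx + b)
        return self._activation(self._weights.dot(x) + self._bias)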
Example #3
    def setup_architecture(self, nLayers):
        self.nLayers=nLayers
        self.inputLayer=Layer("InputLayer", paddingMethod="VALID")
        self.convLayers=[Layer("ConvLayer%d"%i) for i in xrange(nLayers)]
Example #4
class SupervisedNet(object):

    def __init__(self, srcTrainDataPath, srcTestDataPath, srcTestPathFinal=None):
        self.srcTrainPath=srcTrainDataPath
        self.srcTestPath=srcTestDataPath
        self.srcTestPathFinal=srcTestPathFinal

    def setup_architecture(self, nLayers):
        self.nLayers=nLayers
        self.inputLayer=Layer("InputLayer", paddingMethod="VALID")
        self.convLayers=[Layer("ConvLayer%d"%i) for i in xrange(nLayers)]

    def model(self, dataNode, kernalSize=(3,3), kernalDepth=128):
        weightShape=kernalSize+(INPUT_DEPTH, kernalDepth)
        output=self.inputLayer.convolve(dataNode, weight_shape=weightShape, bias_shape=(kernalDepth,))

        weightShape=kernalSize+(kernalDepth, kernalDepth)
        for i in xrange(self.nLayers):
            out=self.convLayers[i].convolve(output, weight_shape=weightShape, bias_shape=(kernalDepth,))
            output=out
        logits=self.convLayers[self.nLayers-1].move_logits(output, BOARD_SIZE)
        return logits

    def inference(self, lastcheckpoint):
        srcIn = "dumpy.txt"
        with open(srcIn) as f:
            num_lines = sum(1 for _ in f)
        self.xInputNode=tf.placeholder(dtype=tf.float32, shape=(num_lines, INPUT_WIDTH, INPUT_WIDTH, INPUT_DEPTH), name="x_input_node")
        putil=PositionUtil3(positiondata_filename=srcIn, batch_size=num_lines)
        putil.prepare_batch()
        self.setup_architecture(nLayers=5)
        self.xLogits = self.model(self.xInputNode)
        saver = tf.train.Saver()
        with tf.Session() as sess:
            saver.restore(sess, lastcheckpoint)
            logits=sess.run(self.xLogits, feed_dict={self.xInputNode:putil.batch_positions})

            action = np.argmax(logits, 1)[0]
            x, y = action // BOARD_SIZE, action % BOARD_SIZE
            y += 1
            print("prediction: " + repr(action) + " " + chr(ord('a') + x) + repr(y))
            print(np.argmax(logits,1))
            batch_predict=sess.run(tf.nn.softmax(logits))
            print(putil.batch_labels)
            e1=error_topk(batch_predict, putil.batch_labels, k=1)
            e2=error_topk(batch_predict, putil.batch_labels, k=2)
            e3=error_topk(batch_predict, putil.batch_labels, k=3)
            e4=error_topk(batch_predict, putil.batch_labels, k=4)
            e5=error_topk(batch_predict, putil.batch_labels, k=5)
            e6=error_topk(batch_predict, putil.batch_labels, k=6)
            print("top 1 accuracy", 100.0-e1)
            print("top 2 accuracy", 100.0-e2)
            print("top 3 accuracy", 100.0 - e3)
            print("top 4 accuracy", 100.0 - e4)
            print("top 5 accuracy", 100.0 - e5)
            print("top 6 accuracy", 100.0 - e6)
        putil.close_file()
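        # The e_k values are top-k error percentages, so each accuracy line
        # above prints 100.0 - e_k.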

    def train(self, nSteps):
        self.batchInputNode = tf.placeholder(dtype=tf.float32, shape=(BATCH_SIZE, INPUT_WIDTH, INPUT_WIDTH, INPUT_DEPTH),name="BatchTrainInputNode")
        self.batchLabelNode = tf.placeholder(dtype=tf.int32, shape=(BATCH_SIZE,), name="BatchTrainLabelNode")

        self.xInputNode=tf.placeholder(dtype=tf.float32, shape=(1, INPUT_WIDTH, INPUT_WIDTH, INPUT_DEPTH), name="x_input_node")
        fake_input=np.ndarray(dtype=np.float32, shape=(1, INPUT_WIDTH, INPUT_WIDTH, INPUT_DEPTH))
        fake_input.fill(0)

        self.setup_architecture(nLayers=5)
        batchLogits=self.model(self.batchInputNode)
        batchPrediction=tf.nn.softmax(batchLogits)
        loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits=batchLogits, labels=self.batchLabelNode))
        opt=tf.train.AdamOptimizer().minimize(loss)

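        # Reuse the conv-tower variables (presumably created with tf.get_variable
        # inside Layer) so the single-position inference graph below shares its
        # weights with the training graph.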
        tf.get_variable_scope().reuse_variables()
        self.xLogits=self.model(self.xInputNode)
        
        trainDataUtil = PositionUtil3(positiondata_filename=self.srcTrainPath, batch_size=BATCH_SIZE)
        testDataUtil = PositionUtil3(positiondata_filename=self.srcTestPath, batch_size=BATCH_SIZE)

        accuracyPlaceholder = tf.placeholder(tf.float32)
        accuracyTrainSummary = tf.summary.scalar("Accuracy (Training)", accuracyPlaceholder)
        accuracyValidateSummary = tf.summary.scalar("Accuracy (Validating)", accuracyPlaceholder)

        saver=tf.train.Saver(max_to_keep=10)
        print_frequency=20
        test_frequency=500
        save_frequency=20000
        step=0
        epoch_num=0

        with tf.Session() as sess:
            init=tf.variables_initializer(tf.global_variables(), name="init_node")
            sess.run(init)
            print("Initialized all variables!")
            trainWriter = tf.summary.FileWriter(FLAGS.summaries_dir+"/"+repr(nSteps)+"/train", sess.graph)
            validateWriter = tf.summary.FileWriter(FLAGS.summaries_dir +"/"+repr(nSteps)+ "/validate", sess.graph)

            sl_model_dir = os.path.dirname(MODELS_DIR)
            while step < nSteps:
                nextEpoch=trainDataUtil.prepare_batch()
                if nextEpoch: epoch_num += 1
                inputs=trainDataUtil.batch_positions.astype(np.float32)
                labels=trainDataUtil.batch_labels.astype(np.int32)
                feed_dictionary={self.batchInputNode:inputs, self.batchLabelNode:labels}
                _, run_loss=sess.run([opt, loss], feed_dict=feed_dictionary)

                if step % print_frequency == 0:
                    run_predict=sess.run(batchPrediction, feed_dict={self.batchInputNode:inputs})
                    run_error=error_rate(run_predict,trainDataUtil.batch_labels)
                    print("epoch: ", epoch_num, "step:", step, "loss:", run_loss, "error_rate:", run_error )
                    summary = sess.run(accuracyTrainSummary, feed_dict={accuracyPlaceholder: 100.0-run_error})
                    trainWriter.add_summary(summary, step)
                if step % test_frequency == 0:
                    hasOneEpoch=False
                    sum_run_error=0.0
                    ite=0
                    while not hasOneEpoch:
                        hasOneEpoch=testDataUtil.prepare_batch()
                        x_input = testDataUtil.batch_positions.astype(np.float32)
                        feed_d = {self.batchInputNode: x_input}
                        predict = sess.run(batchPrediction, feed_dict=feed_d)
                        run_error = error_rate(predict, testDataUtil.batch_labels)
                        sum_run_error += run_error
                        ite += 1
                    run_error=sum_run_error/ite
                    print("evaluation error rate", run_error)
                    summary = sess.run(accuracyValidateSummary, feed_dict={accuracyPlaceholder: 100.0-run_error})
                    validateWriter.add_summary(summary, step)
                if step >= 40000 and step % save_frequency == 0:
                    saver.save(sess, os.path.join(sl_model_dir, SLMODEL_NAME), global_step=step)
                step += 1
            print("saving computation graph for c++ inference")
            tf.train.write_graph(sess.graph_def, sl_model_dir, "graph.pbtxt")
            tf.train.write_graph(sess.graph_def, sl_model_dir, "graph.pb", as_text=False)
            saver.save(sess, os.path.join(sl_model_dir, SLMODEL_NAME), global_step=step)

            print("Testing error on test data is:")
            testDataUtil.close_file()
            testDataUtil=PositionUtil3(positiondata_filename=self.srcTestPathFinal, batch_size=BATCH_SIZE)
            hasOneEpoch=False
            sum_run_error=0.0
            sum2=0.0
            ite=0
            KValue=3
            while not hasOneEpoch:
                hasOneEpoch = testDataUtil.prepare_batch()
                x_input = testDataUtil.batch_positions.astype(np.float32)
                feed_d = {self.batchInputNode: x_input}
                predict = sess.run(batchPrediction, feed_dict=feed_d)
                run_error = error_rate(predict, testDataUtil.batch_labels)
                top_k_run_error = error_topk(predict, testDataUtil.batch_labels, k=KValue)
                sum_run_error += run_error
                sum2 += top_k_run_error
                ite += 1
            print("Testing error is:", sum_run_error/ite)
            with open("test_error.txt", "w") as writeout:
                writeout.write("Testing error is: " + repr(sum_run_error/ite) + '\n')
                writeout.write("Top " + repr(KValue) + " error: " + repr(sum2/ite))
            # presumably a sanity check that the exported x_input_node path still evaluates
            sess.run(self.xLogits, feed_dict={self.xInputNode:fake_input})
        trainDataUtil.close_file()
        testDataUtil.close_file()
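error_topk is referenced throughout Example #4 but defined elsewhere in the project. A plausible sketch, assuming it returns a percentage error, i.e. the share of positions whose true move is missing from the k highest-scoring moves (consistent with the 100.0 - e_k accuracy prints in inference above):

import numpy as np

def error_topk(predictions, labels, k=1):
    # indices of the k highest-probability moves in each row
    topk = np.argsort(predictions, axis=1)[:, -k:]
    # a row counts as a hit when its true label appears among those k moves
    hits = np.any(topk == np.asarray(labels).reshape(-1, 1), axis=1)
    return 100.0 * (1.0 - np.mean(hits))

With k=1 this reduces to the plain error rate, so error_rate(predictions, labels) could simply delegate to error_topk(predictions, labels, k=1).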
Example #5
File: rewardml.py  Project: cgao3/pnnhex
class SupervisedRMLNet(object):

    def __init__(self, srcTrainDataPath, srcTestDataPath, srcTestPathFinal=None):
        self.srcTrainPath=srcTrainDataPath
        self.srcTestPath=srcTestDataPath
        self.srcTestPathFinal=srcTestPathFinal

    def setup_architecture(self, nLayers):
        self.nLayers=nLayers
        self.inputLayer=Layer("InputLayer", paddingMethod="VALID")
        self.convLayers=[Layer("ConvLayer%d"%i) for i in xrange(nLayers)]

    def model(self, dataNode, kernalSize=(3,3), kernalDepth=48):
        weightShape=kernalSize+(INPUT_DEPTH, kernalDepth)
        output=self.inputLayer.convolve(dataNode, weight_shape=weightShape, bias_shape=(kernalDepth,))

        weightShape=kernalSize+(kernalDepth, kernalDepth)
        for i in xrange(self.nLayers):
            out=self.convLayers[i].convolve(output, weight_shape=weightShape, bias_shape=(kernalDepth,))
            output=out
        logits=self.convLayers[self.nLayers-1].move_logits(output, BOARD_SIZE)
        return logits

    def train(self, nSteps):
        tau=0.95
        self.batchInputNode = tf.placeholder(dtype=tf.float32, shape=(BATCH_SIZE, INPUT_WIDTH, INPUT_WIDTH, INPUT_DEPTH),name="BatchTrainInputNode")
        self.batchLabelNode = tf.placeholder(dtype=tf.int32, shape=(BATCH_SIZE,), name="BatchTrainLabelNode")

        self.xInputNode=tf.placeholder(dtype=tf.float32, shape=(1, INPUT_WIDTH, INPUT_WIDTH, INPUT_DEPTH), name="x_input_node")
        fake_input=np.ndarray(dtype=np.float32, shape=(1, INPUT_WIDTH, INPUT_WIDTH, INPUT_DEPTH))
        fake_input.fill(0)

        self.setup_architecture(nLayers=5)
        batchLogits=self.model(self.batchInputNode)
        batchPrediction=tf.nn.softmax(batchLogits)
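        # Hand-rolled mean negative log-likelihood over the batch; mathematically
        # this equals sparse softmax cross-entropy on batchLogits, though the
        # fused tf.nn op would be more numerically stable.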
        sum_loss=0.0
        for i in range(BATCH_SIZE):
            sum_loss = sum_loss - tf.log(batchPrediction[i][self.batchLabelNode[i]])

        loss=sum_loss/BATCH_SIZE
        opt=tf.train.AdamOptimizer().minimize(loss)

        tf.get_variable_scope().reuse_variables()
        self.xLogits=self.model(self.xInputNode)
        
        trainDataUtil = PositionUtilReward(positiondata_filename=self.srcTrainPath, batch_size=BATCH_SIZE)
        testDataUtil = PositionUtilReward(positiondata_filename=self.srcTestPath, batch_size=BATCH_SIZE, forTest=True)

        accuracyPlaceholder = tf.placeholder(tf.float32)
        accuracyTrainSummary = tf.summary.scalar("Accuracy (Training)", accuracyPlaceholder)
        accuracyValidateSummary = tf.summary.scalar("Accuracy (Validating)", accuracyPlaceholder)

        saver=tf.train.Saver(max_to_keep=10)
        print_frequency=20
        step=0
        epoch_num=0
        bestError=100.0
        bestTrainStep=None
        maxPatienceEpoch=10
        patience_begin=0

        with tf.Session() as sess:
            init=tf.variables_initializer(tf.global_variables(), name="init_node")
            sess.run(init)
            print("Initialized all variables!")
            trainWriter = tf.summary.FileWriter(FLAGS.rmlsummaries_dir+"/"+repr(nSteps)+"/train", sess.graph)
            validateWriter = tf.summary.FileWriter(FLAGS.rmlsummaries_dir +"/"+repr(nSteps)+ "/validate", sess.graph)

            rml_model_dir = os.path.dirname(MODELS_DIR)
            while step < nSteps:
                nextEpoch=trainDataUtil.prepare_batch()
                if nextEpoch:
                    epoch_num += 1
                    hasOneTestEpoch = False
                    sum_run_error = 0.0
                    ite = 0
                    while not hasOneTestEpoch:
                        hasOneTestEpoch = testDataUtil.prepare_batch()
                        x_input = testDataUtil.batch_positions.astype(np.float32)
                        feed_d = {self.batchInputNode: x_input}
                        predict = sess.run(batchPrediction, feed_dict=feed_d)
                        run_error = errorRateTest(predict, testDataUtil.batch_labelSet)
                        sum_run_error += run_error
                        ite += 1
                    run_error = sum_run_error / ite
                    print("Epoch:", epoch_num, "Evaluation error rate", run_error)
                    summary = sess.run(accuracyValidateSummary, feed_dict={accuracyPlaceholder: 100.0 - run_error})
                    validateWriter.add_summary(summary, step)
                    saver.save(sess, os.path.join(rml_model_dir, RMLMODEL_NAME), global_step=step)
                    if bestError > run_error:
                        bestError = run_error
                        bestTrainStep = step
                        patience_begin = 0
                    else:
                        patience_begin += 1
                        if patience_begin >= maxPatienceEpoch:
                            break
                inputs=trainDataUtil.batch_positions.astype(np.float32)
                labels=trainDataUtil.batch_labels.astype(np.int32)
                feed_dictionary={self.batchInputNode:inputs, self.batchLabelNode:labels}
                _, run_loss=sess.run([opt, loss], feed_dict=feed_dictionary)

                if step % print_frequency == 0:
                    run_predict=sess.run(batchPrediction, feed_dict={self.batchInputNode:inputs})
                    run_error=error_rate(run_predict,trainDataUtil.batch_labels)
                    print("epoch: ", epoch_num, "step:", step, "loss:", run_loss, "error_rate:", run_error )
                    summary = sess.run(accuracyTrainSummary, feed_dict={accuracyPlaceholder: 100.0-run_error})
                    trainWriter.add_summary(summary, step)
                step += 1
            print("saving computation graph for c++ inference")
            tf.train.write_graph(sess.graph_def, rml_model_dir, "graph.pbtxt")
            tf.train.write_graph(sess.graph_def, rml_model_dir, "graph.pb", as_text=False)

            print("best Error is:", bestError, "best train step:", bestTrainStep)
            testDataUtil.close_file()
            testDataUtil=PositionUtilReward(positiondata_filename=self.srcTestPathFinal, batch_size=BATCH_SIZE, forTest=True)
            hasOneEpoch=False
            sum_run_error=0.0
            ite=0
            while not hasOneEpoch:
                hasOneEpoch = testDataUtil.prepare_batch()
                x_input = testDataUtil.batch_positions.astype(np.float32)
                feed_d = {self.batchInputNode: x_input}
                predict = sess.run(batchPrediction, feed_dict=feed_d)
                run_error = errorRateTest(predict, testDataUtil.batch_labelSet)
                sum_run_error += run_error
                ite += 1
            print("Testing error is:", sum_run_error/ite)
            sess.run(self.xLogits, feed_dict={self.xInputNode:fake_input})
        trainDataUtil.close_file()
        testDataUtil.close_file()
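errorRateTest is likewise external to this page. Since the reward-ML test utility exposes batch_labelSet rather than one label per position, a reasonable reading is that each test position carries a set of acceptable moves; under that assumption, a sketch:

import numpy as np

def errorRateTest(predictions, label_sets):
    # a prediction is correct when its argmax lands in the position's set
    # of acceptable moves
    best_moves = np.argmax(predictions, axis=1)
    wrong = sum(1 for move, acceptable in zip(best_moves, label_sets)
                if move not in acceptable)
    return 100.0 * wrong / len(best_moves)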
Example #6
File: valuenet.py  Project: cgao3/pnnhex
class ValueNet2(object):
    def __init__(self, srcTrainDataPath, srcTestDataPath, srcTestPathFinal=None):
        self.srcTrainPath = srcTrainDataPath
        self.srcTestPath = srcTestDataPath
        self.srcTestPathFinal = srcTestPathFinal

    def _setup_architecture(self, nLayers):
        self.nLayers = nLayers
        self.inputLayer = Layer("InputLayer", paddingMethod="VALID")
        self.convLayers = [Layer("ConvLayer%d" % i) for i in xrange(nLayers)]

    def model(self, dataNode, kernalSize=(3, 3), kernalDepth=48, numValueUnits=48):
        weightShape = kernalSize + (INPUT_DEPTH, kernalDepth)
        output = self.inputLayer.convolve(dataNode, weight_shape=weightShape, bias_shape=(kernalDepth,))

        weightShape = kernalSize + (kernalDepth, kernalDepth)
        for i in xrange(self.nLayers):
            out = self.convLayers[i].convolve(output, weight_shape=weightShape, bias_shape=(kernalDepth,))
            output = out
        logits = self.convLayers[self.nLayers - 1].move_logits(output, BOARD_SIZE, value_net=True)
        self.valueLayer = Layer("ValueOutputLayer", paddingMethod="VALID")
        value = self.valueLayer.value_estimation(logits, numValueUnits)
        return value

    def inference(self, lastcheckpoint):
        srcIn="value_dumpy.txt"
        with open(srcIn) as f:
            num_lines = sum(1 for _ in f)
        assert num_lines >= 1
        self.xInputNode=tf.placeholder(dtype=tf.float32, shape=(num_lines, INPUT_WIDTH, INPUT_WIDTH, INPUT_DEPTH), name="x_input_node")
        vutil=ValueUtil(srcStateValueFileName=srcIn, batch_size=num_lines)
        vutil.prepare_batch()
        self._setup_architecture(nLayers=5)
        self.x_value = self.model(self.xInputNode)
        saver = tf.train.Saver()
        with tf.Session() as sess:
            saver.restore(sess, lastcheckpoint)
            value_estimate=sess.run(self.x_value, feed_dict={self.xInputNode:vutil.batch_positions})
            print(value_estimate)
            print("value estimation: "+repr(value_estimate))
            print("correct value: ", vutil.batch_labels)
            MSE = tf.reduce_mean(tf.square(tf.sub(value_estimate, vutil.batch_labels)))
            print("MSE: ", sess.run(MSE))
        vutil.close_file()


    def train(self, nSteps):
        self.batchInputNode = tf.placeholder(dtype=tf.float32,
                                             shape=(BATCH_SIZE, INPUT_WIDTH, INPUT_WIDTH, INPUT_DEPTH),
                                             name="BatchTrainInputNode")
        self.batchLabelNode = tf.placeholder(dtype=tf.float32, shape=(BATCH_SIZE,), name="BatchTrainLabelNode")

        self.xInputNode = tf.placeholder(dtype=tf.float32, shape=(1, INPUT_WIDTH, INPUT_WIDTH, INPUT_DEPTH),
                                         name="x_input_node")
        fake_input = np.ndarray(dtype=np.float32, shape=(1, INPUT_WIDTH, INPUT_WIDTH, INPUT_DEPTH))
        fake_input.fill(0)

        self._setup_architecture(nLayers=5)
        batchPredictedValue = self.model(self.batchInputNode)
        MSE = tf.reduce_mean(tf.square(tf.sub(batchPredictedValue, self.batchLabelNode)))
        opt = tf.train.AdamOptimizer().minimize(MSE)

        tf.get_variable_scope().reuse_variables()
        self.xLogits = self.model(self.xInputNode)

        trainDataUtil = ValueUtil(self.srcTrainPath, batch_size=BATCH_SIZE)
        testDataUtil = ValueUtil(self.srcTestPath, batch_size=BATCH_SIZE)

        msePlaceholder = tf.placeholder(tf.float32)
        mseTrainSummary = tf.summary.scalar("Mean Square Error (Training)", msePlaceholder)
        mseValidateSummary = tf.summary.scalar("Mean Square Error (Validating)", msePlaceholder)

        saver = tf.train.Saver(max_to_keep=20)
        print_frequency = 20
        test_frequency = 50
        save_frequency = 20000
        step = 0
        epoch_num = 0
        with tf.Session() as sess:
            init = tf.global_variables_initializer()
            sess.run(init)
            print("Initialized all variables!")
            trainWriter = tf.summary.FileWriter(FLAGS.summaries_dir2 + "/" + repr(nSteps) + "/train", sess.graph)
            validateWriter = tf.summary.FileWriter(FLAGS.summaries_dir2 + "/" + repr(nSteps) + "/validate", sess.graph)

            sl_model_dir = os.path.dirname(MODELS_DIR)
            while step < nSteps:
                nextEpoch = trainDataUtil.prepare_batch()
                if nextEpoch: epoch_num += 1
                inputs = trainDataUtil.batch_positions.astype(np.float32)
                labels = trainDataUtil.batch_labels.astype(np.float32)
                feed_dictionary = {self.batchInputNode: inputs, self.batchLabelNode: labels}
                _, run_error = sess.run([opt, MSE], feed_dict=feed_dictionary)

                if step % print_frequency == 0:
                    print("epoch: ", epoch_num, "step:", step, "MSE:", run_error)
                    summary = sess.run(mseTrainSummary, feed_dict={msePlaceholder: run_error})
                    trainWriter.add_summary(summary, step)
                if step % test_frequency == 0:
                    hasOneEpoch = False
                    sum_run_error = 0.0
                    ite = 0
                    while not hasOneEpoch:
                        hasOneEpoch = testDataUtil.prepare_batch()
                        x_input = testDataUtil.batch_positions.astype(np.float32)
                        feed_d = {self.batchInputNode: x_input, self.batchLabelNode:testDataUtil.batch_labels}
                        run_error = sess.run(MSE, feed_dict=feed_d)
                        sum_run_error += run_error
                        ite += 1
                    run_error = sum_run_error / ite
                    print("Validation MSE", run_error)
                    summary = sess.run(mseValidateSummary, feed_dict={msePlaceholder: run_error})
                    validateWriter.add_summary(summary, step)
                if step > 40000 and step % save_frequency == 0:
                    saver.save(sess, os.path.join(sl_model_dir, VALUE_NET_MODEL_NAME), global_step=step)

                step += 1

            sess.run(self.xLogits, feed_dict={self.xInputNode: fake_input})
            print("saving value net computation graph for c++ inference")
            tf.train.write_graph(sess.graph_def, sl_model_dir, "valuegraph.pbtxt")
            tf.train.write_graph(sess.graph_def, sl_model_dir, "valuegraph.pb", as_text=False)
            saver.save(sess, os.path.join(sl_model_dir, VALUE_NET_MODEL_NAME), global_step=step)
            testDataUtil.close_file()
            print("On test data...")
            valueTestResFile=open("value_test_result.txt", "w")
            testDataUtil = ValueUtil(self.srcTestPathFinal, batch_size=BATCH_SIZE)
            hasOneEpoch = False
            sum_run_error = 0.0
            ite = 0
            while not hasOneEpoch:
                hasOneEpoch = testDataUtil.prepare_batch()
                x_input = testDataUtil.batch_positions.astype(np.float32)
                feed_d = {self.batchInputNode: x_input, self.batchLabelNode:testDataUtil.batch_labels}
                run_error = sess.run(MSE, feed_dict=feed_d)
                sum_run_error += run_error
                ite += 1
            print("Testing MSE is:", sum_run_error / ite)
            valueTestResFile.write("Overall Testing MSE is "+repr(sum_run_error/ite))
            valueTestResFile.close()


        trainDataUtil.close_file()
        testDataUtil.close_file()
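A minimal driver for the value net, for orientation only: the data paths and step count below are placeholders, and the real repository presumably supplies them through its command-line FLAGS.

if __name__ == "__main__":
    # hypothetical data paths; substitute the repository's actual files
    net = ValueNet2(srcTrainDataPath="data/train_states.txt",
                    srcTestDataPath="data/valid_states.txt",
                    srcTestPathFinal="data/test_states.txt")
    net.train(nSteps=100000)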