def run(self):
    '''
    Build the network described by ``self._graph``, train it for
    ``self._num_epochs`` epochs, and evaluate it on the test data.

    @rtype: tuple(float32, float32)
    @return:
        A tuple of: loss, accuracy. Per the test data.

    @raise ValueError:
        If the training data's input or output width does not match
        the network's input or output layer width.
    '''
    LOG.info("[%s] Doing network run", self._graph.name)

    # Only ask TF to log device placement when we are actually at DEBUG.
    # isEnabledFor() honours levels inherited from ancestor loggers (and
    # any level at or below DEBUG), unlike comparing the level name of
    # the root logger directly.
    debug = LOG.getLogger().isEnabledFor(LOG.DEBUG)

    with tf.Session(config=tf.ConfigProto(log_device_placement=debug)) as sess:
        # Create the network
        LOG.info("[%s] Creating the network", self._graph.name)
        net_maker = NetMaker(self._graph)
        layers    = net_maker.make_net()

        # Grab the inputs and outputs of the network
        in_tensor  = layers[ 0]
        out_tensor = layers[-1]

        # The number of inputs and outputs, per the training data.
        # Assumes x/y training arrays are 2D: (samples, features).
        num_in  = self._x_train.shape[1]
        num_out = self._y_train.shape[1]

        # Check x_* & y_* and net shapes match before we spend any time
        # training. Note: tensor dims are TF Dimension objects, so we
        # coerce to int for safe "%d" formatting.
        if num_in != in_tensor.shape[1]:
            raise ValueError(
                "Number of data inputs, %d, "
                "does not match network input count, %d, "
                "for graph %s" %
                (num_in, int(in_tensor.shape[1]), self._graph)
            )
        if num_out != out_tensor.shape[1]:
            raise ValueError(
                "Number of data outputs, %d, "
                "does not match network output count, %d, "
                "for graph %s" %
                (num_out, int(out_tensor.shape[1]), self._graph)
            )

        # Now make the training equipment: a placeholder for the ground
        # truth, the loss and accuracy metrics, and the optimizer which
        # minimises the loss.
        truth     = tf.placeholder(tf.float32, shape=[None, num_out])
        loss      = self._create_loss     (truth, out_tensor)
        accuracy  = self._create_accuracy (truth, out_tensor)
        optimizer = self._create_optimizer(loss)

        # Initialize all variables
        sess.run(tf.global_variables_initializer())

        # Train, one epoch at a time
        LOG.info("[%s] Training the network", self._graph.name)
        for epoch in range(1, self._num_epochs + 1):
            self._do_epoch(epoch, sess, in_tensor, truth, optimizer)

        # And give back the loss and accuracy, evaluated on the held-out
        # test data
        LOG.info("[%s] Evaluating the network", self._graph.name)
        result = sess.run((loss, accuracy),
                          feed_dict={ in_tensor : self._x_test,
                                      truth     : self._y_test })
        LOG.info("[%s] Result: loss=%0.3f accuracy=%0.2f%%",
                 self._graph.name, result[0], 100 * result[1])
        return result