def withGrowth(): global train_x, train_y, max_iterations, batch_size with tf.Session() as session: net = Net(session, INPUT_DIM, OUTPUT_DIM) net = net.add_hidden_layer(session, 1, bactivate=True, non_liniarity=tf.nn.sigmoid) net = net.add_hidden_layer(session, 1, bactivate=True, non_liniarity=tf.nn.sigmoid) last_loss = 1000000000.0 loss_counts = 0 for i in range(max_iterations - no_back_iterations): train_x, train_y = shuffle(train_x, train_y) loss = net.train(train_x, train_y, batch_size=batch_size) print(i, loss) if loss > last_loss: if loss_counts > 9: print "adding new nodes" back_loss = net.get_reconstruction_error_per_hidden_layer( train_x, train_y) print "Back loss %s" % (back_loss, ) layer_with_greatest_back_loss = back_loss.index( max(back_loss)) net = net.add_node_to_hidden_layer( session, layer_with_greatest_back_loss) print net.hidden_nodes last_loss = 10000000000.0 loss_counts = 0 else: last_loss = loss loss_counts += 1 else: last_loss = loss net.use_bactivate = False for j in range(no_back_iterations): i = j + max_iterations - no_back_iterations train_x, train_y = shuffle(train_x, train_y) loss = net.train(train_x, train_y, batch_size=batch_size) print(i, loss) print "final loss %s, %s" % (i, loss) print "nodes %s" % (net.hidden_nodes, )
def withGrowth(): global train_x, train_y, max_iterations, batch_size with tf.Session() as session: net = Net(session, INPUT_DIM, OUTPUT_DIM) net = net.add_hidden_layer(session, 1, bactivate=True, non_liniarity=tf.nn.sigmoid) net = net.add_hidden_layer(session, 1, bactivate=True, non_liniarity=tf.nn.sigmoid) last_loss = 1000000000.0 loss_counts = 0 for i in range(max_iterations - no_back_iterations): train_x, train_y = shuffle(train_x, train_y) loss = net.train(train_x, train_y, batch_size=batch_size) print(i, loss) if loss > last_loss: if loss_counts > 9: print "adding new nodes" back_loss = net.get_reconstruction_error_per_hidden_layer(train_x, train_y) print "Back loss %s" % (back_loss,) layer_with_greatest_back_loss = back_loss.index(max(back_loss)) net = net.add_node_to_hidden_layer(session, layer_with_greatest_back_loss) print net.hidden_nodes last_loss = 10000000000.0 loss_counts = 0 else: last_loss = loss loss_counts += 1 else: last_loss = loss net.use_bactivate = False for j in range(no_back_iterations): i = j + max_iterations - no_back_iterations train_x, train_y = shuffle(train_x, train_y) loss = net.train(train_x, train_y, batch_size=batch_size) print(i, loss) print "final loss %s, %s" % (i, loss) print "nodes %s" % (net.hidden_nodes, )
def noGrowth(layers, bactivate=False): global train_x, train_y, max_iterations, batch_size with tf.Session() as session: net = Net(session, INPUT_DIM, OUTPUT_DIM) for l in layers: net = net.add_hidden_layer(session, l, bactivate=bactivate) for i in range(max_iterations): train_x, train_y = shuffle(train_x, train_y) loss = net.train(train_x, train_y, batch_size=batch_size) print(i, loss) print "final loss %s, %s" % (i, loss) return loss
import tensorflow as tf

from tensor_dynamic.data_functions import XOR_INPUTS, XOR_TARGETS
from tensor_dynamic.net import Net

# Data set and hyper-parameters, shared (as module globals) with the
# training helpers defined above.
train_x = XOR_INPUTS
train_y = XOR_TARGETS
max_iterations = 2000
batch_size = 1

# Train a fixed 2 -> 40 -> 10 -> 4 -> 1 network on the XOR data without
# any growth; loss is printed every iteration.
with tf.Session() as session:
    net = Net(session, 2, 1)
    net = net.add_hidden_layer(session, 40)
    net = net.add_hidden_layer(session, 10)
    net = net.add_hidden_layer(session, 4)
    for i in range(max_iterations):
        # Shuffling intentionally disabled for this run:
        # train_x, train_y = shuffle(train_x, train_y)
        loss = net.train(train_x, train_y, batch_size=batch_size)
        print(loss)