Example #1
def withGrowth():
    # Train a network with two single-node hidden layers and grow it during
    # training: once the loss has failed to improve often enough, a node is
    # added to the hidden layer with the largest reconstruction ("back") error.
    # Assumes module-level names: tf, Net, shuffle, INPUT_DIM, OUTPUT_DIM,
    # train_x, train_y, max_iterations, no_back_iterations, batch_size.
    global train_x, train_y, max_iterations, batch_size
    with tf.Session() as session:
        net = Net(session, INPUT_DIM, OUTPUT_DIM)
        net = net.add_hidden_layer(session,
                                   1,
                                   bactivate=True,
                                   non_liniarity=tf.nn.sigmoid)
        net = net.add_hidden_layer(session,
                                   1,
                                   bactivate=True,
                                   non_liniarity=tf.nn.sigmoid)
        last_loss = 1000000000.0
        loss_counts = 0
        for i in range(max_iterations - no_back_iterations):
            train_x, train_y = shuffle(train_x, train_y)
            loss = net.train(train_x, train_y, batch_size=batch_size)
            print(i, loss)
            if loss > last_loss:
                if loss_counts > 9:
                    # The loss has failed to improve more than 9 times since the
                    # last growth step: add a node to the hidden layer that
                    # reconstructs its input worst.
                    print("adding new nodes")
                    back_loss = net.get_reconstruction_error_per_hidden_layer(
                        train_x, train_y)
                    print("Back loss %s" % (back_loss,))
                    layer_with_greatest_back_loss = back_loss.index(
                        max(back_loss))
                    net = net.add_node_to_hidden_layer(
                        session, layer_with_greatest_back_loss)
                    print(net.hidden_nodes)
                    last_loss = 10000000000.0
                    loss_counts = 0
                else:
                    last_loss = loss
                    loss_counts += 1
            else:
                last_loss = loss

        # Finish with no_back_iterations passes without the backward
        # (reconstruction) activations.
        net.use_bactivate = False
        for j in range(no_back_iterations):
            i = j + max_iterations - no_back_iterations
            train_x, train_y = shuffle(train_x, train_y)
            loss = net.train(train_x, train_y, batch_size=batch_size)
            print(i, loss)

    print("final loss %s, %s" % (i, loss))
    print("nodes %s" % (net.hidden_nodes,))
Example #2
def noGrowth(layers, bactivate=False):
    # Baseline with a fixed architecture: one hidden layer per entry in
    # `layers`, each with that many nodes, trained for max_iterations.
    # Assumes the same module-level names as withGrowth above.
    global train_x, train_y, max_iterations, batch_size
    with tf.Session() as session:
        net = Net(session, INPUT_DIM, OUTPUT_DIM)

        for l in layers:
            net = net.add_hidden_layer(session, l, bactivate=bactivate)

        for i in range(max_iterations):
            train_x, train_y = shuffle(train_x, train_y)
            loss = net.train(train_x, train_y, batch_size=batch_size)
            print(i, loss)

    print("final loss %s, %s" % (i, loss))
    return loss
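Both functions read their data and hyperparameters from module-level globals and rely on the surrounding project's Net class. The sketch below shows one way that setup might look, assuming shuffle comes from scikit-learn and inventing a toy regression dataset; every name, value, and import here other than the two functions above is an assumption.

# Hypothetical module-level setup for withGrowth() / noGrowth(); the dataset,
# dimensions, and hyperparameter values are illustrative assumptions only.
import numpy as np
import tensorflow as tf              # both functions use the TF 1.x Session API
from sklearn.utils import shuffle    # assumed source of the shuffle() helper
# The Net class is expected to come from the surrounding project, e.g.:
# from net import Net

INPUT_DIM = 1
OUTPUT_DIM = 1

# Toy regression problem: y = sin(3x) plus a little Gaussian noise.
rng = np.random.RandomState(0)
train_x = rng.uniform(-1.0, 1.0, size=(256, INPUT_DIM))
train_y = np.sin(3.0 * train_x) + rng.normal(0.0, 0.05, size=(256, OUTPUT_DIM))

max_iterations = 200        # total training passes
no_back_iterations = 50     # final passes with use_bactivate = False
batch_size = 32

if __name__ == "__main__":
    baseline_loss = noGrowth([4, 4], bactivate=True)   # fixed two 4-node layers
    withGrowth()                                        # starts from two 1-node layers and grows
    print("baseline final loss %s" % (baseline_loss,))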