Example #1
import random

import numpy as np
import seaborn as sb
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix

# DenseLayer, NN, sigmoid, softmax, ce, accuracy, and the data arrays
# (features, batch, X_train, one_hot, Y_train, X_test, Y_test) come from
# the surrounding project.
def epoch(eta=0.04,
          penalty=0.4,
          epochs=200,
          mini_batch_size=100,
          t0=5,
          t1=50,
          create_conf=False):
    layer1 = DenseLayer(features, 100, sigmoid())
    #layer2 = DenseLayer(100, 50, sigmoid())
    #layer3 = DenseLayer(100, 50, sigmoid())
    layer4 = DenseLayer(100, 10, softmax())

    layers = [layer1, layer4]
    network = NN(layers)
    cost_array = np.zeros((epochs, 2))

    def learning_schedule(t):
        return 0.04  # constant rate; the decay schedule t0/(t+t1) is disabled

    for i in range(epochs):
        random.shuffle(batch)  # reshuffle the index array each epoch
        X_train_shuffle = X_train[batch]
        one_hot_shuffle = one_hot[batch]
        Y_train_shuffle = Y_train[batch]
        #eta = learning_schedule(i)
        network.SGD(ce, mini_batch_size, X_train_shuffle, one_hot_shuffle,
                    eta, penalty)
        Y_pred = np.argmax(network.feedforward(X_test), axis=1)
        Y_pred_train = np.argmax(network.feedforward(X_train_shuffle), axis=1)
        cost_array[i, 0] = accuracy()(Y_test.ravel(), Y_pred)
        cost_array[i, 1] = accuracy()(Y_train_shuffle.ravel(), Y_pred_train)
    print("accuracy on train data = %.3f" % cost_array[-1, 1])
    print("accuracy on test data = %.3f" % cost_array[-1, 0])
    if create_conf:
        # create the confusion matrix (rows = true label, columns = prediction,
        # matching the axis labels set below)
        numbers = np.arange(0, 10)
        conf_matrix = confusion_matrix(Y_test.ravel(), Y_pred, normalize="true")
        heatmap = sb.heatmap(conf_matrix,
                             cmap="viridis",
                             xticklabels=["%d" % i for i in numbers],
                             yticklabels=["%d" % i for i in numbers],
                             cbar_kws={'label': 'Accuracy'},
                             fmt=".2",
                             edgecolor="none",
                             annot=True)
        heatmap.set_xlabel("pred")
        heatmap.set_ylabel("true")

        heatmap.set_title(r"FFNN prediction accuracy with $\lambda$ = {:.1e} $\eta$ = {:.1e}"\
            .format(penalty, eta))
        fig = heatmap.get_figure()
        fig.savefig("../figures/MNIST_confusion_net.pdf",
                    bbox_inches='tight',
                    pad_inches=0.1,
                    dpi=1200)
        plt.show()
    return cost_array[-1]
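
# Minimal usage sketch (assumed, not from the source): train once and draw
# the confusion matrix; epoch() returns the final [test, train] accuracies.
final_acc = epoch(eta=0.04, penalty=0.4, epochs=200, create_conf=True)
print("final accuracy [test, train]:", final_acc)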
Example #2
    def transform_sample(self, z0, decoder, x, k):
        '''
        z0: [P, B, Z] -- P samples, batch size B, latent size Z
        '''

        # q(v0|x,z0)
        net2 = NN([self.x_size + self.z_size, 100, self.z_size * 2], tf.nn.elu)
        # r(v|x,z)
        net3 = NN([self.x_size + self.z_size, 100, self.z_size * 2], tf.nn.elu)

        # Sample v0 and calc log_qv0
        z0_reshaped = tf.reshape(z0,
                                 [k * self.batch_size, self.z_size])  #[PB,Z]
        x_tiled = tf.tile(x, [k, 1])  #[PB,X]
        xz = tf.concat([x_tiled, z0_reshaped], axis=1)  #[PB,X+Z]
        v0_mean, v0_logvar = split_mean_logvar(net2.feedforward(xz))  #[PB,Z]
        v0 = sample_Gaussian(v0_mean, v0_logvar, 1)  #[1,PB,Z]
        v0 = tf.reshape(v0, [k * self.batch_size, self.z_size])  #[PB,Z]
        log_qv0 = log_norm2(v0, v0_mean, v0_logvar)  #[PB]
        log_qv0 = tf.reshape(log_qv0, [k, self.batch_size])  #[P,B]
        v0 = tf.reshape(v0, [k, self.batch_size, self.z_size])  #[P,B,Z]

        self.n_transitions = 3  # number of leapfrog transitions

        # Transform [P,B,Z]
        zT, vT = self.leapfrogs(z0, v0, decoder, x, k)

        # Reverse model
        z_reshaped = tf.reshape(zT,
                                [k * self.batch_size, self.z_size])  #[PB,Z]
        xz = tf.concat([x_tiled, z_reshaped], axis=1)  #[PB,X+Z]
        vt_mean, vt_logvar = split_mean_logvar(net3.feedforward(xz))  #[PB,Z]
        vT = tf.reshape(vT, [k * self.batch_size, self.z_size])  #[PB,Z]
        log_rv = log_norm2(vT, vt_mean, vt_logvar)  #[PB]
        log_rv = tf.reshape(log_rv, [k, self.batch_size])  #[P,B]

        return zT, log_qv0, log_rv
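
# The method above relies on helpers that are not shown here. Minimal
# sketches of what they plausibly do, inferred from the shape comments
# (TF1-style API; these are assumptions, not the source implementations):
import numpy as np
import tensorflow as tf

def split_mean_logvar(h):
    # h: [PB, 2Z] -> (mean, logvar), each [PB, Z]
    return tf.split(h, num_or_size_splits=2, axis=1)

def sample_Gaussian(mean, logvar, n):
    # reparameterization trick: mean + sigma * eps, eps ~ N(0, I) -> [n, PB, Z]
    eps = tf.random_normal(tf.concat([[n], tf.shape(mean)], axis=0))
    return mean + tf.exp(0.5 * logvar) * eps

def log_norm2(x, mean, logvar):
    # diagonal-Gaussian log-density, summed over the Z dimension -> [PB]
    c = tf.log(2. * np.pi)
    return -0.5 * tf.reduce_sum(logvar + c + tf.square(x - mean) / tf.exp(logvar), axis=1)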
Example #3
from NN import NN
import numpy as np

# Layer sizes: the first entry is the input layer, the last the output layer.
net = NN([4, 4, 2, 1])

inputList = []
targetList = []
with open('data/input.txt', 'r') as file:
    for entry in file.readlines():  # parse every entry
        print(entry)
        strings = entry.strip().split(",")
        values = list(map(int, strings))  # list() so it can be sliced in Python 3
        inputs, targets = values[:4], values[4]
        inputList.append(inputs)
        targetList.append(targets)

# zip() is a one-shot iterator in Python 3, so materialize the pairs as lists
training_data = list(zip([np.array(x) for x in inputList],
                         [np.array(y) for y in targetList]))
test_data = list(zip([np.array(x) for x in inputList],
                     [np.array(y) for y in targetList]))

net.SGD(training_data, 50, 1, 6.0, test_data=test_data)

print(net.feedforward(inputList[-1]))
print(net.feedforward(inputList[-2]))
print(net.feedforward(inputList[-3]))
print(net.feedforward(inputList[-4]))
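
# data/input.txt format implied by the parsing above: five comma-separated
# integers per line, four inputs followed by one target, e.g. (made-up rows):
#   0,1,1,0,1
#   1,0,0,1,0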
Example #4
    # training: fit the network on randomly drawn XOR input pairs
    for x in range(args.train_amount):
        inputs = [random.randint(0, 1), random.randint(0, 1)]
        goal_output = [inputs[0] ^ inputs[1]]
        xornn.backpropagate(inputs, goal_output)

    # testing
    if args.test:
        for i in range(2):
            for j in range(2):
                print(f'{i} ^ {j} == {xornn.feedforward([i, j])[0]}')

    # plotting
    if args.plot:
        density = 101
        space = np.linspace(0, 1, density)
        data = [[xornn.feedforward([i, j])[0] for j in space]
                for i in space]
        plt.imshow(data, cmap='gray', interpolation='nearest')
        plt.gca().invert_yaxis()
        plt.show()

    # save brain
    if args.save:
        with open('brain.json', 'w') as f:
            json.dump(xornn.serialize(), f)

    # interactive
    if args.interactive:
        print('type bools and get the NN\'s guess! Ctrl+C to exit')
        while True:
            in0 = input('Enter first bool: ').lower() == 'true'
            in1 = input('Enter second bool: ').lower() == 'true'
            print(f'{in0} ^ {in1} -> {xornn.feedforward([int(in0), int(in1)])[0]}')
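
# The args.* flags used above suggest an argparse setup along these lines
# (an assumption; the source does not show it):
#
#   parser = argparse.ArgumentParser(description='XOR network demo')
#   parser.add_argument('--train-amount', type=int, default=10000)
#   parser.add_argument('--test', action='store_true')
#   parser.add_argument('--plot', action='store_true')
#   parser.add_argument('--save', action='store_true')
#   parser.add_argument('--interactive', action='store_true')
#   args = parser.parse_args()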
Example #5
    def __init__(self, hyperparams):

        tf.reset_default_graph()

        #Model hyperparameters
        self.learning_rate = hyperparams['learning_rate']
        self.encoder_net = hyperparams['encoder_net']
        self.decoder_net = hyperparams['decoder_net']
        self.z_size = hyperparams['z_size']  #Z
        self.x_size = hyperparams['x_size']  #X
        self.rs = 0


        #Placeholders - Inputs/Targets
        self.x = tf.placeholder(tf.float32, [None, self.x_size])
        self.batch_size = tf.shape(self.x)[0]   #B        
        self.k = tf.placeholder(tf.int32, None)  #P

        encoder = NN(self.encoder_net, tf.nn.softplus)
        decoder = NN(self.decoder_net, tf.nn.softplus)
        

        #Objective
        logpx, logpz, logqz = self.log_probs(self.x, encoder, decoder) #[P,B]

        self.log_px = tf.reduce_mean(logpx)
        self.log_pz = tf.reduce_mean(logpz)
        self.log_qz = tf.reduce_mean(logqz)
        temp_elbo = logpx + logpz - logqz

        self.elbo = tf.reduce_mean(temp_elbo)

        max_ = tf.reduce_max(temp_elbo, axis=0)  #[B]
        # log-mean-exp over the P samples, stabilized by max_, then mean over the batch
        self.iwae_elbo = tf.reduce_mean(
            tf.log(tf.reduce_mean(tf.exp(temp_elbo - max_), axis=0)) + max_)

        # Minimize negative ELBO
        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate, 
                                                epsilon=1e-02).minimize(-self.elbo)


        # create_ais()

        self.z = tf.placeholder(tf.float32, [None, self.z_size], name='z')
        self.t = tf.placeholder(tf.float32, [], name='t')

        # AIS annealing path: interpolate between the prior and the joint
        self.energy_func = (1. - self.t) * logpz + self.t * (logpx + logpz)

        init_z, _, _ = self.sample_z(self.x, encoder, decoder)

        # Right, so I don't want to recompute q at every step of AIS, so I
        # need to store it somewhere.
        approx_posterior = Normal_distribution(
            *split_mean_logvar(encoder.feedforward(self.x)))
        prior_dist = Normal_distribution(
            tf.zeros([self.batch_size, self.z_size]),
            tf.zeros([self.batch_size, self.z_size]))
        # I don't think this will work; it'll want x every time I want to
        # compute q. ...

        # Finalize initialization
        self.init_vars = tf.global_variables_initializer()
        self.saver = tf.train.Saver()
        tf.get_default_graph().finalize()
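
# Minimal sketch of driving this constructor (the class name 'VAE' and all
# values are assumptions; the source only shows __init__):
#
#   hyperparams = {
#       'learning_rate': 1e-3,
#       'x_size': 784,                   # e.g. flattened MNIST
#       'z_size': 50,
#       'encoder_net': [784, 200, 100],  # last layer emits 2 * z_size (mean, logvar)
#       'decoder_net': [50, 200, 784],
#   }
#   model = VAE(hyperparams)
#   with tf.Session() as sess:
#       sess.run(model.init_vars)
#       _, elbo = sess.run([model.optimizer, model.elbo],
#                          feed_dict={model.x: batch_x, model.k: 10})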
Example #6
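# Setup assumed above this snippet (grids and bookkeeping are illustrative;
# the source defines them elsewhere):
eta = np.logspace(-4, -1, 4)        # learning-rate grid
penalty = np.logspace(-5, -1, 5)    # L2-penalty grid
cost_array = np.zeros((len(eta), len(penalty)))
cost_best = 0.0
ind = np.arange(X_train.shape[0])   # index array, reshuffled each epoch
m = X_train.shape[0]                # number of training samples
epochs, mini_batch_size = 100, 100  # illustrative values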
start = time.time()
# grid search over learning rates and L2 penalties
for i in range(len(eta)):
    for j in range(len(penalty)):
        output_layer = DenseLayer(features, 10, softmax())
        layers = [output_layer]
        log_net = NN(layers)
        for k in range(epochs):  # loop over epochs
            random.shuffle(ind)  # reshuffle the training set each epoch
            X_train = X_train[ind]
            one_hot = one_hot[ind]
            for l in range(0, m, mini_batch_size):
                log_net.backprop2layer(cost_func,
                                       X_train[l:l + mini_batch_size],
                                       one_hot[l:l + mini_batch_size], eta[i],
                                       penalty[j])
            Y_pred = np.argmax(log_net.feedforward(X_test), axis=1)

            cost_array[i][j] = accuracy()(Y_test, Y_pred)
            if cost_array[i][j] > cost_best:  # keep the best configuration seen so far
                print(cost_array[i][j])
                penalty_best = penalty[j]
                eta_best = eta[i]
                cost_best = cost_array[i][j]
                Y_pred_best = Y_pred
                Y_test_best = Y_test
print("FFNN time {:.3}s".format(time.time() - start))
#np.save("accuracy_FFNN_logreg.npy",cost_array)

# accuracy heatmap over the (eta, penalty) grid, to compare with the FFNN

heatmap = sb.heatmap(cost_array.T,
                     cmap="viridis",
                     annot=True)  # call truncated in the source; remaining styling assumed
Example #7
    # training
    calcnn.train(inputs, goal_ouputs, args.train_amount)

    # testing
    inputs = list(map(lambda x: x.mapped_inputs(), testdata))
    goal_ouputs = list(map(lambda x: x.mapped_result(), testdata))
    acc = calcnn.test(inputs, goal_ouputs, 1000)

    print(f'Accuracy: {acc*100}%')

    # testing with output to file
    if args.gen_file:
        guesses = []
        for data in testdata:
            inputs = data.mapped_inputs()
            goal_output = data.mapped_result()[0]
            output = calcnn.feedforward(inputs)[0]

            guesses.append(f'{data.tostring()} | {data.unmap_output(output)}')

        with open('outs.txt', mode='w') as f:
            f.write('\n'.join(guesses))

    # save brain
    if args.save:
        with open('brain.json', 'w') as f:
            json.dump(calcnn.serialize(), f)

    # interactive
    if args.interactive:
        print(
            'type two numbers and an operand and get the NN\'s guess! Ctrl+C to exit')