예제 #1
0
    def test_feed_forward_3_fully_connected(self):
        """Feed three data columns through a 3-layer linear network and
        check every layer's activations against hand-computed values.

        Weights/biases are injected directly onto the network (net.w /
        net.bias) so the test is deterministic. All units are Linear, so
        each layer computes a = W . a_prev + b exactly.
        """
        w = dict()
        bias = dict()
        # Layer 1: 3 inputs -> 2 outputs, zero bias.
        w[1] = np.array([[1, 2, 3], [40, 50, 60]])
        bias[1] = np.zeros((2, 1))
        layer_1 = layers.FullyConnectedLayer(2,
                                             3,
                                             list(),
                                             unit=neuron.Linear())
        # Layer 2: a fixed permutation matrix — swaps the two activations.
        w[2] = np.array([[0, 1], [1, 0]])
        bias[2] = np.zeros((2, 1))
        layer_2 = layers.FullyConnectedLayer(2,
                                             2,
                                             list(),
                                             unit=neuron.Linear())
        # Layer 3: 2 inputs -> 3 outputs, bias of all ones.
        w[3] = np.array([[1, 1], [2, 2], [3, 3]])
        bias[3] = np.ones((3, 1))
        layer_3 = layers.FullyConnectedLayer(3,
                                             2,
                                             list(),
                                             unit=neuron.Linear())
        # Index 0 is a placeholder so layer numbering starts at 1.
        net = nn.NN([None, layer_1, layer_2, layer_3])

        net.w = w
        net.bias = bias
        a = nn.feedforward(self.features, net)

        # Parenthesized prints are valid in both Python 2 and Python 3
        # (the Python-2-only `print x` form breaks under Python 3).
        print("a0")
        print(a[0])
        print(a[0][:, 0])
        print("a1")
        print(a[1])
        print("a2")
        print(a[2])
        print("a3")
        print(a[3])
        # layer 1, data 1
        self.assertTrue((a[1][:, 0] == np.array([[14, 320]])).all())
        # layer 1, data 2
        self.assertTrue((a[1][:, 1] == np.array([[32, 770]])).all())
        # layer 1, data 3
        self.assertTrue((a[1][:, 2] == np.array([[50, 1220]])).all())

        # layer 2, data 1 (layer-2 weights swap the layer-1 outputs)
        self.assertTrue((a[2][:, 0] == np.array([[320, 14]])).all())
        # layer 2, data 2
        self.assertTrue((a[2][:, 1] == np.array([[770, 32]])).all())
        # layer 2, data 3
        self.assertTrue((a[2][:, 2] == np.array([[1220, 50]])).all())

        # layer 3, data 1
        self.assertTrue((a[3][:, 0] == np.array([[335, 669, 1003]])).all())
        # layer 3, data 2
        self.assertTrue((a[3][:, 1] == np.array([[803, 1605, 2407]])).all())
        # layer 3, data 3
        self.assertTrue(
            (a[3][:, 2] == np.array([[1271, 1270 * 2 + 1,
                                      1270 * 3 + 1]])).all())
예제 #2
0
def fcl01():
    """Build a 28x28-input MLP with one 100-unit sigmoid hidden layer and
    a 10-unit sigmoid output, trained with quadratic cost.

    Returns a (network, optimizer, num_epochs, batch_size) tuple.
    """
    hidden = l.FullyConnectedLayer(
        100, init_func=f.glorot_uniform, act_func=f.sigmoid)
    output = l.FullyConnectedLayer(
        10, init_func=f.glorot_uniform, act_func=f.sigmoid)
    network = n.NeuralNetwork(
        [l.InputLayer(height=28, width=28), hidden, output], f.quadratic)
    return network, o.SGD(3.0), 1, 100
예제 #3
0
def fcl02():
    """Build a 28x28-input softmax classifier (single fully connected
    layer) trained with categorical cross-entropy.

    Returns a (network, optimizer, num_epochs, batch_size) tuple.
    """
    classifier = l.FullyConnectedLayer(
        10, init_func=f.glorot_uniform, act_func=f.softmax)
    network = n.NeuralNetwork(
        [l.InputLayer(height=28, width=28), classifier],
        f.categorical_crossentropy)
    return network, o.SGD(0.1), 1, 10
예제 #4
0
def main():
    """Train a conv + fully-connected network on MNIST and plot history.

    Side effects only: loads MNIST, shuffles, trains, tests, and plots.
    NOTE(review): the history variables passed to plot.plot_history at the
    end are not defined in this function — presumably module-level globals
    updated by train(); verify before refactoring.
    """
    # Layer 0 is a placeholder (None) so layer indices start at 1.
    nn = NN([
        None,
        layers.ConvolutionalLayer(10, 10, 28, 28, 5, list()),
        layers.FullyConnectedLayer(10, 19 * 19, list(), unit=neuron.Logistic())
    ])
    #nn = NN([layers.FullyConnectedLayer(784,784,list()), layers.ConvolutionalLayer(10,10,28,28,5,list()), layers.FullyConnectedLayer(10, 19*19,list(),unit=neuron.Logistic())])
    #nn = NN([layers.FullyConnectedLayer(784, 784, list()), layers.FullyConnectedLayer(28, 784, list()), layers.FullyConnectedLayer(10, 28, list(), unit=neuron.Logistic())])
    # read in data
    """
    raw_data = pd.read_csv(
        "/Users/delbalso/projects/nn1/data/handwriting.csv",
        sep=",",
        header=None)
    raw_data = raw_data.reindex(np.random.permutation(raw_data.index))
    data = np.array(raw_data.transpose())
    num_labels = 10
    num_test_data = int(data.shape[1] * 0.2)
    features = data[:-num_labels, :]  # num_features x num_examples
    labels = data[-1 * num_labels:, :]  # num_labels x num_examples
    weights = train(
        labels[
            :,
            :-
            1 *
            num_test_data],
        features[
            :,
            :-
            num_test_data],
        nn)
    test(labels[:, -num_test_data:], features[:, -num_test_data:], weights, nn)
    """
    # Load MNIST; each split is a list of (features, label) pairs.
    training_data, validation_data, test_data = mnist.load_data_wrapper_1()
    random.shuffle(training_data)
    # Only the first 500 shuffled examples are used for training.
    training_features, training_labels = zip(*training_data[:500])
    # squeeze().transpose() converts the tuples of column vectors into
    # feature-major matrices (features x examples).
    training_data = MLDataSet(
        np.squeeze(training_features).transpose(),
        np.squeeze(training_labels).transpose())
    validation_features, validation_labels = zip(*validation_data[:])
    validation_data = MLDataSet(
        np.squeeze(validation_features).transpose(),
        np.squeeze(validation_labels).transpose())
    test_features, test_labels = zip(*test_data)
    test_data = MLDataSet(
        np.squeeze(test_features).transpose(),
        np.squeeze(test_labels).transpose())

    #hyperparam_search(1, 10, 1, 100, nn, training_data, validation_data)
    # Train for 30 epochs with mini-batch size 10.
    train(training_data, nn, 30, 10, validation_data=validation_data)

    test(test_data, nn)
    plot.plot_history(training_accuracy_history, validation_accuracy_history,
                      training_cost_history, validation_cost_history)
예제 #5
0
def cnn02():
    """Build a small CNN for 28x28 input: one 2-filter 5x5 conv layer
    (sigmoid), 3x3 max pooling, then a 10-way softmax head, trained with
    categorical cross-entropy.

    Returns a (network, optimizer, num_epochs, batch_size) tuple.
    """
    layer_stack = [
        l.InputLayer(height=28, width=28),
        l.ConvolutionalLayer(
            2, kernel_size=5, init_func=f.glorot_uniform, act_func=f.sigmoid),
        l.MaxPoolingLayer(pool_size=3),
        l.FullyConnectedLayer(
            height=10, init_func=f.glorot_uniform, act_func=f.softmax),
    ]
    network = n.NeuralNetwork(layer_stack, f.categorical_crossentropy)
    return network, o.SGD(0.1), 2, 8
예제 #6
0
def cnn01():
    """Build a small CNN for 28x28 input: one 2-filter 5x5 conv layer
    (sigmoid), 2x2 max pooling, then a 10-way softmax head, trained with
    log-likelihood cost.

    Returns a (network, optimizer, num_epochs, batch_size) tuple.
    """
    layer_stack = [
        l.InputLayer(height=28, width=28),
        l.ConvolutionalLayer(
            2, kernel_size=5, init_func=f.glorot_uniform, act_func=f.sigmoid),
        l.MaxPoolingLayer(pool_size=2),
        l.FullyConnectedLayer(
            height=10, init_func=f.glorot_uniform, act_func=f.softmax),
    ]
    network = n.NeuralNetwork(layer_stack, f.log_likelihood)
    return network, o.SGD(0.1), 3, 10
예제 #7
0
    def test_forward_pass_fully_connected(self):
        """Check FullyConnectedLayer.forward_pass on two hand-computed
        input columns (w . a_prev + bias with the layer's default unit).
        """
        layer = layers.FullyConnectedLayer(2, 6, list())
        # 2x6 weight matrix: one row per output unit.
        w = np.array([[0, 4, 1, 2, 3, 4], [1, 2, 3, 4, 5, 6]])

        bias = np.array([[1], [0]])
        # Two input columns, 6 features each.
        a_prev = np.zeros((6, 2))
        a_prev[:, 0] = [1, 2, 4, 5, 7, 8]
        a_prev[:, 1] = [10, 2, 0, 5, 70, 8]
        forward_pass = layer.forward_pass(w, bias, a_prev)
        # Parenthesized prints are valid in both Python 2 and Python 3
        # (the Python-2-only `print x` form breaks under Python 3).
        print("forward")
        print(forward_pass[:, 0])
        self.assertTrue((forward_pass[:, 0] == np.array([76, 120])).all())

        print(forward_pass[:, 1])
        self.assertTrue((forward_pass[:, 1] == np.array([261, 432])).all())
예제 #8
0
def cnn(weights):
    """Build the conv/pool/softmax network and load pretrained weights.

    Args:
        weights: mapping whose "w" entry holds per-layer (weight, bias)
            pairs — index 0 for the conv layer, index 1 for the fully
            connected layer.

    Returns:
        The assembled NeuralNetwork with both layers' w/b overwritten.
    """
    conv_layer = l.ConvolutionalLayer(
        2, kernel_size=5, init_func=f.zero, act_func=f.sigmoid)
    dense_layer = l.FullyConnectedLayer(
        height=10, init_func=f.zero, act_func=f.softmax)

    network = n.NeuralNetwork([
        l.InputLayer(height=28, width=28),
        conv_layer,
        l.MaxPoolingLayer(pool_size=3),
        dense_layer,
    ], f.categorical_crossentropy)

    # Overwrite the zero-initialized parameters with the supplied ones.
    # Biases are stored flat in `weights`, so add a trailing axis; the
    # dense weights are stored transposed relative to this layer.
    conv_layer.w = weights["w"][0][0]
    conv_layer.b = np.expand_dims(weights["w"][0][1], 1)
    dense_layer.w = np.swapaxes(weights["w"][1][0], 0, 1)
    dense_layer.b = np.expand_dims(weights["w"][1][1], 1)

    return network