Example #1
    # Linearize II
    nn = FeedForward([
        LinearizeLayer(32, 32, 3),
        FullyConnected(32 * 32 * 3, 300, identity),
        Tanh(),
        FullyConnected(300, 200, identity),
        Tanh(),
        FullyConnected(200, 10, identity),
        SoftMax()
    ])

    # Convolutional I (this assignment replaces the Linearize II network above)
    nn = FeedForward([
        ConvolutionalLayer(3, 32, 32, 6, 5, 1),
        MaxPoolingLayer(2),
        ReluLayer(),
        ConvolutionalLayer(6, 14, 14, 16, 5, 1),
        MaxPoolingLayer(2),
        ReluLayer(),
        LinearizeLayer(16, 5, 5),
        FullyConnected(400, 300, relu),
        FullyConnected(300, 10, relu),
        SoftMax()
    ])

    # Convolutional II
    # nn = FeedForward([
    #     ConvolutionalLayer(3, 32, 32, 6, 5, 1),
    #     ReluLayer(),
    #     ConvolutionalLayer(6, 28, 28, 16, 5, 1),
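The layer dimensions in Convolutional I are not arbitrary: each spatial size follows the standard convolution/pooling output formula, and the 400 passed to the first FullyConnected is just the flattened 16x5x5 volume. A minimal sketch of that arithmetic (the helpers conv_out and pool_out are illustrative, not part of this library):

    def conv_out(size, kernel, stride=1, padding=0):
        # standard formula: floor((size - kernel + 2*padding) / stride) + 1
        return (size - kernel + 2 * padding) // stride + 1

    def pool_out(size, window):
        # non-overlapping pooling: stride equals the window size
        return (size - window) // window + 1

    side = conv_out(32, 5)    # 28 <- ConvolutionalLayer(3, 32, 32, 6, 5, 1)
    side = pool_out(side, 2)  # 14 <- MaxPoolingLayer(2)
    side = conv_out(side, 5)  # 10 <- ConvolutionalLayer(6, 14, 14, 16, 5, 1)
    side = pool_out(side, 2)  # 5  <- MaxPoolingLayer(2)
    print(16 * side * side)   # 400 -> FullyConnected(400, 300, relu)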
Example #2
 def __init__(self):
     # Lenet
     # input: 28x28
      # conv1: (5x5x6)@s1p2 -> 28x28x6 {(28-5+2*2)/1+1}
     # maxpool2: (2x2)@s2 -> 14x14x6 {(28-2)/2+1}
     # conv3: (5x5x16)@s1p0 -> 10x10x16 {(14-5)/1+1}
     # maxpool4: (2x2)@s2 -> 5x5x16 {(10-2)/2+1}
     # conv5: (5x5x120)@s1p0 -> 1x1x120 {(5-5)/1+1}
     # fc6: 120 -> 84
     # fc7: 84 -> 10
     # softmax: 10 -> 10
     lr = 0.01
     self.layers = []
     self.layers.append(
         ConvolutionLayer(inputs_channel=1,
                          num_filters=6,
                          width=5,
                          height=5,
                          padding=2,
                          stride=1,
                          learning_rate=lr,
                          name='conv1'))
     self.layers.append(ReLu())
     self.layers.append(
         MaxPoolingLayer(width=2, height=2, stride=2, name='maxpool2'))
     self.layers.append(
         ConvolutionLayer(inputs_channel=6,
                          num_filters=16,
                          width=5,
                          height=5,
                          padding=0,
                          stride=1,
                          learning_rate=lr,
                          name='conv3'))
     self.layers.append(ReLu())
     self.layers.append(
         MaxPoolingLayer(width=2, height=2, stride=2, name='maxpool4'))
     self.layers.append(
         ConvolutionLayer(inputs_channel=16,
                          num_filters=120,
                          width=5,
                          height=5,
                          padding=0,
                          stride=1,
                          learning_rate=lr,
                          name='conv5'))
     self.layers.append(ReLu())
     self.layers.append(Flatten())
     self.layers.append(
         FullyConnectedLayer(num_inputs=120,
                             num_outputs=84,
                             learning_rate=lr,
                             name='fc6'))
     self.layers.append(ReLu())
     self.layers.append(
         FullyConnectedLayer(num_inputs=84,
                             num_outputs=10,
                             learning_rate=lr,
                             name='fc7'))
     self.layers.append(Softmax())
     self.lay_num = len(self.layers)
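The shape annotations in the comments above follow the usual output-size formula; they can be sanity-checked with a few lines of plain Python, independent of the layer classes:

    def out_size(size, kernel, stride, padding=0):
        # floor((size - kernel + 2*padding) / stride) + 1
        return (size - kernel + 2 * padding) // stride + 1

    assert out_size(28, 5, 1, 2) == 28  # conv1: 28x28x6
    assert out_size(28, 2, 2) == 14     # maxpool2: 14x14x6
    assert out_size(14, 5, 1) == 10     # conv3: 10x10x16
    assert out_size(10, 2, 2) == 5      # maxpool4: 5x5x16
    assert out_size(5, 5, 1) == 1       # conv5: 1x1x120, so fc6 sees 120 inputs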
Example #3
import unittest

import numpy as np

# MaxPoolingLayer is imported from the project under test (module path omitted in the original)


class TestMaxPoolingLayer(unittest.TestCase):

    def setUp(self):
        self.layer = MaxPoolingLayer((1, 2))

    def test_forward_prop(self):
        self.layer.set_input_shape((4, 4))

        input = np.array([[-1, -2, 3, 4],
                          [5, 6, -7, -8],
                          [9, -10, 11, -12],
                          [-13, 14, -15, 16]], dtype=np.float64)
        expected_output = np.array([[-1, 4],
                                    [6, -7],
                                    [9, 11],
                                    [14, 16]], dtype=np.float64)

        output = self.layer.forward_prop(input)
        np.testing.assert_array_equal(output, expected_output)

    def test_back_prop(self):
        self.layer.set_input_shape((4, 16))

        input = np.array([[1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2],
                          [3, 4, -3, -4, 3, 4, -3, -4, 3, 4, -3, -4, 3, 4, -3, -4],
                          [-3, -4, 3, 4, -3, -4, 3, 4, -3, -4, 3, 4, -3, -4, 3, 4],
                          [1, 2, -1, -2, 1, 2, -1, -2, 1, 2, -1, -2, 1, 2, -1, -2]],
                         dtype=np.float64)

        self.layer.forward_prop(input)

        out_grad = np.array([[1, 1, 1, 1, 1, 1, 1, 1],
                             [1, 1, 1, 1, 1, 1, 1, 1],
                             [1, 1, 1, 1, 1, 1, 1, 1],
                             [1, 1, 1, 1, 1, 1, 1, 1]])
        expected_in_grad = np.array([[0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
                                     [0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0],
                                     [1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1],
                                     [0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0, 0, 1, 1, 0]],
                                    dtype=np.float64)

        in_grad = self.layer.back_prop(out_grad)
        np.testing.assert_array_equal(in_grad, expected_in_grad)

    def test_get_output_shape(self):
        self.layer.set_input_shape((280, 72))

        self.assertEqual(self.layer.get_output_shape(), (280, 36))
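For reference, the forward pass these tests encode is ordinary non-overlapping max pooling. A minimal NumPy sketch that reproduces the expected output of test_forward_prop, assuming the window tiles the input exactly (max_pool here is a stand-in, not the class under test):

    import numpy as np

    def max_pool(x, window):
        # split x into non-overlapping (wh, ww) tiles, take each tile's max
        wh, ww = window
        h, w = x.shape
        return x.reshape(h // wh, wh, w // ww, ww).max(axis=(1, 3))

    x = np.array([[-1, -2, 3, 4],
                  [5, 6, -7, -8],
                  [9, -10, 11, -12],
                  [-13, 14, -15, 16]], dtype=np.float64)
    print(max_pool(x, (1, 2)))  # [[-1. 4.] [6. -7.] [9. 11.] [14. 16.]]

The back-prop test then checks the complementary rule: the incoming gradient is routed only to each window's argmax, which is why expected_in_grad contains exactly one 1 per pooling window.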