Esempio n. 1
0
def test_CNN_2D_with_CNN_2D():
    """Run a forward and backward pass through two stacked Conv2D layers
    feeding an FC head, and check the final output shape."""
    dense = FC(10, 'sigmoid')
    first_conv = Conv2D(filter_size=3,
                        channels=2,
                        padding='same',
                        stride=1,
                        activation='sigmoid')
    second_conv = Conv2D(filter_size=5,
                         channels=4,
                         padding='same',
                         stride=2,
                         activation='sigmoid')

    inputs, labels = data_generator.load_data()
    # The data is single-channel: append the channel axis last.
    inputs = np.expand_dims(inputs, -1)

    out = first_conv.forward_prop(inputs)
    out = second_conv.forward_prop(out)

    # Flatten each sample before the fully-connected layer.
    out = np.reshape(out, (np.shape(out)[0], -1))
    out = dense.forward_prop(out)

    # Backward pass in reverse layer order; each layer hands its weights
    # and deltas to the one before it.
    w, delta = dense.back_prop(label=labels)
    w, delta = second_conv.back_prop(w_nextlayer=w,
                                     delta_nextlayer=delta,
                                     next_layer='FC')
    w, delta = first_conv.back_prop(w_nextlayer=w,
                                    delta_nextlayer=delta,
                                    next_layer='Conv2D')

    assert out.shape == (10, 10)
Esempio n. 2
0
    def test_conv_backprop(self):
        """For every stored case, check the Conv2D forward output and the
        gradients w.r.t. input, weight and bias against expected values."""
        rtol = 0.0001
        for case in self.conv_cases:
            weight = case['weight']
            out_channels, in_channels, kh, kw = weight.shape

            layer = Conv2D(in_channel=in_channels,
                           out_channel=out_channels,
                           kernel_size=(kh, kw),
                           stride=case['stride'],
                           padding=case['pad'])
            # Inject the fixed parameters so results are deterministic.
            layer.W = weight
            layer.b = case['bias']

            produced = layer(case['x'])
            dv_x, dv_W, dv_b = layer.backward(case['x'], case['grad_output'])

            self.assertTrue(np.allclose(case['out'], produced, rtol=rtol))

            self.assertTrue(np.allclose(case['grad_x'], dv_x, rtol=rtol))
            self.assertTrue(np.allclose(case['grad_w'], dv_W, rtol=rtol))
            self.assertTrue(np.allclose(case['grad_b'], dv_b, rtol=rtol))
Esempio n. 3
0
def test_put_zeros():
    """put_zeros with stride 2 should dilate a (2, 3, 3) input to (2, 5, 5)."""
    data = np.arange(18).reshape(2, 3, 3)
    layer = Conv2D(filter_size=3,
                   channels=2,
                   padding='same',
                   stride=2,
                   activation='sigmoid')
    data = layer.put_zeros(data, 2, del_last_ele=True)
    print(data)
    assert data.shape == (2, 5, 5)
    def test_conv(self):
        """Check the Conv2D forward output against the stored expectation
        for every case (relative tolerance 1e-3)."""
        for case in self.conv_cases:
            layer = Conv2D(kernel_size=case['weight'].shape,
                           stride=case['stride'],
                           padding=case['pad'])
            # Use the fixed parameters from the case so output is deterministic.
            layer.W = case['weight']
            layer.b = case['bias']

            produced = layer(case['x'])
            self.assertTrue(np.allclose(case['out'], produced, rtol=1e-3))
    def _gen_layers(self):
        """Build the network: initialize weights/biases into ``self.params``,
        assemble the ordered layer pipeline into ``self.layers``, and reset
        ``self.grads``.

        Reads ``self.x_train``, ``self.conv_param``, ``self.pool_param``,
        ``self.layer_nodes`` and ``self.weight_init_std``.

        x_train: ndarray of shape(n_samples, n_channels, height, width)
        """
        # For now, assume square images and use input_size = height.
        self.n_train_samples, n_channels, input_size, _ = self.x_train.shape
        n_filters = self.conv_param['n_filters']
        filter_size = self.conv_param['filter_size']
        filter_stride = self.conv_param['stride']
        filter_pad = self.conv_param['pad']
        pool_size = self.pool_param['pool_size']

        # Spatial size after conv, then the flattened feature count after
        # pooling. NOTE(review): int() truncates — presumably
        # conv_output_size is expected to divide evenly by pool_size; confirm.
        conv_output_size = get_output_size(input_size, filter_size,
                                           filter_stride, filter_pad)
        pool_output_size = int(n_filters *
                               np.power(conv_output_size / pool_size, 2))

        # Initialize parameters: small Gaussian weights, zero biases.
        self.params = {}
        self.params['W1'] = self.weight_init_std * np.random.randn(
            n_filters, n_channels, filter_size, filter_size)
        self.params['b1'] = np.zeros(n_filters)
        self.params['W2'] = self.weight_init_std * np.random.randn(
            pool_output_size, self.layer_nodes['hidden'])
        self.params['b2'] = np.zeros(self.layer_nodes['hidden'])
        self.params['W3'] = self.weight_init_std * np.random.randn(
            self.layer_nodes['hidden'], self.layer_nodes['output'])
        self.params['b3'] = np.zeros(self.layer_nodes['output'])

        # Assemble the forward pipeline; OrderedDict preserves layer order.
        self.layers = OrderedDict()
        self.layers['Conv1'] = Conv2D(self.params['W1'], self.params['b1'],
                                      filter_stride, filter_pad)
        self.layers['Relu1'] = Relu()
        self.layers['Pool1'] = MaxPooling2D(pool_h=pool_size,
                                            pool_w=pool_size,
                                            stride=pool_size)
        self.layers['Flatten1'] = Flatten()
        self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])
        self.layers['Relu2'] = Relu()
        self.layers['Affine2'] = Affine(self.params['W3'], self.params['b3'])
        self.layers['Last'] = Softmax()

        # Gradients are populated later (during backprop).
        self.grads = {}
Esempio n. 6
0
    def test_conv(self):
        """Check the Conv2D forward output against the stored expectation
        for every case (default allclose tolerances)."""
        for case in self.conv_cases:
            kernel = case['weight']
            n_out, n_in, kh, kw = kernel.shape

            layer = Conv2D(in_channel=n_in,
                           out_channel=n_out,
                           kernel_size=(kh, kw),
                           stride=case['stride'],
                           padding=case['pad'])
            # Fixed parameters make the forward pass deterministic.
            layer.W = kernel
            layer.b = case['bias']

            produced = layer(case['x'])

            self.assertTrue(np.allclose(case['out'], produced))
Esempio n. 7
0
    # 12 * 12 image
    input_shape = 144
    epoch = 10
    batchSize = 50
    channel = 1
    last_units = 2

    dataset = np.zeros((dataNum, channel, input_shape)) # 1 for channel
    label = np.zeros((dataNum, last_units))
    for i in range(last_units):
        # currently -1~0. 0~1 can be used due to some issue
        dataset[dataNum/last_units*i:dataNum/last_units*(i+1),:,:] = np.random.uniform(i-1, i, (dataNum/last_units,channel,input_shape))
        label[dataNum/last_units*i:dataNum/last_units*(i+1),i] = 1
    net = Network(
        [Conv2D(4, 3, input_shape=input_shape), # 1*12*12  -> 4*10*10
         ReLU(),
         MaxPooling2D(2), # 10*10*4 -> 9*9*4
         FullyConnect(units=last_units, input_shape=324),
         Softmax(),
     ],
    #FullyConnect(units=last_units)],
        # FullyConnect(units=last_units, input_shape=144)],
        learning_rate = learning_rate,
        optimizer=Momentum(0.9),
        batch=batchSize,
        dtype=np.float32
    )

    err_prev = 0
    for e in range(epoch):
Esempio n. 8
0
import platform
import numpy as np
import sys

# Raw strings for Windows paths: plain literals here only worked because
# '\g', '\P' and '\s' happen not to be escape sequences (they still raise
# DeprecationWarning); a path segment starting with e.g. 't' or 'n' would
# silently corrupt. r'...' keeps the exact same values safely.
sys.path.append(r'C:\github_projects\PythonPractice\simple_CNN')
sys.path.append(r'C:\GithubProject\PythonPractice\simple_CNN')

from layer import Conv2D, FC, Activations
from datagen import DataGenerator

# Module-level fixtures shared by the tests below.
conv = Conv2D(3, 2, 'same', 1, 'sigmoid')

if platform.system() == 'Windows':
    folder = 'C:/data/train_data'
    test_folder = 'C:/data/test_data'
elif platform.system() == 'Linux':
    folder = '/home/shaoheng/Documents/PythonPractice/handwritedigit'
# NOTE(review): on any other OS `folder` stays undefined and the next line
# raises NameError — confirm Windows/Linux are the only intended platforms.

data_generator = DataGenerator(folder, 10, (16, 16), class_num=10)


def test_CNN_2D_with_FC():
    fc_layer = FC(10, 'sigmoid')
    conv = Conv2D(filter_size=3,
                  channels=2,
                  padding='same',
                  stride=1,
                  activation='sigmoid')