Code Example #1
    def __init__(self, input_dim=(1, 28, 28),
                conv_param={'filter_num':30, 'filter_size':5, 'pad':0, 'stride':1},
                hidden_size=100, output_size=10, weight_init_std=0.01):
        
        filter_num = conv_param['filter_num']
        filter_size = conv_param['filter_size']
        filter_pad = conv_param['pad']
        filter_stride = conv_param['stride']
        input_size = input_dim[1]
        conv_output_size = (input_size - filter_size + 2*filter_pad) / filter_stride + 1
        pool_output_size = int(filter_num * (conv_output_size/2) * (conv_output_size/2))

        self.params = {}
        self.params['W1'] = weight_init_std * np.random.randn(filter_num, input_dim[0], filter_size, filter_size)
        self.params['b1'] = np.zeros(filter_num)
        self.params['W2'] = weight_init_std * np.random.randn(pool_output_size, hidden_size)
        self.params['b2'] = np.zeros(hidden_size)
        self.params['W3'] = weight_init_std * np.random.randn(hidden_size, output_size)
        self.params['b3'] = np.zeros(output_size)

        self.layers = OrderedDict()
        self.layers['Conv1'] = Convolution(self.params['W1'],
                                            self.params['b1'],
                                            conv_param['stride'],
                                            conv_param['pad'])
        self.layers['Relu1'] = Relu()
        self.layers['Pool1'] = Pooling(pool_h=2, pool_w=2, stride=2)
        self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])
        self.layers['Relu2'] = Relu()
        self.layers['Affine2'] = Affine(self.params['W3'], self.params['b3'])
        self.last_layer = SoftmaxWithLoss()
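For the default arguments above (a 28x28 single-channel input, 5x5 filters, no padding, stride 1), the two size expressions work out as follows. This is only a quick check of the arithmetic, not part of the class:

input_size, filter_size, filter_pad, filter_stride, filter_num = 28, 5, 0, 1, 30
conv_output_size = (input_size - filter_size + 2*filter_pad) / filter_stride + 1    # 24.0
pool_output_size = int(filter_num * (conv_output_size/2) * (conv_output_size/2))    # 30 * 12 * 12 = 4320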
Code Example #2
File: cnn_simple.py  Project: jkatsuta/zero_to_nn
    def __init__(self, dim_in=(1, 28, 28),
                 par={'num_filter': 30, 'size_filter': 5, 'pad': 0, 'stride': 1},
                 s_hidden=100, s_out=10, std_w_init=0.01):
        n_f = par['num_filter']
        s_f = par['size_filter']
        pad = par['pad']
        stride = par['stride']
        size_in = dim_in[1]

        size_out_conv = int((size_in + 2 * pad - s_f) / stride) + 1
        size_out_pool = int(n_f * (size_out_conv / 2) ** 2)

        self.params = {}
        self.params['W1'] =\
            std_w_init * np.random.randn(n_f, dim_in[0], s_f, s_f)
        self.params['b1'] = np.zeros(n_f)
        self.params['W2'] = std_w_init * np.random.randn(size_out_pool, s_hidden)
        self.params['b2'] = np.zeros(s_hidden)
        self.params['W3'] = std_w_init * np.random.randn(s_hidden, s_out)
        self.params['b3'] = np.zeros(s_out)

        self.layers = OrderedDict()
        self.layers['Conv'] = Convolution(self.params['W1'], self.params['b1'],
                                          stride, pad)
        self.layers['Relu1'] = Relu()
        self.layers['Pool'] = Pooling(2, 2, 2)
        self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])
        self.layers['Relu'] = Relu()
        self.layers['Affine2'] = Affine(self.params['W3'], self.params['b3'])
        self.last_layer = SoftmaxWithLoss()
Code Example #3
    def __init__(self, input_dim = (1, 28, 28),
                 conv_params = {'filter_num':30,'filter_size': 5, 'pad': 0, 'stride':1},
                 hidden_size = 100, output_size = 10, weight_init_std = 0.01):
       """ 인스턴스 초기화 (변수들의 초기값을 줌) - CNN 구성, 변수들 초기화
        input_dim: 입력 데이터 차원, MINIST인 경우(1, 28, 28)
        conv_param: Convolution 레이어의 파라미터(filter, bias)를 생성하기 위해 필요한 값들
            필터 개수 (filter_num),
            필터 크기(filter_size = filter_height = filter_width),
            패딩 개수(pad),
            보폭(stride)
        hidden_size: Affine 계층에서 사용할 뉴런의 개수 -> W 행렬의 크기
        output_size: 출력값의 원소의 개수. MNIST인 경우 10
        weight_init_std: 가중치(weight) 행렬을 난수로 초기화 할 때 사용할 표준편차 
        """
       filter_num = conv_params['filter_num']
       filter_size = conv_params['filter_size']
       filter_pad = conv_params['pad']
       filter_stride = conv_params['stride']
       input_size = input_dim[1]
       conv_output_size = (input_size - filter_size + 2 * filter_pad) / \
                          filter_stride + 1
       pool_output_size = int(filter_num * (conv_output_size / 2) * (conv_output_size / 2))


       # Parameters needed by the CNN layers
       self.params = dict()
       self.params['W1'] = weight_init_std * np.random.randn(filter_num, input_dim[0], filter_size, filter_size)
       self.params['b1'] = np.zeros(filter_num)
       self.params['W2'] = weight_init_std * np.random.randn(pool_output_size, hidden_size)
       self.params['b2'] = np.zeros(hidden_size)
       self.params['W3'] = weight_init_std * np.random.randn(hidden_size, output_size)
       self.params['b3'] = np.zeros(output_size)


       # Create and connect the CNN layers
       self.layers = OrderedDict()

        # Option 1: take __init__(self, W, b) and assign self.W = W, self.b = b.
        # self.W = W  # Even if the filters are generated randomly, their size must be known before they can be created.
        # self.b = b  # The bias has one element per filter, so its size must be known as well => dimensions are decided here.

        # Option 2
        # input_dim = (1, 28, 28): a class set up for MNIST.
        # The input dimensions and the number of filters are passed in,
        # so the convolution filters can be generated as random values of the right shape.

       # layer names used as dict keys
       self.layers['Conv1'] = Convolution(self.params['W1'],
                                           self.params['b1'],
                                           conv_params['stride'],
                                           conv_params['pad'])  # pass in W and b
       self.layers['ReLu1'] = Relu()  # x -> the value handed on from the Convolution layer
       self.layers['Pool1'] = Pooling(pool_h=2, pool_w=2, stride=2)
       self.layers['Affine1'] = Affine(self.params['W2'],
                                        self.params['b2'])
       self.layers['Relu2'] = Relu()
       self.layers['Affine2'] = Affine(self.params['W3'],
                                        self.params['b3'])
       self.last_layer = SoftmaxWithLoss()
Code Example #4
    def __init__(
        self,
        input_size,
        hidden_size_list,
        output_size,
        activation="relu",
        weight_init_std="relu",
        weight_decay_lambda=0,
    ):
        self.input_size = input_size
        self.hidden_size_list = hidden_size_list
        self.hidden_layer_num = len(hidden_size_list)
        self.output_size = output_size
        self.weight_decay_lambda = weight_decay_lambda
        self.params = {}

        # Weight initialization
        self.__init_weight(weight_init_std)

        # Create layers
        activation_layer = {"sigmoid": Sigmoid(), "relu": Relu()}
        self.layers = OrderedDict()

        for idx in range(1, self.hidden_layer_num + 1):
            self.layers["Affine" + str(idx)] = Affine(
                self.params["W" + str(idx)], self.params["b" + str(idx)])
            self.layers["Activation_function" +
                        str(idx)] = activation_layer[activation]

        idx = self.hidden_layer_num + 1
        self.layers["Affine" + str(idx)] = Affine(self.params["W" + str(idx)],
                                                  self.params["b" + str(idx)])

        self.last_layer = SoftmaxWithLoss()
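A hedged usage sketch for the constructor above, assuming it belongs to a fully connected class named MultiLayerNet (the class name, layer sizes, and weight-decay value here are illustrative assumptions):

# Hypothetical usage: a 784-to-10 classifier with three hidden layers of 100 units.
network = MultiLayerNet(input_size=784, hidden_size_list=[100, 100, 100],
                        output_size=10, activation="relu",
                        weight_init_std="relu", weight_decay_lambda=0.1)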
Code Example #5
    def __init__(self, input_dim=(1, 28, 28),
                 conv_param_1={'filter_num': 16, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_2={'filter_num': 16, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_3={'filter_num': 32, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_4={'filter_num': 32, 'filter_size': 3, 'pad': 2, 'stride': 1},
                 conv_param_5={'filter_num': 64, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 conv_param_6={'filter_num': 64, 'filter_size': 3, 'pad': 1, 'stride': 1},
                 hidden_size=50, output_size=10):
        pre_node_nums = np.array([1*3*3, 16*3*3, 16*3*3, 32*3*3, 32*3*3, 64*3*3, 64*4*4, hidden_size])
        weight_init_scale = np.sqrt(2.0 / pre_node_nums)

        # weights init
        self.params = {}
        pre_channel_num = input_dim[0]
        for idx, conv_param in enumerate([conv_param_1, conv_param_2, conv_param_3,
                                          conv_param_4, conv_param_5, conv_param_6]):
            self.params['w'+str(idx+1)] = weight_init_scale[idx] *\
                    np.random.randn(
                        conv_param['filter_num'],
                        pre_channel_num, conv_param['filter_size'],
                        conv_param['filter_size'])
            self.params['b'+str(idx+1)] = np.zeros(conv_param['filter_num'])
            pre_channel_num = conv_param['filter_num']
        self.params['w7'] = weight_init_scale[6] * np.random.randn(64*4*4, hidden_size)
        self.params['b7'] = np.zeros(hidden_size)
        self.params['w8'] = weight_init_scale[7] * np.random.randn(hidden_size, output_size)
        self.params['b8'] = np.zeros(output_size)

        # gen layers
        self.layers = []
        self.layers.append(Convolution(self.params['w1'], self.params['b1'], conv_param_1['stride'],
                                       conv_param_1['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['w2'], self.params['b2'], conv_param_2['stride'],
                                       conv_param_2['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Convolution(self.params['w3'], self.params['b3'], conv_param_3['stride'],
                                       conv_param_3['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['w4'], self.params['b4'], conv_param_4['stride'],
                                       conv_param_4['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Convolution(self.params['w5'], self.params['b5'], conv_param_5['stride'],
                                       conv_param_5['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['w6'], self.params['b6'], conv_param_6['stride'],
                                       conv_param_6['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Affine(self.params['w7'], self.params['b7']))
        self.layers.append(Relu())
        self.layers.append(Dropout(0.5))
        self.layers.append(Affine(self.params['w8'], self.params['b8']))
        self.layers.append(Dropout(0.5))
        self.last_layer = SoftmaxWithLoss()
Code Example #6
    def __init__(self, input_dim=(1, 28, 28),
                 conv_param_1 = {'filter_num':16, 'filter_size':3, 'pad':1, 'stride':1},
                 conv_param_2 = {'filter_num':16, 'filter_size':3, 'pad':1, 'stride':1},
                 conv_param_3 = {'filter_num':32, 'filter_size':3, 'pad':1, 'stride':1},
                 conv_param_4 = {'filter_num':32, 'filter_size':3, 'pad':2, 'stride':1},
                 conv_param_5 = {'filter_num':64, 'filter_size':3, 'pad':1, 'stride':1},
                 conv_param_6 = {'filter_num':64, 'filter_size':3, 'pad':1, 'stride':1},
                 hidden_size=50, output_size=10):
        # Weight initialization ===========
        # Number of connections each neuron has to neurons in the previous layer (TODO: compute this automatically)
        pre_node_nums = np.array([1*3*3, 16*3*3, 16*3*3, 32*3*3, 32*3*3, 64*3*3, 64*4*4, hidden_size])
        weight_init_scales = np.sqrt(2.0 / pre_node_nums)  # recommended initial scale when using ReLU (He initialization)
        
        self.params = {} 
        pre_channel_num = input_dim[0]
        for idx, conv_param in enumerate([conv_param_1, conv_param_2, conv_param_3, conv_param_4, conv_param_5, conv_param_6]):
            self.params['W' + str(idx+1)] = weight_init_scales[idx] * np.random.randn(conv_param['filter_num'], pre_channel_num, conv_param['filter_size'], conv_param['filter_size'])
            self.params['b' + str(idx+1)] = np.zeros(conv_param['filter_num'])
            pre_channel_num = conv_param['filter_num']
        self.params['W7'] = weight_init_scales[6] * np.random.randn(64*4*4, hidden_size)
        self.params['b7'] = np.zeros(hidden_size)
        self.params['W8'] = weight_init_scales[7] * np.random.randn(hidden_size, output_size)
        self.params['b8'] = np.zeros(output_size)

        # Layer construction ===========
        self.layers = []
        self.layers.append(Convolution(self.params['W1'], self.params['b1'], 
                           conv_param_1['stride'], conv_param_1['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['W2'], self.params['b2'], 
                           conv_param_2['stride'], conv_param_2['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Convolution(self.params['W3'], self.params['b3'], 
                           conv_param_3['stride'], conv_param_3['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['W4'], self.params['b4'],
                           conv_param_4['stride'], conv_param_4['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Convolution(self.params['W5'], self.params['b5'],
                           conv_param_5['stride'], conv_param_5['pad']))
        self.layers.append(Relu())
        self.layers.append(Convolution(self.params['W6'], self.params['b6'],
                           conv_param_6['stride'], conv_param_6['pad']))
        self.layers.append(Relu())
        self.layers.append(Pooling(pool_h=2, pool_w=2, stride=2))
        self.layers.append(Affine(self.params['W7'], self.params['b7']))
        self.layers.append(Relu())
        self.layers.append(Dropout(0.5))
        self.layers.append(Affine(self.params['W8'], self.params['b8']))
        self.layers.append(Dropout(0.5))
        
        self.last_layer = SoftmaxWithLoss()
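With the default conv_param settings above, the spatial size shrinks as traced below, which is where the 64*4*4 fan-in of 'W7' comes from. A small worked check of the arithmetic, not part of the class:

size = 28
for pad in (1, 1, None, 1, 2, None, 1, 1, None):   # None marks a 2x2, stride-2 pooling step
    size = size // 2 if pad is None else (size - 3 + 2*pad) + 1   # 3x3 filters, stride 1
# sizes after each step: 28, 28, 14, 14, 16, 8, 8, 8, 4
print(64 * size * size)   # 1024 = 64*4*4, the input size of the first Affine layer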
Code Example #7
    def __init__(self,
                 inputLayerSize,
                 hiddenLayerSize,
                 outputLayerSize,
                 distributionScale=0.01):
        # Initialize weight
        self.params = {}
        self.params['w1'] = distributionScale * np.random.randn(
            inputLayerSize, hiddenLayerSize)
        self.params['b1'] = np.zeros(hiddenLayerSize)
        self.params['w2'] = distributionScale * np.random.randn(
            hiddenLayerSize, outputLayerSize)
        self.params['b2'] = np.zeros(outputLayerSize)

        # Create layers
        self.layers = OrderedDict()
        self.layers['affine1'] = Affine(self.params['w1'], self.params['b1'])
        self.layers['relu1'] = Relu()
        self.layers['affine2'] = Affine(self.params['w2'], self.params['b2'])
        self.lastLayer = SoftmaxWithLoss()
Code Example #8
    def __init__(self):
        self.__input_size = 28 ** 2   # MNIST data is 28 x 28 pixel.
        self.__hidden_size = 50       # Hidden layer size.
        self.__output_size = 10       # Output is a one-hot array for digits 0 to 9.
        self.__weight_init_std = 0.01 # Standard deviation for the initial weights.
        
        # Initialize weights and biases.
        self.params = {}
        self.params['W1'] = self.__weight_init_std * np.random.randn(self.__input_size, self.__hidden_size)
        self.params['b1'] = np.zeros(self.__hidden_size)
        self.params['W2'] = self.__weight_init_std * np.random.randn(self.__hidden_size, self.__output_size) 
        self.params['b2'] = np.zeros(self.__output_size)

        # Generate layers.
        self.layers = OrderedDict()
        self.layers['Affine1'] = Affine(self.params['W1'], self.params['b1'])
        self.layers['Relu1'] = Relu()
        self.layers['Affine2'] = Affine(self.params['W2'], self.params['b2'])

        self.lastLayer = SoftmaxWithLoss()
Code Example #9
    def __init__(self,
                 input_size,
                 hidden_size,
                 output_size,
                 weight_init_std=0.01):
        # Initialize the weights
        self.params = {}
        self.params['W1'] = weight_init_std * np.random.randn(
            input_size, hidden_size)
        self.params['b1'] = np.zeros(hidden_size)
        self.params['W2'] = weight_init_std * np.random.randn(
            hidden_size, output_size)
        self.params['b2'] = np.zeros(output_size)

        # Build the layers
        self.layers = OrderedDict()
        self.layers['Affine1'] = Affine(self.params['W1'], self.params['b1'])
        self.layers['Relu1'] = Relu()
        self.layers['Affine2'] = Affine(self.params['W2'], self.params['b2'])

        self.lastLayer = SoftmaxWithLoss()
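The OrderedDict matters because prediction and backpropagation walk the layers in insertion order (and in reverse). A minimal sketch of the predict/loss methods that usually accompany this constructor; the bodies below are an assumption about the rest of the class:

    def predict(self, x):
        # Pass the input through Affine1 -> Relu1 -> Affine2 in insertion order.
        for layer in self.layers.values():
            x = layer.forward(x)
        return x

    def loss(self, x, t):
        # Softmax + cross-entropy on the final scores.
        y = self.predict(x)
        return self.lastLayer.forward(y, t)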
Code Example #10
    def __init__(self,
                 input_dim=(1, 28, 28),
                 conv_param={
                     'filter_num': 30,
                     "filter_size": 5,
                     'pad': 0,
                     'stride': 1
                 },
                 hidden_size=100,
                 output_size=10,
                 weight_init_std=0.01):
        """

        :param input_dim:输入数据的维度:(通道,高,长)
        :param conv_param:卷积层的超参数(字典)。字典的关键字如下:
                            filter_num―滤波器的数量
                            filter_size―滤波器的大小
                            stride―步幅
                            pad―填充
        :param hidden_size:隐藏层(全连接)的神经元数量
        :param output_size:输出层(全连接)的神经元数量
        :param weight_init_std:初始化时权重的标准差
        """
        filter_num = conv_param['filter_num']  # number of filters
        filter_size = conv_param['filter_size']  # filter size
        filter_pad = conv_param['pad']  # filter padding
        filter_stride = conv_param['stride']  # filter stride
        input_size = input_dim[1]
        conv_output_size = (input_size - filter_size + 2 * filter_pad) / \
                           filter_stride + 1
        pool_output_size = int(filter_num * (conv_output_size / 2) *
                               (conv_output_size / 2))

        # Initialize the weight parameters
        self.params = {
            'W1':
            weight_init_std * np.random.randn(
                filter_num, input_dim[0], filter_size, filter_size),  # convolution layer weights
            'b1':
            np.zeros(filter_num),  # convolution layer biases
            'W2':
            weight_init_std * np.random.randn(pool_output_size, hidden_size),
            'b2':
            np.zeros(hidden_size),
            'W3':
            weight_init_std * np.random.randn(hidden_size, output_size),
            'b3':
            np.zeros(output_size)
        }

        # Build the required layers
        self.layers = OrderedDict()
        self.layers['Conv1'] = Convolution(self.params['W1'],
                                           self.params['b1'],
                                           conv_param['stride'],
                                           conv_param['pad'])
        self.layers['Relu1'] = Relu()
        self.layers['Pool1'] = Pooling(pool_h=2, pool_w=2, stride=2)
        self.layers['Affine1'] = Affine(self.params['W2'], self.params['b2'])
        self.layers['Relu2'] = Relu()
        self.layers['Affine2'] = Affine(self.params['W3'], self.params['b3'])
        self.last_layer = SoftmaxWithLoss()
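A hedged usage sketch, assuming the constructor above belongs to a class named SimpleConvNet and that a predict method walking self.layers in order exists (both are assumptions for illustration):

# Hypothetical usage on a dummy MNIST-shaped batch.
network = SimpleConvNet(input_dim=(1, 28, 28),
                        conv_param={'filter_num': 30, 'filter_size': 5, 'pad': 0, 'stride': 1},
                        hidden_size=100, output_size=10, weight_init_std=0.01)
x = np.random.rand(2, 1, 28, 28)   # batch of two single-channel 28x28 images
scores = network.predict(x)        # expected shape: (2, 10)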
Code Example #11
def stage_shuffle(input_data, stage, repeat_num, groups):
    """Run one ShuffleNet stage: a strided unit (average-pooled shortcut, concatenated with the
    main branch) followed by repeat_num stride-1 units with additive shortcuts. Pretrained
    parameters are loaded from .npy files whose names start with the `stage` prefix."""
    avgpool_layer = AVGPooling(3, 3, 2, 1)
    residual = avgpool_layer.forward(input_data)
    #savetxt('./dump/' + 'avg_pool.txt', residual)

    w = np.load(stage + '0.g_conv_1x1_compress.conv1x1.weight.npy')
    b = np.load(stage + '0.g_conv_1x1_compress.conv1x1.bias.npy')

    if 'Stage2' in stage:
        conv_layer = Convolution(w, b, stride=1, pad=0)
    else:
        conv_layer = GroupConvolution(w, b, stride=1, pad=0, groups=groups)
    out = conv_layer.forward(input_data)
    out_N, out_C, out_H, out_W = out.shape

    gamma = np.load(stage +
                    '0.g_conv_1x1_compress.batch_norm.weight.npy').reshape(
                        (-1, 1))
    beta = np.load(stage +
                   '0.g_conv_1x1_compress.batch_norm.bias.npy').reshape(
                       (-1, 1))
    mean = np.load(
        stage + '0.g_conv_1x1_compress.batch_norm.running_mean.npy').reshape(
            (-1, 1))
    var = np.load(stage +
                  '0.g_conv_1x1_compress.batch_norm.running_var.npy').reshape(
                      (-1, 1))
    bn_layer = BatchNormalization(gamma,
                                  beta,
                                  running_mean=mean,
                                  running_var=var)
    out = bn_layer.forward(out.reshape(out_C, -1), train_flg=False)
    relu_layer = Relu()
    out = relu_layer.forward(out).reshape(out_N, out_C, out_H, out_W)
    #savetxt('./dump/' + '1x1_comp.txt', out)

    out = channel_shuffle(out, groups)
    #savetxt('./dump/' + 'channel_shuffle.txt', out)

    w = np.load(stage + '0.depthwise_conv3x3.weight.npy').transpose(1, 0, 2, 3)
    b = np.load(stage + '0.depthwise_conv3x3.bias.npy')
    dwconv_layer = DWConvolution(w, b, stride=2, pad=1)
    out = dwconv_layer.forward(out)
    #savetxt('./dump/' + 'dwconv.txt', out)

    gamma = np.load(stage + '0.bn_after_depthwise.weight.npy').reshape((-1, 1))
    beta = np.load(stage + '0.bn_after_depthwise.bias.npy').reshape((-1, 1))
    mean = np.load(stage + '0.bn_after_depthwise.running_mean.npy').reshape(
        (-1, 1))
    var = np.load(stage + '0.bn_after_depthwise.running_var.npy').reshape(
        (-1, 1))
    bn_layer = BatchNormalization(gamma,
                                  beta,
                                  running_mean=mean,
                                  running_var=var)
    out_N, out_C, out_H, out_W = out.shape
    out = bn_layer.forward(out.reshape(out_C, -1),
                           train_flg=False).reshape(out_N, out_C, out_H, out_W)
    #savetxt('./dump/' + 'after_bn.txt', out)

    w = np.load(stage + '0.g_conv_1x1_expand.conv1x1.weight.npy')
    b = np.load(stage + '0.g_conv_1x1_expand.conv1x1.bias.npy')
    groupconv_layer = GroupConvolution(w, b, stride=1, pad=0, groups=groups)
    out = groupconv_layer.forward(out)

    gamma = np.load(stage +
                    '0.g_conv_1x1_expand.batch_norm.weight.npy').reshape(
                        (-1, 1))
    beta = np.load(stage + '0.g_conv_1x1_expand.batch_norm.bias.npy').reshape(
        (-1, 1))
    mean = np.load(stage +
                   '0.g_conv_1x1_expand.batch_norm.running_mean.npy').reshape(
                       (-1, 1))
    var = np.load(stage +
                  '0.g_conv_1x1_expand.batch_norm.running_var.npy').reshape(
                      (-1, 1))
    bn_layer = BatchNormalization(gamma,
                                  beta,
                                  running_mean=mean,
                                  running_var=var)
    out_N, out_C, out_H, out_W = out.shape
    out = bn_layer.forward(out.reshape(out_C, -1),
                           train_flg=False).reshape(out_N, out_C, out_H, out_W)
    #savetxt('./dump/' + 'gconv.txt', out)

    out = np.concatenate((residual, out), 1)
    #savetxt('./dump/' + 'combine.txt', out)
    relu_layer = Relu()
    out_N, out_C, out_H, out_W = out.shape
    out = relu_layer.forward(out).reshape(out_N, out_C, out_H, out_W)
    #savetxt('./dump/' + 'stage2.txt', out)

    for i in range(1, repeat_num + 1):
        residual = out
        w = np.load(stage + str(i) + '.g_conv_1x1_compress.conv1x1.weight.npy')
        b = np.load(stage + str(i) + '.g_conv_1x1_compress.conv1x1.bias.npy')
        groupconv_layer = GroupConvolution(w,
                                           b,
                                           stride=1,
                                           pad=0,
                                           groups=groups)
        out = groupconv_layer.forward(out)
        out_N, out_C, out_H, out_W = out.shape

        gamma = np.load(stage + str(i) +
                        '.g_conv_1x1_compress.batch_norm.weight.npy').reshape(
                            (-1, 1))
        beta = np.load(stage + str(i) +
                       '.g_conv_1x1_compress.batch_norm.bias.npy').reshape(
                           (-1, 1))
        mean = np.load(
            stage + str(i) +
            '.g_conv_1x1_compress.batch_norm.running_mean.npy').reshape(
                (-1, 1))
        var = np.load(
            stage + str(i) +
            '.g_conv_1x1_compress.batch_norm.running_var.npy').reshape((-1, 1))
        bn_layer = BatchNormalization(gamma,
                                      beta,
                                      running_mean=mean,
                                      running_var=var)
        out = bn_layer.forward(out.reshape(out_C, -1), train_flg=False)
        relu_layer = Relu()
        out = relu_layer.forward(out).reshape(out_N, out_C, out_H, out_W)
        #savetxt('./dump/' + str(i) + '_1x1_comp.txt', out)

        out = channel_shuffle(out, groups)
        #savetxt('./dump/' + 'channel_shuffle.txt', out)

        w = np.load(stage + str(i) +
                    '.depthwise_conv3x3.weight.npy').transpose(1, 0, 2, 3)
        b = np.load(stage + str(i) + '.depthwise_conv3x3.bias.npy')
        dwconv_layer = DWConvolution(w, b, stride=1, pad=1)
        out = dwconv_layer.forward(out)
        #savetxt('./dump/' + 'dwconv.txt', out)

        gamma = np.load(stage + str(i) +
                        '.bn_after_depthwise.weight.npy').reshape((-1, 1))
        beta = np.load(stage + str(i) +
                       '.bn_after_depthwise.bias.npy').reshape((-1, 1))
        mean = np.load(stage + str(i) +
                       '.bn_after_depthwise.running_mean.npy').reshape((-1, 1))
        var = np.load(stage + str(i) +
                      '.bn_after_depthwise.running_var.npy').reshape((-1, 1))
        bn_layer = BatchNormalization(gamma,
                                      beta,
                                      running_mean=mean,
                                      running_var=var)
        out_N, out_C, out_H, out_W = out.shape
        out = bn_layer.forward(out.reshape(out_C, -1),
                               train_flg=False).reshape(
                                   out_N, out_C, out_H, out_W)
        #savetxt('./dump/' + 'after_bn.txt', out)

        w = np.load(stage + str(i) + '.g_conv_1x1_expand.conv1x1.weight.npy')
        b = np.load(stage + str(i) + '.g_conv_1x1_expand.conv1x1.bias.npy')
        groupconv_layer = GroupConvolution(w,
                                           b,
                                           stride=1,
                                           pad=0,
                                           groups=groups)
        out = groupconv_layer.forward(out)

        gamma = np.load(stage + str(i) +
                        '.g_conv_1x1_expand.batch_norm.weight.npy').reshape(
                            (-1, 1))
        beta = np.load(stage + str(i) +
                       '.g_conv_1x1_expand.batch_norm.bias.npy').reshape(
                           (-1, 1))
        mean = np.load(
            stage + str(i) +
            '.g_conv_1x1_expand.batch_norm.running_mean.npy').reshape((-1, 1))
        var = np.load(stage + str(i) +
                      '.g_conv_1x1_expand.batch_norm.running_var.npy').reshape(
                          (-1, 1))
        bn_layer = BatchNormalization(gamma,
                                      beta,
                                      running_mean=mean,
                                      running_var=var)
        out_N, out_C, out_H, out_W = out.shape
        out = bn_layer.forward(out.reshape(out_C, -1),
                               train_flg=False).reshape(
                                   out_N, out_C, out_H, out_W)
        #savetxt('./dump/' + 'gconv.txt', out)

        out = np.add(residual, out)
        #savetxt('./dump/' + str(i) + '_combine.txt', out)
        relu_layer = Relu()
        out_N, out_C, out_H, out_W = out.shape
        out = relu_layer.forward(out).reshape(out_N, out_C, out_H, out_W)
        #savetxt('./dump/' + str(i) + '_stage.txt', out)
    return out
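stage_shuffle relies on a channel_shuffle helper that is not shown here. Below is a minimal sketch of the standard ShuffleNet channel shuffle (reshape, transpose, reshape back); this is the usual formulation and an assumption about the helper actually used above:

def channel_shuffle(x, groups):
    # x: (N, C, H, W) with C divisible by groups.
    N, C, H, W = x.shape
    x = x.reshape(N, groups, C // groups, H, W)
    x = x.transpose(0, 2, 1, 3, 4)   # interleave channels across the groups
    return x.reshape(N, C, H, W)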
Code Example #12
# coding: utf-8

import numpy as np
import sys
sys.path.append('../../')
from common.layers import Relu

relu = Relu()

#---------------------------------------
# forward
x = np.array([[1.0, -0.5], [-2.0, 3.0]])
print(x)
y = relu.forward(x)
print(y)

#---------------------------------------
# backward
dy = np.array([[5, 5], [5, 5]])
dx = relu.backward(dy)
print(dx)
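With the inputs above, y comes out as [[1., 0.], [0., 3.]] and dx as [[5, 0], [0, 5]]: backward passes the upstream gradient through only where the forward input was positive. For reference, a minimal sketch of a mask-based Relu layer matching this behaviour (an assumption about common.layers.Relu, based on the standard implementation):

class Relu:
    def __init__(self):
        self.mask = None

    def forward(self, x):
        self.mask = (x <= 0)      # remember where the input was non-positive
        out = x.copy()
        out[self.mask] = 0
        return out

    def backward(self, dout):
        dout = dout.copy()
        dout[self.mask] = 0       # no gradient flows through clipped units
        return dout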
Code Example #13
File: cnn.py  Project: HansRR/RATM-1
    def __init__(self, name, nout, numpy_rng, theano_rng, batchsize=128):
        # CALL PARENT CONSTRUCTOR TO SETUP CONVENIENCE FUNCTIONS
        # (SAVE/LOAD, ...)
        super(HumanConvNet, self).__init__(name=name)

        self.numpy_rng = numpy_rng
        self.batchsize = batchsize
        self.theano_rng = theano_rng
        self.mode = theano.shared(np.int8(0), name='mode')
        self.nout = nout

        self.inputs = T.ftensor4('inputs')
        self.inputs.tag.test_value = numpy_rng.randn(self.batchsize, 1, 28,
                                                     28).astype(np.float32)

        self.targets = T.ivector('targets')
        self.targets.tag.test_value = numpy_rng.randint(
            nout, size=self.batchsize).astype(np.int32)

        self.layers = OrderedDict()

        self.layers['conv0'] = ConvLayer(rng=self.numpy_rng,
                                         inputs=self.inputs,
                                         filter_shape=(128, 1, 5, 5),
                                         image_shape=(None, 1, 28, 28),
                                         name='conv0',
                                         pad=2)

        self.layers['maxpool0'] = MaxPoolLayer(inputs=self.layers['conv0'],
                                               pool_size=(2, 2),
                                               stride=(2, 2),
                                               name='maxpool0')

        self.layers['bias0'] = ConvBiasLayer(inputs=self.layers['maxpool0'],
                                             name='bias0')

        self.layers['relu0'] = Relu(inputs=self.layers['bias0'], name='relu0')

        self.layers['conv1'] = ConvLayer(rng=self.numpy_rng,
                                         inputs=self.layers['relu0'],
                                         filter_shape=(64, 128, 3, 3),
                                         name='conv1',
                                         pad=1)

        self.layers['maxpool1'] = MaxPoolLayer(inputs=self.layers['conv1'],
                                               pool_size=(2, 2),
                                               stride=(2, 2),
                                               name='maxpool1')

        self.layers['bias1'] = ConvBiasLayer(inputs=self.layers['maxpool1'],
                                             name='bias1')

        self.layers['relu1'] = Relu(inputs=self.layers['bias1'], name='relu1')

        self.layers['reshape1'] = Reshape(
            inputs=self.layers['relu1'],
            shape=(self.layers['relu1'].outputs_shape[0],
                   np.prod(self.layers['relu1'].outputs_shape[1:])),
            name='reshape1')

        self.layers['fc2'] = AffineLayer(rng=self.numpy_rng,
                                         inputs=self.layers['reshape1'],
                                         nouts=256,
                                         name='fc2')

        self.layers['relu2'] = Relu(inputs=self.layers['fc2'], name='relu2')

        self.layers['fc3'] = AffineLayer(rng=self.numpy_rng,
                                         inputs=self.layers['relu2'],
                                         nouts=self.nout,
                                         name='fc3')

        self.layers['softmax3'] = Softmax(inputs=self.layers['fc3'],
                                          name='softmax3')

        self.layers['clip3'] = Clip(inputs=self.layers['softmax3'],
                                    name='clip3',
                                    min_val=1e-6,
                                    max_val=1 - 1e-6)

        self.probabilities = self.forward(self.inputs)

        self._cost = T.nnet.categorical_crossentropy(self.probabilities,
                                                     self.targets).mean()

        self.classification = T.argmax(self.probabilities, axis=1)

        self.params = []
        for l in self.layers.values():
            self.params.extend(l.params)

        self._grads = T.grad(self._cost, self.params)

        self.classify = theano.function(
            [self.inputs],
            self.classification,
        )
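A hedged usage sketch for the compiled classify function above, assuming net is a HumanConvNet instance built with the constructor shown (the input batch here is dummy data):

# inputs is declared as ftensor4, so the batch must be float32 with shape (batchsize, 1, 28, 28).
images = np.random.randn(128, 1, 28, 28).astype(np.float32)
predicted_labels = net.classify(images)   # argmax over the softmax probabilities, shape (128,)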