Code example #1
def understand_4d_im2col():
    batch_size = 2
    stride = 1
    padding = 0
    fh = 2
    fw = 2
    input_channel = 3
    output_channel = 2
    iw = 3
    ih = 3
    (output_height, output_width) = calculate_output_size(ih, iw, fh, fw, padding, stride)
    wb = ConvWeightsBias(output_channel, input_channel, fh, fw, InitialMethod.MSRA, OptimizerName.SGD, 0.1)
    wb.Initialize("test", "test", True)
    wb.W = np.array(range(output_channel * input_channel * fh * fw)).reshape(output_channel, input_channel, fh, fw)
    wb.B = np.array([0])
    x = np.array(range(input_channel * iw * ih * batch_size)).reshape(batch_size, input_channel, ih, iw)

    col = img2col(x, fh, fw, stride, padding)
    w = wb.W.reshape(output_channel, -1).T
    output = np.dot(col, w)
    print("x=\n", x)
    print("col_x=\n", col)
    print("weights=\n", wb.W)
    print("col_w=\n", w)
    print("output=\n", output)
    out2 = output.reshape(batch_size, output_height, output_width, -1)
    print("out2=\n", out2)
    out3 = np.transpose(out2, axes=(0, 3, 1, 2))
    print("conv result=\n", out3)
Code example #2
File: ConvLayer.py  Project: mask8082/ai-edu
    def __init__(
            self,
            input_shape,  # (InputChannelCount, H, W)
            kernal_shape,  # (OutputChannelCount, FH, FW)
            conv_param,  # (stride, padding)
            activator,
            param):
        self.num_input_channel = input_shape[0]
        self.input_height = input_shape[1]
        self.input_width = input_shape[2]
        self.num_output_channel = kernal_shape[0]
        self.filter_height = kernal_shape[1]
        self.filter_width = kernal_shape[2]
        self.stride = conv_param[0]
        self.padding = conv_param[1]
        self.activator = activator

        self.WeightsBias = ConvWeightsBias(self.num_output_channel,
                                           self.num_input_channel,
                                           self.filter_height,
                                           self.filter_width,
                                           param.init_method,
                                           param.optimizer_name, param.eta)
        (self.output_height, self.output_width) = calculate_output_size(
            self.input_height, self.input_width, self.filter_height,
            self.filter_width, self.padding, self.stride)
        self.output_shape = (self.num_output_channel, self.output_height,
                             self.output_width)
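calculate_output_size, called by every example here, is defined at the bottom of code example #11 and implements the standard convolution arithmetic (H - FH + 2*padding) // stride + 1. For the 3x3 inputs and 2x2 filters of examples #1 and #3:

out_h = (3 - 2 + 2 * 0) // 1 + 1  # = 2
out_w = (3 - 2 + 2 * 0) // 1 + 1  # = 2

so each output feature map is 2x2.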
Code example #3
def test_2d_conv():
    batch_size = 1
    stride = 1
    padding = 0
    fh = 2
    fw = 2
    input_channel = 1
    output_channel = 1
    iw = 3
    ih = 3
    (output_height, output_width) = calculate_output_size(ih, iw, fh, fw, padding, stride)
    wb = ConvWeightsBias(output_channel, input_channel, fh, fw, InitialMethod.MSRA, OptimizerName.SGD, 0.1)
    wb.Initialize("test", "test", True)
    wb.W = np.array([3,2,1,0]).reshape(1,1,2,2)
    wb.B = np.array([0])
    x = np.array(range(9)).reshape(1,1,3,3)
    output1 = jit_conv_4d(x, wb.W, wb.B, output_height, output_width, stride)
    print("input=\n", x)
    print("weights=\n", wb.W)
    print("output=\n", output1)

    col = img2col(x, 2, 2, 1, 0)
    w = wb.W.reshape(4, 1)
    output2 = np.dot(col, w)
    print("input=\n", col)
    print("weights=\n", w)
    print("output2=\n", output2)
Code example #4
    def initialize(self, folder, name, create_new=False):
        self.WB = ConvWeightsBias(self.OutC, self.InC, self.FH, self.FW,
                                  self.hp.init_method, self.hp.optimizer_name,
                                  self.hp.eta)
        self.WB.Initialize(folder, name, create_new)
        (self.OutH,
         self.OutW) = calculate_output_size(self.InH, self.InW, self.FH,
                                            self.FW, self.padding, self.stride)
        self.output_shape = (self.OutC, self.OutH, self.OutW)
Code example #5
File: ConvLayer.py  Project: yingleizhong/ai-edu
    def initialize(self, folder, name, create_new=False):
        self.WB = ConvWeightsBias(
            self.num_output_channel, self.num_input_channel, self.filter_height, self.filter_width,
            self.hp.init_method, self.hp.optimizer_name, self.hp.eta)
        self.WB.Initialize(folder, name, create_new)
        (self.output_height, self.output_width) = ConvLayer.calculate_output_size(
            self.input_height, self.input_width,
            self.filter_height, self.filter_width,
            self.padding, self.stride)
        self.output_shape = (self.num_output_channel, self.output_height, self.output_width)
Code example #6
def understand_4d_col2img_complex():
    batch_size = 2
    stride = 1
    padding = 0
    fh = 2
    fw = 2
    input_channel = 3
    output_channel = 2
    iw = 3
    ih = 3
    (output_height,
     output_width) = calculate_output_size(ih, iw, fh, fw, padding, stride)
    wb = ConvWeightsBias(output_channel, input_channel, fh, fw,
                         InitialMethod.MSRA, OptimizerName.SGD, 0.1)
    wb.Initialize("test", "test", True)
    wb.W = np.array(range(output_channel * input_channel * fh * fw)).reshape(
        output_channel, input_channel, fh, fw)
    wb.B = np.array([0])
    x = np.array(range(input_channel * iw * ih * batch_size)).reshape(
        batch_size, input_channel, ih, iw)
    print("x=\n", x)
    col_x = img2col(x, fh, fw, stride, padding)
    print("col_x=\n", col_x)
    print("w=\n", wb.W)
    col_w = wb.W.reshape(output_channel, -1).T
    print("col_w=\n", col_w)

    # backward
    delta_in = np.array(
        range(batch_size * output_channel * output_height *
              output_width)).reshape(batch_size, output_channel, output_height,
                                     output_width)
    print("delta_in=\n", delta_in)

    delta_in_2d = np.transpose(delta_in,
                               axes=(0, 2, 3, 1)).reshape(-1, output_channel)
    print("delta_in_2d=\n", delta_in_2d)

    dB = np.sum(delta_in_2d, axis=0, keepdims=True).T / batch_size
    print("dB=\n", dB)
    dW = np.dot(col_x.T, delta_in_2d) / batch_size
    print("dW=\n", dW)
    dW = np.transpose(dW, axes=(1, 0)).reshape(output_channel, input_channel,
                                               fh, fw)
    print("dW=\n", dW)
    dcol = np.dot(delta_in_2d, col_w.T)
    print("dcol=\n", dcol)
    delta_out = col2img(dcol, x.shape, fh, fw, stride, padding, output_height,
                        output_width)
    print("delta_out=\n", delta_out)
Code example #7
File: ConvLayer.py  Project: czr22/ai-edu
class ConvLayer(CLayer):
    def __init__(self, num_input_channel, num_output_channel, num_filter_size, stride, padding, activator):
        self.num_input_channel = num_input_channel
        self.num_output_channel = num_output_channel
        self.num_filter_size = num_filter_size
        self.stride = stride
        self.padding = padding
        self.activator = activator

    def Initialize(self):
        self.weights = ConvWeightsBias(self.num_output_channel, self.num_input_channel, self.num_filter_size, self.num_filter_size)
        self.weights.Initialize()

    """
    输入数据
    N:样本图片数量(比如一次计算10张图片)
    C:图片通道数量(比如红绿蓝三通道)
    H:图片高度(比如224)
    W:图片宽度(比如224)
    思维卷积操作
    """
    
    def forward(self, x):
        self.input_shape = x.shape
        assert(x.ndim == 4)
        self.x = x
        # (rest of the body not included in this excerpt)

    # The activation function is treated as part of this layer: the error
    # passed in from the layer above first goes through the derivative of
    # the activation, yielding this layer's error with respect to the z values.
    def backward(self, delta_in, flag):
        pass  # (body not included in this excerpt)

    def pre_update(self):
        self.weights.pre_Update()

    def update(self):
        self.weights.Update()
        
    def save_parameters(self, name):
        self.weights.SaveResultValue(name)

    def load_parameters(self, name):
        self.weights.LoadResultValue(name)
Code example #8
File: ConvLayer.py  Project: mask8082/ai-edu
class ConvLayer(CLayer):
    # define the number of input and output channels, plus the filter size
    def __init__(
            self,
            input_shape,  # (InputChannelCount, H, W)
            kernal_shape,  # (OutputChannelCount, FH, FW)
            conv_param,  # (stride, padding)
            activator,
            param):
        self.num_input_channel = input_shape[0]
        self.input_height = input_shape[1]
        self.input_width = input_shape[2]
        self.num_output_channel = kernal_shape[0]
        self.filter_height = kernal_shape[1]
        self.filter_width = kernal_shape[2]
        self.stride = conv_param[0]
        self.padding = conv_param[1]
        self.activator = activator

        self.WeightsBias = ConvWeightsBias(self.num_output_channel,
                                           self.num_input_channel,
                                           self.filter_height,
                                           self.filter_width,
                                           param.init_method,
                                           param.optimizer_name, param.eta)
        (self.output_height, self.output_width) = calculate_output_size(
            self.input_height, self.input_width, self.filter_height,
            self.filter_width, self.padding, self.stride)
        self.output_shape = (self.num_output_channel, self.output_height,
                             self.output_width)

    """
    输入数据
    N:样本图片数量(比如一次计算10张图片)
    C:图片通道数量(比如红绿蓝三通道)
    H:图片高度(比如224)
    W:图片宽度(比如224)
    思维卷积操作
    """

    def forward(self, x):
        assert (x.ndim == 4)
        self.x = x
        assert (self.x.shape[1] == self.num_input_channel)
        assert (self.x.shape[2] == self.input_height)
        assert (self.x.shape[3] == self.input_width)
        self.batch_size = self.x.shape[0]

        if self.padding > 0:
            self.padded = np.pad(self.x,
                                 ((0, 0), (0, 0), (self.padding, self.padding),
                                  (self.padding, self.padding)), 'constant')
        else:
            self.padded = self.x
        #end if

        self.z = jit_conv_4d(self.padded, self.WeightsBias.W,
                             self.WeightsBias.B, self.output_height,
                             self.output_width, self.stride)
        self.a = self.activator.forward(self.z)
        return self.a

    def forward_fast(self, x):
        FN, C, FH, FW = self.WeightsBias.W.shape
        N, C, H, W = x.shape
        out_h = 1 + int((H + 2 * self.padding - FH) / self.stride)
        out_w = 1 + int((W + 2 * self.padding - FW) / self.stride)
        col_x = im2col(x, FH, FW, self.stride, self.padding)
        col_W = self.WeightsBias.W.reshape(FN, -1).T
        out = np.dot(col_x, col_W) + self.WeightsBias.B.reshape(-1, FN)
        self.z = out.reshape(N, out_h, out_w, -1).transpose(0, 3, 1, 2)
        self.x = x
        self.col_x = col_x
        self.col_W = col_W
        self.a = self.activator.forward(self.z)
        return self.z, self.a

    # The activation function is treated as part of this layer: the error
    # passed in from the layer above first goes through the derivative of
    # the activation, yielding this layer's error with respect to the z values.
    def backward(self, delta_in, flag):
        assert (delta_in.ndim == 4)
        assert (delta_in.shape == self.a.shape)

        # derivative of the activation function
        dz, _ = self.activator.backward(self.z, self.a, delta_in)

        # resize the error map
        dz_stride_1 = expand_delta_map(dz, self.batch_size,
                                       self.num_output_channel,
                                       self.input_height, self.input_width,
                                       self.output_height, self.output_width,
                                       self.filter_height, self.filter_width,
                                       self.padding, self.stride)

        # To get this layer's outgoing error map, cross-correlate the incoming
        # error map with this layer's rotated convolution kernels.
        # Since the outgoing error map must match the size of this layer's
        # input, the incoming error map is first padded according to the kernel size.
        (pad_h,
         pad_w) = calculate_padding_size(dz_stride_1.shape[2],
                                         dz_stride_1.shape[3],
                                         self.filter_height, self.filter_width,
                                         self.input_height, self.input_width)
        dz_padded = np.pad(dz_stride_1,
                           ((0, 0), (0, 0), (pad_h, pad_h), (pad_w, pad_w)),
                           'constant')

        # gradient of this layer's weight matrices
        self._calculate_weightsbias_grad(dz_stride_1)

        # error map passed back to the previous layer
        delta_out = self._calculate_delta_out(dz_padded, flag)
        return delta_out

    # Convolve the input data with the back-propagated error map to obtain the kernel gradients.
    def _calculate_weightsbias_grad(self, dz):
        self.WeightsBias.ClearGrads()
        (pad_h, pad_w) = calculate_padding_size(self.input_height,
                                                self.input_width, dz.shape[2],
                                                dz.shape[3],
                                                self.filter_height,
                                                self.filter_width, 1)
        input_padded = np.pad(self.x,
                              ((0, 0), (0, 0), (pad_h, pad_h), (pad_w, pad_w)),
                              'constant')
        for bs in range(self.batch_size):
            for oc in range(self.num_output_channel):  # == kernal count
                for ic in range(self.num_input_channel):  # == filter count
                    w_grad = np.zeros((self.filter_height, self.filter_width))
                    conv2d(input_padded[bs, ic], dz[bs, oc], 0, w_grad)
                    self.WeightsBias.W_grad[oc, ic] += w_grad
                #end ic
                self.WeightsBias.B_grad[oc] += dz[bs, oc].sum()
            #end oc
        #end bs
        self.WeightsBias.MeanGrads(self.batch_size)

    # Convolve the incoming error map with the (180-degree-rotated) kernels.
    def _calculate_delta_out(self, dz, flag):
        delta_out = np.zeros(self.x.shape)
        if flag != LayerIndexFlags.FirstLayer:
            rot_weights = self.WeightsBias.Rotate180()
            for bs in range(self.batch_size):
                for oc in range(self.num_output_channel):  # == kernal count
                    delta_per_input = np.zeros(
                        (self.input_height, self.input_width))
                    for ic in range(self.num_input_channel):  # == filter count
                        conv2d(dz[bs, oc], rot_weights[oc, ic], 0,
                               delta_per_input)
                        delta_out[bs, ic] += delta_per_input
                    #end ic
                #end oc
            #end bs
        # end if
        return delta_out

    def pre_update(self):
        self.WeightsBias.pre_Update()

    def update(self):
        self.WeightsBias.Update()

    def save_parameters(self, name):
        self.WeightsBias.Save(name)

    def load_parameters(self, name):
        self.WeightsBias.Load(name)
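expand_delta_map, used in backward above, handles the stride > 1 case: the incoming error map is dilated with zeros so the rest of the backward pass can proceed as if the forward stride had been 1. The real helper takes many more shape arguments; this sketch (name mine) shows only the core zero-insertion idea.

def expand_delta_map_sketch(dz, stride):
    # (N, C, H, W) -> (N, C, (H-1)*stride+1, (W-1)*stride+1), with the
    # original values placed stride apart and zeros in between
    if stride == 1:
        return dz
    N, C, H, W = dz.shape
    expanded = np.zeros((N, C, (H - 1) * stride + 1, (W - 1) * stride + 1))
    expanded[:, :, ::stride, ::stride] = dz
    return expanded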
Code example #9
class ConvLayer(CLayer):
    # define the number of input and output channels, plus the filter size
    def __init__(self, 
                 input_shape,       # (InputChannelCount, H, W)
                 kernal_shape,      # (OutputChannelCount, FH, FW)
                 conv_param,        # (stride, padding)
                 hp):
        self.InC = input_shape[0]   # input channel count
        self.InH = input_shape[1]   # input image height
        self.InW = input_shape[2]   # input image width
        self.OutC = kernal_shape[0] # output channel count
        self.FH = kernal_shape[1]   # kernal/filter height
        self.FW = kernal_shape[2]   # kernal/filter width
        self.stride = conv_param[0]
        self.padding = conv_param[1]
        self.hp = hp

    def initialize(self, folder, name, create_new=False):
        self.WB = ConvWeightsBias(
            self.OutC, self.InC, self.FH, self.FW, 
            self.hp.init_method, self.hp.optimizer_name, self.hp.eta)
        self.WB.Initialize(folder, name, create_new)
        (self.OutH, self.OutW) = calculate_output_size(
            self.InH, self.InW, 
            self.FH, self.FW, 
            self.padding, self.stride)
        self.output_shape = (self.OutC, self.OutH, self.OutW)

    def set_filter(self, w, b):
        if w is not None:
            self.WB.W = w
        if b is not None:
            self.WB.B = b

    def forward(self, x, train=True):
        return self.forward_img2col(x, train)

    def backward(self, delta_in, layer_idx):
        delta_out, dw, db = self.backward_col2img(delta_in, layer_idx)
        return delta_out

    def forward_img2col(self, x, train=True):
        self.x = x
        self.batch_size = self.x.shape[0]
        assert(self.x.shape == (self.batch_size, self.InC, self.InH, self.InW))
        self.col_x = img2col(x, self.FH, self.FW, self.stride, self.padding)
        self.col_w = self.WB.W.reshape(self.OutC, -1).T
        self.col_b = self.WB.B.reshape(-1, self.OutC)
        out1 = np.dot(self.col_x, self.col_w) + self.col_b
        out2 = out1.reshape(self.batch_size, self.OutH, self.OutW, -1)
        self.z = np.transpose(out2, axes=(0, 3, 1, 2))
        return self.z

    def backward_col2img(self, delta_in, layer_idx):
        col_delta_in = np.transpose(delta_in, axes=(0,2,3,1)).reshape(-1, self.OutC)
        self.WB.dB = np.sum(col_delta_in, axis=0, keepdims=True).T / self.batch_size
        col_dW = np.dot(self.col_x.T, col_delta_in) / self.batch_size
        self.WB.dW = np.transpose(col_dW, axes=(1, 0)).reshape(self.OutC, self.InC, self.FH, self.FW)
        col_delta_out = np.dot(col_delta_in, self.col_w.T)
        delta_out = col2img(col_delta_out, self.x.shape, self.FH, self.FW, self.stride, self.padding, self.OutH, self.OutW)
        return delta_out, self.WB.dW, self.WB.dB
   
    def forward_numba(self, x, train=True):
        assert(x.ndim == 4)
        self.x = x
        assert(self.x.shape[1] == self.InC)
        assert(self.x.shape[2] == self.InH)
        assert(self.x.shape[3] == self.InW)
        self.batch_size = self.x.shape[0]

        if self.padding > 0:
            self.padded = np.pad(self.x, ((0,0), (0,0), (self.padding,self.padding), (self.padding,self.padding)), 'constant')
        else:
            self.padded = self.x
        #end if

        self.z = jit_conv_4d(self.padded, self.WB.W, self.WB.B, self.OutH, self.OutW, self.stride)
        return self.z

    def backward_numba(self, delta_in, flag):
        assert(delta_in.ndim == 4)
        assert(delta_in.shape == self.z.shape)
        
        # if the forward stride was not 1, expand into the equivalent stride-1 error map
        dz_stride_1 = expand_delta_map(delta_in, self.batch_size, self.OutC, self.InH, self.InW, self.OutH, self.OutW, self.FH, self.FW, self.padding, self.stride)

        # gradient of this layer's weight matrices
        self._calculate_weightsbias_grad(dz_stride_1)

        # To get this layer's outgoing error map, cross-correlate the incoming
        # error map with this layer's rotated convolution kernels.
        # Since the outgoing error map must match the size of this layer's
        # input, the incoming error map is first padded according to the kernel size.
        (pad_h, pad_w) = calculate_padding_size(
            dz_stride_1.shape[2], dz_stride_1.shape[3], 
            self.FH, self.FW, 
            self.InH, self.InW)
        dz_padded = np.pad(dz_stride_1, ((0,0),(0,0),(pad_h, pad_h),(pad_w, pad_w)), 'constant')
        # error map passed back to the previous layer
        delta_out = self._calculate_delta_out(dz_padded, flag)
        return delta_out, self.WB.dW, self.WB.dB

    # Convolve the input data with the back-propagated error map to obtain the kernel gradients.
    def _calculate_weightsbias_grad(self, dz):
        self.WB.ClearGrads()
        # first enlarge the input by zero-padding its borders
        (pad_h, pad_w) = calculate_padding_size(
            self.InH, self.InW, 
            dz.shape[2], dz.shape[3], 
            self.FH, self.FW, 1)
        input_padded = np.pad(self.x, ((0,0),(0,0),(pad_h, pad_h),(pad_w,pad_w)), 'constant')
        # convolving the input with the error map yields the weight gradients
        (self.WB.dW, self.WB.dB) = calcalate_weights_grad(
                                input_padded, dz, self.batch_size, 
                                self.OutC, self.InC, 
                                self.FH, self.FW, 
                                self.WB.dW, self.WB.dB)

        self.WB.MeanGrads(self.batch_size)
        
    # Convolve the incoming error map with the (180-degree-rotated) kernels.
    def _calculate_delta_out(self, dz, layer_idx):
        if layer_idx == 0:
            return None
        # rotate the kernels by 180 degrees
        rot_weights = self.WB.Rotate180()
        # allocate the output map
        delta_out = np.zeros(self.x.shape).astype(np.float32)
        # convolve the incoming gradient with the rotated kernels to get the outgoing gradient
        delta_out = calculate_delta_out(dz, rot_weights, self.batch_size, 
                            self.InC, self.OutC, 
                            self.InH, self.InW, delta_out)

        return delta_out

    def pre_update(self):
        pass

    def update(self):
        self.WB.Update()
        
    def save_parameters(self):
        self.WB.SaveResultValue()

    def load_parameters(self):
        self.WB.LoadResultValue()
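A finite-difference check is a quick way to confirm that backward_col2img is consistent with forward_img2col. The sketch below is my addition and assumes a constructed ConvLayer instance layer plus inputs x and delta_in shaped as in the methods above; with the linear "loss" L = sum(z * delta_in), the analytic gradient is layer.WB.dW (note it already divides by batch_size).

z = layer.forward_img2col(x)
layer.backward_col2img(delta_in, 1)      # fills layer.WB.dW
eps, idx = 1e-5, (0, 0, 0, 0)            # perturb a single weight
layer.WB.W[idx] += eps
loss_plus = np.sum(layer.forward_img2col(x) * delta_in)
layer.WB.W[idx] -= 2 * eps
loss_minus = np.sum(layer.forward_img2col(x) * delta_in)
layer.WB.W[idx] += eps                   # restore
numeric = (loss_plus - loss_minus) / (2 * eps) / layer.batch_size
assert np.isclose(numeric, layer.WB.dW[idx], rtol=1e-4)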
Code example #10
    return rs


if __name__ == '__main__':
    stride = 1
    padding = 0
    fh = 3
    fw = 3
    input_channel = 3
    output_channel = 4
    iw = 28
    ih = 28
    (output_height,
     output_width) = ConvLayer.calculate_output_size(ih, iw, fh, fw, padding,
                                                     stride)
    wb = ConvWeightsBias(output_channel, input_channel, fh, fw,
                         InitialMethod.MSRA, OptimizerName.SGD, 0.1)
    wb.Initialize("test", "test", True)
    batch_size = 64
    x = np.random.randn(batch_size, input_channel, ih, iw)
    # dry run
    output1 = conv_4d(x, wb.W, wb.B, output_height, output_width, stride)
    s1 = time.time()
    for i in range(10):
        output1 = conv_4d(x, wb.W, wb.B, output_height, output_width, stride)
    e1 = time.time()
    print("Time used for Python:", e1 - s1)

    # dry run
    output2 = jit_conv_4d(x, wb.W, wb.B, output_height, output_width, stride)
    s2 = time.time()
    for i in range(10):
Code example #11
File: ConvLayer.py  Project: yingleizhong/ai-edu
class ConvLayer(CLayer):
    # define the number of input and output channels, plus the filter size
    def __init__(self, 
                 input_shape,       # (InputChannelCount, H, W)
                 kernal_shape,      # (OutputChannelCount, FH, FW)
                 conv_param,        # (stride, padding)
                 hp):
        self.num_input_channel = input_shape[0]
        self.input_height = input_shape[1]
        self.input_width = input_shape[2]
        self.num_output_channel = kernal_shape[0]
        self.filter_height = kernal_shape[1]
        self.filter_width = kernal_shape[2]
        self.stride = conv_param[0]
        self.padding = conv_param[1]
        self.hp = hp

    def initialize(self, folder, name, create_new=False):
        self.WB = ConvWeightsBias(
            self.num_output_channel, self.num_input_channel, self.filter_height, self.filter_width, 
            self.hp.init_method, self.hp.optimizer_name, self.hp.eta)
        self.WB.Initialize(folder, name, create_new)
        (self.output_height, self.output_width) = ConvLayer.calculate_output_size(
            self.input_height, self.input_width, 
            self.filter_height, self.filter_width, 
            self.padding, self.stride)
        self.output_shape = (self.num_output_channel, self.output_height, self.output_width)

    def forward(self, x, train=True):
        return self.forward_img2col(x, train)

    def backward(self, delta_in, layer_idx):
        delta_out, dw, db = self.backward_col2img(delta_in, layer_idx)
        return delta_out

    def forward_img2col(self, x, train=True):
        self.x = x
        assert(self.x.shape[1] == self.num_input_channel)
        assert(self.x.shape[2] == self.input_height)
        assert(self.x.shape[3] == self.input_width)
        self.batch_size = self.x.shape[0]
        FN, C, FH, FW = self.WB.W.shape
        N, C, H, W = x.shape
        out_h = 1 + int((H + 2 * self.padding - FH) / self.stride)
        out_w = 1 + int((W + 2 * self.padding - FW) / self.stride)
        self.col_x = img2col(x, FH, FW, self.stride, self.padding)
        self.col_w = self.WB.W.reshape(FN, -1).T
        out1 = np.dot(self.col_x, self.col_w) + self.WB.B.reshape(-1,FN)
        out2 = out1.reshape(N, out_h, out_w, -1)
        self.z = np.transpose(out2, axes=(0, 3, 1, 2))
        return self.z

    def backward_col2img(self, delta_in, layer_idx):
        FN, C, FH, FW = self.WB.W.shape
        dout = np.transpose(delta_in, axes=(0,2,3,1)).reshape(-1, FN)
        self.WB.dB = np.sum(dout, axis=0, keepdims=True).T / self.batch_size
        dW = np.dot(self.col_x.T, dout)
        self.WB.dW = np.transpose(dW, axes=(1, 0)).reshape(FN, C, FH, FW) / self.batch_size
        dcol = np.dot(dout, self.col_w.T)
        delta_out = col2img(dcol, self.x.shape, FH, FW, self.stride, self.padding)
        return delta_out, self.WB.dW, self.WB.dB
   
    
    def forward_numba(self, x, train=True):
        assert(x.ndim == 4)
        self.x = x
        assert(self.x.shape[1] == self.num_input_channel)
        assert(self.x.shape[2] == self.input_height)
        assert(self.x.shape[3] == self.input_width)
        self.batch_size = self.x.shape[0]

        if self.padding > 0:
            self.padded = np.pad(self.x, ((0,0), (0,0), (self.padding,self.padding), (self.padding,self.padding)), 'constant')
        else:
            self.padded = self.x
        #end if

        self.z = jit_conv_4d(self.padded, self.WB.W, self.WB.B, self.output_height, self.output_width, self.stride)
        return self.z

    def backward_numba(self, delta_in, flag):
        assert(delta_in.ndim == 4)
        assert(delta_in.shape == self.z.shape)
        
        # resize the error map
        dz_stride_1 = expand_delta_map(delta_in, self.batch_size, self.num_output_channel, self.input_height, self.input_width, self.output_height, self.output_width, self.filter_height, self.filter_width, self.padding, self.stride)

        # To get this layer's outgoing error map, cross-correlate the incoming
        # error map with this layer's rotated convolution kernels.
        # Since the outgoing error map must match the size of this layer's
        # input, the incoming error map is first padded according to the kernel size.
        (pad_h, pad_w) = calculate_padding_size(
            dz_stride_1.shape[2], dz_stride_1.shape[3], 
            self.filter_height, self.filter_width, 
            self.input_height, self.input_width)
        
        dz_padded = np.pad(dz_stride_1, ((0,0),(0,0),(pad_h, pad_h),(pad_w, pad_w)), 'constant')

        # gradient of this layer's weight matrices
        self._calculate_weightsbias_grad(dz_stride_1)

        # error map passed back to the previous layer
        delta_out = self._calculate_delta_out(dz_padded, flag)
        return delta_out, self.WB.dW, self.WB.dB

    # Convolve the input data with the back-propagated error map to obtain the kernel gradients.
    def _calculate_weightsbias_grad(self, dz):
        self.WB.ClearGrads()
        # first enlarge the input by zero-padding its borders
        (pad_h, pad_w) = calculate_padding_size(
            self.input_height, self.input_width, 
            dz.shape[2], dz.shape[3], 
            self.filter_height, self.filter_width, 1)
        input_padded = np.pad(self.x, ((0,0),(0,0),(pad_h, pad_h),(pad_w,pad_w)), 'constant')
        # convolving the input with the error map yields the weight gradients
        (self.WB.dW, self.WB.dB) = calcalate_weights_grad(
                                input_padded, dz, self.batch_size, 
                                self.num_output_channel, self.num_input_channel, 
                                self.filter_height, self.filter_width, 
                                self.WB.dW, self.WB.dB)

        self.WB.MeanGrads(self.batch_size)

        
    # Convolve the incoming error map with the (180-degree-rotated) kernels.
    def _calculate_delta_out(self, dz, layer_idx):
        if layer_idx == 0:
            return None
        # rotate the kernels by 180 degrees
        rot_weights = self.WB.Rotate180()
        delta_out = np.zeros(self.x.shape).astype(np.float32)
        # convolve the incoming gradient with the rotated kernels to get the outgoing gradient
        delta_out = calculate_delta_out(dz, rot_weights, self.batch_size, 
                            self.num_input_channel, self.num_output_channel, 
                            self.input_height, self.input_width, delta_out)

        return delta_out

    def pre_update(self):
        self.WB.pre_Update()

    def update(self):
        self.WB.Update()
        
    def save_parameters(self):
        self.WB.SaveResultValue()

    def load_parameters(self):
        self.WB.LoadResultValue()

    @staticmethod
    def calculate_output_size(input_h, input_w, filter_h, filter_w, padding, stride=1):
        output_h = (input_h - filter_h + 2 * padding) // stride + 1    
        output_w = (input_w - filter_w + 2 * padding) // stride + 1
        return (output_h, output_w)
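Applied to the settings of code example #10 (28x28 input, 3x3 filter, padding 0, stride 1) this gives (28 - 3 + 0) // 1 + 1 = 26, i.e. a 26x26 output map; with padding=1 the output would stay at 28x28.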
Code example #12
File: ConvLayer.py  Project: czr22/ai-edu
    def Initialize(self):
        self.weights = ConvWeightsBias(self.num_output_channel, self.num_input_channel, self.num_filter_size, self.num_filter_size)
        self.weights.Initialize()