Example #1
    def forward(self):

        in_features = self.in_features
        kernels = self.kernels
        bias = self.bias

        kernel_size = self.kernel_size
        stride_hw = self.stride_hw

        # Unfold the kernels into column vectors
        kernels_col = kel_to_col(kernels)

        # Zero-pad the input if padding is enabled
        if self.is_padding:
            pad_in_features, pad_h, pad_w = padding2d(in_features, kernel_size,
                                                      stride_hw)
            self.pad = (pad_h, pad_w)
        else:
            pad_in_features = in_features
            self.pad = ((0, 0), (0, 0))

        self.pad_in_features = pad_in_features
        self.pad_in_hw = (pad_in_features.shape[2], pad_in_features.shape[3])
        # im2col: unfold every receptive field of the padded input into one row
        pad_in_features_col, pad_out_hw = img_to_col(pad_in_features,
                                                     kernel_size, stride_hw)
        self.pad_out_hw = pad_out_hw
        out_features = matmul_forward(pad_in_features_col, kernels_col, bias,
                                      self.pad_out_hw)

        z = Tensor(out_features, self,
                   (self.ts_in_features, self.ts_kernels, self.ts_bias))

        return z
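For reference, a minimal NumPy-only sketch of the same im2col idea. The helper name, the (N, C, H, W) shapes and the absence of padding and bias are assumptions made here for illustration; they are independent of the library's kel_to_col / img_to_col helpers:

import numpy as np

def conv2d_im2col(x, kernels, stride=1):
    # x: (N, C, H, W), kernels: (K, C, kh, kw); no padding, single integer stride
    N, C, H, W = x.shape
    K, _, kh, kw = kernels.shape
    out_h = (H - kh) // stride + 1
    out_w = (W - kw) // stride + 1
    # unfold: every receptive field becomes one row of `cols`
    cols = np.empty((N, out_h * out_w, C * kh * kw))
    for i in range(out_h):
        for j in range(out_w):
            patch = x[:, :, i * stride:i * stride + kh, j * stride:j * stride + kw]
            cols[:, i * out_w + j, :] = patch.reshape(N, -1)
    w_col = kernels.reshape(K, -1).T        # (C*kh*kw, K)
    out = cols @ w_col                      # (N, out_h*out_w, K)
    return out.transpose(0, 2, 1).reshape(N, K, out_h, out_w)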
Example #2
    def forward(self):
        x = self.x_copy
        hc = self.hc_copy
        wf = self.wf_copy
        bf = self.bf_copy
        wi = self.wi_copy
        bi = self.bi_copy
        wc = self.wc_copy
        bc = self.bc_copy
        wo = self.wo_copy
        bo = self.bo_copy

        # split the packed state hc back into hidden state h and cell state c
        half_size = int(hc.shape[2] / 2)
        h, c = hc[:, :, :half_size], hc[:, :, half_size:]
        hx = Utils.concat([h, x], 2)
        # Forget gate
        ft = Activate.sigmoid(hx @ wf + bf)    # forget ratio
        ct = c * ft
        # Input gate
        it = Activate.sigmoid(hx @ wi + bi)    # memory ratio
        ct_ = Activate.tanh(hx @ wc + bc)      # candidate information
        ct = ct + it * ct_
        # Output gate
        ot = Activate.sigmoid(hx @ wo + bo)
        ht = ot * Activate.tanh(ct)
        self.ht_ct = Utils.concat([ht, ct], 2)
        z = Tensor(self.ht_ct.arr, self, (self.x,  self.hc,     # hc (h and c merged) is the key to O(n) complexity
                                          self.wf, self.bf,
                                          self.wi, self.bi,
                                          self.wc, self.bc,
                                          self.wo, self.bo))
        return z
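As a cross-check, the same LSTM cell can be written in plain NumPy. The 2-D shapes, the parameter layout and the absence of the autograd Tensor wrapper are assumptions for this sketch only:

import numpy as np

def sigmoid(v):
    return 1.0 / (1.0 + np.exp(-v))

def lstm_cell(x, h, c, wf, bf, wi, bi, wc, bc, wo, bo):
    # x: (N, n_in), h and c: (N, n_hidden); each w*: (n_hidden + n_in, n_hidden)
    hx = np.concatenate([h, x], axis=1)
    ft = sigmoid(hx @ wf + bf)        # forget gate
    it = sigmoid(hx @ wi + bi)        # input gate
    ct_ = np.tanh(hx @ wc + bc)       # candidate cell state
    c_new = c * ft + it * ct_         # updated cell state
    ot = sigmoid(hx @ wo + bo)        # output gate
    h_new = ot * np.tanh(c_new)       # new hidden state
    return h_new, c_new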
Example #3
 def forward(self):
     prds = self.prds
     labs = self.labs
     axis = self.axis
     batch = prds.shape[0]
     # subtract the per-row max so the later exp() stays numerically stable
     prds_max = n.max(prds.arr, axis=axis, keepdims=True)
     prds_max = n.repeat(prds_max, prds.shape[axis], axis=axis)
     prds_max = Tensor(prds_max)
     prds = (prds - prds_max)
     eps = Utils.exp(prds)
     sum_p = Utils.sum(eps, axis=1)
     sum_p = Utils.repeat(sum_p, prds.shape[axis], axis=1)
     log_softmax = prds - Utils.log(sum_p)
     nll = Tensor(n.array([0.0])) - labs * log_softmax
     c = Tensor(n.array([1 / batch]))
     ret = c * nll
     return ret
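The same numerically stable log-softmax cross-entropy written directly in NumPy; 2-D logits and one-hot labels are assumed here, and the reduction to a scalar mean is an illustration choice, not part of the class above:

import numpy as np

def cross_entropy(logits, one_hot_labels):
    # subtract the per-row max so exp() cannot overflow
    shifted = logits - logits.max(axis=1, keepdims=True)
    log_softmax = shifted - np.log(np.exp(shifted).sum(axis=1, keepdims=True))
    # mean negative log-likelihood over the batch
    return -(one_hot_labels * log_softmax).sum(axis=1).mean()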
Example #4
    def forward(self):
        x = self.x_copy
        h = self.h_copy
        w = self.w_copy
        wz = self.wz_copy
        wr = self.wr_copy

        one = Tensor(n.array([1.]))

        hx = Utils.concat([h, x], 2)
        zt = Activate.sigmoid(hx @ wz)
        rt = Activate.sigmoid(hx @ wr)
        rth = Utils.concat([rt * h, x], 2)
        h_ = Activate.tanh(rth @ w)
        ht = (one - zt) * h + zt * h_
        self.ht = ht
        z = Tensor(ht.arr, self, (self.x, self.h, self.w, self.wz, self.wr))
        return z
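A plain-NumPy sketch of the same GRU cell, under the same assumptions as the LSTM sketch above (2-D shapes, no biases, no autograd wrapper):

import numpy as np

def sigmoid(v):
    return 1.0 / (1.0 + np.exp(-v))

def gru_cell(x, h, w, wz, wr):
    # x: (N, n_in), h: (N, n_hidden); wz, wr, w: (n_hidden + n_in, n_hidden)
    hx = np.concatenate([h, x], axis=1)
    zt = sigmoid(hx @ wz)                       # update gate
    rt = sigmoid(hx @ wr)                       # reset gate
    rth = np.concatenate([rt * h, x], axis=1)   # reset applied to the old state
    h_candidate = np.tanh(rth @ w)              # candidate hidden state
    return (1.0 - zt) * h + zt * h_candidate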
Example #5
 def __call__(self):
     prds = self.prds
     labs = self.labs
     axis = self.axis
     batch = prds.shape[0]
     eps = n.exp(prds.arr - n.max(prds.arr, axis=axis, keepdims=True))
     p = eps / n.sum(eps, axis=axis, keepdims=True)
     nll = - n.log(n.sum(p * labs.arr, axis=axis))
     nll = nll / batch
     z = Tensor(nll, self, (prds,))
     return z
Example #6
def gaussian(shape, mean, std, is_grad=False):
    ret = Tensor(n.random.normal(mean, std, shape), is_grad=is_grad)
    return ret
Example #7
 def forward(self):
     z = Tensor(self.tanh(self.x.arr), self, (self.x, ))
     return z
Example #8
 def forward(self):
     z = Tensor(1 / (1 + n.exp(-self.x.arr)), self, (self.x, ))
     return z
Example #9
 def forward(self):
     z = Tensor(n.where(self.x.arr > 0, self.x.arr, 0.), self, (self.x, ))
     return z
Example #10
 def forward(self):
     z = Tensor(n.concatenate([i.arr for i in self.xls], self.axis), self,
                self.xls)
     return z
Example #11
 def forward(self):
     z = Tensor(n.repeat(self.x.arr, self.reps, self.axis), self,
                (self.x, ))
     return z
Example #12
 def forward(self):
     z = Tensor(n.log(self.x.arr), self, (self.x, ))
     return z
Example #13
 def forward(self):
     z = Tensor(n.tile(self.x.arr, self.reps), self, (self.x, ))
     return z
Example #14
 def forward(self):
     z = Tensor(n.sum(self.x.arr, self.axis, keepdims=True), self,
                (self.x, ))
     return z
Example #15
def zeros(shape, is_grad=False):
    ret = Tensor(n.zeros(shape), is_grad=is_grad)
    return ret
Example #16
def ones(shape, is_grad=False):
    ret = Tensor(n.ones(shape), is_grad=is_grad)
    return ret
Example #17
 def forward(self):
     prds = self.prds
     labs = self.labs
     c = Tensor(n.array([1 / prds.shape[0], ]))
     loss = c * (labs - prds) * (labs - prds)
     return loss
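Assuming the elementwise loss tensor returned above is summed afterwards, it reduces to this NumPy one-liner (the sum-then-divide interpretation is my assumption):

import numpy as np

def mse_loss(prds, labs):
    # summed squared error divided by the batch size,
    # i.e. the sum of (1/batch) * (labs - prds)**2 over all elements
    return np.sum((labs - prds) ** 2) / prds.shape[0]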