Example #1
    def forward_cpu(self, x: tensor) -> tensor:
        # Pool in 2D by inserting a singleton depth dim and reusing the 3D path.
        x.reshape([x.shape[0], x.shape[1], 1, x.shape[2], x.shape[3]])
        y = super(MaxPool2d, self).forward_cpu(x)
        x.reset_shape()
        # Drop the singleton depth dim: [N, C, 1, H', W'] -> [N, C, H', W'].
        y.reshape([x.shape[0], y.shape[1], y.shape[3], y.shape[4]])
        y.init_shape = y.shape
        return y
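The singleton-depth trick can be seen in isolation with plain NumPy, using a reshape-based 2x2/stride-2 max-pool as a stand-in for the parent class's 3D pooling (a minimal sketch; shapes and names are illustrative, not the library's API):

import numpy as np

x = np.random.randn(2, 3, 8, 8)                        # [N, C, H, W]
x5 = x.reshape(2, 3, 1, 8, 8)                          # insert a singleton depth dim
y5 = x5.reshape(2, 3, 1, 4, 2, 4, 2).max(axis=(4, 6))  # 2x2, stride-2 max-pool
y = y5.reshape(2, 3, 4, 4)                             # drop the singleton depth again

assert np.allclose(y, x.reshape(2, 3, 4, 2, 4, 2).max(axis=(3, 5)))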
Example #2
    def _backward_gpu(self, x: tensor, w: tensor, y: tensor) -> tensor:
        dx, dw, dy = x.gradient, w.gradient, y.gradient
        y.reset_shape()
        _dy = self.transpose_y.backward()
        # Flatten the output gradient to [out_channels, N * spatial] for the GEMMs.
        dy.reshape([self.out_channels, -1])
        dcol = self.col.gradient

        # Queue two GPU GEMMs matching the CPU path: dcol = W.T @ dY, dW = dY @ col.T.
        self.gemm_dx.forward(dcol.device_data, w.device_data, dy.device_data, self._empty_gpu_tensor_obj)
        self.gemm_dw.forward(dw.device_data, dy.device_data, self.col.device_data, self._empty_gpu_tensor_obj)

        self.gemm_dx.run()
        self.gemm_dw.run()

        # Fold the column gradient back into the input gradient (col2vol).
        _dx = self.vol_col.backward()
        return dx
Example #3
    def _backward_cpu(self, x: tensor, w: tensor, y: tensor) -> tensor:
        dx, dw, dy = x.gradient, w.gradient, y.gradient
        y.reset_shape()
        _dy = self.transpose_y.backward()
        # Flatten the output gradient to [out_channels, N * spatial].
        dy.reshape([self.out_channels, -1])
        dcol = self.col.gradient

        # Weight gradient: dW = dY @ col.T.
        dw.host_data = np.matmul(dy.host_data, self.col.host_data.T)
        w.reset_shape()

        # Column gradient: dcol = W.T @ dY; col2vol then folds it back into dx.
        w_reshaped = w.host_data.reshape([self.out_channels, -1])
        dcol.host_data = np.matmul(w_reshaped.T, dy.host_data)

        _dx = self.vol_col.backward()
        return dx
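The two matmuls are the standard im2col/vol2col backward. A minimal NumPy sketch of the same shape algebra (the sizes and the names W, col, dY are illustrative assumptions, not the library's API):

import numpy as np

out_channels, patch, positions = 8, 27, 100
W = np.random.randn(out_channels, patch)        # flattened conv weights
col = np.random.randn(patch, positions)         # vol2col matrix of the input
dY = np.random.randn(out_channels, positions)   # upstream gradient, flattened

dW = dY @ col.T     # weight gradient, shape [out_channels, patch]
dcol = W.T @ dY     # column gradient, folded back to dx by col2vol

assert dW.shape == W.shape and dcol.shape == col.shape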
Example #4
    def forward_cpu(self, x: tensor) -> tensor:
        data = np.tanh(x.host_data)
        if self.inplace:
            # Overwrite x; cache it twice so backward sees (input, output).
            self.cache = [x, x]
            x.host_data = data
            return x
        else:
            y = zeros_like(x)
            self.cache = [x, y]
            y.host_data = data
            return y
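The cached output is exactly what the backward pass needs, since tanh'(x) = 1 - tanh(x)^2. A hedged sketch of that step in plain NumPy (not the library's backward code):

import numpy as np

x = np.random.randn(4, 3)
y = np.tanh(x)                # what forward_cpu caches
dy = np.ones_like(y)          # upstream gradient
dx = dy * (1.0 - y ** 2)      # tanh'(x) written in terms of the cached output

# Spot-check one element against a central finite difference.
eps = 1e-6
num = (np.tanh(x[0, 0] + eps) - np.tanh(x[0, 0] - eps)) / (2 * eps)
assert abs(dx[0, 0] - num) < 1e-6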
Example #5
    def forward(self, logit: tensor, target: tensor) -> tensor:
        self.batchsize = logit.shape[0]
        # The loss reduces to a single scalar output.
        self.y = self.register_output_shape([1])
        self.register_forward_arg('logit', logit)
        if not self.with_logit:
            # Expand integer class labels to one-hot rows of width C.
            C = logit.shape[1]
            target = target.onehot(label_count=C)
        self.register_forward_arg('target', target)
        self.register_backward_arg('x', logit)
        self.register_backward_arg('t', target)
        super(crossentropyloss, self).forward(logit, target)
        return self.y
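A minimal NumPy illustration of the one-hot expansion the onehot call performs (a sketch under assumed semantics, not the library's implementation):

import numpy as np

labels = np.array([2, 0, 1])     # integer class ids, batch of 3
C = 3
onehot = np.eye(C)[labels]       # rows [0,0,1], [1,0,0], [0,1,0]

# Given softmax probabilities p of shape [3, C], the loss would then be
# -np.mean(np.sum(onehot * np.log(p), axis=1)); shown for orientation only.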
Example #6
    def forward_cpu(self, x: tensor) -> tensor:
        # Boolean mask of positive entries, cached for the backward pass.
        tmp = x.host_data > 0
        data = x.host_data * tmp
        if self.inplace:
            self.cache = [x, tmp, x]
            x.host_data = data
            return x
        else:
            y = zeros_like(x)
            self.cache = [x, tmp, y]
            y.host_data = data
            return y
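The cached boolean mask is all the backward pass needs: gradients flow only where the input was positive. A minimal NumPy sketch (not the library's backward code):

import numpy as np

x = np.array([[-1.5, 0.0, 2.0]])
mask = x > 0                      # the `tmp` cached above
y = x * mask                      # forward output: [[0., 0., 2.]]

dy = np.ones_like(y)
dx = dy * mask                    # ReLU backward: zero where x <= 0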
Example #7
    def forward_cpu(self, x: tensor) -> tensor:
        if self._col == [] or self._vol == []:
            self._col = [1 for _ in range(self.dims)]
            self._vol = [1 for _ in range(self.dims)]

            # Standard output-size formula per spatial dim. The loop stops
            # before index 0, which stays at 1: the MaxPool1d/2d wrappers
            # insert a singleton depth dim there (see their forward_cpu).
            for i in range(self.dims - 1, 0, -1):
                self._col[i] = int(
                    (x.shape[i + 2] + 2 * self.padding[i] - self.dilation[i] *
                     (self.kernel_size[i] - 1) - 1) // self.stride[i]) + 1
                self._vol[i] = x.shape[i + 2]
                self.channel_offset *= self.kernel_size[i]

            self.batch_size = x.shape[0]
            self.in_channels = x.shape[1]
            self.kernel = vol2col(self.batch_size, self.in_channels, self._vol,
                                  self._col, self.kernel_size, self.stride,
                                  self.padding, self.dilation)
        y = zeros([x.shape[0], x.shape[1], *self._col])
        y.reshape([self.in_channels * self.batch_size, -1])
        # vol2col lays every pooling window out as one column; the max over a
        # window is then an argmax down that column.
        self.col = self.kernel.forward_cpu(x)
        self.col.reshape(
            [self.in_channels * self.batch_size, self.channel_offset, -1])
        max_idx = []
        for i in range(self.in_channels * self.batch_size):
            tmp = self.col.host_data[i]
            m_idx = np.argmax(tmp, axis=0)
            max_idx.append(m_idx)
            y.host_data[i] = self.col.host_data[i][m_idx, range(m_idx.size)]

        y.reshape([
            self.batch_size, self.in_channels, self._col[0], self._col[1],
            self._col[2]
        ])
        x.reset_shape()

        # Cache the argmax indices so backward can route gradients to the maxima.
        self.cache = [x, y, max_idx]
        return y
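The per-window max via argmax over columns can be reproduced in isolation with plain NumPy (illustrative shapes: 4-element windows at 5 output positions):

import numpy as np

col = np.random.randn(4, 5)                 # one (batch*channel) slice of col
m_idx = np.argmax(col, axis=0)              # index of the max in each window
pooled = col[m_idx, range(m_idx.size)]      # one maximum per output position

assert np.allclose(pooled, col.max(axis=0))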
Example #8
    def _backward_gpu(self, dx: tensor) -> tensor:
        # Restore the shape the tensor had before the forward flatten.
        dx.reshape(self.old_shape)
        return dx
Example #9
    def _forward_gpu(self, x: tensor) -> tensor:
        # Flatten all dims after the batch dim: [N, ...] -> [N, -1].
        x.reshape([x.shape[0], -1])
        return x
Example #10
    def _backward_cpu(self, dx: tensor, dy: tensor) -> tensor:
        # Permute the upstream gradient back; this assumes self.axes holds the
        # permutation that undoes the forward transpose.
        dx.host_data = np.transpose(dy.host_data, self.axes)
        return dx
Example #11
def softmax_util_cpu(x: tensor, y: tensor) -> tensor:
    # Subtract each row's max before exponentiating so exp cannot overflow;
    # the transposes broadcast the per-row max and sum across the columns.
    eX = np.exp((x.host_data.T - np.max(x.host_data, axis=1)).T)
    y.host_data = (eX.T / eX.sum(axis=1)).T
    return y
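The same stable softmax written with keepdims broadcasting instead of the double transpose (an equivalent sketch, not the library's code):

import numpy as np

x = np.array([[1000.0, 1001.0], [0.0, 1.0]])    # naive exp(x) would overflow
eX = np.exp(x - x.max(axis=1, keepdims=True))
p = eX / eX.sum(axis=1, keepdims=True)

assert np.allclose(p.sum(axis=1), 1.0)          # each row is a distribution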
Example #12
    def _backward_cpu(self, dx: tensor, dy: tensor) -> tensor:
        # Inverted dropout: rescale by the keep probability, zero dropped units.
        dx.host_data = dy.host_data / (1 - self.prob)
        dx.host_data[self.mask.host_data] = 0
        return dx
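The 1/(1 - prob) scaling marks this as inverted dropout. A hedged sketch of the matching forward pass in plain NumPy (names are illustrative; prob is assumed to be the drop probability, as the scaling implies):

import numpy as np

prob = 0.5                                  # drop probability (assumed)
x = np.random.randn(2, 4)
mask = np.random.rand(*x.shape) < prob      # True where a unit is dropped
y = x / (1 - prob)                          # scale kept units at train time...
y[mask] = 0                                 # ...and zero the dropped ones

dy = np.ones_like(y)
dx = dy / (1 - prob)                        # backward mirrors the same mask
dx[mask] = 0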
Example #13
    def forward_cpu(self, x: tensor) -> tensor:
        # Pool in 1D by inserting two singleton spatial dims and reusing the 3D path.
        x.reshape([x.shape[0], x.shape[1], 1, 1, x.shape[2]])
        y = super(MaxPool1d, self).forward_cpu(x)
        x.reset_shape()
        # Drop the singletons: [N, C, 1, 1, L'] -> [N, C, L'].
        y.reshape([x.shape[0], y.shape[1], y.shape[-1]])
        return y