def _forward(self, mean, cov):
    mean, cov = self._check_input(mean, cov)
    self.mean = mean
    self.cov = cov
    self.eps = np.random.normal(size=mean.shape)
    output = mean.value + np.linalg.cholesky(cov.value) @ self.eps
    return Tensor(output, function=self)

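# Standalone sketch (illustration only, not part of the library): the method above
# implements the reparameterisation trick for a multivariate Gaussian, i.e.
# z = mean + cholesky(cov) @ eps with eps ~ N(0, I) gives z ~ N(mean, cov),
# so the sample stays differentiable with respect to mean and cov.
import numpy as np

rng = np.random.default_rng(0)
mean = np.array([1.0, -2.0])
cov = np.array([[2.0, 0.3], [0.3, 0.5]])
L = np.linalg.cholesky(cov)
samples = mean + (L @ rng.standard_normal((2, 100000))).T
print(np.allclose(samples.mean(axis=0), mean, atol=0.05))  # empirical mean ~ mean
print(np.allclose(np.cov(samples.T), cov, atol=0.05))      # empirical cov ~ cov
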
def _forward(self, mean, std):
    mean, std = self._check_input(mean, std)
    self.mean = mean
    self.std = std
    self.eps = np.random.normal(size=mean.shape)
    output = mean.value + std.value * self.eps
    return Tensor(output, function=self)

def _forward(self, x, t):
    x, t = self._check_input(x, t)
    self.x = x
    self.t = t
    self.y = self._softmax(x.value)
    np.clip(self.y, 1e-10, 1, out=self.y)
    loss = np.sum(-t.value * np.log(self.y))
    return Tensor(loss, function=self)

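# Standalone sketch (illustration only; assumes self._softmax above is the usual
# max-shifted softmax): computing the cross-entropy through a log-softmax gives
# the same value as the clip-and-log route used in the method above.
import numpy as np

logits = np.array([[2.0, 1.0, 0.1]])
targets = np.array([[1.0, 0.0, 0.0]])  # one-hot labels
shifted = logits - logits.max(axis=-1, keepdims=True)
log_softmax = shifted - np.log(np.exp(shifted).sum(axis=-1, keepdims=True))
print(np.sum(-targets * log_softmax))  # cross-entropy, ~0.417 here
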
def _forward(self, x, istraining=False):
    x = self._convert2tensor(x)
    if istraining:
        self.x = x
        self.mask = (np.random.rand(*x.shape) > self.prob) * self.coef
        return Tensor(x.value * self.mask, function=self)
    else:
        return x

def _forward(self, x):
    x = self._convert2tensor(x)
    self._atleast_ndim(x, 1)
    self.x = x
    output = np.split(x.value, self.indices_or_sections, self.axis)
    self.n_output = len(output)
    self.delta = [None for _ in output]
    return tuple(Tensor(out, function=self) for out in output)

def _forward(self, *args, **kwargs):
    self.args = args
    self.kwargs = kwargs
    loss = 0
    for arg in args:
        loss += np.square(arg.value).sum()
    for arg in kwargs.values():
        loss += np.square(arg.value).sum()
    return Tensor(0.5 * loss, function=self)

def _forward(self, x):
    x = self._convert2tensor(x)
    self._equal_ndim(x, 4)
    self.x = x
    img = np.pad(x.value, [(p,) for p in self.pad], "constant")
    patch = img2patch(img, self.pool_size, self.stride)
    n_batch, xlen_out, ylen_out, _, _, in_channels = patch.shape
    patch = patch.reshape(n_batch, xlen_out, ylen_out, -1, in_channels)
    self.shape = img.shape
    self.index = patch.argmax(axis=3)
    return Tensor(patch.max(axis=3), function=self)

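# Standalone sketch (illustration only; img2patch is assumed to extract sliding
# windows, much like numpy's sliding_window_view): 2x2 max pooling with stride 2
# on an NHWC array, mirroring the patch-reshape-max steps above.
import numpy as np
from numpy.lib.stride_tricks import sliding_window_view

x = np.arange(2 * 4 * 4 * 3, dtype=float).reshape(2, 4, 4, 3)       # NHWC input
windows = sliding_window_view(x, (2, 2), axis=(1, 2))[:, ::2, ::2]  # 2x2 windows, stride 2
print(windows.max(axis=(-2, -1)).shape)                             # (2, 2, 2, 3)
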
def _forward(self, x, t):
    x, t = self._check_input(x, t)
    self.x = x
    self.t = t
    # y = self.forward(x)
    # np.clip(y, 1e-10, 1 - 1e-10, out=y)
    # return np.sum(-t * np.log(y) - (1 - t) * np.log(1 - y))
    loss = np.sum(
        np.maximum(x.value, 0) - t.value * x.value
        + np.log1p(np.exp(-np.abs(x.value))))
    return Tensor(loss, function=self)

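# Standalone check (illustration only, not part of the library): the softplus-style
# expression above, max(x, 0) - t*x + log1p(exp(-|x|)), equals the naive
# -t*log(y) - (1-t)*log(1-y) with y = sigmoid(x) (the commented-out version),
# but cannot overflow for large |x|.
import numpy as np

x = np.array([-30.0, -1.0, 0.0, 2.5, 40.0])
t = np.array([0.0, 1.0, 1.0, 0.0, 1.0])
stable = np.maximum(x, 0) - t * x + np.log1p(np.exp(-np.abs(x)))
y = np.tanh(x * 0.5) * 0.5 + 0.5  # sigmoid
naive = (-t * np.log(np.clip(y, 1e-10, 1 - 1e-10))
         - (1 - t) * np.log(np.clip(1 - y, 1e-10, 1 - 1e-10)))
print(np.allclose(stable, naive, atol=1e-6))  # True
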
def _forward(self, x):
    x = self._convert2tensor(x)
    self.x = x
    self.output = np.tanh(x.value * 0.5) * 0.5 + 0.5
    return Tensor(self.output, function=self)

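# Standalone check (illustration only, not part of the library): the identity
# 0.5 * tanh(0.5 * x) + 0.5 == 1 / (1 + exp(-x)) lets the sigmoid above be
# evaluated without computing a potentially huge exp(-x).
import numpy as np

x = np.linspace(-20.0, 20.0, 9)
print(np.allclose(np.tanh(x * 0.5) * 0.5 + 0.5, 1 / (1 + np.exp(-x))))  # True
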
def _forward(self, x):
    x = self._convert2tensor(x)
    self.x = x
    return Tensor(np.square(x.value), function=self)

def _forward(self, x, shape):
    x = self._convert2tensor(x)
    self._atleast_ndim(x, 1)
    self.x = x
    return Tensor(x.value.reshape(*shape), function=self)

def _forward(self, x):
    x = self._convert2tensor(x)
    self.x = x
    return Tensor(-x.value, function=self)

def _forward(self, x):
    x = self._convert2tensor(x)
    self.x = x
    self.eps = np.random.exponential(1 / x.value, size=x.shape)
    output = x.value * self.eps
    return Tensor(output, function=self)

def _convert2tensor(self, x):
    if isinstance(x, (int, float, np.number, np.ndarray)):
        x = Tensor(x)
    elif not isinstance(x, Tensor):
        raise TypeError("Unsupported class for input: {}".format(type(x)))
    return x

def _forward(self, x, y):
    x, y = self._check_input(x, y)
    self.x = x
    self.y = y
    return Tensor(x.value / y.value, function=self)

def _forward(self, x, shape):
    x = self._convert2tensor(x)
    self.x = x
    output = np.broadcast_to(x.value, shape)
    return Tensor(output, function=self)

def _forward(self, x, y):
    x, y = self._check_input(x, y)
    self.x = x
    self.y = y
    return Tensor(0.5 * np.square(x.value - y.value).mean(), function=self)

def _forward(self, x):
    x = self._convert2tensor(x)
    self.x = x
    output = np.maximum(x.value, 0) + np.log1p(np.exp(-np.abs(x.value)))
    return Tensor(output, function=self)

def _forward(self, x):
    x = self._convert2tensor(x)
    self.x = x
    return Tensor(x.value.clip(min=0), function=self)

def _forward(self, x, y):
    x, y = self._check_input(x, y)
    self.x = x
    self.y = y
    self.output = np.power(x.value, y.value)
    return Tensor(self.output, function=self)

def _forward(self, x):
    self.x = x
    return Tensor(x.value, function=self)

def _forward(self, x):
    x = self._convert2tensor(x)
    self._atleast_ndim(x, 1)
    self.x = x
    output = x.value.sum(axis=self.axis, keepdims=self.keepdims)
    return Tensor(output, function=self)

def _forward(self, x):
    x = self._convert2tensor(x)
    self.x = x
    self.output = np.exp(x.value)
    return Tensor(self.output, function=self)