def forward(self, x, shape):
    """Reshape the input to ``shape``.

    Parameters: x — tensor-like input; shape — target shape (unpacked
    into ndarray.reshape). Returns Constant if the input carries no
    gradient, otherwise a Tensor linked to this function.
    """
    x = self._convert2tensor(x)
    self._atleast_ndim(x, 1)
    self.x = x
    # DRY fix: compute the reshaped value once instead of duplicating
    # the expression in both branches.
    output = x.value.reshape(*shape)
    if isinstance(self.x, Constant):
        return Constant(output)
    return Tensor(output, function=self)
def forward(self, x):
    """Sum elements over ``self.axis``, treating NaNs as zero."""
    tensor_in = self._convert2tensor(x)
    self.x = tensor_in
    total = np.nansum(tensor_in.value, axis=self.axis, keepdims=self.keepdims)
    if not isinstance(self.x, Constant):
        return Tensor(total, function=self)
    return Constant(total)
def forward(self, x):
    """Rectified linear unit: elementwise max(x, 0)."""
    tensor_in = self._convert2tensor(x)
    self.x = tensor_in
    rectified = tensor_in.value.clip(min=0)
    if not isinstance(tensor_in, Constant):
        return Tensor(rectified, function=self)
    return Constant(rectified)
def forward(self, x):
    """Logistic sigmoid via the tanh identity."""
    tensor_in = self._convert2tensor(x)
    self.x = tensor_in
    # sigmoid(v) == 0.5 * tanh(v / 2) + 0.5; result cached for backward
    self.output = np.tanh(tensor_in.value * 0.5) * 0.5 + 0.5
    if not isinstance(self.x, Constant):
        return Tensor(self.output, function=self)
    return Constant(self.output)
def forward(self):
    """Draw a Laplace(loc, scale) sample by inverse-transform sampling."""
    u = 0.5 - np.random.uniform(size=self.loc.shape)
    # standard Laplace noise, cached for backward
    self.eps = np.sign(u) * np.log(1 - 2 * np.abs(u))
    self.output = self.loc.value - self.scale.value * self.eps
    both_const = (isinstance(self.loc, Constant)
                  and isinstance(self.scale, Constant))
    if both_const:
        return Constant(self.output)
    return Tensor(self.output, function=self)
def forward(self, x):
    """Flatten the input (at least 2-D) to a 1-D array.

    Returns Constant for non-differentiable input, else a Tensor
    linked to this function.
    """
    x = self._convert2tensor(x)
    self._atleast_ndim(x, 2)
    self.x = x
    # DRY fix: flatten once instead of duplicating the call per branch.
    output = x.value.flatten()
    if isinstance(self.x, Constant):
        return Constant(output)
    return Tensor(output, function=self)
def forward(self, x):
    """Numerically stable softplus: log(1 + exp(x))."""
    tensor_in = self._convert2tensor(x)
    self.x = tensor_in
    v = tensor_in.value
    # max(v, 0) + log1p(exp(-|v|)) avoids overflow for large |v|
    result = np.maximum(v, 0) + np.log1p(np.exp(-np.abs(v)))
    if isinstance(tensor_in, Constant):
        return Constant(result)
    return Tensor(result, function=self)
def forward(self, x, shape):
    """Broadcast the input up to the requested shape."""
    tensor_in = self._convert2tensor(x)
    self.x = tensor_in
    expanded = np.broadcast_to(tensor_in.value, shape)
    if not isinstance(self.x, Constant):
        return Tensor(expanded, function=self)
    return Constant(expanded)
def forward(self, x):
    """Swap axes ``self.axis1`` and ``self.axis2`` of the input.

    Returns Constant for non-differentiable input, else a Tensor
    linked to this function.
    """
    x = self._convert2tensor(x)
    self.x = x
    # DRY fix: the original called np.swapaxes separately in each
    # branch; compute the result once.
    output = np.swapaxes(x.value, self.axis1, self.axis2)
    if isinstance(self.x, Constant):
        return Constant(output)
    return Tensor(output, function=self)
def forward(self, x):
    """Elementwise natural logarithm."""
    tensor_in = self._convert2tensor(x)
    self.x = tensor_in
    logged = np.log(self.x.value)
    if not isinstance(self.x, Constant):
        return Tensor(logged, function=self)
    return Constant(logged)
def forward(self, x, y):
    """Matrix product of the two inputs.

    Returns Constant when both operands are constants, else a Tensor
    linked to this function.
    """
    x, y = self._check_input(x, y)
    self.x = x
    self.y = y
    # DRY fix: evaluate the product once instead of once per branch.
    product = x.value @ y.value
    if isinstance(self.x, Constant) and isinstance(self.y, Constant):
        return Constant(product)
    return Tensor(product, function=self)
def forward(self, x):
    """Softmax of the input (delegates to self._softmax); result cached."""
    tensor_in = self._convert2tensor(x)
    self.x = tensor_in
    self.output = self._softmax(tensor_in.value)
    if not isinstance(tensor_in, Constant):
        return Tensor(self.output, function=self)
    return Constant(self.output)
def forward(self, a, b):
    """Solve the linear system a @ x = b; solution cached for backward."""
    a, b = self._check_input(a, b)
    self.a, self.b = a, b
    self.output = np.linalg.solve(a.value, b.value)
    all_const = isinstance(self.a, Constant) and isinstance(self.b, Constant)
    if all_const:
        return Constant(self.output)
    return Tensor(self.output, function=self)
def forward(self, x):
    """Cholesky factor of the input matrix; cached for backward."""
    tensor_in = self._convert2tensor(x)
    self.x = tensor_in
    self.output = np.linalg.cholesky(tensor_in.value)
    if not isinstance(self.x, Constant):
        return Tensor(self.output, function=self)
    return Constant(self.output)
def forward(self, x):
    """Permute the axes of the input (np.transpose semantics).

    When ``self.axes`` is given, the input's ndim must match its
    length. Returns Constant for non-differentiable input, else a
    Tensor linked to this function.
    """
    x = self._convert2tensor(x)
    if self.axes is not None:
        self._equal_ndim(x, len(self.axes))
    self.x = x
    # DRY fix: compute the transposed array once, not once per branch.
    output = np.transpose(x.value, self.axes)
    if isinstance(self.x, Constant):
        return Constant(output)
    return Tensor(output, function=self)
def _convert2tensor(self, x):
    """Wrap raw numerics/ndarrays in Constant; pass Tensor instances through.

    Raises TypeError for any other input type.
    """
    if isinstance(x, Tensor):
        return x
    if isinstance(x, (int, float, np.number, np.ndarray)):
        return Constant(x)
    raise TypeError(
        "Unsupported class for input: {}".format(type(x))
    )
def forward(self, x, y):
    """Elementwise power x ** y; result cached for backward."""
    base, exponent = self._check_input(x, y)
    self.x = base
    self.y = exponent
    self.output = np.power(base.value, exponent.value)
    if isinstance(self.x, Constant) and isinstance(self.y, Constant):
        return Constant(self.output)
    return Tensor(self.output, function=self)
def forward(self, x):
    """Elementwise absolute value; caches the sign for backward."""
    tensor_in = self._convert2tensor(x)
    self.x = tensor_in
    self.output = np.abs(tensor_in.value)
    if isinstance(tensor_in, Constant):
        return Constant(self.output)
    # sign is only needed when gradients will flow
    self.sign = np.sign(tensor_in.value)
    return Tensor(self.output, function=self)
def forward(self, x):
    """Inverse of a square (2-D) matrix; result cached for backward."""
    tensor_in = self._convert2tensor(x)
    self.x = tensor_in
    self._equal_ndim(tensor_in, 2)
    self.output = np.linalg.inv(tensor_in.value)
    if not isinstance(self.x, Constant):
        return Tensor(self.output, function=self)
    return Constant(self.output)
def forward(self, x):
    """Split the input along ``self.axis``; returns a tuple of outputs."""
    tensor_in = self._convert2tensor(x)
    self._atleast_ndim(tensor_in, 1)
    self.x = tensor_in
    pieces = np.split(tensor_in.value, self.indices_or_sections, self.axis)
    if isinstance(self.x, Constant):
        return tuple(Constant(piece) for piece in pieces)
    # bookkeeping for the multi-output backward pass
    self.n_output = len(pieces)
    self.delta = [None] * self.n_output
    return tuple(Tensor(piece, function=self) for piece in pieces)
def forward(self, x):
    """Log-determinant of a positive-definite 2-D matrix.

    Raises ValueError when the determinant's sign is not +1.
    """
    tensor_in = self._convert2tensor(x)
    self.x = tensor_in
    self._equal_ndim(tensor_in, 2)
    sign, self.output = np.linalg.slogdet(tensor_in.value)
    if sign != 1:
        raise ValueError("matrix has to be positive-definite")
    if not isinstance(self.x, Constant):
        return Tensor(self.output, function=self)
    return Constant(self.output)
def forward(self, a, b):
    """Solve a @ x = b where both operands are 2-D; cached for backward."""
    lhs = self._convert2tensor(a)
    rhs = self._convert2tensor(b)
    self._equal_ndim(lhs, 2)
    self._equal_ndim(rhs, 2)
    self.a = lhs
    self.b = rhs
    self.output = np.linalg.solve(lhs.value, rhs.value)
    if isinstance(self.a, Constant) and isinstance(self.b, Constant):
        return Constant(self.output)
    return Tensor(self.output, function=self)
def forward(self):
    """Draw samples from a Gaussian mixture parameterized by coef/mu/std."""
    if self.coef.ndim != 1:
        raise NotImplementedError
    # NOTE(review): the guard above requires coef.ndim == 1, yet the loop
    # below iterates coef.value and passes each element as a probability
    # vector (p=c) to np.random.choice — that only works if each c is
    # itself a 1-D vector. The guard and the loop look inconsistent;
    # confirm the intended shape of coef before relying on either path.
    indices = np.array(
        [np.random.choice(self.n_component, p=c) for c in self.coef.value])
    # pick component parameters per sampled index
    output = np.random.normal(loc=self.mu.value[indices],
                              scale=self.std.value[indices])
    if (isinstance(self.coef, Constant)
            and isinstance(self.mu, Constant)
            and isinstance(self.std, Constant)):
        return Constant(output)
    return Tensor(output, function=self)
def forward(self, x):
    """Product of tensor elements along ``self.axis``."""
    x = self._convert2tensor(x)
    self.x = x
    # keepdims=True is forced here so self.output retains the reduced
    # axes (presumably for the backward pass); the user-facing keepdims
    # flag is applied below on a separate local.
    self.output = np.prod(self.x.value, axis=self.axis, keepdims=True)
    if not self.keepdims:
        # NOTE(review): squeeze() with no axis argument drops ALL
        # singleton dimensions, not only the reduced one — inputs that
        # already had size-1 axes may be over-squeezed; confirm intended.
        output = np.squeeze(self.output)
        if output.size == 1:
            # collapse a 0-d/1-element result to a Python scalar
            output = output.item()
    else:
        output = self.output
    if isinstance(self.x, Constant):
        return Constant(output)
    return Tensor(output, function=self)
def forward(self, x):
    """Elementwise negation of the input."""
    tensor_in = self._convert2tensor(x)
    self.x = tensor_in
    negated = -tensor_in.value
    if not isinstance(self.x, Constant):
        return Tensor(negated, function=self)
    return Constant(negated)
def forward(self):
    """Draw a Cauchy(loc, scale) sample via standard Cauchy noise.

    Noise and output are cached for backward.
    """
    self.eps = np.random.standard_cauchy(size=self.loc.shape)
    self.output = self.scale.value * self.eps + self.loc.value
    # Bug fix: also require scale to be Constant before returning a
    # Constant — the sibling samplers (Laplace, Gamma) check every
    # parameter. Previously a differentiable scale was silently wrapped
    # in Constant, detaching it from the computation graph.
    if isinstance(self.loc, Constant) and isinstance(self.scale, Constant):
        return Constant(self.output)
    return Tensor(self.output, function=self)
def forward(self):
    """Draw a Gamma(shape, rate) sample (numpy's scale = 1 / rate)."""
    self.output = np.random.gamma(self.shape.value, 1 / self.rate.value)
    params_const = (isinstance(self.shape, Constant)
                    and isinstance(self.rate, Constant))
    if params_const:
        return Constant(self.output)
    return Tensor(self.output, function=self)
def forward(self):
    """Draw a multivariate normal sample as mu + L @ eps."""
    # self.L is presumably a matrix square root (e.g. Cholesky) of cov
    # — TODO confirm against the class definition.
    self.eps = np.random.normal(size=self.mu.size)
    sample = self.mu.value + self.L.value @ self.eps
    if isinstance(self.mu, Constant) and isinstance(self.cov, Constant):
        return Constant(sample)
    return Tensor(sample, self)
def forward(self):
    """Draw batched multivariate normal samples: mu + L @ eps per batch."""
    self.eps = np.random.normal(size=self.mu.shape)
    # einsum applies each (...ij) factor to its own (...j) noise vector;
    # self.L is presumably a matrix square root of cov — TODO confirm.
    sample = self.mu.value + np.einsum(
        "...ij,...j->...i", self.L.value, self.eps)
    if isinstance(self.mu, Constant) and isinstance(self.cov, Constant):
        return Constant(sample)
    return Tensor(sample, self)
def forward(self):
    """Draw an Exponential(rate) sample by scaling standard-exponential noise."""
    draw = np.random.standard_exponential(size=self.rate.shape)
    self.output = draw / self.rate.value
    if isinstance(self.rate, Constant):
        return Constant(self.output)
    return Tensor(self.output, self)