Example #1
 def forward(self):
     eps = 0.5 - np.random.uniform(size=self.loc.shape)
     self.eps = np.sign(eps) * np.log(1 - 2 * np.abs(eps))
     self.output = self.loc.value - self.scale.value * self.eps
     if isinstance(self.loc, Constant) and isinstance(self.scale, Constant):
         return Constant(self.output)
     return Tensor(self.output, function=self)
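This samples from a Laplace distribution by inverting its CDF: with u drawn uniformly from (-0.5, 0.5), loc - scale * sign(u) * log(1 - 2|u|) is Laplace(loc, scale)-distributed, and the stored eps is reused for backpropagation. A quick standalone check of the transform, with loc and scale as plain floats here:
 import numpy as np

 loc, scale = 0.0, 2.0
 u = 0.5 - np.random.uniform(size=100000)          # u ~ Uniform(-0.5, 0.5)
 samples = loc - scale * np.sign(u) * np.log(1 - 2 * np.abs(u))
 print(samples.mean(), samples.var())              # roughly 0.0 and 2 * scale**2 = 8.0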
Example #2
 def forward(self, x):
     x = self._convert2tensor(x)
     self.x = x
     output = np.log(self.x.value)
     if isinstance(self.x, Constant):
         return Constant(output)
     return Tensor(output, function=self)
Example #3
 def forward(self, x):
     x = self._convert2tensor(x)
     self.x = x
     output = x.value.clip(min=0)
     if isinstance(x, Constant):
         return Constant(output)
     return Tensor(output, function=self)
Example #4
 def forward(self, x, y):
     x, y = self._check_input(x, y)
     self.x = x
     self.y = y
     if isinstance(self.x, Constant) and isinstance(self.y, Constant):
         return Constant(x.value / y.value)
     return Tensor(x.value / y.value, function=self)
Example #5
 def forward(self, x, shape):
     x = self._convert2tensor(x)
     self.x = x
     output = np.broadcast_to(x.value, shape)
     if isinstance(self.x, Constant):
         return Constant(output)
     return Tensor(output, function=self)
Example #6
 def forward(self, x):
     x = self._convert2tensor(x)
     self.x = x
     output = x.value.sum(axis=self.axis, keepdims=self.keepdims)
     if isinstance(self.x, Constant):
         return Constant(output)
     return Tensor(output, function=self)
Example #7
 def forward(self, x, t):
     x, t = self._check_input(x, t)
     self.x = x
     self.t = t
     loss = (np.maximum(x.value, 0) - t.value * x.value +
             np.log1p(np.exp(-np.abs(x.value))))
     return Tensor(loss, function=self)
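The loss expression is the numerically stable form of sigmoid cross-entropy, -t*log(sigmoid(x)) - (1 - t)*log(1 - sigmoid(x)), rewritten as max(x, 0) - t*x + log(1 + exp(-|x|)) so that exp never overflows. A quick comparison on plain arrays:
 import numpy as np

 x = np.array([-50.0, -1.0, 0.0, 1.0, 50.0])     # logits
 t = np.array([0.0, 1.0, 1.0, 0.0, 1.0])         # binary targets
 stable = np.maximum(x, 0) - t * x + np.log1p(np.exp(-np.abs(x)))
 s = 1 / (1 + np.exp(-x))
 naive = -t * np.log(s) - (1 - t) * np.log(1 - s)
 print(stable)   # finite everywhere
 print(naive)    # nan at x = 50 because sigmoid(50) rounds to exactly 1.0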
Example #8
 def forward(self, x, shape):
     x = self._convert2tensor(x)
     self._atleast_ndim(x, 1)
     self.x = x
     if isinstance(self.x, Constant):
         return Constant(x.value.reshape(*shape))
     return Tensor(x.value.reshape(*shape), function=self)
Example #9
 def forward(self, x):
     x = self._convert2tensor(x)
     self.x = x
     self.output = np.linalg.cholesky(x.value)
     if isinstance(self.x, Constant):
         return Constant(self.output)
     return Tensor(self.output, function=self)
Example #10
 def forward(self, x):
     x = self._convert2tensor(x)
     self.x = x
     self.output = sp.gamma(x.value)
     if isinstance(x, Constant):
         return Constant(self.output)
     return Tensor(self.output, function=self)
Example #11
 def forward(self, x):
     x = self._convert2tensor(x)
     self.x = x
     self.output = np.tanh(x.value * 0.5) * 0.5 + 0.5  # sigmoid(x) = 0.5 * tanh(x / 2) + 0.5
     if isinstance(self.x, Constant):
         return Constant(self.output)
     return Tensor(self.output, function=self)
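Here the sigmoid is evaluated through the identity sigmoid(x) = 0.5 * tanh(x / 2) + 0.5; tanh saturates cleanly instead of overflowing the way exp(-x) can for very negative x. A quick check against the direct formula:
 import numpy as np

 x = np.linspace(-30, 30, 7)
 via_tanh = np.tanh(x * 0.5) * 0.5 + 0.5
 direct = 1 / (1 + np.exp(-x))
 print(np.allclose(via_tanh, direct))   # True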
Example #12
 def forward(self, x):
     x = self._convert2tensor(x)
     self.x = x
     self.output = np.abs(x.value)
     if isinstance(x, Constant):
         return Constant(self.output)
     self.sign = np.sign(x.value)
     return Tensor(self.output, function=self)
Example #13
 def forward(self, x, t):
     x, t = self._check_input(x, t)
     self.x = x
     self.t = t
     self.y = self._softmax(x.value)
     np.clip(self.y, 1e-10, 1, out=self.y)
     loss = -t.value * np.log(self.y)
     return Tensor(loss, function=self)
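_softmax is a helper of this library; clipping the probabilities at 1e-10 keeps the log finite when a class probability underflows to zero. A minimal sketch of the usual shift-by-max softmax plus the same clipping (the helper's exact implementation is an assumption):
 import numpy as np

 def softmax(a, axis=-1):
     a = a - a.max(axis=axis, keepdims=True)   # shift so exp never overflows
     e = np.exp(a)
     return e / e.sum(axis=axis, keepdims=True)

 logits = np.array([[1000.0, 0.0, -1000.0]])
 t = np.array([[1.0, 0.0, 0.0]])
 y = np.clip(softmax(logits), 1e-10, 1)
 print(-t * np.log(y))                         # finite even for extreme logits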
Example #14
 def forward(self, x):
     x = self._convert2tensor(x)
     if self.axes is not None:
         self._equal_ndim(x, len(self.axes))
     self.x = x
     if isinstance(self.x, Constant):
         return Constant(np.transpose(x.value, self.axes))
     return Tensor(np.transpose(x.value, self.axes), function=self)
Example #15
 def forward(self, x, y):
     x, y = self._check_input(x, y)
     self.x = x
     self.y = y
     self.output = np.power(x.value, y.value)
     if isinstance(self.x, Constant) and isinstance(self.y, Constant):
         return Constant(self.output)
     return Tensor(self.output, function=self)
Example #16
 def forward(self, x):
     x = self._convert2tensor(x)
     self.x = x
     output = np.maximum(x.value, 0) + np.log1p(
         np.exp(-np.abs(x.value)))  # numerically stable softplus: log(1 + exp(x))
     if isinstance(x, Constant):
         return Constant(output)
     return Tensor(output, function=self)
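This is the standard stable rewrite of softplus: log(1 + exp(x)) = max(x, 0) + log1p(exp(-|x|)). The naive form overflows for large x and loses precision for very negative x. A quick side-by-side:
 import numpy as np

 x = np.array([-100.0, 0.0, 1.0, 1000.0])
 stable = np.maximum(x, 0) + np.log1p(np.exp(-np.abs(x)))
 naive = np.log(1 + np.exp(x))   # RuntimeWarning and inf at x = 1000
 print(stable)                   # [3.7e-44, 0.693..., 1.313..., 1000.0]
 print(naive)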
Example #17
 def forward(self, x):
     x = self._convert2tensor(x)
     self.x = x
     self._equal_ndim(x, 2)
     self.output = np.linalg.inv(x.value)
     if isinstance(self.x, Constant):
         return Constant(self.output)
     return Tensor(self.output, function=self)
Example #18
 def forward(self, x, y):
     x, y = self._check_input(x, y)
     self.x = x
     self.y = y
     img = np.pad(x.value, [(p,) for p in self.pad], "constant")
     self.shape = img.shape
     self.patch = img2patch(img, y.shape[:2], self.stride)
     return Tensor(
         np.tensordot(self.patch, y.value, axes=((3, 4, 5), (0, 1, 2))),
         function=self)
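img2patch is a helper of this library that lays out every kernel-sized window of the padded image, so the convolution collapses into a single tensordot over the window and input-channel axes. A rough standalone sketch of the same idea using numpy's sliding_window_view, assuming stride 1, no padding, and NHWC layout (the helper's exact output layout is an assumption):
 import numpy as np
 from numpy.lib.stride_tricks import sliding_window_view

 img = np.random.randn(2, 8, 8, 3)             # (batch, H, W, in_channels)
 kernel = np.random.randn(3, 3, 3, 4)          # (kH, kW, in_channels, out_channels)
 windows = sliding_window_view(img, kernel.shape[:2], axis=(1, 2))
 patch = windows.transpose(0, 1, 2, 4, 5, 3)   # (batch, H-2, W-2, kH, kW, in_channels)
 out = np.tensordot(patch, kernel, axes=((3, 4, 5), (0, 1, 2)))
 print(out.shape)                              # (2, 6, 6, 4)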
Example #19
    def forward(self, x, mu, tau):
        x, mu, tau = self._check_input(x, mu, tau)
        self.x = x
        self.mu = mu
        self.tau = tau

        output = (-0.5 * np.square(x.value - mu.value) * tau.value +
                  0.5 * np.log(tau.value) - 0.5 * np.log(2 * np.pi))
        return Tensor(output, function=self)
Example #20
 def forward(self, x):
     x = self._convert2tensor(x)
     self._atleast_ndim(x, 1)
     self.x = x
     output = np.split(x.value, self.indices_or_sections, self.axis)
     if isinstance(self.x, Constant):
         return tuple([Constant(out) for out in output])
     self.n_output = len(output)
     self.delta = [None for _ in output]
     return tuple([Tensor(out, function=self) for out in output])
Example #21
 def forward(self, x):
     x = self._convert2tensor(x)
     self.x = x
     self._equal_ndim(x, 2)
     sign, self.output = np.linalg.slogdet(x.value)
     if sign != 1:
         raise ValueError("matrix has to be positive-definite")
     if isinstance(self.x, Constant):
         return Constant(self.output)
     return Tensor(self.output, function=self)
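np.linalg.slogdet returns the sign and the log of the absolute determinant, which stays finite where np.linalg.det would under- or overflow; a sign other than 1 means the determinant is not positive, so the matrix cannot be positive-definite. A small illustration:
 import numpy as np

 a = np.eye(500) * 0.01                 # determinant 0.01**500 underflows to zero
 print(np.linalg.det(a))                # 0.0
 sign, logdet = np.linalg.slogdet(a)
 print(sign, logdet)                    # 1.0, 500 * log(0.01) ~ -2302.6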
Example #22
 def forward(self, x):
     x = self._convert2tensor(x)
     self._equal_ndim(x, 4)
     self.x = x
     img = np.pad(x.value, [(p, ) for p in self.pad], "constant")
     patch = img2patch(img, self.pool_size, self.stride)
     n_batch, xlen_out, ylen_out, _, _, in_channels = patch.shape
     patch = patch.reshape(n_batch, xlen_out, ylen_out, -1, in_channels)
     self.shape = img.shape
     self.index = patch.argmax(axis=3)
     return Tensor(patch.max(axis=3), function=self)
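The pooling forward flattens each pooling window, records the argmax per window for the backward pass, and returns the per-window max. A rough standalone sketch of the same windowed max with sliding_window_view, assuming the stride equals the pool size and no padding (the library's img2patch layout is not reproduced exactly):
 import numpy as np
 from numpy.lib.stride_tricks import sliding_window_view

 x = np.random.randn(2, 8, 8, 3)                  # (batch, H, W, channels)
 pool = (2, 2)
 win = sliding_window_view(x, pool, axis=(1, 2))  # (2, 7, 7, 3, 2, 2)
 win = win[:, ::pool[0], ::pool[1]]               # stride = pool size -> (2, 4, 4, 3, 2, 2)
 flat = win.reshape(*win.shape[:4], -1)           # flatten each 2x2 window
 index = flat.argmax(axis=-1)                     # saved for routing gradients in backward
 out = flat.max(axis=-1)
 print(out.shape)                                 # (2, 4, 4, 3)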
Example #23
 def forward(self, a, b):
     a = self._convert2tensor(a)
     b = self._convert2tensor(b)
     self._equal_ndim(a, 2)
     self._equal_ndim(b, 2)
     self.a = a
     self.b = b
     self.output = np.linalg.solve(a.value, b.value)
     if isinstance(self.a, Constant) and isinstance(self.b, Constant):
         return Constant(self.output)
     return Tensor(self.output, function=self)
Example #24
 def forward(self, x):
     x = self._convert2tensor(x)
     self.x = x
     self.output = np.prod(self.x.value, axis=self.axis, keepdims=True)
     if not self.keepdims:
         output = np.squeeze(self.output)  # np.squeeze removes single-dimensional entries from the shape
         if output.size == 1:
             output = output.item()
     else:
         output = self.output
     if isinstance(self.x, Constant):
         return Constant(output)
     return Tensor(output, function=self)
Example #25
 def forward(self):
     if self.coef.ndim != 1:
         raise NotImplementedError
     indices = np.array(
         [np.random.choice(self.n_component, p=c) for c in self.coef.value]
     )
     output = np.random.normal(
         loc=self.mu.value[indices],
         scale=self.std.value[indices]
     )
     if (
         isinstance(self.coef, Constant)
         and isinstance(self.mu, Constant)
         and isinstance(self.std, Constant)
     ):
         return Constant(output)
     return Tensor(output, function=self)
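This draws from a one-dimensional Gaussian mixture by ancestral sampling: first pick a component index according to the mixture weights, then sample from that component's Gaussian. A standalone sketch with made-up weights:
 import numpy as np

 coef = np.array([0.2, 0.8])            # mixture weights, sum to 1
 mu = np.array([-3.0, 3.0])
 std = np.array([0.5, 1.0])
 indices = np.random.choice(len(coef), size=10000, p=coef)
 samples = np.random.normal(loc=mu[indices], scale=std[indices])
 print(samples.mean())                  # close to 0.2 * (-3) + 0.8 * 3 = 1.8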
Example #26
 def forward(self):
     self.eps = np.random.normal(size=self.mu.shape)
     output = self.mu.value + self.std.value * self.eps
     if isinstance(self.mu, Constant) and isinstance(self.var, Constant):
         return Constant(output)
     return Tensor(output, self)
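This is the reparameterization trick: with eps ~ N(0, 1), mu + std * eps is a sample from N(mu, std^2) that stays differentiable with respect to mu and std, and the stored eps is reused in the backward pass. A quick check of the resulting distribution:
 import numpy as np

 mu, std = 1.5, 0.3
 eps = np.random.normal(size=100000)
 samples = mu + std * eps
 print(samples.mean(), samples.std())   # close to 1.5 and 0.3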
Example #27
 def forward(self, x):
     x = self._convert2tensor(x)
     self.x = x
     if isinstance(self.x, Constant):
         return Constant(-x.value)
     return Tensor(-x.value, function=self)
Example #28
 def forward(self):
     eps = np.random.standard_exponential(size=self.rate.shape)
     self.output = eps / self.rate.value
     if isinstance(self.rate, Constant):
         return Constant(self.output)
     return Tensor(self.output, self)
Example #29
 def forward(self):
     self.eps = np.random.normal(size=self.mu.size)
     output = self.mu.value + self.L.value @ self.eps
     if isinstance(self.mu, Constant) and isinstance(self.cov, Constant):
         return Constant(output)
     return Tensor(output, self)
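Example #29 samples a multivariate normal through the Cholesky factor of the covariance: if eps ~ N(0, I) and cov = L L^T, then mu + L @ eps has mean mu and covariance cov. A standalone check:
 import numpy as np

 mu = np.zeros(2)
 cov = np.array([[2.0, 0.6],
                 [0.6, 1.0]])
 L = np.linalg.cholesky(cov)
 eps = np.random.normal(size=(100000, 2))
 samples = mu + eps @ L.T               # row i equals mu + L @ eps[i]
 print(np.cov(samples, rowvar=False))   # close to cov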
Example #30
 def forward(self):
     if self.alpha.ndim == 1:
         return Tensor(np.random.dirichlet(self.alpha.value), function=self)
     else:
         raise NotImplementedError