def forward(self, tensor1, tensor2):
    scalars_to_device(self.cond, tensor1, tensor2)
    nc = numpy_or_cupy(tensor1, tensor2)
    data = nc.where(self.cond.data, tensor1.data, tensor2.data)
    requires_grad = tensor1.requires_grad or tensor2.requires_grad
    device = tensor1.device
    return nets.Tensor(data, requires_grad=requires_grad, device=device)
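# Illustration (plain NumPy, hedged): the forward pass above follows the
# NumPy/CuPy ``where`` semantics, taking elements from ``tensor1`` where the
# condition holds and from ``tensor2`` elsewhere.
#
#   import numpy as np
#   cond = np.array([True, False, True])
#   np.where(cond, np.array([1, 2, 3]), np.array([10, 20, 30]))
#   # -> array([ 1, 20,  3])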
def concatenate(iterable):
    r"""Concatenate multiple ``Tensor`` from an iterable.

    .. note::
        The ``Tensor`` in ``iterable`` must all have the same shape.

    Args:
        iterable (tuple, list): list or tuple containing the ``Tensor`` to concatenate.

    Returns:
        Tensor: the concatenation of all ``Tensor``.
    """
    assert isinstance(iterable, ITERABLE), (f'iterable type {type(iterable)} unsupported for `concatenate` function. '
                                            f'Types currently supported are list, tuple.')
    requires_grad = False
    hooks = []
    nc = numpy_or_cupy(*iterable)
    data = nc.array([])
    offset = 0
    for idx, t in enumerate(iterable):
        t = nets.to_tensor(t)
        requires_grad = t.requires_grad or requires_grad
        if data.size == 0:
            data = t.data
        else:
            data = nc.concatenate((data, t.data))
        if t.requires_grad:
            # Bind the current offset and length as default arguments so each hook
            # slices its own rows out of the upstream gradient (a bare closure would
            # capture the loop variables by reference and only see their final values).
            def grad_fn(grad, start=offset, length=t.shape[0]):
                return grad[start:start + length]
            hooks.append(nets.Hook(t, grad_fn))
        offset += t.shape[0]
    tensor = nets.Tensor(data, requires_grad, device=iterable[0].device)
    for hook in hooks:
        tensor.register_hook(hook)
    return tensor
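# Usage sketch (illustrative, not from the library's docs): assuming ``nets.Tensor``
# accepts nested lists, two same-shaped tensors are stacked along the first axis,
# and each input gets a hook that slices its own rows back out of the upstream gradient.
#
# >>> a = nets.Tensor([[1., 2.]], requires_grad=True)
# >>> b = nets.Tensor([[3., 4.]], requires_grad=True)
# >>> c = concatenate([a, b])
# >>> c.shape          # -> (2, 2)
# >>> c.requires_grad  # -> True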
def backward(self, grad):
    tensor, = self.tensors
    bigger_grad = nets.zeros_like(tensor)
    nc = numpy_or_cupy(grad)
    if self.axis is None:
        # If there is no axis, the argmax is the location of the single maximum element
        max_indices = nets.unravel_index(nets.argmax(tensor), tensor.shape)
        bigger_grad[max_indices] = grad
    else:
        # If there is an axis, reconstruct the bigger matrix by 'rolling' on this axis
        max_indices = nets.argmax(tensor, axis=self.axis)
        for i, roll in enumerate(nets.rollaxis(bigger_grad, self.axis)):
            roll += (max_indices == i).astype(int) * grad
    return bigger_grad
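# Worked illustration (plain NumPy, hedged): the backward pass above routes the
# incoming gradient only to the positions that produced the maximum. For a 2x3
# matrix reduced over axis 0, the 'rolling' reconstruction is equivalent (up to
# ties) to multiplying by a one-hot mask of the column-wise maxima:
#
#   import numpy as np
#   x = np.array([[1., 5., 2.],
#                 [4., 3., 6.]])
#   grad = np.array([1., 1., 1.])               # upstream gradient of max(x, axis=0)
#   mask = (x == x.max(axis=0))                 # True where each column's maximum sits
#   bigger_grad = mask.astype(float) * grad     # -> [[0., 1., 0.], [1., 0., 1.]]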
def argmax(t, axis=None):
    r"""Get the indices of maximum elements from a tensor.

    Args:
        t (Tensor): tensor to get maximum indices from.
        axis (int, optional): index of the axis. Default is ``None``.

    Returns:
        Tensor
    """
    t = nets.to_tensor(t)
    nc = numpy_or_cupy(t)
    if axis is None:
        data = nc.unravel_index(nc.argmax(t.data), t.shape)
    else:
        data = nc.argmax(t.data, axis=axis)
    return nets.Tensor(data, device=t.device)
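# Usage sketch (illustrative): with ``axis=None`` the flat argmax is unravelled
# into one index per dimension; with an explicit axis it behaves like
# ``numpy.argmax`` along that axis.
#
# >>> t = nets.Tensor([[1., 5.], [4., 3.]])
# >>> argmax(t)           # maximum element 5. -> indices (0, 1)
# >>> argmax(t, axis=0)   # column-wise argmax -> [1, 0]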
def rollaxis(t, axis, start=0):
    """Roll the specified axis backwards, until it lies in a given position.

    Args:
        t (Tensor): Input tensor.
        axis (int): The axis to be rolled. The positions of the other axes do not
            change relative to one another.
        start (int, optional): When ``start <= axis``, the axis is rolled back until it
            lies in this position. When ``start > axis``, the axis is rolled until it
            lies before this position. The default, 0, results in a "complete" roll.

    Returns:
        Tensor
    """
    nc = numpy_or_cupy(t)
    data = nc.rollaxis(t.data, axis, start=start)
    return nets.Tensor(data, requires_grad=t.requires_grad, device=t.device)
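# Usage sketch (illustrative): the shape bookkeeping mirrors ``numpy.rollaxis``
# on the underlying data.
#
# >>> import numpy as np
# >>> t = nets.to_tensor(np.zeros((3, 4, 5)))
# >>> rollaxis(t, 2).shape           # axis 2 rolled to the front -> (5, 3, 4)
# >>> rollaxis(t, 1, start=3).shape  # axis 1 rolled before position 3 -> (3, 5, 4)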
def tensor2string(tensor, prefix="", precision=4, separator=', ', floatmode=None,
                  edgeitems=3, threshold=100, max_line_width=100, suppress_small=True):
    # Representation
    nc = numpy_or_cupy(tensor)
    array_str = nc.array_str(tensor.data, precision=precision,
                             max_line_width=max_line_width, suppress_small=suppress_small)
    # Prefix
    array_str = f"\n{prefix}".join(array_str.split("\n"))
    return array_str
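# Usage sketch (illustrative): only ``precision``, ``max_line_width`` and
# ``suppress_small`` are forwarded to ``array_str``; the ``prefix`` is inserted
# after every newline so multi-line arrays stay aligned when embedded in a
# larger representation (e.g. a ``Tensor`` repr).
#
# >>> t = nets.Tensor([[1., 2.], [3., 4.]])
# >>> print("Tensor(" + tensor2string(t, prefix="       ") + ")")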
def forward(self, tensor1, tensor2):
    nc = numpy_or_cupy(tensor1, tensor2)
    data = nc.multiply(tensor1.data, tensor2.data)
    requires_grad = tensor1.requires_grad or tensor2.requires_grad
    device = tensor1.device
    return nets.Tensor(data, requires_grad=requires_grad, device=device)
def forward(self, tensor):
    nc = numpy_or_cupy(tensor)
    data = nc.tanh(tensor.data)
    return nets.Tensor(data, requires_grad=tensor.requires_grad, device=tensor.device)
def forward(self, tensor):
    nc = numpy_or_cupy(tensor)
    data = nc.max(tensor.data, axis=self.axis)
    return nets.Tensor(data, requires_grad=tensor.requires_grad, device=tensor.device)
def forward(self, tensor):
    nc = numpy_or_cupy(tensor)
    data = nc.pad(tensor.data, pad_width=self.padding, constant_values=self.constant_values)
    return nets.Tensor(data, requires_grad=tensor.requires_grad, device=tensor.device)
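# Illustration (plain NumPy, hedged): ``pad_width`` is given per axis as
# (before, after) pairs and the border is filled with ``constant_values``.
#
#   import numpy as np
#   np.pad(np.ones((2, 2)), pad_width=((1, 1), (1, 1)), constant_values=0)
#   # -> a 4x4 array with the 2x2 block of ones in the centre and zeros around it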