Example #1
def dirac_(tensor, groups=1):
    """Fill tensor with the dirac delta function.

    Parameters
    ----------
    tensor : dragon.vm.torch.Tensor
        The input tensor.
    groups : number, optional, default=1
        The number of convolution groups.

    Returns
    -------
    dragon.vm.torch.Tensor
        The input tensor.

    """
    dimensions = tensor.ndimension()
    if dimensions not in [3, 4, 5]:
        raise ValueError('Only tensors with 3, 4, or 5 dimensions are supported.')
    sizes = tensor.size()
    if sizes[0] % groups != 0:
        raise ValueError('Dimension 0 should be divisible by groups.')
    out_channels_per_grp = sizes[0] // groups
    min_dim = min(out_channels_per_grp, sizes[1])
    with grad_mode.no_grad():
        tensor.zero_()
        for g in range(groups):
            for d in range(min_dim):
                item = [g * out_channels_per_grp + d, d]
                for i in range(2, dimensions):
                    item.append(sizes[i] // 2)
                tensor[tuple(item)] = 1
        return tensor
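
A minimal usage sketch (assuming dragon.vm.torch exposes PyTorch-style factory functions such as zeros): Dirac-filling a 4-D conv weight makes the layer start out as an identity mapping over its first min(out_channels, in_channels) channels.

from dragon.vm import torch

# Hypothetical 4-D conv weight: (out_channels, in_channels, kH, kW).
weight = torch.zeros(8, 8, 3, 3)
dirac_(weight)
# Every filter is now a unit impulse at the spatial center (1, 1), so
# output channel d initially just copies input channel d.
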
Example #2
def xavier_uniform_(tensor, gain=1):
    r"""Fill tensor from a xavier uniform distribution.

    .. math::
        \text{tensor} \sim \mathcal{U}(-\alpha, \alpha) \\ \, \\ \,
            \text{where} \quad \alpha = \text{gain} \times
                \sqrt{\frac{6}{\text{fan\_in} + \text{fan\_out}}}

    Parameters
    ----------
    tensor : dragon.vm.torch.Tensor
        The input tensor.
    gain : number, optional, default=1
        The gain value.

    Returns
    -------
    dragon.vm.torch.Tensor
        The input tensor.

    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    std = gain * math.sqrt(2.0 / (fan_in + fan_out))
    a = math.sqrt(3.0) * std
    with grad_mode.no_grad():
        return tensor.uniform_(-a, a)
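
The sqrt(3) factor converts a standard deviation into a uniform bound: a U(-a, a) variable has standard deviation a / sqrt(3), so sqrt(3) * std reproduces the closed-form alpha in the docstring. A quick stdlib check of that equivalence:

import math

gain, fan_in, fan_out = 1.0, 256, 128
std = gain * math.sqrt(2.0 / (fan_in + fan_out))
bound = math.sqrt(3.0) * std
assert math.isclose(bound, gain * math.sqrt(6.0 / (fan_in + fan_out)))
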
Example #3
def kaiming_uniform_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu'):
    r"""Fill tensor from a kaiming uniform distribution.

    .. math::
        \text{tensor} \sim \mathcal{U}(-\alpha, \alpha) \\ \, \\ \,
            \text{where} \quad \alpha = \text{gain} \times
                \sqrt{\frac{3}{\text{fan}}}

    Parameters
    ----------
    tensor : dragon.vm.torch.Tensor
        The input tensor.
    a : number, optional, default=0
        The negative slope used to compute the gain value.
    mode : {'fan_in', 'fan_out'}, optional
        The mode used to compute the fan.
    nonlinearity : str, optional, default='leaky_relu'
        The nonlinearity used to compute the gain value.

    Returns
    -------
    dragon.vm.torch.Tensor
        The input tensor.

    """
    fan = _calculate_correct_fan(tensor, mode)
    gain = calculate_gain(nonlinearity, a)
    std = gain / math.sqrt(fan)
    bound = math.sqrt(3.0) * std
    with grad_mode.no_grad():
        return tensor.uniform_(-bound, bound)
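
For the default nonlinearity='leaky_relu', calculate_gain conventionally follows He et al.: gain = sqrt(2 / (1 + a**2)), which reduces to sqrt(2) at a = 0. A sketch of the bound computation under that assumption (the source of calculate_gain is not shown here):

import math

a, fan = 0.01, 512                      # negative slope and fan value
gain = math.sqrt(2.0 / (1.0 + a ** 2))  # assumed leaky_relu gain formula
bound = gain * math.sqrt(3.0 / fan)     # the alpha in the docstring
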
Example #4
def xavier_normal_(tensor, gain=1):
    r"""Fill tensor from a xavier normal distribution.

    .. math::
        \text{tensor} \sim \mathcal{N}(0, \sigma^{2}) \\ \, \\ \,
            \text{where} \quad \sigma = \text{gain} \times
                \sqrt{\frac{2}{\text{fan\_in} + \text{fan\_out}}}

    Parameters
    ----------
    tensor : dragon.vm.torch.Tensor
        The input tensor.
    gain : number, optional, default=1
        The gain value.

    Returns
    -------
    dragon.vm.torch.Tensor
        The input tensor.

    """
    fan_in, fan_out = _calculate_fan_in_and_fan_out(tensor)
    std = gain * math.sqrt(2.0 / (fan_in + fan_out))
    with grad_mode.no_grad():
        return tensor.normal_(0, std)
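
A usage sketch for a 2-D linear weight (again assuming PyTorch-style factories in dragon.vm.torch); for an (out_features, in_features) matrix, fan_in and fan_out are just the two sizes:

from dragon.vm import torch

# Hypothetical linear weight: (out_features, in_features) = (128, 256).
w = torch.zeros(128, 256)
xavier_normal_(w)  # with gain=1, samples from N(0, 2 / (256 + 128))
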
Example #5
def kaiming_normal_(tensor, a=0, mode='fan_in', nonlinearity='leaky_relu'):
    r"""Fill tensor from a kaiming normal distribution.

    .. math::
        \text{tensor} \sim \mathcal{N}(0, \sigma^{2}) \\ \, \\ \,
            \text{where} \quad \sigma = \text{gain} \times
                \sqrt{\frac{1}{\text{fan}}}

    Parameters
    ----------
    tensor : dragon.vm.torch.Tensor
        The input tensor.
    a : number, optional, default=0
        The negative slope used to compute the gain value.
    mode : {'fan_in', 'fan_out'}, optional
        The mode used to compute the fan.
    nonlinearity : str, optional, default='leaky_relu'
        The nonlinearity used to compute the gain value.

    Returns
    -------
    dragon.vm.torch.Tensor
        The input tensor.

    """
    fan = _calculate_correct_fan(tensor, mode)
    gain = calculate_gain(nonlinearity, a)
    std = gain / math.sqrt(fan)
    with grad_mode.no_grad():
        return tensor.normal_(0, std)
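
The mode argument picks which fan scales the variance: 'fan_in' preserves activation variance in the forward pass, 'fan_out' preserves gradient variance in the backward pass. By the usual convention (which _calculate_correct_fan presumably follows), the fans of a conv weight count the receptive field:

# Manual fan computation for a (64, 32, 3, 3) conv weight, assuming the
# standard (out_channels, in_channels, kH, kW) layout.
out_channels, in_channels, kh, kw = 64, 32, 3, 3
receptive_field = kh * kw
fan_in = in_channels * receptive_field    # 288, used by mode='fan_in'
fan_out = out_channels * receptive_field  # 576, used by mode='fan_out'
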
Example #6
def _apply(self, fn):
    for module in self.children():
        module._apply(fn)
    for param in self._parameters.values():
        if param is not None:
            with grad_mode.no_grad():
                fn(param)
    for key, buf in self._buffers.items():
        if buf is not None:
            self._buffers[key] = fn(buf)
    return self
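
This is the recursive plumbing behind device and dtype conversions: parameters are mutated in place under no_grad, while buffers are rebound to whatever fn returns. A minimal sketch, assuming casts are thin wrappers over _apply as in PyTorch's Module:

def float(self):
    # Hypothetical wrapper: cast every parameter and buffer to float32.
    return self._apply(lambda t: t.float())
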
Example #7
    def apply(function, *args, **kwargs):
        """Apply function and create a checkpoint."""
        kwargs.pop('preserve_rng_state', True)
        variable_scope = kwargs.pop('variable_scope', 'Buffer')
        original_variable_scope = context.get_variable_scope(True)
        if kwargs:
            raise ValueError('Unexpected keyword arguments: ' +
                             ','.join(arg for arg in kwargs))

        # Run function.
        graph_tape = tapes.Tape()
        graph_tape._tracing = True  # Enable tracing.
        graph_tape._checkpointing = True  # Enable checkpointing.
        graph_tape._original_variable_scope = original_variable_scope
        with grad_mode.no_grad(), graph_tape:
            with context.variable_scope(variable_scope):
                outputs = function(*args)

        # Collect involving tensors.
        tensor_inputs, tensor_outputs = [], []
        for arg in args:
            if isinstance(arg, Tensor):
                tensor_inputs.append(arg)
        for arg in nest.flatten(outputs):
            if isinstance(arg, Tensor):
                tensor_outputs.append(arg)

        # Fill tape with function context.
        op_tape = tapes.OrderedTape()
        op_handle = workspace.get_workspace().create_handle('Checkpoint')
        op_tape.add_element(proto_util.make_operator_def(
            op_type='Checkpoint',
            name=op_handle,
            inputs=[input.id for input in tensor_inputs],
            outputs=[output.id for output in tensor_outputs],
            defs=[v.SerializeAs() for v in graph_tape.get_elements()],
            buffer_scope=variable_scope,
            to_impl=True))
        op_tape.add_handle(op_handle)
        op_tape.merge_handles(graph_tape.get_handles())

        # Save input tensors for backward.
        for input in tensor_inputs + graph_tape.get_sources():
            op_tape.add_source(input)

        # Save tape for backward.
        for output in tensor_outputs:
            output._tape = op_tape
            output._requires_grad = True

        return outputs
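
A usage sketch of the checkpointing pattern this implements: the function runs once under no_grad while the tape records its operator defs, and the saved 'Checkpoint' operator replays them during backward instead of keeping intermediate activations alive. The block below is hypothetical, and the tensor construction assumes PyTorch-style factories:

from dragon.vm import torch

def block(x):
    # Stand-in for an expensive sub-network; its intermediates are freed.
    return x * 2.0 + 1.0

x = torch.ones(2, 3)
x.requires_grad = True
y = apply(block, x)  # forward runs under no_grad; defs land on the tape
# y.requires_grad is True, and its tape's 'Checkpoint' operator will
# re-execute `block` during backward, trading compute for memory.
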
Example #8
def zeros_(tensor):
    r"""Fill tensor with zeros.

    .. math:: \text{tensor} \leftarrow 0

    Parameters
    ----------
    tensor : dragon.vm.torch.Tensor
        The input tensor.

    Returns
    -------
    dragon.vm.torch.Tensor
        The input tensor.

    """
    with grad_mode.no_grad():
        return tensor.fill_(0)
Example #9
def constant_(tensor, val):
    r"""Fill tensor with the scalar value.

    .. math:: \text{tensor} \leftarrow \text{value}

    Parameters
    ----------
    tensor : dragon.vm.torch.Tensor
        The input tensor.
    val : number
        The value to fill.

    Returns
    -------
    dragon.vm.torch.Tensor
        The input tensor.

    """
    with grad_mode.no_grad():
        return tensor.fill_(val)
Example #10
def eye_(tensor):
    r"""Fill tensor as the identity matrix.

    .. math:: \text{tensor} \leftarrow \text{diag}(1, 1, ..., 1)

    Parameters
    ----------
    tensor : dragon.vm.torch.Tensor
        The input tensor.

    Returns
    -------
    dragon.vm.torch.Tensor
        The input tensor.

    """
    if tensor.ndimension() != 2:
        raise ValueError('Only tensors with 2 dimensions are supported.')
    with grad_mode.no_grad():
        init_funcs.eye(*tensor.shape, out=tensor, requires_grad=tensor.requires_grad)
    return tensor
Example #11
def uniform_(tensor, a=0, b=1):
    r"""Fill tensor from an uniform distribution.

    .. math:: \text{tensor} \sim \mathcal{U}(\alpha, \beta)

    Parameters
    ----------
    tensor : dragon.vm.torch.Tensor
        The input tensor.
    a : number, optional, default=0
        The value of :math:`\alpha`.
    b : number, optional, default=1
        The value of :math:`\beta`.

    Returns
    -------
    dragon.vm.torch.Tensor
        The input tensor.

    """
    with grad_mode.no_grad():
        return tensor.uniform_(a, b)
Example #12
def normal_(tensor, mean=0, std=1):
    r"""Fill tensor from a normal distribution.

    .. math:: \text{tensor} \sim \mathcal{N}(\mu, \sigma^{2})

    Parameters
    ----------
    tensor : dragon.vm.torch.Tensor
        The input tensor.
    mean : number, optional, default=0
        The value of :math:`\mu`.
    std : number, optional, default=1
        The value of :math:`\sigma`.

    Returns
    -------
    dragon.vm.torch.Tensor
        The input tensor.

    """
    with grad_mode.no_grad():
        return tensor.normal_(mean, std)
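
Taken together, these fillers compose into the usual initialization pass over a module: a variance-scaling scheme for matrix and conv weights, constants for biases. A sketch, assuming a PyTorch-style named_parameters() on the module:

def init_weights(module):
    # Hypothetical helper: route each parameter to a filler by rank.
    for _name, param in module.named_parameters():
        if param.ndimension() >= 2:
            kaiming_normal_(param, mode='fan_in', nonlinearity='leaky_relu')
        else:
            zeros_(param)  # biases and other 1-D parameters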