Example #1
    def __call__(self, var, block=None):
        """Initialize the input tensor with Normal distribution.

        Args:
            var(Tensor): Tensor that needs to be initialized.
            block(Block, optional): The block in which initialization ops
                   should be added. Used in static graph only, default None.

        Returns:
            The initialization op
        """
        block = self._check_block(block)

        assert isinstance(block, framework.Block)

        check_variable_and_dtype(var, "Out",
                                 ["uint16", "float16", "float32", "float64"],
                                 "guassian_random")

        if self._seed == 0:
            self._seed = block.program.random_seed

        if framework.in_dygraph_mode():
            out_var = _C_ops.gaussian_random('shape', var.shape, 'dtype',
                                             var.dtype, 'mean', self._mean,
                                             'std', self._std_dev, 'seed',
                                             self._seed, 'use_mkldnn', False)
            out_var._share_underline_tensor_to(var)
            return None
        else:
            op = block.append_op(type="gaussian_random",
                                 outputs={"Out": var},
                                 attrs={
                                     "shape": var.shape,
                                     "dtype": var.dtype,
                                     "mean": self._mean,
                                     "std": self._std_dev,
                                     "seed": self._seed,
                                     "use_mkldnn": False
                                 },
                                 stop_gradient=True)

            var.op = op
            return op
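A minimal usage sketch for the initializer above, under the assumption that this __call__ is the one backing the public paddle.nn.initializer.Normal class; the classes and attributes used below are the standard Paddle API, not part of the snippet itself.

import paddle

# Attach a Normal initializer to a layer's weight; in dygraph mode this dispatches
# to a gaussian_random call like the one in the __call__ shown above.
init = paddle.nn.initializer.Normal(mean=0.0, std=0.02)
linear = paddle.nn.Linear(4, 8, weight_attr=paddle.ParamAttr(initializer=init))
print(linear.weight.shape)  # [4, 8], values sampled from N(0.0, 0.02**2)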
Example #2
def gaussian(shape, mean=0.0, std=1.0, dtype=None, name=None):
    """
    This OP returns a Tensor filled with random values sampled from a Gaussian
    distribution, with ``shape`` and ``dtype``.

    Args:
        shape (list|tuple|Tensor): The shape of the output Tensor. If ``shape``
            is a list or tuple, the elements of it should be integers or Tensors
            (with the shape [1], and the data type int32 or int64). If ``shape``
            is a Tensor, it should be a 1-D Tensor(with the data type int32 or
            int64).
        mean (float|int, optional): Mean of the output tensor, default is 0.0.
        std (float|int, optional): Standard deviation of the output tensor, default
            is 1.0.
        dtype (str|np.dtype, optional): The data type of the output Tensor.
            Supported data types: float32, float64.
            Default is None, in which case the global default dtype is used
            (see ``get_default_dtype`` for details).
        name (str, optional): The default value is None. Normally there is no
            need for the user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A Tensor filled with random values sampled from a Gaussian
        distribution, with ``shape`` and ``dtype``. 
    """
    op_type_for_check = 'gaussian/standard_normal/randn/normal'
    seed = 0

    if dtype is None:
        dtype = paddle.framework.get_default_dtype()
        if dtype not in ['float32', 'float64']:
            raise TypeError(
                "{} only supports [float32, float64], but the default dtype is {}"
                .format(op_type_for_check, dtype))
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)

    if in_dygraph_mode():
        shape = utils.convert_shape_to_list(shape)
        return _C_ops.gaussian_random('shape',
                                      shape, 'mean', float(mean), 'std',
                                      float(std), 'seed', seed, 'dtype', dtype)

    check_shape(shape, op_type_for_check)
    check_dtype(dtype, 'dtype', ['float32', 'float64'], op_type_for_check)

    inputs = {}
    attrs = {
        'mean': mean,
        'std': std,
        'seed': seed,
        'dtype': dtype,
        'use_mkldnn': False
    }
    utils.get_shape_tensor_inputs(inputs=inputs,
                                  attrs=attrs,
                                  shape=shape,
                                  op_type=op_type_for_check)

    helper = LayerHelper('gaussian', **locals())
    out = helper.create_variable_for_type_inference(dtype)
    helper.append_op(type='gaussian_random',
                     inputs=inputs,
                     outputs={'Out': out},
                     attrs=attrs)
    out.stop_gradient = True
    return out
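For context, a short sketch of how this helper is usually reached from user code, assuming it sits in paddle.tensor.random as in upstream Paddle; the documented entry points paddle.standard_normal and paddle.normal route through it.

import paddle

# Both calls end up in a gaussian_random op with attrs assembled as above.
x = paddle.standard_normal(shape=[2, 3], dtype='float32')  # mean=0.0, std=1.0
y = paddle.normal(mean=1.5, std=0.5, shape=[2, 3])         # shifted and scaled
print(x.shape, y.dtype)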
Example #3
    def __call__(self, var, block=None):
        """Initialize the input tensor with MSRA initialization.

        Args:
            var(Tensor): Tensor that needs to be initialized.
            block(Block, optional): The block in which initialization ops
                   should be added. Used in static graph only, default None.

        Returns:
            The initialization op
        """
        block = self._check_block(block)

        assert isinstance(var, framework.Variable)
        assert isinstance(block, framework.Block)
        f_in, f_out = self._compute_fans(var)

        # If fan_in is passed, use it
        fan_in = f_in if self._fan_in is None else self._fan_in

        if self._seed == 0:
            self._seed = block.program.random_seed

        # to be compatible with fp16 initializers
        if var.dtype == VarDesc.VarType.FP16 or (
                var.dtype == VarDesc.VarType.BF16 and not self._uniform):
            out_dtype = VarDesc.VarType.FP32
            out_var = block.create_var(name=unique_name.generate(".".join(
                ['msra_init', var.name, 'tmp'])),
                                       shape=var.shape,
                                       dtype=out_dtype,
                                       type=VarDesc.VarType.LOD_TENSOR,
                                       persistable=False)
        else:
            out_dtype = var.dtype
            out_var = var

        if framework._non_static_mode():
            if self._uniform:
                limit = math.sqrt(6.0 / float(fan_in))
                out_var = _C_ops.uniform_random('shape', out_var.shape, 'min',
                                                -limit, 'max', limit, 'seed',
                                                self._seed, 'dtype',
                                                int(out_dtype))
            else:
                std = math.sqrt(2.0 / float(fan_in))
                if in_dygraph_mode():
                    place = _current_expected_place()
                    out_var = _C_ops.final_state_gaussian_random(
                        out_var.shape, 0.0, std, self._seed, out_dtype, place)
                else:
                    out_var = _C_ops.gaussian_random('shape',
                                                     out_var.shape, 'dtype',
                                                     int(out_dtype), 'mean',
                                                     0.0, 'std', std, 'seed',
                                                     self._seed)

            if var.dtype == VarDesc.VarType.FP16 or (
                    var.dtype == VarDesc.VarType.BF16 and not self._uniform):
                var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
                                      'out_dtype', var.dtype)
                var_tmp._share_underline_tensor_to(var)
            else:
                out_var._share_underline_tensor_to(var)
            return None
        else:
            if self._uniform:
                limit = math.sqrt(6.0 / float(fan_in))
                op = block.append_op(type="uniform_random",
                                     inputs={},
                                     outputs={"Out": out_var},
                                     attrs={
                                         "shape": out_var.shape,
                                         "dtype": int(out_dtype),
                                         "min": -limit,
                                         "max": limit,
                                         "seed": self._seed
                                     },
                                     stop_gradient=True)

            else:
                std = math.sqrt(2.0 / float(fan_in))
                op = block.append_op(type="gaussian_random",
                                     outputs={"Out": out_var},
                                     attrs={
                                         "shape": out_var.shape,
                                         "dtype": int(out_dtype),
                                         "mean": 0.0,
                                         "std": std,
                                         "seed": self._seed
                                     },
                                     stop_gradient=True)

            if var.dtype == VarDesc.VarType.FP16 or (
                    var.dtype == VarDesc.VarType.BF16 and not self._uniform):
                block.append_op(type="cast",
                                inputs={"X": out_var},
                                outputs={"Out": var},
                                attrs={
                                    "in_dtype": out_var.dtype,
                                    "out_dtype": var.dtype
                                })

            var.op = op
            return op
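A hedged usage sketch for the MSRA (Kaiming) initializer above, assuming it backs paddle.nn.initializer.KaimingNormal and KaimingUniform in the public API.

import paddle

# fan_in is inferred from the weight shape; the normal variant samples with
# std = sqrt(2 / fan_in), the uniform variant from U(-limit, limit) with
# limit = sqrt(6 / fan_in), exactly as computed in the __call__ above.
fc = paddle.nn.Linear(
    64, 128,
    weight_attr=paddle.ParamAttr(initializer=paddle.nn.initializer.KaimingNormal()))
print(float(fc.weight.std()))  # roughly sqrt(2 / 64), i.e. about 0.18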
Example #4
    def __call__(self, var, block=None):
        """Initialize the input tensor with Normal distribution.

        Args:
            var(Tensor): Tensor that needs to be initialized.
            block(Block, optional): The block in which initialization ops
                   should be added. Used in static graph only, default None.

        Returns:
            The initialization op
        """
        block = self._check_block(block)

        assert isinstance(block, framework.Block)

        check_variable_and_dtype(var, "Out",
                                 ["uint16", "float16", "float32", "float64"],
                                 "guassian_random")

        # to be compatible with fp16 initializers
        if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
            out_dtype = VarDesc.VarType.FP32
            out_var = block.create_var(name=unique_name.generate(".".join(
                ['normal_init', var.name, 'tmp'])),
                                       shape=var.shape,
                                       dtype=out_dtype,
                                       type=VarDesc.VarType.LOD_TENSOR,
                                       persistable=False)
        else:
            out_dtype = var.dtype
            out_var = var

        if self._seed == 0:
            self._seed = block.program.random_seed

        if in_dygraph_mode():
            place = _current_expected_place()
            out_var = _C_ops.final_state_gaussian_random(
                var.shape, self._mean, self._std_dev, self._seed, out_dtype,
                place)

            if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
                var_tmp = _C_ops.final_state_cast(out_var, var.dtype)
                var_tmp._share_underline_tensor_to(var)
            else:
                out_var._share_underline_tensor_to(var)
            return None

        if _in_legacy_dygraph():
            out_var = _C_ops.gaussian_random('shape', var.shape, 'dtype',
                                             out_dtype, 'mean', self._mean,
                                             'std', self._std_dev, 'seed',
                                             self._seed, 'use_mkldnn', False)

            if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
                var_tmp = _C_ops.cast(out_var, 'in_dtype', out_var.dtype,
                                      'out_dtype', var.dtype)
                var_tmp._share_underline_tensor_to(var)
            else:
                out_var._share_underline_tensor_to(var)
            return None
        else:
            op = block.append_op(type="gaussian_random",
                                 outputs={"Out": out_var},
                                 attrs={
                                     "shape": var.shape,
                                     "dtype": out_dtype,
                                     "mean": self._mean,
                                     "std": self._std_dev,
                                     "seed": self._seed,
                                     "use_mkldnn": False
                                 },
                                 stop_gradient=True)

            if var.dtype in [VarDesc.VarType.FP16, VarDesc.VarType.BF16]:
                block.append_op(type="cast",
                                inputs={"X": out_var},
                                outputs={"Out": var},
                                attrs={
                                    "in_dtype": out_var.dtype,
                                    "out_dtype": var.dtype
                                })
            var.op = op
            return op
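A small sketch of the fp16 path this __call__ handles, under the assumption that a float16 parameter created through paddle.create_parameter routes through this initializer: the values are drawn in float32 by gaussian_random and then cast back to the parameter's dtype, matching the create_var / cast branches above.

import paddle

# Hypothetical example: sampling happens in fp32 and is cast down to fp16.
w = paddle.create_parameter(
    shape=[8, 8],
    dtype='float16',
    default_initializer=paddle.nn.initializer.Normal(mean=0.0, std=0.1))
print(w.dtype)  # paddle.float16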