Example #1
    def __call__(cls, *inputs):
        tracer = framework._dygraph_tracer()
        block = framework.default_main_program().current_block()
        # Unwrap the Python-level Variables into their imperative ivars.
        ivar_inputs = [x._ivar for x in inputs]

        # Lazily register this class's Python forward/backward callbacks
        # with the C++ tracer, once per PyLayer subclass.
        if not hasattr(cls, 'forward_id'):
            cls.forward_id = core.PyLayer.num_funcs() + 1
            PyLayer.register_func(cls.forward_id, cls._do_forward)
            cls.backward_id = core.PyLayer.num_funcs() + 1
            PyLayer.register_func(cls.backward_id, cls._do_backward)

        # Build an op that dispatches to the registered callbacks, append it
        # to the current block, and trace it to get the output ivars.
        iop = core.OpBase(cls.__class__.__name__ + str(cls.forward_id))
        iop.forward_id = cls.forward_id
        iop.backward_id = cls.backward_id
        block.ops.append(iop)
        ivars = tracer.py_trace(iop, ivar_inputs, False)
        # Wrap each traced output ivar back into a Python-level Variable.
        ret = []
        for ivar in ivars:
            tensor = ivar.value().get_tensor()
            py_var = framework.Variable(block,
                                        type=core.VarDesc.VarType.LOD_TENSOR,
                                        name=None,
                                        shape=tensor.shape(),
                                        dtype=tensor._dtype(),
                                        ivar=ivar)
            ret.append(py_var)
        return ret
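
For orientation, here is a minimal sketch of how this __call__ gets exercised: a PyLayer subclass supplies static forward/backward callbacks that operate on numpy data, and calling an instance on dygraph Variables routes through the tracing logic above. The tanh example mirrors the pattern in Paddle's own dygraph tests; the exact callback signatures are an assumption based on the old 1.x API.

import numpy as np
import paddle.fluid as fluid

# Hedged sketch: assumes the old fluid.dygraph PyLayer contract where the
# static callbacks receive tuples of numpy-backed inputs.
class MyTanh(fluid.dygraph.PyLayer):
    def __init__(self):
        super(MyTanh, self).__init__()

    @staticmethod
    def forward(inputs):
        return np.tanh(inputs[0])

    @staticmethod
    def backward(inputs):
        inp, out, dout = inputs
        return np.array(dout) * (1 - np.square(np.array(out)))

with fluid.dygraph.guard():
    x = fluid.dygraph.to_variable(np.ones([2, 2], np.float32))
    outs = MyTanh()(x)  # dispatches through the traced __call__ above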
Example #2
def to_variable(value, block=None, name=None, zero_copy=None):
    """
    This API creates a ``Variable`` object from a numpy\.ndarray or an existing Variable.

    Parameters:
        value(ndarray): The numpy\.ndarray object that needs to be converted. It can be multi-dimensional, and its data type must be one of numpy\.{float16, float32, float64, int16, int32, int64, uint8, uint16}.
        block(fluid.Block, optional): The block this variable will belong to. Default: None.
        name(str, optional): The default value is None. Normally there is no need for users to set this property. For more information, please refer to :ref:`api_guide_Name`
        zero_copy(bool, optional): Whether to share memory with the input numpy array. This parameter only works with CPUPlace and will be set to True when it is None. Default: None.

    Returns:
        Variable: ``Tensor`` created from the specified numpy\.ndarray object; its data type and shape are the same as ``value``.

    Examples:

     .. code-block:: python

        import numpy as np
        import paddle.fluid as fluid

        with fluid.dygraph.guard(fluid.CPUPlace()):
            x = np.ones([2, 2], np.float32)
            y = fluid.dygraph.to_variable(x, zero_copy=False)
            x[0][0] = -1
            y[0][0].numpy()  # array([1.], dtype=float32)
            y = fluid.dygraph.to_variable(x)
            x[0][0] = 0
            y[0][0].numpy()  # array([0.], dtype=float32)

    """
    if isinstance(value, np.ndarray):
        assert framework.in_dygraph_mode(), \
            "to_variable can only be called in dygraph mode"

        if not block:
            block = framework.default_main_program().current_block()
        py_var = framework.Variable(block,
                                    type=core.VarDesc.VarType.LOD_TENSOR,
                                    name=name,
                                    shape=value.shape,
                                    dtype=value.dtype,
                                    stop_gradient=True)
        var = py_var._ivar.value()
        tensor = var.get_tensor()
        if isinstance(framework._current_expected_place(),
                      framework.core.CPUPlace):
            # On CPU, default to sharing memory with the numpy array.
            if zero_copy is None:
                zero_copy = True
            tensor.set(value, framework._current_expected_place(), zero_copy)
        else:
            # Other places always copy, so zero_copy must not be requested.
            assert not zero_copy, "zero_copy mode can only be used with CPUPlace"
            tensor.set(value, framework._current_expected_place(), False)
        return py_var
    elif isinstance(value, framework.Variable):
        return value
    else:
        raise TypeError(
            "to_variable only accepts 'ndarray' and 'Variable' as value's input"
        )
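
The CPU-only restriction enforced by the assert above can be observed directly: requesting zero_copy on a non-CPU place raises. A small sketch, assuming a CUDA-enabled build (fluid.CUDAPlace would not exist otherwise):

import numpy as np
import paddle.fluid as fluid

x = np.ones([2, 2], np.float32)
with fluid.dygraph.guard(fluid.CUDAPlace(0)):
    try:
        fluid.dygraph.to_variable(x, zero_copy=True)
    except AssertionError as e:
        print(e)  # zero_copy mode can only be used with CPUPlace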
Example #3
    def apply_collective_grads(self):
        """
        AllReduce the Parameters' gradient.
        """
        if not self._is_data_parallel_mode():
            return

        grad_var_set = set()
        grad_vars = []
        for param in self._layers.parameters():
            # NOTE(zcd): The grad_ivar may not have been generated yet.
            if param.trainable and param._ivar._grad_ivar():
                g_var = framework.Variable(
                    block=self._helper.main_program.current_block(),
                    name=param._ivar._grad_name(),
                    stop_gradient=True,
                    ivar=param._ivar._grad_ivar())
                grad_vars.append(g_var)
                assert g_var not in grad_var_set
                grad_var_set.add(g_var)

        # FIXME(zcd): the type of the var should be LoDTensor, i.e.,
        # the gradients should be dense; otherwise, the following
        # logic should be updated.
        # Pack gradients into groups of roughly 128 MB each.
        mega_bytes = 128 * 1024 * 1024
        group_idx = 0
        memory_counter = 0
        grad_var_groups = OrderedDict()
        if not grad_vars:
            return
        dtype = grad_vars[0].dtype
        for g_var in grad_vars:
            # Note: all gradients in the same group must share one dtype.
            nbytes = np.prod(g_var.shape) * core.size_of_dtype(g_var.dtype)
            if memory_counter < mega_bytes and dtype == g_var.dtype:
                memory_counter += nbytes
            else:
                # Start a new group and track its dtype.
                memory_counter = nbytes
                dtype = g_var.dtype
                group_idx += 1
            grad_var_groups.setdefault(group_idx, []).append(g_var)

        coalesced_grads_and_vars = self._coalesce_tensors(grad_var_groups)

        # AllReduce each coalesced buffer in place, asynchronously.
        for coalesced_grad, _, _ in coalesced_grads_and_vars:
            collective._allreduce(coalesced_grad,
                                  coalesced_grad,
                                  sync_mode=False)

        self._split_tensors(coalesced_grads_and_vars)
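
The bucketing policy above is easy to check in isolation: gradients are packed into roughly 128 MB groups, and a dtype change starts a new group so every coalesced buffer stays homogeneous. Below is a self-contained sketch of that policy over (shape, dtype) pairs; the helper name group_grads is hypothetical.

from collections import OrderedDict
import numpy as np

def group_grads(shapes_and_dtypes, mega_bytes=128 * 1024 * 1024):
    groups = OrderedDict()
    group_idx = 0
    memory_counter = 0
    cur_dtype = shapes_and_dtypes[0][1]
    for shape, dtype in shapes_and_dtypes:
        nbytes = int(np.prod(shape)) * np.dtype(dtype).itemsize
        if memory_counter < mega_bytes and dtype == cur_dtype:
            memory_counter += nbytes
        else:
            # New bucket: reset the counter and track the new dtype.
            memory_counter = nbytes
            cur_dtype = dtype
            group_idx += 1
        groups.setdefault(group_idx, []).append((shape, dtype))
    return groups

# 40 gradients of 4 MB each -> 160 MB total -> two buckets.
buckets = group_grads([((1024, 1024), np.float32)] * 40)
print({idx: len(vs) for idx, vs in buckets.items()})  # {0: 32, 1: 8}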
Example #4
def to_variable(value, block=None, name=None):
    """
    This function creates a variable from a numpy ndarray.

    Args:
        value(ndarray): The numpy value that needs to be converted.
        block(fluid.Block|None): The block this variable will belong to.
        name(str|None): Name of the Variable.

    Returns:
        Variable: The variable created from the given numpy array.

    Examples:

     .. code-block:: python

        import numpy as np
        import paddle.fluid as fluid

        with fluid.dygraph.guard():
            x = np.ones([2, 2], np.float32)
            y = fluid.dygraph.to_variable(x)

    """
    if isinstance(value, np.ndarray):
        assert framework.in_dygraph_mode(), \
            "to_variable can only be called in dygraph mode"

        if not block:
            block = framework.default_main_program().current_block()
        py_var = framework.Variable(block,
                                    type=core.VarDesc.VarType.LOD_TENSOR,
                                    name=name,
                                    shape=value.shape,
                                    dtype=value.dtype,
                                    stop_gradient=True)
        var = py_var._ivar.value()
        tensor = var.get_tensor()
        if value.dtype == np.float16:
            # The backend stores fp16 as raw uint16 bits, so reinterpret the
            # buffer without copying.
            value = value.view(np.uint16)
        tensor.set(value, framework._current_expected_place())
        return py_var
    elif isinstance(value, framework.Variable):
        return value
    else:
        raise TypeError(
            "to_variable only accepts 'ndarray' and 'Variable' as value's input"
        )
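
The float16 branch in this variant relies on numpy's view() reinterpreting the same buffer rather than copying: an fp16 array viewed as uint16 exposes the raw IEEE-754 binary16 bit patterns that the backend tensor stores. A quick standalone check:

import numpy as np

x = np.ones([2, 2], np.float16)
u = x.view(np.uint16)
assert u.base is x    # a view over the same buffer, not a copy
print(hex(u[0, 0]))   # 0x3c00, the binary16 encoding of 1.0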
Example #5
def to_variable(value, block=None, name=None):
    if isinstance(value, np.ndarray):
        assert enabled(), "to_variable can only be called in dygraph mode"

        if not block:
            block = framework.default_main_program().current_block()
        py_var = framework.Variable(block,
                                    type=core.VarDesc.VarType.LOD_TENSOR,
                                    name=name,
                                    shape=value.shape,
                                    dtype=value.dtype,
                                    stop_gradient=True)
        var = py_var._ivar.value()
        tensor = var.get_tensor()
        tensor.set(value, framework._current_expected_place())
        return py_var
    elif isinstance(value, framework.Variable):
        return value
    else:
        raise TypeError(
            "to_variable only accepts 'ndarray' and 'Variable' as value's input"
        )