Example #1
def to_variable(value, block=None, name=None, zero_copy=None):
    """
    The API will create a ``Variable`` object from numpy\.ndarray or Variable object.

    Parameters:
        value(ndarray): The numpy\.ndarray object that needs to be converted; it can be multi-dimensional, and the data type is one of numpy\.{float16, float32, float64, int16, int32, int64, uint8, uint16}.
        block(fluid.Block, optional): Which block this variable will be in. Default: None.
        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
        zero_copy(bool, optional): Whether to share memory with the input numpy array. This parameter only works with CPUPlace and will be set to True when it is None. Default: None.

    Returns:
        Variable: ``Tensor`` created from the specified numpy\.ndarray object; its data type and shape are the same as ``value``.

    Examples:

     .. code-block:: python

        import numpy as np
        import paddle.fluid as fluid

        with fluid.dygraph.guard(fluid.CPUPlace()):
            x = np.ones([2, 2], np.float32)
            y = fluid.dygraph.to_variable(x, zero_copy=False)
            x[0][0] = -1
            y[0][0].numpy()  # array([1.], dtype=float32)
            y = fluid.dygraph.to_variable(x)
            x[0][0] = 0
            y[0][0].numpy()  # array([0.], dtype=float32)

    """
    if isinstance(value, np.ndarray):
        assert framework.in_dygraph_mode(
        ), "to_variable could only be called in dygraph mode"

        if not block:
            block = framework.default_main_program().current_block()
        py_var = framework.Variable(block,
                                    type=core.VarDesc.VarType.LOD_TENSOR,
                                    name=name,
                                    shape=value.shape,
                                    dtype=value.dtype,
                                    stop_gradient=True)
        var = py_var._ivar.value()
        tensor = var.get_tensor()
        if isinstance(framework._current_expected_place(),
                      framework.core.CPUPlace):
            if zero_copy is None:
                zero_copy = True
            tensor.set(value, framework._current_expected_place(), zero_copy)
        else:
            assert not zero_copy, "zero_copy mode can only be used with CPUPlace"
            tensor.set(value, framework._current_expected_place(), False)
        return py_var
    elif isinstance(value, framework.Variable):
        return value
    else:
        raise TypeError(
            "to_variable only accepts 'ndarray' and 'Variable' as value's input"
        )
Example #2
def to_variable(value, name=None, zero_copy=None):
    """
    The API will create a ``Variable`` object from numpy\.ndarray or Variable object.

    Parameters:
        value(ndarray|Variable): The numpy\.ndarray or Variable object that needs to be converted; it can be multi-dimensional, and the data type is one of numpy\.{float16, float32, float64, int16, int32, int64, uint8, uint16}.
        name(str, optional): The default value is None. Normally there is no need for user to set this property. For more information, please refer to :ref:`api_guide_Name`
        zero_copy(bool, optional): Whether to share memory with the input numpy array. This parameter only works with CPUPlace and will be set to True when it is None. Default: None.

    Returns:
        Variable: If ``value`` is a numpy\.ndarray object, return ``Tensor`` created from the specified numpy\.ndarray object, which has the same data type and shape as ``value``. If ``value`` is a Variable object, just return ``value``.


    Examples:

     .. code-block:: python

        import numpy as np
        import paddle.fluid as fluid

        with fluid.dygraph.guard(fluid.CPUPlace()):
            x = np.ones([2, 2], np.float32)
            y = fluid.dygraph.to_variable(x, zero_copy=False)
            x[0][0] = -1
            y[0][0].numpy()  # array([1.], dtype=float32)
            y = fluid.dygraph.to_variable(x)
            x[0][0] = 0
            y[0][0].numpy()  # array([0.], dtype=float32)

    """
    if isinstance(value, np.ndarray):
        assert framework.in_dygraph_mode(
        ), "to_variable could only be called in dygraph mode"
        if isinstance(framework._current_expected_place(),
                      framework.core.CPUPlace):
            if zero_copy is None:
                zero_copy = True
        else:
            assert not zero_copy, "zero_copy mode can only be used with CPUPlace"
            zero_copy = False
        py_var = core.VarBase(value=value,
                              place=framework._current_expected_place(),
                              persistable=False,
                              zero_copy=zero_copy,
                              name=name if name else '')
        return py_var
    elif isinstance(value, (core.VarBase, framework.Variable)):
        return value
    else:
        raise TypeError(
            "to_variable only accepts 'ndarray' and 'Variable' as value's input"
        )
Example #3
 def test_place_guard(self):
     if core.is_compiled_with_cuda():
         paddle.set_device("gpu:0")
         with paddle.fluid.framework._dygraph_place_guard(core.CPUPlace()):
             self.assertTrue(
                 isinstance(_current_expected_place(),
                            type(core.CPUPlace())))
     else:
         paddle.set_device("cpu")
         with paddle.fluid.framework._dygraph_place_guard(core.CPUPlace()):
             self.assertTrue(
                 isinstance(_current_expected_place(),
                            type(core.CPUPlace())))
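A minimal sketch of the behaviour this test exercises, assuming `_dygraph_place_guard` also restores the previously expected place once the `with` block exits (names as used in the snippet above):

from paddle.fluid import core, framework

before = framework._current_expected_place()
with framework._dygraph_place_guard(core.CPUPlace()):
    # inside the guard, the globally expected place is the one passed in
    assert isinstance(framework._current_expected_place(), type(core.CPUPlace()))
# once the guard exits, the previously expected place should be active again
assert isinstance(framework._current_expected_place(), type(before))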
Example #4
    def trace_op(self, op, stop_gradient=False):
        # record op's trace id
        op.iop._trace_id = self._trace_id

        # trace op and save it
        backward_refs = self.trace(op.iop, op.inputs, op.outputs, op.block.desc,
                                   framework._current_expected_place(),
                                   stop_gradient)

        if not stop_gradient:
            self._trace_id += 1
            self._ops[op.iop._trace_id] = op

            # register backward hooks and variables if needed
            if len(backward_refs) > 0:
                op.iop.register_backward_hooks(release_op)

                # TODO(minqiyang): remove all inputs and outputs after separating
                # var and grad
                op.backward_refs = defaultdict(list)
                for k, v in six.iteritems(op.inputs):
                    if k in backward_refs:
                        op.backward_refs[k] = op.inputs[k]

                for k, v in six.iteritems(op.outputs):
                    if k in backward_refs:
                        op.backward_refs[k] = op.outputs[k]
Example #5
 def __setattr__(self, name, value):
     if isinstance(getattr(type(self), name, None), property):
         object.__setattr__(self, name, value)
     if isinstance(value, framework.Parameter):
         params = self.__dict__.get('_parameters', None)
         if params is None:
             raise ValueError(
                 "super(YourLayer, self).__init__() should be called first")
         if value.name in self._loaddict_holder:
             var = value._ivar.value()
             tensor = var.get_tensor()
             tensor.set(self._loaddict_holder[value.name].numpy(),
                        framework._current_expected_place())
         if name in params:
             # remove unused param in tracer
             if framework._dygraph_tracer_ is not None:
                 framework._dygraph_tracer_._vars.pop(params[name].name,
                                                      None)
         params[name] = value
     elif isinstance(value, core.Layer):
         layers = self.__dict__.get('_sub_layers', None)
         if layers is None:
             raise ValueError(
                 "super(YourLayer, self).__init__() should be called first")
         layers[name] = value
     else:
         object.__setattr__(self, name, value)
Example #6
def prepare_context(strategy=None):
    '''
    :api_attr: imperative
    '''
    if strategy is None:
        strategy = ParallelStrategy()
        strategy.nranks = Env().nranks
        strategy.local_rank = Env().local_rank
        strategy.trainer_endpoints = Env().trainer_endpoints
        strategy.current_endpoint = Env().current_endpoint
    if strategy.nranks < 2:
        return
    assert framework.in_dygraph_mode() is True, \
        "dygraph.prepare_context should be used with dygraph mode."
    place = framework._current_expected_place()
    assert place is not None, \
        "dygraph.prepare_context should be used in fluid.dygraph.guard(place) guard."
    if not parallel_helper._is_parallel_ctx_initialized():
        if isinstance(place, core.CUDAPlace):
            parallel_helper._set_parallel_ctx(
                core.NCCLParallelContext(strategy, place))
        else:
            # TODO(Yancey1989): add Gloo Parallel Context to support CPU parallel computation
            assert ("Only support CUDAPlace for now.")
        parallel_helper._init_parallel_ctx()
    return strategy
Example #7
def get_device():
    """
    This function returns the device on which the program is currently running,
    as a string such as 'cpu' or 'gpu:0'. If the global device has not been set,
    it returns 'gpu:0' when CUDA is available, otherwise 'cpu'.

    Examples:

     .. code-block:: python
            
        import paddle
        paddle.disable_static()
        device = paddle.get_device()

    """
    device = ''
    place = framework._current_expected_place()
    if isinstance(place, core.CPUPlace):
        device = 'cpu'
    elif isinstance(place, core.CUDAPlace):
        device_id = place.get_device_id()
        device = 'gpu:' + str(device_id)

    return device
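As a usage sketch (assuming a CPU-only run), the string returned by get_device mirrors whatever paddle.set_device made the current expected place:

import paddle

paddle.set_device('cpu')        # pins _current_expected_place() to CPUPlace
print(paddle.get_device())      # 'cpu'

# on a machine with CUDA support the same round trip would give 'gpu:0':
# paddle.set_device('gpu:0')
# print(paddle.get_device())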
Example #8
    def _prepare(self, inputs):
        """
        Prepare inputs, outputs, attrs.
        """
        assert isinstance(inputs, (tuple, list))
        # Flatten inputs with nested structure into single list.
        flatten_inputs = flatten(inputs)
        # Convert variable into VarBase and feed in training data.
        input_vars = []
        expected_place = framework._current_expected_place()
        for i, value in enumerate(flatten_inputs):
            if isinstance(value, np.ndarray):
                var = None
                if not framework._in_eager_mode_:
                    var = core.VarBase(value=value,
                                       name=self._inputs[i].desc.name(),
                                       persistable=False,
                                       place=expected_place,
                                       zero_copy=True)
                else:
                    var = core.eager.Tensor(value=value,
                                            name=self._inputs[i].desc.name(),
                                            persistable=False,
                                            place=expected_place,
                                            zero_copy=True)
            elif isinstance(value, (core.VarBase, core.eager.Tensor)):
                # NOTE(Aurelius84): If var is on CPUPlace, it will be transformed multi times
                # into CUDAPlace when it's as input of multi Ops. so we move it in advance
                # to avoid this problem.
                if value.stop_gradient and not value.place._equals(
                        expected_place):
                    var = value._copy_to(expected_place, False)
                    var.stop_gradient = True
                else:
                    var = value
                var.name = self._inputs[i].desc.name()
            else:
                continue
            input_vars.append(var)

        def create_out(var_id):
            var = self._outputs[var_id]
            assert isinstance(var, framework.Variable)
            var_desc = var.desc
            var_base = None
            if not framework._in_eager_mode_:
                var_base = core.VarBase(var_desc.dtype(), var_desc.shape(),
                                        var_desc.name(), var_desc.type(),
                                        False)
            else:
                var_base = core.eager.Tensor(var_desc.dtype(),
                                             var_desc.shape(), var_desc.name(),
                                             var_desc.type(), False)
            return var_base

        # Create VarBase to receive output data.
        out_vars = list(map(create_out, self._outputs.var_ids))

        return input_vars, out_vars
Example #9
def _to_LodTensor(ndarray):
    if not isinstance(ndarray, np.ndarray):
        raise TypeError(
            'Type of `ndarray` should be numpy.ndarray, but received {}.'.
            format(type(ndarray)))
    t = core.LoDTensor()
    place = _current_expected_place()
    t.set(ndarray, place)
    return t
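A small sketch of the same conversion done by hand, assuming np.array can read a LoDTensor back into a numpy array:

import numpy as np
import paddle
from paddle.fluid import core
from paddle.fluid.framework import _current_expected_place

paddle.set_device('cpu')
t = core.LoDTensor()
t.set(np.arange(6, dtype='float32').reshape(2, 3), _current_expected_place())
print(np.array(t))    # the same 2x3 data, now held by a LoDTensor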
Example #10
def _get_place(place):
    place = _get_paddle_place(place)
    if place is None:
        place = _current_expected_place()
    elif not isinstance(place, (core.Place, core.CPUPlace, core.CUDAPinnedPlace,
                                core.CUDAPlace)):
        raise ValueError(
            "'place' must be any of paddle.Place, paddle.CPUPlace, paddle.CUDAPinnedPlace, paddle.CUDAPlace"
        )
    return place
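A short sketch of the fallback this helper implements, assuming the private helpers are importable from paddle.fluid.framework as in the snippets above: a None place resolves to the globally expected place, while a device string is parsed into a concrete place object.

import paddle
from paddle.fluid.framework import _current_expected_place, _get_paddle_place

paddle.set_device('cpu')
print(_get_paddle_place(None))     # None, so callers fall back to the expected place
print(_get_paddle_place('cpu'))    # CPUPlace parsed from the device string
print(_current_expected_place())   # the place used when no place is passed in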
Example #11
 def trace_op(self,
              type,
              inputs,
              outputs,
              attrs,
              stop_gradient=False,
              inplace_map=None):
     self.trace(type, inputs, outputs, attrs,
                framework._current_expected_place(), self._has_grad
                and not stop_gradient, inplace_map if inplace_map else {})
Example #12
 def test_xpu(self):
     if core.is_compiled_with_xpu():
         with fluid.dygraph.guard():
             out = paddle.to_tensor([1, 2])
             device = paddle.get_device()
             self.assertEqual(
                 isinstance(framework._current_expected_place(),
                            core.XPUPlace), True)
             self.assertTrue(out.place.is_xpu_place())
             self.assertEqual(device, "xpu:0")
Example #13
 def test_cpu(self):
     with fluid.dygraph.guard():
         paddle.set_device('cpu')
         out1 = paddle.zeros(shape=[1, 3], dtype='float32')
         out2 = paddle.ones(shape=[1, 3], dtype='float32')
         out3 = paddle.concat(x=[out1, out2], axis=0)
         device = paddle.get_device()
         self.assertEqual(
             isinstance(framework._current_expected_place(), core.CPUPlace),
             True)
         self.assertEqual(device, "cpu")
Example #14
def randperm(n, dtype="int64", name=None):
    """
    Returns a 1-D Tensor filled with random permutation values from 0
    to n-1, with ``dtype``.

    Args:
        n (int): The upper bound (exclusive), and it should be greater than 0.
        dtype (str|np.dtype, optional): The data type of
            the output Tensor. Supported data types: int32, int64, float32,
            float64. Default is int64.
        name (str, optional): The default value is None. Normally there is no
            need for user to set this property. For more information, please
            refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: A 1-D Tensor filled with random permutation values from 0
        to n-1, with ``dtype``.

    Examples:
        .. code-block:: python

            import paddle

            out1 = paddle.randperm(5)
            # [4, 1, 2, 3, 0]  # random

            out2 = paddle.randperm(7, 'int32')
            # [1, 6, 2, 0, 4, 3, 5]  # random
 
    """
    if not isinstance(dtype, core.VarDesc.VarType):
        dtype = convert_np_dtype_to_dtype_(dtype)

    if in_dygraph_mode():
        return _C_ops.final_state_randperm(n, dtype, _current_expected_place())
    if _in_legacy_dygraph():
        return _C_ops.randperm('n', n, 'seed', 0, 'dtype', dtype)

    if n < 1:
        raise ValueError(
            "The input n should be greater than 0 in randperm op.")
    check_dtype(dtype, 'dtype', ['int64', 'int32', 'float32', 'float64'],
                'randperm')

    helper = LayerHelper("randperm", **locals())
    out = helper.create_variable_for_type_inference(dtype)
    attrs = {'n': n, 'dtype': dtype, 'seed': 0}
    helper.append_op(type='randperm',
                     inputs={},
                     outputs={'Out': out},
                     attrs=attrs)
    out.stop_gradient = True
    return out
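Because the dygraph branches dispatch to _current_expected_place(), the device selected beforehand determines where the resulting tensor lives. A minimal sketch (output values are random):

import paddle

paddle.set_device('cpu')                 # randperm will allocate on this place
out = paddle.randperm(5, dtype='int32')
print(out.place)                         # a CPU place
print(out.numpy())                       # e.g. [3 0 4 2 1]  (random)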
Example #15
 def test_gpu(self):
     if core.is_compiled_with_cuda():
         with fluid.dygraph.guard():
             paddle.set_device('gpu:0')
             out1 = paddle.zeros(shape=[1, 3], dtype='float32')
             out2 = paddle.ones(shape=[1, 3], dtype='float32')
             out3 = paddle.concat(x=[out1, out2], axis=0)
             device = paddle.get_device()
             self.assertEqual(
                 isinstance(framework._current_expected_place(),
                            core.CUDAPlace), True)
             self.assertEqual(device, "gpu:0")
Example #16
    def trace_op(self, op, inputs, outputs, stop_gradient=False):
        # record op's trace id
        op.iop._trace_id = self._trace_id

        self.trace(op.iop, inputs, outputs, op.attrs,
                   framework._current_expected_place(), stop_gradient)

        if not stop_gradient and self._train_mode:
            self._trace_id += 1
            self._ops[op.iop._trace_id] = op

            # register backward hooks and variables if needed
            op.iop.register_backward_hooks(release_op)
Example #17
    def load_dict(self, stat_dict, include_sublayers=True):
        self._loaddict_holder = stat_dict
        for name, item in self.__dict__.get('_parameters', None).items():
            if item.name in stat_dict:
                var = item._ivar.value()
                tensor = var.get_tensor()
                tensor.set(stat_dict[item.name].numpy(),
                           framework._current_expected_place())

        if include_sublayers:
            for layer_name, layer_item in self._sub_layers.items():
                if layer_item is not None:
                    layer_item.load_dict(stat_dict)
Example #18
def valid_vars(vars):
    """
    Note: run_program_op.InferShape requires `X`/`Out` not to be null.
    But empty lists are common in dy2static, so a fake VarBase is created
    to handle the problem.
    """
    if vars:
        return vars
    return [
        core.VarBase(value=[1],
                     name='Fake_var',
                     place=framework._current_expected_place())
    ]
Example #19
def to_variable(value, block=None, name=None):
    """
    This function creates a variable from an ndarray.

    Args:
        value(ndarray): the numpy value that needs to be converted
        block(fluid.Block|None): which block this variable will be in
        name(str|None): Name of the Variable

    Returns:
        Variable: The variable created from the given numpy array

    Examples:

     .. code-block:: python

        import numpy as np
        import paddle.fluid as fluid

        with fluid.dygraph.guard():
            x = np.ones([2, 2], np.float32)
            y = fluid.dygraph.to_variable(x)

    """
    if isinstance(value, np.ndarray):
        assert framework.in_dygraph_mode(
        ), "to_variable could only be called in dygraph mode"

        if not block:
            block = framework.default_main_program().current_block()
        py_var = framework.Variable(block,
                                    type=core.VarDesc.VarType.LOD_TENSOR,
                                    name=name,
                                    shape=value.shape,
                                    dtype=value.dtype,
                                    stop_gradient=True)
        var = py_var._ivar.value()
        tensor = var.get_tensor()
        if value.dtype == np.float16:
            value = value.view(np.uint16)
        tensor.set(value, framework._current_expected_place())
        return py_var
    elif isinstance(value, framework.Variable):
        return value
    else:
        raise TypeError(
            "to_variable only accepts 'ndarray' and 'Variable' as value's input"
        )
Example #20
    def __init__(self, program, parameters, feed_names, fetch_names):
        self._program = program
        self._feed_names = feed_names
        self._fetch_names = fetch_names

        self._place = _current_expected_place()

        self._scope = core.Scope()
        for p in parameters:
            src_tensor = p.value().get_tensor()
            dst_tensor = self._scope.var(p.name).get_tensor()
            dst_tensor._share_data_with(src_tensor)

        self._exe = Executor(self._place)
        self._compiled_program = None
        self._build_strategy = None
        self._exec_strategy = None
Example #21
def guard(place=None):
    """
    :api_attr: imperative

    This context manager creates a dygraph context in which dygraph code runs, using the python ``with`` statement.

    Parameters:
        place(fluid.CPUPlace|fluid.CUDAPlace|str, optional): Place to execute dygraph.
            If None, the running place will be determined according to the way paddle was compiled.
            If ``place`` is a string, it can be ``cpu``, ``gpu:x`` or ``xpu:x``, where ``x`` is the
            index of the GPU or XPU. Default: None.

    Returns:
        None

    Examples:

     .. code-block:: python

        import numpy as np
        import paddle.fluid as fluid

        with fluid.dygraph.guard():
            inp = np.ones([3, 1024], dtype='float32')
            t = fluid.dygraph.base.to_variable(inp)
            linear1 = fluid.Linear(1024, 4, bias_attr=False)
            linear2 = fluid.Linear(4, 4)
            ret = linear1(t)
            dy_ret = linear2(ret)

    """
    train = framework.Program()
    startup = framework.Program()
    tracer = Tracer()
    VarBase = core.VarBase

    if place is not None:
        expected_place = _get_paddle_place(place)
    else:
        expected_place = framework._current_expected_place()

    with framework.program_guard(train, startup):
        with framework.unique_name.guard():
            with framework._dygraph_guard(tracer):
                with framework._dygraph_place_guard(expected_place):
                    yield
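The docstring above notes that place may also be given as a device string; a minimal sketch of that variant (assuming a CPU-only run):

import numpy as np
import paddle.fluid as fluid

with fluid.dygraph.guard('cpu'):    # equivalent to passing fluid.CPUPlace()
    t = fluid.dygraph.to_variable(np.ones([3, 1024], dtype='float32'))
    print(t.shape)                  # [3, 1024]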
Example #22
def to_variable(value, block=None, name=None):
    if isinstance(value, np.ndarray):
        assert enabled(), "to_variable could only be called in dygraph mode"

        if not block:
            block = framework.default_main_program().current_block()
        py_var = framework.Variable(block,
                                    type=core.VarDesc.VarType.LOD_TENSOR,
                                    name=name,
                                    shape=value.shape,
                                    dtype=value.dtype,
                                    stop_gradient=True)
        var = py_var._ivar.value()
        tensor = var.get_tensor()
        tensor.set(value, framework._current_expected_place())
        return py_var
    elif isinstance(value, framework.Variable):
        return value
Example #23
    def _init_thread(self):
        self._var_names = [v.name for v in self._feed_list]
        self._shapes = [v.shape for v in self._feed_list]
        self._dtypes = [v.dtype for v in self._feed_list]
        self._need_check_feed = [
            v.desc.need_check_feed() for v in self._feed_list
        ]
        # if only 1 place, do not need to keep order
        self._blocking_queue = core.init_lod_tensor_blocking_queue(
            core.Variable(), self._blocking_queue_capacity,
            len(self._places) > 1)
        self._reader = core.create_py_reader(
            self._blocking_queue, self._var_names, self._shapes, self._dtypes,
            self._need_check_feed, self._places, self._use_buffer_reader, True,
            self._pin_memory)

        self._thread = threading.Thread(target=self._thread_loop,
                                        args=(_current_expected_place(), ))
        self._thread.daemon = True
        self._thread.start()
Example #24
 def __setattr__(self, name, value):
     if isinstance(value, framework.Parameter):
         params = self.__dict__.get('_parameters', None)
         if params is None:
             raise ValueError(
                 "super(YourLayer, self).__init__() should be called first")
         if value.name in self._loaddict_holder:
             var = value._ivar.value()
             tensor = var.get_tensor()
             tensor.set(self._loaddict_holder[value.name].numpy(),
                        framework._current_expected_place())
         params[name] = value
     elif isinstance(value, core.Layer):
         layers = self.__dict__.get('_sub_layers', None)
         if layers is None:
             raise ValueError(
                 "super(YourLayer, self).__init__() should be called first")
         layers[name] = value
     else:
         object.__setattr__(self, name, value)
Example #25
    def _prepare(self, inputs):
        """
        Prepare inputs, outputs, attrs.
        """
        assert isinstance(inputs, (tuple, list))
        # Flatten inputs with nested structure into single list.
        flatten_inputs = flatten(inputs)
        # Convert variable into VarBase and feed in training data.
        input_vars = []
        for i, value in enumerate(flatten_inputs):
            if isinstance(value, np.ndarray):
                var = core.VarBase(value=value,
                                   name=self._inputs[i].desc.name(),
                                   persistable=False,
                                   place=framework._current_expected_place(),
                                   zero_copy=True)
            elif isinstance(value, core.VarBase):
                var = value
                var.name = self._inputs[i].desc.name()
            else:
                continue
            input_vars.append(var)

        # Create VarBase to receive output data.
        out_vars = []
        for idx in self._outputs.var_ids:
            var = self._outputs[idx]
            assert isinstance(var, framework.Variable)
            var_desc = var.desc
            var_base = core.VarBase(var_desc.dtype(), var_desc.shape(),
                                    var_desc.name(), var_desc.type(), False)
            out_vars.append(var_base)

        # Hold forward variables
        tmp_scope_vec = core.VarBase(core.VarDesc.VarType.FP32, [],
                                     "program_out_scope",
                                     core.VarDesc.VarType.STEP_SCOPES, True)

        tmp_scope_vec.value().set_scope(self._inner_scope)

        return input_vars, out_vars, tmp_scope_vec
Example #26
    def add_parameter(self, name, parameter):
        """Adds a Parameter instance.

          The added parameter can be accessed via self.name.

        Args:
            name: name of the parameter.
            parameter: an instance of Parameter.
        Returns:
            the parameter passed in.
        """
        assert isinstance(parameter, framework.Parameter)

        if parameter.name in self._loaddict_holder:
            var = parameter._ivar.value()
            tensor = var.get_tensor()
            tensor.set(self._loaddict_holder[parameter.name].numpy(),
                       framework._current_expected_place())

        self._parameters[name] = parameter
        return parameter
Example #27
def get_device():
    """
    This function returns the device on which the program is currently running,
    as a string such as 'cpu', 'gpu:x', 'xpu:x', 'mlu:x' or 'npu:x'. If the global
    device has not been set, it returns 'gpu:x' when CUDA is available, otherwise 'cpu'.

    Examples:

     .. code-block:: python
            
        import paddle
        device = paddle.device.get_device()

    """
    device = ''
    place = framework._current_expected_place()
    if isinstance(place, core.CPUPlace):
        device = 'cpu'
    elif isinstance(place, core.CUDAPlace):
        device_id = place.get_device_id()
        device = 'gpu:' + str(device_id)
    elif isinstance(place, core.XPUPlace):
        device_id = place.get_device_id()
        device = 'xpu:' + str(device_id)
    elif isinstance(place, core.NPUPlace):
        device_id = place.get_device_id()
        device = 'npu:' + str(device_id)
    elif isinstance(place, core.IPUPlace):
        num_devices = core.get_ipu_device_count()
        device = "ipus:{{0-{}}}".format(num_devices - 1)
    elif isinstance(place, core.MLUPlace):
        device_id = place.get_device_id()
        device = 'mlu:' + str(device_id)
    else:
        raise ValueError(
            "The device specification {} is invalid".format(place))

    return device
Example #28
    def trace_op(self,
                 type,
                 inputs,
                 outputs,
                 attrs,
                 stop_gradient=False,
                 inplace_map=None):
        if not framework._in_legacy_dygraph():
            # inputs : {"sum": [tensor], ...}
            # outputs : {"sum": [tensor], ...}
            if type in final_state_name_mapping.keys():
                final_state_type = final_state_name_mapping[type][
                    "final_op_name"]

                assert final_state_type in _C_ops.__dict__
                self.eager_final_state_trace_op(type, inputs, outputs, attrs,
                                                stop_gradient, inplace_map)
            else:
                self.eager_trace_op(type, inputs, outputs, attrs, stop_gradient,
                                    inplace_map)
        else:
            self.trace(type, inputs, outputs, attrs,
                       framework._current_expected_place(), self._has_grad and
                       not stop_gradient, inplace_map if inplace_map else {})
Example #29
def to_variable(value, name=None, zero_copy=None, dtype=None):
    r"""
    :api_attr: imperative

    The API will create a ``Variable`` object from 
    tuple, list, numpy\.ndarray or Variable object.

    Parameters:
        value(tuple|list|ndarray|Variable|Tensor): Initial data. 
            Can be a list, tuple, NumPy ndarray, Variable, Tensor.
            The shape can be multi-dimensional. The data type is one of 
            numpy\.{float16, float32, float64, int16, int32, int64, 
            uint8, uint16, complex64, complex128}.
        name(str, optional): The default value is None. Normally there is no 
            need for user to set this property. For more information, please 
            refer to :ref:`api_guide_Name` . 
        zero_copy(bool, optional): Whether to share memory with the input numpy 
            array. This parameter only works with CPUPlace and will be set to 
            True when it is None. Default: None. (Note: zero_copy is currently disabled; see the note in the implementation.)
        dtype(str, optional): The desired data type of returned ``Variable`` .
            Can be 'bool' , 'float16' , 'float32' , 'float64' , 'int8' , 'int16' , 
            'int32' , 'int64' , 'uint8' . Default: None.

    Returns:
        Variable : If ``value`` is a tuple/list/numpy\.ndarray object, 
            return ``Tensor`` created from the corresponding numpy\.ndarray object, which has 
            the same data type and shape as ``value``.


    Examples:

     .. code-block:: python

        import numpy as np
        import paddle.fluid as fluid

        with fluid.dygraph.guard(fluid.CPUPlace()):
            x = np.ones([2, 2], np.float32)
            y = fluid.dygraph.to_variable(x, zero_copy=False)
            x[0][0] = -1
            y[0][0].numpy()  # array([1.], dtype=float32)
            y = fluid.dygraph.to_variable(x)
            x[0][0] = 0
            y[0][0].numpy()  # array([0.], dtype=float32)
            c = np.array([2+1j, 2])
            z = fluid.dygraph.to_variable(c)
            z.numpy() # array([2.+1.j, 2.+0.j])
            z.dtype # 'complex128'

            y = fluid.dygraph.to_variable([[0.1, 1.2], [2.2, 3.1], [4.9, 5.2]])
            y.shape     # [3L, 2L]

            y = fluid.dygraph.to_variable(((0.1, 1.2), (2.2, 3.1), (4.9, 5.2)), dtype='int32')
            y.shape     # [3L, 2L]

    """
    support_type = (list, tuple, np.ndarray, core.eager.Tensor, core.VarBase,
                    framework.Variable, core.Tensor, core.LoDTensor)
    if not isinstance(value, support_type):
        raise TypeError(
            "The type of 'value' in fluid.dygraph.to_variable must be %s, but received %s."
            % (support_type, type(value)))
    if isinstance(value,
                  (core.eager.Tensor, core.VarBase, framework.Variable)):
        return value
    elif isinstance(value, (core.Tensor, core.LoDTensor)):
        return core.VarBase(value)
    else:
        if isinstance(framework._current_expected_place(),
                      framework.core.CPUPlace):
            #TODO(zhiqiu): we found two problems when enabling zero_copy on CPUPlace.
            # (1): eigen requires 16-byte alignment, but the data of a numpy array may not satisfy it.
            # Details: https://eigen.tuxfamily.org/dox/group__TopicUnalignedArrayAssert.html
            # (2): when used in the flask framework, it may result in a hang.
            # Details: https://github.com/PaddlePaddle/Paddle/issues/26635
            # So, we temporarily disable the zero_copy strategy.
            if zero_copy == True:
                warnings.warn(
                    "Currently, zero_copy is not supported, and it will be discarded."
                )
                zero_copy = False
        else:
            assert not zero_copy, "zero_copy mode can only be used with CPUPlace"

        if not isinstance(value, np.ndarray):
            value = np.array(value)

        if dtype is not None:
            dtype = convert_dtype(dtype)
            if value.dtype != dtype:
                value = value.astype(dtype)

        if _in_eager_without_dygraph_check():
            return core.eager.Tensor(value,
                                     framework._current_expected_place(),
                                     False, zero_copy, name if name else None,
                                     True)
        else:
            py_var = core.VarBase(value=value,
                                  place=framework._current_expected_place(),
                                  persistable=False,
                                  zero_copy=zero_copy,
                                  name=name if name else '')
            return py_var
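A short sketch of the dtype path described above: a nested list is first converted to a numpy array and then cast before the tensor is created (the exact printed dtype representation may differ between Paddle versions):

import paddle.fluid as fluid

with fluid.dygraph.guard(fluid.CPUPlace()):
    y = fluid.dygraph.to_variable([[1, 2], [3, 4]], dtype='float32')
    print(y.shape)     # [2, 2]
    print(y.dtype)     # a float32 dtype (VarType.FP32)
    print(y.numpy())   # [[1. 2.] [3. 4.]]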
Example #30
def grad(outputs,
         inputs,
         grad_outputs=None,
         retain_graph=None,
         create_graph=False,
         only_inputs=True,
         allow_unused=False,
         no_grad_vars=None):
    ''' 
    .. note::
        **This API is ONLY available in imperative mode.**

    This API computes the sum of gradients of `outputs` with respect to each `inputs` .

    Parameters:
        outputs (Tensor|list(Tensor)|tuple(Tensor)): the output Tensor or 
            Tensor list/tuple of the graph to compute gradients.
        inputs (Tensor|list(Tensor)|tuple(Tensor)): the input Tensor or 
            Tensor list/tuple of the graph to compute gradients. The returned
            values of this API are the gradients of `inputs` . 
        grad_outputs (Tensor|list(Tensor|None)|tuple(Tensor|None), optional): 
            initial gradient values of `outputs` . If `grad_outputs` is None, 
            the initial gradient values of `outputs` would be Tensors filled with 1; 
            if `grad_outputs` is not None, it must have the same length as `outputs` , 
            and in this case, the initial gradient value of the i-th `outputs` would
            be: (1) a Tensor filled with 1 when the i-th element of `grad_outputs` 
            is None; (2) the i-th element of `grad_outputs` when the i-th element of
            `grad_outputs` is a Tensor. Default None.
        retain_graph (bool, optional): whether to retain the forward graph which 
            is used to calculate the gradient. When it is True, the graph would 
            be retained, in which way users can calculate backward twice for the 
            same graph. When it is False, the graph would be freed. Default None,
            which means it is equal to `create_graph` . 
        create_graph (bool, optional): whether to create the gradient graphs of
            the computing process. When it is True, higher order derivatives are
            supported to compute; when it is False, the gradient graphs of the
            computing process would be discarded. Default False.
        only_inputs (bool, optional): whether to only compute the gradients of
            `inputs` . If it is False, the gradients of all remaining leaf 
            Tensors in the graph would be also computed and accumulated. 
            If it is True, only the gradients of `inputs` would be computed.
            Default True. only_inputs=False is under development, and it is
            not supported yet.    
        allow_unused (bool, optional): whether to raise error or return None if some 
            Tensors of `inputs` are unreachable in the graph. If some Tensors of 
            `inputs` are unreachable in the graph (i.e., their gradients are None),  
            error would be raised if allow_unused=False, or None would be returned as
            their gradients if allow_unused=True. Default False.
        no_grad_vars (Tensor|list(Tensor)|tuple(Tensor)|set(Tensor), optional): 
            the Tensors whose gradients are not needed to compute. Default None.

    Returns:
        list: a list of Tensors, whose length is the same as the Tensor number 
        inside `inputs`, and the i-th returned Tensor is the sum of gradients of 
        `outputs` with respect to the i-th `inputs`.

    Examples 1:
        .. code-block:: python

            import paddle

            def test_dygraph_grad(create_graph):
                x = paddle.ones(shape=[1], dtype='float32')
                x.stop_gradient = False
                y = x * x

                # Since y = x * x, dx = 2 * x
                dx = paddle.grad(
                        outputs=[y],
                        inputs=[x],
                        create_graph=create_graph,
                        retain_graph=True)[0]

                z = y + dx

                # If create_graph = False, the gradient of dx
                # would not be backpropagated. Therefore,
                # z = x * x + dx, and x.gradient() = 2 * x = 2.0

                # If create_graph = True, the gradient of dx
                # would be backpropagated. Therefore,
                # z = x * x + dx = x * x + 2 * x, and
                # x.gradient() = 2 * x + 2 = 4.0

                z.backward()
                return x.gradient()

            print(test_dygraph_grad(create_graph=False)) # [2.]
            print(test_dygraph_grad(create_graph=True)) # [4.]

    Examples 2:
        .. code-block:: python

            import paddle

            def test_dygraph_grad(grad_outputs=None):
                x = paddle.to_tensor(2.0)
                x.stop_gradient = False

                y1 = x * x
                y2 = x * 3 

                # If grad_outputs=None, dy1 = [1], dy2 = [1].
                # If grad_outputs=[g1, g2], then:
                #    - dy1 = [1] if g1 is None else g1
                #    - dy2 = [1] if g2 is None else g2

                # Since y1 = x * x, dx = 2 * x * dy1.
                # Since y2 = x * 3, dx = 3 * dy2.
                # Therefore, the final result would be:
                # dx = 2 * x * dy1 + 3 * dy2 = 4 * dy1 + 3 * dy2.

                dx = paddle.grad(
                    outputs=[y1, y2], 
                    inputs=[x],
                    grad_outputs=grad_outputs)[0]

                return dx.numpy()

            grad_value = paddle.to_tensor(4.0)
            # dy1 = [1], dy2 = [1]
            print(test_dygraph_grad(None)) # [7.]

            # dy1 = [1], dy2 = [4]
            print(test_dygraph_grad([None, grad_value])) # [16.]

            # dy1 = [4], dy2 = [1]
            print(test_dygraph_grad([grad_value, None])) # [19.]

            # dy1 = [3], dy2 = [4]
            grad_y1 = paddle.to_tensor(3.0)
            print(test_dygraph_grad([grad_y1, grad_value])) # [24.]
    '''
    def check_in_out(in_out_list, name):
        assert in_out_list is not None, "{} should not be None".format(name)

        if isinstance(in_out_list, (list, tuple)):
            assert len(in_out_list) > 0, "{} cannot be empty".format(name)
            for each_var in in_out_list:
                if _in_eager_without_dygraph_check():
                    assert isinstance(
                        each_var, core.eager.Tensor
                    ), "Elements of {} must be Tensor".format(name)
                else:
                    assert isinstance(
                        each_var, core.VarBase
                    ), "Elements of {} must be Variable".format(name)
            return in_out_list
        else:
            if _in_eager_without_dygraph_check():
                assert isinstance(
                    in_out_list, core.eager.Tensor
                ), "{} must be Tensor or list of Tensor".format(name)
            else:
                assert isinstance(
                    in_out_list, core.VarBase
                ), "{} must be Variable or list of Variable".format(name)
            return [in_out_list]

    outputs = check_in_out(outputs, 'outputs')
    inputs = check_in_out(inputs, 'inputs')

    if grad_outputs is not None:
        if not isinstance(grad_outputs, (list, tuple)):
            grad_outputs = [grad_outputs]

        for each_var in grad_outputs:
            if each_var is not None:
                if _in_eager_without_dygraph_check():
                    assert isinstance(
                        each_var, core.eager.Tensor
                    ), "grad_outputs must be None, a Variable or a list containing None or Variables"
                else:
                    assert isinstance(
                        each_var, core.VarBase
                    ), "grad_outputs must be None, a Variable or a list containing None or Variables"
    else:
        grad_outputs = []

    if len(grad_outputs) > 0:
        assert len(grad_outputs) == len(
            outputs), "The length of grad_outputs must be equal to outputs"

    if no_grad_vars is None:
        no_grad_vars = []
    elif isinstance(no_grad_vars, (core.VarBase, core.eager.Tensor)):
        no_grad_vars = [no_grad_vars]
    elif isinstance(no_grad_vars, (list, tuple, set)):
        no_grad_vars = list(no_grad_vars)
        for var in no_grad_vars:
            if _in_eager_without_dygraph_check():
                assert isinstance(
                    var,
                    core.eager.Tensor), "no_grad_vars can only contains Tensor"
            else:
                assert isinstance(
                    var,
                    core.VarBase), "no_grad_vars can only contains Variable"
    else:
        if _in_eager_without_dygraph_check():
            raise AssertionError(
                "no_grad_vars must be None, Tensor or list/tuple/set of Tensors"
            )
        else:
            raise AssertionError(
                "no_grad_vars must be None, Variable or list/tuple/set of Variables"
            )

    assert isinstance(create_graph, bool), "create_graph must be True or False"

    if retain_graph is None:
        retain_graph = create_graph

    assert isinstance(retain_graph,
                      bool), "retain_graph must be None, True or False"

    assert isinstance(allow_unused, bool), "allow_unused must be True or False"

    assert isinstance(only_inputs, bool), "only_inputs must be True or False"
    assert only_inputs, "only_inputs=False is not supported yet"

    if _in_eager_without_dygraph_check():
        return core.eager.run_partial_grad(outputs, inputs, grad_outputs,
                                           retain_graph, create_graph,
                                           only_inputs, allow_unused,
                                           no_grad_vars)
    else:
        place = core.Place()
        place.set_place(framework._current_expected_place())
        return core.dygraph_partial_grad(inputs, outputs, grad_outputs,
                                         no_grad_vars, place, create_graph,
                                         retain_graph, allow_unused,
                                         only_inputs)
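A brief sketch of the allow_unused behaviour documented above, assuming the paddle.to_tensor/paddle.grad APIs shown in the examples: an input that does not reach outputs yields None instead of raising.

import paddle

x = paddle.to_tensor(1.0, stop_gradient=False)
unused = paddle.to_tensor(2.0, stop_gradient=False)
y = x * x

# 'unused' does not contribute to y, so its gradient is unreachable;
# allow_unused=True returns None for it instead of raising an error.
dx, d_unused = paddle.grad(outputs=[y], inputs=[x, unused], allow_unused=True)
print(dx.numpy())    # [2.]  (2 * x)
print(d_unused)      # None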