Example #1
def create_tensor(value, dtype, shape):
    if framework._in_eager_mode_:
        # eager mode: create and fill the tensor with the final-state full op
        out = _C_ops.final_state_full(shape, value, dtype,
                                      framework._current_expected_place())
    else:
        # legacy dygraph: allocate a VarBase and fill it via fill_constant
        out = _varbase_creator(dtype=dtype)
        out = _C_ops.fill_constant(out, 'dtype', dtype, 'shape', shape,
                                   'value', value, 'force_cpu', False)
    # constants do not need gradients
    out.stop_gradient = True
    return out
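
For context, the public API that a helper like this serves is paddle.full, which fills a tensor of a given shape with a constant; a minimal usage sketch follows (the shape, value, and dtype below are illustrative assumptions, not taken from the example):

    import paddle

    # create a 2x3 float32 tensor filled with 1.5; like the helper above,
    # the result does not track gradients
    x = paddle.full(shape=[2, 3], fill_value=1.5, dtype='float32')
    print(x.stop_gradient)  # True
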
Example #2
    def __call__(self, var, block=None):
        """Initialize the input tensor with constant.

        Args:
            var(Tensor): Tensor that needs to be initialized.
            block(Block, optional): The block in which initialization ops
                   should be added. Used in static graph only, default None.

        Returns:
            The initialization op
        """
        block = self._check_block(block)

        assert (isinstance(var, framework.Variable)
                or isinstance(var, framework.EagerParamBase))
        assert isinstance(block, framework.Block)

        if framework._non_static_mode():
            _C_ops.fill_constant(var, 'value', float(self._value),
                                 'force_cpu', self._force_cpu, 'dtype',
                                 int(var.dtype), 'str_value',
                                 str(float(self._value)), 'shape', var.shape)
            return None
        else:
            # fill constant should set the "str_value" to preserve precision
            op = block.append_op(type="fill_constant",
                                 outputs={"Out": var},
                                 attrs={
                                     "shape": var.shape,
                                     "dtype": int(var.dtype),
                                     "value": float(self._value),
                                     'str_value': str(float(self._value)),
                                     'force_cpu': self._force_cpu
                                 },
                                 stop_gradient=True)

            var.op = op
            return op
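
A minimal sketch of how this initializer is typically reached from user code, assuming it backs the public paddle.nn.initializer.Constant API (the layer sizes and value below are illustrative):

    import paddle

    # every weight of the Linear layer is filled with 0.5 by the
    # initializer's __call__ shown above
    init = paddle.nn.initializer.Constant(value=0.5)
    linear = paddle.nn.Linear(4, 8,
                              weight_attr=paddle.ParamAttr(initializer=init))
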
Example #3
def create_tensor(value, dtype, shape):
    out = _varbase_creator(dtype=dtype)
    out = _C_ops.fill_constant(out, 'dtype', dtype, 'shape', shape,
                               'value', value, 'force_cpu', False)
    out.stop_gradient = True
    return out
Example #4
    def __call__(self, var, block=None):
        """Initialize the input tensor with dirac initializer.

        Args:
            var(Tensor): Tensor that needs to be initialized.
            block(Block, optional): The block in which initialization ops
                   should be added. Used in static graph only, default None.

        Returns:
            The most critical op in this initializer is scatter; the whole initialization consists of roughly 7 to 8 ops in total.
        """
        block = self._check_block(block)
        assert isinstance(var, framework.Parameter)
        assert isinstance(block, framework.Block)
        check_variable_and_dtype(var, "Out",
                                 ['float16', 'bfloat16', 'float32', 'float64'],
                                 'Dirac')

        assert len(var.shape) in [
            3, 4, 5
        ], "Only tensors with 3, 4 or 5 dimensions can be initialized by Dirac"
        assert (var.shape[0] % self._groups
                ) == 0, "Dimension 0 of the tensor must be divisible by groups"

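        # The Dirac values are computed in FP32; non-FP32 parameters get a
        # temporary FP32 buffer here and are cast back to var.dtype at the end.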
        if var.dtype != VarDesc.VarType.FP32:
            out_var = block.create_var(name=unique_name.generate(".".join(
                ['dirac', var.name, 'tmp'])),
                                       shape=var.shape,
                                       dtype=VarDesc.VarType.FP32,
                                       type=VarDesc.VarType.LOD_TENSOR,
                                       persistable=False)
        else:
            out_var = var
        op = None
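        # Step 1: zero-fill the working tensor.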
        if framework.in_dygraph_mode():
            with fluid.dygraph.no_grad():
                _C_ops.fill_constant(out_var, 'value', float(0), 'force_cpu',
                                     False,
                                     'dtype', out_var.dtype, 'str_value',
                                     str(float(0)), 'shape', out_var.shape)
        else:
            block.append_op(type='fill_constant',
                            inputs={},
                            outputs={'Out': out_var},
                            attrs={
                                'value': float(0),
                                'dtype': out_var.dtype,
                                'shape': out_var.shape,
                            },
                            stop_gradient=True)

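        # Step 2: compute the flat indices that should hold 1.0: for each
        # group i, output channel (j + i * num_per_group) is connected to
        # input channel j at the spatial center of the kernel.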
        origin_shape = var.shape
        num_per_group = origin_shape[0] // self._groups
        min_shape = min(num_per_group, origin_shape[1])

        idx_list = []
        value_list = []
        strides = []
        prod = 1
        for dim in reversed(origin_shape):
            strides.insert(0, prod)
            prod *= dim
        for i in range(self._groups):
            for j in range(min_shape):
                value_list.append(1.0)
                offset = 0
                for (k, stride) in enumerate(strides):
                    if (k == 0):
                        offset += (j + i * num_per_group) * stride
                    elif (k == 1):
                        offset += j * stride
                    else:
                        offset += origin_shape[k] // 2 * stride
                idx_list.append(offset)
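        # Step 3: flatten the working tensor to 1-D so the flat indices above
        # can be consumed by scatter.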
        if framework.in_dygraph_mode():
            with fluid.dygraph.no_grad():
                tmp_out, _ = _C_ops.reshape2(out_var, None, 'shape', [-1])
                tmp_out._share_underline_tensor_to(out_var)
        else:
            x_shape = block.create_var(name=unique_name.generate(".".join(
                [out_var.name, "XShape"])),
                                       dtype=out_var.dtype,
                                       shape=out_var.shape,
                                       type=VarDesc.VarType.LOD_TENSOR,
                                       persistable=False,
                                       stop_gradient=True)
            block.append_op(type="reshape2",
                            inputs={"X": out_var},
                            attrs={'shape': [-1]},
                            outputs={
                                "Out": out_var,
                                "XShape": x_shape
                            },
                            stop_gradient=True)

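        # Step 4: materialize the flat indices as an int64 index tensor.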
        index_tensor = block.create_var(
            name=unique_name.generate('scatter_index'),
            persistable=False,
            stop_gradient=True)

        if framework.in_dygraph_mode():
            with fluid.dygraph.no_grad():
                tmp_tensor = framework._varbase_creator()
                _C_ops.assign_value(tmp_tensor, 'shape', [len(idx_list)],
                                    'dtype', VarDesc.VarType.INT64,
                                    'int64_values', idx_list)
                tmp_tensor._share_underline_tensor_to(index_tensor)
        else:
            block.append_op(type='assign_value',
                            outputs={'Out': index_tensor},
                            attrs={
                                'dtype': VarDesc.VarType.INT64,
                                'shape': [len(idx_list)],
                                'int64_values': idx_list
                            },
                            stop_gradient=True)

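        # Step 5: materialize the 1.0 updates as an FP32 value tensor.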
        value_tensor = block.create_var(
            name=unique_name.generate('scatter_value'),
            persistable=False,
            stop_gradient=True)

        if framework.in_dygraph_mode():
            with fluid.dygraph.no_grad():
                tmp_tensor = framework._varbase_creator()
                _C_ops.assign_value(tmp_tensor, 'shape', [len(value_list)],
                                    'dtype', VarDesc.VarType.FP32,
                                    'fp32_values', value_list)
                tmp_tensor._share_underline_tensor_to(value_tensor)
        else:
            block.append_op(type='assign_value',
                            outputs={'Out': value_tensor},
                            attrs={
                                'dtype': VarDesc.VarType.FP32,
                                'shape': [len(value_list)],
                                'fp32_values': value_list
                            },
                            stop_gradient=True)

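        # Step 6: scatter the ones into the flattened tensor, reshape it back
        # to the original shape, and cast back to var.dtype if needed.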
        if framework.in_dygraph_mode():
            with fluid.dygraph.no_grad():
                tmp_out = _C_ops.final_state_scatter(out_var, index_tensor,
                                                     value_tensor, True)
                tmp_out._share_underline_tensor_to(out_var)
                tmp_reshape_out, _ = _C_ops.reshape2(out_var, None, 'shape',
                                                     origin_shape)
                tmp_reshape_out._share_underline_tensor_to(out_var)
                if var.dtype != VarDesc.VarType.FP32:
                    tmp_cast_out = _C_ops.cast(out_var, 'in_dtype',
                                               out_var.dtype, 'out_dtype',
                                               var.dtype)
                    tmp_cast_out._share_underline_tensor_to(var)

        else:
            op = block.append_op(type="scatter",
                                 inputs={
                                     "X": out_var,
                                     "Ids": index_tensor,
                                     "Updates": value_tensor
                                 },
                                 attrs={'overwrite': True},
                                 outputs={"Out": out_var},
                                 stop_gradient=True)
            x_shape = block.create_var(name=unique_name.generate(".".join(
                [out_var.name, "XShape"])),
                                       dtype=out_var.dtype,
                                       shape=out_var.shape,
                                       type=VarDesc.VarType.LOD_TENSOR,
                                       persistable=False,
                                       stop_gradient=True)
            block.append_op(type="reshape2",
                            inputs={"X": out_var},
                            attrs={'shape': origin_shape},
                            outputs={
                                "Out": out_var,
                                "XShape": x_shape
                            },
                            stop_gradient=True)
            if var.dtype != VarDesc.VarType.FP32:
                block.append_op(type="cast",
                                inputs={"X": out_var},
                                outputs={"Out": var},
                                attrs={
                                    "in_dtype": out_var.dtype,
                                    "out_dtype": var.dtype
                                },
                                stop_gradient=True)
        if not in_dynamic_mode():
            var.op = op
        return op
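
A minimal usage sketch, assuming this __call__ backs the public paddle.nn.initializer.Dirac API (the convolution sizes below are illustrative):

    import paddle

    # Dirac initialization makes the convolution start close to an identity
    # mapping: each output channel forwards its matching input channel through
    # the kernel center, and all other weights are zero
    init = paddle.nn.initializer.Dirac()
    conv = paddle.nn.Conv2D(in_channels=4, out_channels=4, kernel_size=3,
                            weight_attr=paddle.ParamAttr(initializer=init))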