Example No. 1
    def setUp(self):
        '''
        Test eye op with specified shape.
        '''
        self.set_npu()
        self.place = paddle.NPUPlace(0)
        self.op_type = "eye"
        self.inputs = {}

        self.num_rows = 0
        self.num_columns = 0
        self.dtype = np.float32

        self.initTestCase()

        if self.num_columns == 0:
            self.attrs = {
                'num_rows': self.num_rows,
                'dtype': framework.convert_np_dtype_to_dtype_(self.dtype)
            }
            self.outputs = {'Out': np.eye(self.num_rows, dtype=self.dtype)}
        else:
            self.attrs = {
                'num_rows': self.num_rows,
                'num_columns': self.num_columns,
                'dtype': framework.convert_np_dtype_to_dtype_(self.dtype)
            }
            self.outputs = {
                'Out': np.eye(self.num_rows,
                              self.num_columns,
                              dtype=self.dtype)
            }
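For reference, a minimal sketch (assuming a PaddlePaddle install exposing paddle.fluid.framework and core.VarDesc.VarType) of the mapping convert_np_dtype_to_dtype_ performs for the dtypes used in these tests:

import numpy as np
from paddle.fluid import core, framework

# The converter maps a NumPy dtype to the framework's VarType enum,
# which is what the 'dtype' op attribute expects.
assert framework.convert_np_dtype_to_dtype_(np.float32) == core.VarDesc.VarType.FP32
assert framework.convert_np_dtype_to_dtype_(np.int32) == core.VarDesc.VarType.INT32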
Example No. 2
 def setUp(self):
     self.op_type = "reduce_sum"
     self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
     self.outputs = {'Out': self.inputs['X'].sum().astype('float64')}
     self.attrs = {
         'reduce_all': True,
         'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
         'out_dtype': int(convert_np_dtype_to_dtype_(np.float64))
     }
Example No. 3
 def setUp(self):
     self.op_type = "reduce_sum"
     self.inputs = {'X': np.random.random((6, 2, 10)).astype("float64")}
     self.outputs = {'Out': self.inputs['X'].sum(axis=1, keepdims=True)}
     self.attrs = {
         'dim': [1],
         'keep_dim': True,
         'in_dtype': int(convert_np_dtype_to_dtype_(np.float32)),
         'out_dtype': int(convert_np_dtype_to_dtype_(np.float64))
     }
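The keep_dim attribute mirrors NumPy's keepdims, which is exactly how the expected output above is computed. A quick standalone check:

import numpy as np

x = np.random.random((6, 2, 10))
out = x.sum(axis=1, keepdims=True)
assert out.shape == (6, 1, 10)  # axis 1 is reduced but kept with size 1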
Example No. 4
    def fill_block_desc(self, block_desc):
        for name in self.vars:
            var_desc = block_desc.var(cpt.to_bytes(name))
            var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR)
            if self.vars_lod_level is not None and name in self.vars_lod_level:
                var_desc.set_lod_level(self.vars_lod_level[name])
            if self.vars_var_type is not None and name in self.vars_var_type:
                if self.vars_var_type[name] == VarType.LOD_TENSOR_ARRAY:
                    var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR_ARRAY)
                elif self.vars_var_type[name] == VarType.STEP_SCOPES:
                    var_desc.set_type(core.VarDesc.VarType.STEP_SCOPES)
                    continue
            var_desc.set_dtype(convert_np_dtype_to_dtype_(np.float32))
            if self.vars_dtype is not None and name in self.vars_dtype:
                var_desc.set_dtype(
                    convert_np_dtype_to_dtype_(self.vars_dtype[name]))

        for op_config in self.ops:
            op_desc = block_desc.append_op()
            op_desc.set_type(op_config.type)
            for name, values in op_config.inputs.items():
                op_desc.set_input(name, values)
            for name, values in op_config.attrs.items():
                op_desc._set_attr(name, values)
            for name, values in op_config.outputs.items():
                op_desc.set_output(name, values)
                for v in values:
                    if block_desc.has_var_recursive(cpt.to_bytes(v)):
                        continue
                    var_desc = block_desc.var(cpt.to_bytes(v))
                    var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR)
                    if op_config.outputs_var_type is not None and v in op_config.outputs_var_type:
                        if op_config.outputs_var_type[v] == VarType.LOD_TENSOR_ARRAY:
                            var_desc.set_type(
                                core.VarDesc.VarType.LOD_TENSOR_ARRAY)
                        elif op_config.outputs_var_type[v] == VarType.STEP_SCOPES:
                            var_desc.set_type(core.VarDesc.VarType.STEP_SCOPES)
                            continue
                    var_desc.set_dtype(convert_np_dtype_to_dtype_(np.float32))
                    if op_config.outputs_dtype is not None and v in op_config.outputs_dtype:
                        var_desc.set_dtype(
                            convert_np_dtype_to_dtype_(
                                op_config.outputs_dtype[v]))
            if op_config.type not in _OP_WITHOUT_KERNEL_SET:
                op_desc.infer_var_type(block_desc)
                op_desc.infer_shape(block_desc)
            op_desc.check_attrs()
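The dtype handling above follows a default-then-override pattern: every new variable is first set to float32, and a per-name override, when configured, replaces it. A self-contained sketch of that pattern (resolve_dtype is our name for illustration, not a Paddle helper):

import numpy as np
from paddle.fluid.framework import convert_np_dtype_to_dtype_

def resolve_dtype(name, overrides):
    # Default to float32 unless an explicit dtype is configured for `name`.
    np_dtype = overrides[name] if overrides and name in overrides else np.float32
    return convert_np_dtype_to_dtype_(np_dtype)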
Example No. 5
    def test_from_numpy(self):
        x_numpy = np.ones([10, 12])
        x_np_spec = InputSpec.from_numpy(x_numpy)
        self.assertEqual(x_np_spec.dtype,
                         convert_np_dtype_to_dtype_(x_numpy.dtype))
        self.assertEqual(x_np_spec.shape, x_numpy.shape)
        self.assertEqual(x_np_spec.name, None)

        x_numpy2 = np.array([1, 2, 3, 4]).astype('int64')
        x_np_spec2 = InputSpec.from_numpy(x_numpy2, name='x_np_int64')
        self.assertEqual(x_np_spec2.dtype,
                         convert_np_dtype_to_dtype_(x_numpy2.dtype))
        self.assertEqual(x_np_spec2.shape, x_numpy2.shape)
        self.assertEqual(x_np_spec2.name, 'x_np_int64')
Example No. 6
 def setUp(self):
     self.op_type = "fill_zeros_like2"
     self.dtype = np.float32
     self.init_dtype()
     self.inputs = {'X': np.random.random((219, 232)).astype(self.dtype)}
     self.outputs = {'Out': np.zeros_like(self.inputs["X"])}
     self.attrs = {'dtype': convert_np_dtype_to_dtype_(self.dtype)}
Example No. 7
 def init_config(self):
     shape = [500, 3]
     dtype = 'bool'
     dtype_inner = convert_np_dtype_to_dtype_(dtype)
     self.attrs = {'shape': shape, 'dtype': dtype_inner}
     self.inputs = {}
     self.outputs = {'Out': np.zeros(shape).astype(dtype)}
Example No. 8
 def init_config(self):
     self.shape = [500, 3]
     dtype = 'float32'
     dtype_inner = convert_np_dtype_to_dtype_(dtype)
     self.attrs = {'dtype': dtype_inner}
     self.inputs = {"ShapeTensor": np.array(self.shape).astype("int32")}
     self.outputs = {'Out': np.zeros(self.shape).astype(dtype)}
Example No. 9
 def setUp(self):
     self.op_type = "assign_value"
     self.inputs = {}
     self.attrs = {}
     self.init_data()
     self.attrs["shape"] = self.value.shape
     self.attrs["dtype"] = framework.convert_np_dtype_to_dtype_(
         self.value.dtype)
     self.outputs = {"Out": self.value}
Example No. 10
 def __init__(self, shape, dtype='float32', name=None):
     # replace `None` in shape with -1
     self.shape = self._verify(shape)
     # convert dtype into a unified representation
     if dtype is not None:
         if not isinstance(dtype, core.VarDesc.VarType):
             dtype = convert_np_dtype_to_dtype_(dtype)
     self.dtype = dtype
     self.name = name
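The guard above means the constructor accepts anything np.dtype understands as well as an already-converted enum value. A hedged sketch of the round trip (relying on convert_np_dtype_to_dtype_ normalizing its argument through np.dtype, which holds in current Paddle releases):

import numpy as np
from paddle.fluid import core
from paddle.fluid.framework import convert_np_dtype_to_dtype_

for d in ('float32', np.float32, core.VarDesc.VarType.FP32):
    if not isinstance(d, core.VarDesc.VarType):
        d = convert_np_dtype_to_dtype_(d)
    assert d == core.VarDesc.VarType.FP32  # all three spellings converge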
Example No. 11
    def setUp(self):
        self.initDefaultParameters()
        self.initParameters()
        if not isinstance(self.x, np.ndarray):
            self.x = np.array(self.x)

        self.inputs = {'X': self.x, 'MaxLenTensor': self.maxlen_tensor}
        self.outputs = {'Y': self.calc_ground_truth_mask()}
        self.attrs = {'out_dtype': convert_np_dtype_to_dtype_(self.mask_dtype)}
Example No. 12
 def setUp(self):
     self.op_type = "assign_value"
     x = numpy.random.random(size=(2, 5)).astype(numpy.float32)
     self.inputs = {}
     self.outputs = {'Out': x}
     self.attrs = {
         'shape': x.shape,
         'dtype': framework.convert_np_dtype_to_dtype_(x.dtype),
         'fp32_values': [float(v) for v in x.flat]
     }
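The fp32_values attribute carries the tensor payload as a flat Python list; x.flat iterates in row-major order, so the shape attribute suffices to reconstruct the array. A standalone check:

import numpy as np

x = np.random.random(size=(2, 5)).astype(np.float32)
values = [float(v) for v in x.flat]  # row-major flattening
restored = np.array(values, dtype=np.float32).reshape(x.shape)
assert np.array_equal(restored, x)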
Example No. 13
def unique_segment(data, dtype="int64"):
    """Return Segment Id from data
    """
    if in_dygraph_mode():
        attr_dtype = convert_np_dtype_to_dtype_(dtype)
        unique, index, _ = core.ops.unique_with_counts(data, "dtype",
                                                       attr_dtype)
        return unique, index
    else:
        unique, index, _ = L.unique_with_counts(data)
        return unique, index
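The index tensor returned by unique_with_counts maps each input element to the position of its value among the unique values, which is what makes it usable as a segment id. A rough NumPy analogue:

import numpy as np

data = np.array([1, 1, 2, 3, 2])
unique, index = np.unique(data, return_inverse=True)
# unique -> [1 2 3]; index -> [0 0 1 2 1]: one segment id per input element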
Example No. 14
    def setUp(self):
        '''
        Test eye op with specified shape.
        '''
        self.op_type = "eye"

        self.inputs = {}
        self.attrs = {
            'num_rows': 219,
            'num_columns': 319,
            'dtype': framework.convert_np_dtype_to_dtype_(np.int32)
        }
        self.outputs = {'Out': np.eye(219, 319, dtype=np.int32)}
Example No. 15
    def compare(self):
        a = np.array([[1.0 + 1.0j, 2.0 + 1.0j],
                      [3.0 + 1.0j, 4.0 + 1.0j]]).astype(self._dtype)
        b = np.array([[1.0 + 1.0j, 1.0 + 1.0j]]).astype(self._dtype)

        with dg.guard():
            x = dg.to_variable(a, "x")
            y = dg.to_variable(b)
            out = paddle.fluid.layers.elementwise_add(x, y)
            self.assertIsNotNone("{}".format(out))

        self.assertTrue(np.allclose(out.numpy(), a + b))
        self.assertEqual(out.dtype, convert_np_dtype_to_dtype_(self._dtype))
        self.assertEqual(out.shape, x.shape)
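The expected value relies on plain NumPy broadcasting: b has shape (1, 2), so it is added to every row of a. In isolation:

import numpy as np

a = np.array([[1 + 1j, 2 + 1j], [3 + 1j, 4 + 1j]])
b = np.array([[1 + 1j, 1 + 1j]])  # shape (1, 2) broadcasts over both rows
assert np.allclose(a + b, [[2 + 2j, 3 + 2j], [4 + 2j, 5 + 2j]])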
Example No. 16
    def setUp(self):
        self.op_type = "fill_any_like"
        self.python_api = paddle.full_like
        self.init_data()

        x = np.zeros(self.shape)
        out = np.full_like(x, self.fill_value, self.dtype)

        self.inputs = {'X': x}
        self.outputs = {'Out': out}
        self.attrs = {
            'value': self.fill_value,
            'dtype': convert_np_dtype_to_dtype_(self.dtype)
        }
Example No. 17
    def init_config(self):
        self.shape = [123, 92]
        self.infer_shape = [-1, 92]

        dtype = 'float32'
        dtype_inner = convert_np_dtype_to_dtype_(dtype)

        shape_tensor_list = []
        for index, ele in enumerate(self.shape):
            shape_tensor_list.append(
                ("x" + str(index), np.ones(1, dtype='int32') * ele))

        self.inputs = {"ShapeTensorList": shape_tensor_list}
        self.attrs = {'shape': self.infer_shape, 'dtype': dtype_inner}
        self.outputs = {'Out': np.zeros(self.shape).astype(dtype)}
Example No. 18
    def setUp(self):
        self.op_type = "fill_constant_batch_size_like"
        self.python_api = fill_constant_batch_size_like
        self.init_data()

        input = np.zeros(self.shape)
        out = np.full_like(input, self.value, self.dtype)

        self.inputs = {'Input': input}
        self.outputs = {'Out': out}
        self.attrs = {
            'shape': self.shape,
            'dtype': convert_np_dtype_to_dtype_(self.dtype),
            'value': self.value,
            'input_dim_idx': self.input_dim_idx,
            'output_dim_idx': self.output_dim_idx,
            'force_cpu': self.force_cpu
        }
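np.full_like builds the ground truth here: an array with the input's shape, every element set to value, cast to dtype. A minimal check:

import numpy as np

inp = np.zeros((2, 3))
out = np.full_like(inp, 7, dtype=np.int64)
assert out.shape == inp.shape and (out == 7).all()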
Example No. 19
 def test_shape_with_none(self):
     tensor_spec = InputSpec([None, 4, None], dtype='int8', name='x_spec')
     self.assertEqual(tensor_spec.dtype, convert_np_dtype_to_dtype_('int8'))
     self.assertEqual(tensor_spec.name, 'x_spec')
     self.assertEqual(tensor_spec.shape, (-1, 4, -1))
Example No. 20
 def test_default(self):
     tensor_spec = InputSpec([3, 4])
     self.assertEqual(tensor_spec.dtype,
                      convert_np_dtype_to_dtype_('float32'))
     self.assertEqual(tensor_spec.name, None)
Example No. 21
def create_fake_model(program_config):
    '''Create a Paddle model (in memory) according to the given config.'''
    paddle.enable_static()
    main_program_desc = core.ProgramDesc()
    util_program = fluid.Program()
    main_block_desc = main_program_desc.block(0)

    var_desc = main_block_desc.var(cpt.to_bytes("feed"))
    var_desc.set_type(core.VarDesc.VarType.FEED_MINIBATCH)
    var_desc.set_persistable(True)

    index = 0
    for name, tensor_config in program_config.inputs.items():
        var_desc = main_block_desc.var(cpt.to_bytes(name))
        var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR)
        var_desc.set_dtype(convert_np_dtype_to_dtype_(tensor_config.dtype))
        var_desc.set_shape(tensor_config.shape)
        var_desc.set_need_check_feed(True)
        if tensor_config.lod is not None:
            var_desc.set_lod_level(len(tensor_config.lod))
        op_desc = main_block_desc._prepend_op()
        op_desc.set_type("feed")
        op_desc.set_input('X', ["feed"])
        op_desc.set_output('Out', [name])
        op_desc._set_attr("col", index)
        index = index + 1

    save_var_map = {}
    for name, tensor_config in program_config.weights.items():
        var_desc = main_block_desc.var(cpt.to_bytes(name))
        var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR)
        var_desc.set_dtype(convert_np_dtype_to_dtype_(tensor_config.dtype))
        var_desc.set_shape(tensor_config.shape)
        var_desc.set_persistable(True)

        save_var_map[name] = util_program.global_block().create_parameter(
            dtype=tensor_config.dtype,
            shape=tensor_config.shape,
            type=core.VarDesc.VarType.LOD_TENSOR,
            name=name,
            initializer=NumpyArrayInitializer(tensor_config.data))
    in_vars = []
    for name in sorted(save_var_map.keys()):
        in_vars.append(save_var_map[name])

    out_var = util_program.global_block().create_var(
        type=core.VarDesc.VarType.RAW, name="out_var_0")
    out_var.desc.set_persistable(True)
    util_program.global_block().append_op(type='save_combine',
                                          inputs={'X': in_vars},
                                          outputs={'Y': out_var},
                                          attrs={
                                              'file_path': '',
                                              'save_to_memory': True
                                          })
    for op_config in program_config.ops:
        op_desc = main_block_desc.append_op()
        op_desc.set_type(op_config.type)
        for name, values in op_config.inputs.items():
            op_desc.set_input(name, values)
        for name, values in op_config.attrs.items():
            op_desc._set_attr(name, values)
        for name, values in op_config.outputs.items():
            op_desc.set_output(name, values)
            for v in values:
                var_desc = main_block_desc.var(cpt.to_bytes(v))
                var_desc.set_type(core.VarDesc.VarType.LOD_TENSOR)
                var_desc.set_dtype(convert_np_dtype_to_dtype_(np.float32))
                if op_config.outputs_dtype is not None and v in op_config.outputs_dtype:
                    var_desc.set_dtype(
                        convert_np_dtype_to_dtype_(op_config.outputs_dtype[v]))

        op_desc.infer_var_type(main_block_desc)
        op_desc.infer_shape(main_block_desc)
        op_desc.check_attrs()

    for index, name in enumerate(program_config.outputs):
        var_desc = main_block_desc.var(cpt.to_bytes("fetch"))
        var_desc.set_type(core.VarDesc.VarType.FETCH_LIST)
        var_desc.set_need_check_feed(True)
        op_desc = main_block_desc.append_op()
        op_desc.set_type("fetch")
        op_desc.set_input('X', [name])
        op_desc.set_output('Out', ["fetch"])
        op_desc._set_attr("col", index)

    main_program_desc._set_version()
    paddle.fluid.core.save_op_version_info(main_program_desc)

    model = main_program_desc.serialize_to_string()

    util_program._sync_with_cpp()
    place = fluid.CPUPlace()
    executor = fluid.Executor(place)
    scope = fluid.Scope()
    with fluid.scope_guard(scope):
        executor.run(util_program)
        params = scope.find_var("out_var_0").get_bytes()
    return model, params
Example No. 22
    def __init__(self,
                 block,
                 type=core.VarDesc.VarType.LOD_TENSOR,
                 name=None,
                 shape=None,
                 dtype=None,
                 lod_level=None,
                 capacity=None,
                 persistable=None,
                 error_clip=None,
                 stop_gradient=False,
                 is_data=False,
                 need_check_feed=False,
                 belong_to_optimizer=False,
                 **kwargs):
        self.block = block
        if name is None:
            name = unique_name.generate('_generated_var')

        if dtype is not None:
            if not isinstance(dtype, core.VarDesc.VarType):
                dtype = convert_np_dtype_to_dtype_(dtype)

        self.belong_to_optimizer = belong_to_optimizer

        self.error_clip = error_clip

        is_new_var = False
        name = cpt.to_text(name)
        self.desc = self.block.desc.find_var(cpt.to_bytes(name))

        if self.desc is None:
            self.desc = self.block.desc.var(cpt.to_bytes(name))
            is_new_var = True

        if is_new_var:
            self.desc.set_type(type)
        elif self.desc.type() != type:
            raise ValueError("MpcVariable {0} has been created before. The "
                             "previous type is {1}; the new type is {2}. They"
                             " are not matched".format(self.name,
                                                       self.desc.type(), type))
        if shape is not None:
            if is_new_var:
                # resize the shape for MpcVariable
                mpc_shape = list(shape)
                mpc_shape.insert(0, 2)
                self.desc.set_shape(mpc_shape)
            else:
                old_shape = self.shape
                shape = tuple(shape)
                if shape != old_shape:
                    raise ValueError(
                        "MpcVariable {0} has been created before. the previous "
                        "shape is {1}; the new shape is {2}. They are not "
                        "matched.".format(self.name, old_shape, shape))
        if dtype is not None:
            if is_new_var:
                self.desc.set_dtype(dtype)
            else:
                old_dtype = self.dtype
                if dtype != old_dtype:
                    raise ValueError(
                        "MpcVariable {0} has been created before. "
                        "The previous data type is {1}; the new "
                        "data type is {2}. They are not "
                        "matched.".format(self.name, old_dtype, dtype))

        if lod_level is not None:
            if is_new_var:
                self.desc.set_lod_level(lod_level)
            else:
                if lod_level != self.lod_level:
                    raise ValueError(
                        "MpcVariable {0} has been created before. "
                        "The previous lod_level is {1}; the new "
                        "lod_level is {2}. They are not "
                        "matched".format(self.name, self.lod_level, lod_level))
        if persistable is not None:
            if is_new_var:
                self.desc.set_persistable(persistable)
            else:
                if persistable != self.persistable:
                    raise ValueError(
                        "MpcVariable {0} has been created before."
                        "The previous persistable is {1}; the new "
                        "persistable is {2}. They are not matched".format(
                            self.name, self.persistable, persistable))

        if need_check_feed and is_new_var:
            self.desc.set_need_check_feed(need_check_feed)

        if capacity is not None:
            if is_new_var:
                self.desc.set_capacity(capacity)
            else:
                # TODO(abhinavarora): compare with the set capacity once
                # get_capacity is implemented (planned by Paddle 1.7).
                pass

        self.block.vars[name] = self
        self.op = None
        self._stop_gradient = stop_gradient
        self.is_data = is_data
Example No. 23
File: nn.py  Project: iducn/Paddle
def tdm_child(x, node_nums, child_nums, param_attr=None, dtype='int32'):
    """
    **Tdm Child**
     According to the input node_id on the given tree, return the corresponding child node_id and 
      whether child is a leaf node by leaf_mask value.
    .. code-block:: text

        Given:
            tree[[0], [1, 2], [3, 4], [5, 6]] # A binary tree with seven nodes
            x = [[2], [3]]
            node_nums = 7
            child_nums = 2

          we get:
            child = [[5, 6],
                     [0, 0]]
            leaf_mask = [[1, 1],
                         [0, 0]]
    Args:
        x(Variable): Variable contained the node_id information, dtype support int32/int64.
        node_nums(int): Number of total nodes.
        child_nums(int): Maximum number of child nodes per node.
        param_attr(ParamAttr): To specify the tdm-tree-info parameter property. Default: None, which means the
            default weight parameter property is used. See usage for details in: ref: `api_fluid_ParamAttr`, should
            has shape(node_nums, 3 + child_nums), dtype support int32/int64. 
            The dimension[1] of tdm-tree-info contains the following: 
            1. Item_id(int, shape(1)), if node is a leaf node, give its item_id corresponding to node_id, else give 0.
            2. Layer_id(int, shape(1)), indicates which layer the node is on.
            3. Parent_id(int, shape(1)), node's parent node.
            4. Child_id(int, shape(child_nums)), all child node's node_id of this node should be given. 
            If the number of child nodes is insufficient, padding 0 until child nums equal to child_nums
        dtype(str): The data type of output child and leaf_mask, support int32/int64.

    Returns:
        tuple: A tuple including input node's child(Variable) and leaf_mask(Variable). 
            If child is a leaf node, leaf_mask equal ot 1, otherwise equal to 0.

    Examples:
        .. code-block:: python
        import paddle.fluid as fluid
        import numpy as np
        x = fluid.data(name="x", shape=[None, 1], dtype="int32", lod_level=1)
        tree_info = [[0,0,0,1,2],
                     [0,1,0,3,4],[0,1,0,5,6],
                     [0,2,1,0,0],[1,2,1,0,0],[2,2,2,0,0],[3,2,2,0,0]]
        tree_info_np = np.array(tree_info)
        tree_info_np = np.reshape(tree_info_np, (7,5))
        node_nums = 7
        child_nums = 2
        child, leaf_mask  = fluid.contrib.layers.tdm_child(x, node_nums, child_nums,
                                param_attr=fluid.ParamAttr(
                                    initializer=fluid.initializer.NumpyArrayInitializer(
                                                                            tree_info_np)))
        place = fluid.CPUPlace()
        exe = fluid.Executor(place)
        exe.run(fluid.default_startup_program())
        xx = np.array([[2],[3]]).reshape((2,1)).astype("int32")
        child_res, leaf_mask_res = exe.run(feed={"x":xx}, fetch_list=[child, leaf_mask])
     """
    helper = LayerHelper("tdm_child", **locals())
    check_dtype(dtype, 'dtype', ['int32', 'int64'],
                'fluid.contrib.layers.tdm_child')
    c_dtype = convert_np_dtype_to_dtype_(dtype)
    tree_info = helper.create_parameter(attr=helper.param_attr,
                                        shape=[node_nums, 3 + child_nums],
                                        dtype=dtype,
                                        default_initializer=Constant(0))
    tree_info.stop_gradient = True

    child = helper.create_variable_for_type_inference(dtype=dtype)
    leaf_mask = helper.create_variable_for_type_inference(dtype=dtype)

    helper.append_op(type='tdm_child',
                     inputs={
                         'X': x,
                         'TreeInfo': tree_info
                     },
                     outputs={
                         'Child': child,
                         'LeafMask': leaf_mask
                     },
                     attrs={
                         'child_nums': child_nums,
                         'dtype': c_dtype
                     },
                     stop_gradient=True)
    return (child, leaf_mask)
Example No. 24
 def test_convert_np_dtype_to_dtype(self):
     self.assertEqual(convert_np_dtype_to_dtype_(np.complex64),
                      core.VarDesc.VarType.COMPLEX64)
     self.assertEqual(convert_np_dtype_to_dtype_(np.complex128),
                      core.VarDesc.VarType.COMPLEX128)
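Since the converter normalizes its argument through np.dtype, dtype strings and np.dtype instances are accepted as well; a small sketch under that assumption:

import numpy as np
from paddle.fluid import core
from paddle.fluid.framework import convert_np_dtype_to_dtype_

assert convert_np_dtype_to_dtype_('complex64') == core.VarDesc.VarType.COMPLEX64
assert convert_np_dtype_to_dtype_(np.dtype('complex128')) == core.VarDesc.VarType.COMPLEX128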