def check_with_place(self, place):
        """Run lookup_table with a SelectedRows weight and check the output.

        Ids selects rows [0, 4, 3, 5] from W; W's row i is filled with the
        constant value i, so every element of output row idx must equal the
        looked-up id ids_array[idx][0].
        """
        scope = core.Scope()

        # create and initialize Id Variable
        ids_tensor = scope.var('Ids').get_tensor()
        ids_array = np.array([[0], [4], [3], [5]]).astype("int64")
        ids_tensor.set(ids_array, place)

        # create and initialize W Variable as SelectedRows:
        # height 7, 12 columns, row i holds the constant value i
        rows = [0, 1, 2, 3, 4, 5, 6]
        row_numel = 12

        w_selected_rows = scope.var('W').get_selected_rows()
        w_selected_rows.set_height(len(rows))
        w_selected_rows.set_rows(rows)
        w_array = np.ones((len(rows), row_numel)).astype("float32")
        for i in range(len(rows)):
            w_array[i] *= i
        w_tensor = w_selected_rows.get_tensor()
        w_tensor.set(w_array, place)

        # create Out Variable
        out_tensor = scope.var('Out').get_tensor()

        # create and run lookup_table operator
        lookup_table = Operator("lookup_table", W='W', Ids='Ids', Out='Out')
        lookup_table.run(scope, place)

        # get result from Out: output row idx must be the constant row id,
        # because W's row k is the constant k
        result_array = np.array(out_tensor)
        for idx, row in enumerate(ids_array):
            assert (row[0] == result_array[idx]).all()
# Esempio n. 2
    def check_with_place(self, place, lazy_mode):
        """Run the adam operator in a fresh scope and compare every output
        tensor elementwise against the expected numpy arrays.

        Feeds self.dense_inputs / self.sparse_inputs / self.outputs /
        self.attrs (prepared by self.setup) into an "adam" Operator, then
        checks each produced tensor against self.outputs within 1e-5.
        """
        scope = core.Scope()
        self.setup(scope, place, lazy_mode)

        op_args = dict()
        op_args['lazy_mode'] = lazy_mode
        # dense inputs: copy each numpy array into a scope tensor
        for key, np_array in self.dense_inputs.items():
            var = scope.var(key).get_tensor()
            var.set(np_array, place)
            op_args[key] = key
        # sparse inputs were created by self.setup(); only wire the names
        for s in self.sparse_inputs:
            op_args[s] = s
        # outputs: pre-fill with init_output so in-place updates start known
        for s in self.outputs:
            var = scope.var(s).get_tensor()
            var.set(self.init_output, place)
            op_args[s] = s
        for k in self.attrs:
            op_args[k] = self.attrs[k]

        # create and run adam operator
        adam_op = Operator("adam", **op_args)
        adam_op.run(scope, place)

        for key, np_array in self.outputs.items():
            out_var = scope.var(key).get_tensor()
            actual = np.array(out_var)
            actual = actual.reshape([actual.size])
            np_array = np_array.reshape([np_array.size])

            for i in range(np_array.size):
                # Compare the ABSOLUTE difference: the original compared the
                # signed difference, so arbitrarily large negative errors
                # (actual far below expected) passed undetected.
                self.assertLess(abs(actual[i] - np_array[i]), 0.00001)
    def check_with_place(self, place):
        """get_tensor_from_selected_rows must copy the SelectedRows value
        tensor (duplicate row ids allowed) into a plain tensor unchanged."""
        scope = core.Scope()
        # row id 5 is duplicated on purpose; output keeps one entry per row
        x_rows = [0, 5, 5, 4, 19]
        height = 20
        row_numel = 2

        np_array = np.ones((len(x_rows), row_numel)).astype("float32")
        np_array[1, :] = 2.0
        np_array[2, :] = 3.0
        np_array[3, :] = 4.0

        # initialize input variable X
        x = scope.var('X').get_selected_rows()
        x.set_rows(x_rows)
        x.set_height(height)
        x_tensor = x.get_tensor()
        x_tensor.set(np_array, place)

        # initialize the output variable Out
        out = scope.var("Out").get_tensor()

        op = Operator("get_tensor_from_selected_rows", X="X", Out="Out")

        op.run(scope, place)

        # the extracted tensor must match the source values exactly
        out_array = np.array(out)
        self.assertEqual((5, 2), out_array.shape)
        assert (out_array == np_array).all()
# Esempio n. 4
def create_op(scope, op_type, inputs, outputs, attrs):
    """Build an ``Operator`` of ``op_type``.

    Creates a scope tensor for every declared input/output variable name
    and forwards any attribute of the op that appears in ``attrs``.
    """
    kwargs = dict()

    def _register(slot, var_name):
        # materialize the variable in the scope, then record it under slot
        scope.var(var_name).get_tensor()
        kwargs[slot].append(var_name)

    for name, is_duplicable in Operator.get_op_inputs(op_type):
        if name not in inputs:
            continue
        kwargs[name] = []
        if is_duplicable:
            # duplicable slot: a sequence of (var_name, value) pairs
            for entry in inputs[name]:
                var_name, _ = entry[0], entry[1]
                _register(name, var_name)
        else:
            _register(name, name)

    for name, is_duplicable in Operator.get_op_outputs(op_type):
        if name not in outputs:
            continue
        kwargs[name] = []
        if is_duplicable:
            for entry in outputs[name]:
                var_name, _ = entry[0], entry[1]
                _register(name, var_name)
        else:
            _register(name, name)

    for attr_name in Operator.get_op_attr_names(op_type):
        if attr_name in attrs:
            kwargs[attr_name] = attrs[attr_name]

    return Operator(op_type, **kwargs)
# Esempio n. 5
    def check_with_place(self, place, inplace):
        """sum over [SelectedRows, LoDTensor]; with inplace=True the dense
        operand x1 also serves as the output variable."""
        scope = core.Scope()
        if inplace:
            # output aliases the dense input x1
            self.create_lod_tensor(scope, place, "x1")
            self.create_selected_rows(scope, place, "x2", True)
            out = scope.var("x1").get_tensor()
            out_name = "x1"
        else:
            self.create_selected_rows(scope, place, "x1", True)
            self.create_lod_tensor(scope, place, "x2")
            out = scope.var("out").get_tensor()
            out_name = "out"

        # create and run sum operator
        sum_op = Operator("sum", X=["x1", "x2"], Out=out_name)
        sum_op.run(scope, place)

        # each row listed in self.rows receives one extra contribution
        result = np.ones((1, self.height)).astype(np.int32).tolist()[0]
        for ele in self.rows:
            result[ele] += 1

        out_t = np.array(out)
        self.assertEqual(out_t.shape[0], self.height)
        # expected: the base array scaled per-row by the contribution counts
        self.assertTrue(
            np.array_equal(
                out_t,
                self._get_array([i
                                 for i in range(self.height)], self.row_numel)
                * np.tile(
                    np.array(result).reshape(self.height, 1), self.row_numel)))
# Esempio n. 6
    def check_with_place(self, place):
        """Run the adam operator and verify each updated sparse row against
        the expected numpy values within a 1e-5 relative tolerance."""
        scope = core.Scope()
        self.setup(scope, place)

        op_args = dict()
        # dense inputs: copy each numpy array into a scope tensor
        for key, np_array in self.dense_inputs.items():
            var = scope.var(key).get_tensor()
            var.set(np_array, place)
            op_args[key] = key
        # sparse inputs were created by self.setup(); only wire the names
        for s in self.sparse_inputs:
            op_args[s] = s
        for s in self.outputs:
            var = scope.var(s).get_tensor()
            var.set(self.outputs[s], place)
            op_args[s] = s
        for k in self.attrs:
            op_args[k] = self.attrs[k]

        # create and run adam operator
        adam_op = Operator("adam", **op_args)
        adam_op.run(scope, place)

        for key, np_array in self.outputs.items():
            out_var = scope.var(key).get_tensor()
            actual = np.array(out_var)
            actual = actual.reshape([actual.size])
            np_array = np_array.reshape([np_array.size])
            for idx, row_id in enumerate(self.rows):
                for j in range(self.row_numel):
                    pos = row_id * self.row_numel + j
                    # Use the ABSOLUTE relative error: the original compared
                    # the signed ratio, which let large negative errors pass.
                    # NOTE(review): assumes actual[pos] != 0 — TODO confirm.
                    self.assertLess(
                        abs(actual[pos] - np_array[pos]) / abs(actual[pos]),
                        0.00001)
# Esempio n. 7
    def check_with_place(self, place):
        """extract_rows must emit the row index list of a SelectedRows input
        (duplicates preserved, one id per output row)."""
        scope = core.Scope()

        # create and initialize the input SelectedRows (row 4 duplicated)
        feature_len = 12
        rows = [0, 4, 4, 7]
        np_array = np.ones((len(rows), feature_len)).astype("float32")

        in_x = scope.var('X').get_selected_rows()
        in_x.set_height(len(rows))
        in_x.set_rows(rows)
        in_x_tensor = in_x.get_tensor()
        in_x_tensor.set(np_array, place)

        # create Out Variable
        out_tensor = scope.var('Out').get_tensor()

        # create and run extract_rows operator
        extract_rows_op = Operator("extract_rows", X='X', Out='Out')
        extract_rows_op.run(scope, place)

        # get result from Out: first column holds the extracted row ids
        result_array = np.array(out_tensor)
        result_array = [ele[0] for ele in result_array]
        assert result_array == rows
    def check_with_selected_rows(self, place):
        """share_data on a SelectedRows input must expose the same values,
        height and row list on the output."""
        scope = core.Scope()
        x_rows = [0, 1, 5, 4, 19]
        x_height = 20
        row_numel = 2
        np_array = np.ones((len(x_rows), row_numel)).astype("float32")

        # initialize input variable
        x = scope.var('X').get_selected_rows()
        x.set_rows(x_rows)
        x.set_height(x_height)
        x_tensor = x.get_tensor()
        x_tensor.set(np_array, place)

        # initialize the Out variable
        out = scope.var("Out").get_selected_rows()
        out_tensor = out.get_tensor()

        op = Operator("share_data", X="X", Out="Out")
        op.run(scope, place)

        # output must mirror the input's values and metadata
        out_height = out.height()
        out_rows = out.rows()
        self.assertTrue(np.allclose(np_array, out_tensor))
        self.assertEqual(x_height, out_height)
        self.assertEqual(x_rows, out_rows)
# Esempio n. 9
    def check_with_place(self, place):
        """Run the adam operator and verify each updated sparse row against
        the expected numpy values within a 1e-5 relative tolerance."""
        scope = core.Scope()
        self.setup(scope, place)

        op_args = dict()
        # .items() replaces the Python-2-only .iteritems(), matching the
        # other tests in this file
        for key, np_array in self.dense_inputs.items():
            var = scope.var(key).get_tensor()
            var.set(np_array, place)
            op_args[key] = key
        for s in self.sparse_inputs:
            op_args[s] = s
        for s in self.outputs:
            var = scope.var(s).get_tensor()
            var.set(self.outputs[s], place)
            op_args[s] = s
        for k in self.attrs:
            op_args[k] = self.attrs[k]

        # create and run adam operator
        adam_op = Operator("adam", **op_args)
        adam_op.run(scope, place)

        for key, np_array in self.outputs.items():
            out_var = scope.var(key).get_tensor()
            actual = np.array(out_var)
            actual = actual.reshape([actual.size])
            np_array = np_array.reshape([np_array.size])
            for idx, row_id in enumerate(self.rows):
                for j in range(self.row_numel):
                    pos = row_id * self.row_numel + j
                    # ABSOLUTE relative error: the signed ratio comparison
                    # let large negative errors pass undetected.
                    # NOTE(review): assumes actual[pos] != 0 — TODO confirm.
                    self.assertLess(
                        abs(actual[pos] - np_array[pos]) / abs(actual[pos]),
                        0.00001)
    def check_with_place(self, place):
        """sqrt on a SelectedRows input must equal np.sqrt elementwise."""
        scope = core.Scope()

        # create and initialize the input SelectedRows (X1)
        height = 10
        rows = [0, 4, 7]
        self.row_numel = 12

        x_selected_rows = scope.var('X1').get_selected_rows()
        x_selected_rows.set_height(height)
        x_selected_rows.set_rows(rows)
        np_array = np.ones((len(rows), self.row_numel)).astype("float32")
        np_array[0, 0] = 2.0
        np_array[2, 8] = 4.0

        x_tensor = x_selected_rows.get_tensor()
        x_tensor.set(np_array, place)

        out_selected_rows = scope.var('Out1').get_selected_rows()
        # create and run sqrt operator
        sqrt_op = Operator("sqrt", X='X1', Out='Out1')
        sqrt_op.run(scope, place)

        # get and compare result against the numpy reference
        result_array = np.array(out_selected_rows.get_tensor())
        self.assertTrue(np.allclose(result_array, np.sqrt(np_array)))
# Esempio n. 11
    def check_with_place(self, place, in_name, out_name):
        """scale on a SelectedRows input: values multiplied by `scale`,
        height and row list carried over unchanged."""
        scope = core.Scope()

        # create and initialize the input SelectedRows
        in_height = 10
        in_rows = [0, 4, 7]
        in_row_numel = 12
        scale = 2.0

        in_selected_rows = scope.var(in_name).get_selected_rows()
        in_selected_rows.set_height(in_height)
        in_selected_rows.set_rows(in_rows)
        in_array = np.random.random(
            (len(in_rows), in_row_numel)).astype("float32")

        in_tensor = in_selected_rows.get_tensor()
        in_tensor.set(in_array, place)

        # create the output SelectedRows with matching tensor dims
        out_selected_rows = scope.var(out_name).get_selected_rows()
        out_tensor = out_selected_rows.get_tensor()
        out_tensor._set_dims(in_tensor._get_dims())

        # create and run scale operator
        scale_op = Operator("scale", X=in_name, Out=out_name, scale=scale)
        scale_op.run(scope, place)

        # get and compare result: values scaled, metadata unchanged
        out_height = out_selected_rows.height()
        out_rows = out_selected_rows.rows()
        result_array = np.array(out_tensor)

        assert (in_array * scale == result_array).all()
        assert in_height == out_height
        assert in_rows == out_rows
# Esempio n. 12
 def test_run(self):
     """Run beam_search (beam_size=2, end_id=0, lod level 0) on the fixture
     scope and check the selected ids/scores, their LoD and parent_idx."""
     op = Operator(
         'beam_search',
         pre_ids='pre_ids',
         pre_scores='pre_scores',
         ids='ids',
         scores='scores',
         selected_ids='selected_ids',
         selected_scores='selected_scores',
         parent_idx='parent_idx',
         level=0,
         beam_size=2,
         end_id=0,
     )
     op.run(self.scope, core.CPUPlace())
     selected_ids = self.scope.find_var("selected_ids").get_tensor()
     selected_scores = self.scope.find_var("selected_scores").get_tensor()
     parent_idx = self.scope.find_var("parent_idx").get_tensor()
     # expected top-2 candidates per source sentence, as column vectors
     self.assertTrue(
         np.allclose(np.array(selected_ids),
                     np.array([4, 2, 3, 8])[:, np.newaxis]))
     self.assertTrue(
         np.allclose(np.array(selected_scores),
                     np.array([0.5, 0.6, 0.9, 0.7])[:, np.newaxis]))
     self.assertEqual(selected_ids.lod(), [[0, 2, 4], [0, 1, 2, 3, 4]])
     self.assertTrue(
         np.allclose(np.array(parent_idx), np.array([0, 1, 2, 3])))
    def check_with_place(self, place):
        """Smoke-test lookup_sparse_table_grad_split: it splits the grad
        SelectedRows 'W' into row ids ('Ids') and values ('W').

        NOTE(review): results are only printed, never asserted — this is a
        debug/smoke test as written.
        """
        scope = core.Scope()

        rows = [0, 1, 2, 3, 4, 5, 6]
        row_numel = 7

        # grad input as SelectedRows: row i holds the constant value i
        w_selected_rows = scope.var('W').get_selected_rows()
        w_selected_rows.set_height(len(rows))
        w_selected_rows.set_rows(rows)
        w_array = np.ones((len(rows), row_numel)).astype("float32")
        for i in range(len(rows)):
            w_array[i] *= i
        w_tensor = w_selected_rows.get_tensor()
        w_tensor.set(w_array, place)

        # create and initialize Id Variable
        ids = scope.var("Ids").get_tensor()

        # create and run lookup_sparse_table_grad_split operator
        lookup_table = Operator("lookup_sparse_table_grad_split",
                                Grad='W',
                                Row={'Ids'},
                                Value={'W'},
                                is_entry=False,
                                tablename="sparse")
        lookup_table.run(scope, place)

        # dump the split row ids and values (debug output only)
        result_array1 = np.array(ids)
        print(result_array1)
        print("== = = == == = == ==== ==== === ")
        value = scope.var("W").get_tensor()
        result_array1 = np.array(value)
        print(result_array1.shape)
        print(result_array1)
# Esempio n. 14
    def check_with_place(self, place):
        """SGD with both Param and Grad as SelectedRows: rows touched by the
        gradient are updated as param -= lr * grad, others stay unchanged."""
        scope = core.Scope()

        row_width = 12
        # create and initialize Grad Variable
        grad_height = 10
        grad_rows = [0, 4, 7]

        grad_selected_rows = scope.var('Grad').get_selected_rows()
        grad_selected_rows.set_height(grad_height)
        grad_selected_rows.set_rows(grad_rows)
        grad_array = np.ones((len(grad_rows), row_width)).astype("float32")
        grad_array[0, 0] = 2.0
        grad_array[2, 8] = 4.0

        grad_tensor = grad_selected_rows.get_tensor()
        grad_tensor.set(grad_array, place)

        # create and initialize Param Variable as SelectedRows;
        # row i holds the constant value i
        param_rows = [0, 1, 2, 3, 4, 5, 6, 7]

        # init Param
        w_selected_rows = scope.var('Param').get_selected_rows()
        w_selected_rows.set_height(len(param_rows))
        w_selected_rows.set_rows(param_rows)
        w_selected_rows.sync_index()
        w_array = np.ones((len(param_rows), row_width)).astype("float32")
        for i in range(len(param_rows)):
            w_array[i] *= i
        w_tensor = w_selected_rows.get_tensor()
        w_tensor.set(w_array, place)

        w_before_optimize = np.array(w_tensor)

        # create and initialize LearningRate Variable (lr = 0.1)
        lr_value = 0.1
        lr = scope.var('LearningRate').get_tensor()
        lr_array = np.full((1), lr_value).astype("float32")
        lr.set(lr_array, place)

        # reference update computed in Python: param -= lr * grad per row
        w_after_optimize = np.copy(w_before_optimize)
        for index, id in enumerate(grad_rows):
            w_after_optimize[id] = w_before_optimize[
                id] - lr_value * grad_array[index]

        # create and run sgd operator
        sgd_op = Operator(
            "sgd",
            Param='Param',
            Grad='Grad',
            ParamOut='Param',
            LearningRate='LearningRate')
        sgd_op.run(scope, place)

        # get and compare result against the Python reference
        result_array = np.array(w_tensor)
        assert (result_array == w_after_optimize).all()
# Esempio n. 15
    def check_sgd_step(self, place):
        """Exercise the SGD branch (step=15.0) and validate 'SGDOut'."""
        self.setup(place=place, step=15.0)

        # build and run the operator under test in one expression
        Operator(self.op_type, **self.kwargs).run(self.scope, self.place)

        self.check(
            np.array(self.param_tensor), self.outputs['SGDOut'], self.place,
            self.param_name)
# Esempio n. 16
    def check_with_place(self, place):
        """SGD with both Param and Grad as SelectedRows (no sync_index on
        Param): rows touched by the gradient are updated as
        param -= lr * grad, others stay unchanged."""
        scope = core.Scope()

        row_width = 12
        # create and initialize Grad Variable
        grad_height = 10
        grad_rows = [0, 4, 7]

        grad_selected_rows = scope.var('Grad').get_selected_rows()
        grad_selected_rows.set_height(grad_height)
        grad_selected_rows.set_rows(grad_rows)
        grad_array = np.ones((len(grad_rows), row_width)).astype("float32")
        grad_array[0, 0] = 2.0
        grad_array[2, 8] = 4.0

        grad_tensor = grad_selected_rows.get_tensor()
        grad_tensor.set(grad_array, place)

        # create and initialize Param Variable as SelectedRows;
        # row i holds the constant value i
        param_rows = [0, 1, 2, 3, 4, 5, 6, 7]

        # init Param
        w_selected_rows = scope.var('Param').get_selected_rows()
        w_selected_rows.set_height(len(param_rows))
        w_selected_rows.set_rows(param_rows)
        w_array = np.ones((len(param_rows), row_width)).astype("float32")
        for i in range(len(param_rows)):
            w_array[i] *= i
        w_tensor = w_selected_rows.get_tensor()
        w_tensor.set(w_array, place)

        w_before_optimize = np.array(w_tensor)

        # create and initialize LearningRate Variable (lr = 0.1)
        lr_value = 0.1
        lr = scope.var('LearningRate').get_tensor()
        lr_array = np.full((1), lr_value).astype("float32")
        lr.set(lr_array, place)

        # reference update computed in Python: param -= lr * grad per row
        w_after_optimize = np.copy(w_before_optimize)
        for index, id in enumerate(grad_rows):
            w_after_optimize[id] = w_before_optimize[
                id] - lr_value * grad_array[index]

        # create and run sgd operator
        sgd_op = Operator(
            "sgd",
            Param='Param',
            Grad='Grad',
            ParamOut='Param',
            LearningRate='LearningRate')
        sgd_op.run(scope, place)

        # get and compare result against the Python reference
        result_array = np.array(w_tensor)
        assert (result_array == w_after_optimize).all()
# Esempio n. 17
    def test_get_set(self):
        """Append 5 beam-search steps for 2 source sentences, then check
        that beam_search_decode reconstructs the expected sentences, scores
        and LoD (beam_size=2, end_id=1)."""
        ids = self.scope.var("ids").get_lod_tensor_array()
        scores = self.scope.var("scores").get_lod_tensor_array()
        # Construct sample data with 5 steps and 2 source sentences
        # beam_size = 2, end_id = 1; the first step starts with start_id.
        # Each entry: (lod, per-step candidate data).
        steps = [
            ([[0, 1, 2], [0, 1, 2]], [0, 0]),
            ([[0, 1, 2], [0, 2, 4]], [2, 3, 4, 5]),
            ([[0, 2, 4], [0, 2, 2, 4, 4]], [3, 1, 5, 4]),
            ([[0, 2, 4], [0, 1, 2, 3, 4]], [1, 1, 3, 5]),
            ([[0, 2, 4], [0, 0, 0, 2, 2]], [5, 1]),
        ]
        # Plain loops instead of list comprehensions that were used only
        # for their side effects (the built lists were discarded).
        for lod, data in steps:
            for array, dtype in ((ids, "int64"), (scores, "float32")):
                self.append_lod_tensor(array, lod, np.array(data, dtype=dtype))

        sentence_ids = self.scope.var("sentence_ids").get_tensor()
        sentence_scores = self.scope.var("sentence_scores").get_tensor()

        beam_search_decode_op = Operator(
            "beam_search_decode",
            # inputs
            Ids="ids",
            Scores="scores",
            # outputs
            SentenceIds="sentence_ids",
            SentenceScores="sentence_scores",
            beam_size=2,
            end_id=1,
        )

        beam_search_decode_op.run(self.scope, self.place)

        # both outputs share one LoD: 2 sources, 4 decoded sentences
        expected_lod = [[0, 2, 4], [0, 4, 7, 12, 17]]
        self.assertEqual(sentence_ids.lod(), expected_lod)
        self.assertEqual(sentence_scores.lod(), expected_lod)

        expected_data = np.array(
            [0, 2, 3, 1, 0, 2, 1, 0, 4, 5, 3, 5, 0, 4, 5, 3, 1], "int64")
        self.assertTrue(np.array_equal(np.array(sentence_ids), expected_data))
        self.assertTrue(
            np.array_equal(np.array(sentence_scores), expected_data))
# Esempio n. 18
    def test_run_and_check(self):
        """Run the dgc operator on GPU fixtures and check its top-k sparse
        encoding: k must equal 25% of the gradient size and the momentum
        buffers (U, V) must be zeroed at every encoded index."""
        self.setup(place=core.CUDAPlace(0))
        kwargs = {
            # inputs
            'U': self.u_name,
            'V': self.v_name,
            'Grad': self.grad_name,
            'Param': self.param_name,
            'current_step': self.current_step_name,
            'nranks': self.nranks_name,

            # outputs
            'U_out': self.u_name,
            'V_out': self.v_name,
            'EncodeGrad': self.encode_grad_name,
            'Grad_out': self.grad_name,
            'k': self.k_name,
            'GatherBuff': self.gather_buff_name,

            # attrs
            'm': 0.9,
            'sparsity': [0.75, 0.9375, 0.984375, 0.996, 0.999],
            'use_nesterov': True,
            'rampup_begin_step': float(0.0),
            'rampup_step': float(10.0),
            'regular_coeff': float(1e-4),
            'regular_type': int(2),
        }

        dgc_op = Operator('dgc', **kwargs)
        dgc_op.run(self.scope, self.place)

        u_out = np.array(self.u_tensor)
        v_out = np.array(self.v_tensor)
        grad_out = np.array(self.grad_tensor)
        encode_grad_out = np.array(self.encode_grad_tensor)
        k = int(np.array(self.k_tensor)[0])

        print("u_out:", u_out[0:20])
        print("v_out:", v_out[0:20])
        print("encode_grad_out:", encode_grad_out)
        print("k_out:", k)

        # the first sparsity step keeps 25% of the gradient elements
        self.assertEqual(k, int(g_array_size * 0.25))

        # encoded layout: k int32 indices followed by k float values
        index = encode_grad_out[0:k].view(dtype=np.int32)
        value = encode_grad_out[k:2 * k]

        # momentum buffers must be cleared at every selected index
        for i in range(0, k):
            self.assertAlmostEqual(u_out[index[i]], 0.0)
            self.assertAlmostEqual(v_out[index[i]], 0.0)

        # NOTE(review): a_min/dangling are computed but never asserted —
        # this looks like an unfinished check that no unselected |v| value
        # exceeds the smallest encoded value; confirm intent before
        # turning it into an assertion. (Removed the unused local `acl`.)
        a_min = np.amin(value)
        dangling = [x for x in v_out if x > a_min]
# Esempio n. 19
    def check_momentum_step(self, place):
        """Exercise the momentum branch and validate ParamOut/VelocityOut."""
        self.setup(place=place)

        # build and run the operator under test in one expression
        Operator(self.op_type, **self.kwargs).run(self.scope, self.place)

        # each (tensor, expected-output key, name) triple is checked alike
        for tensor, out_key, name in (
            (self.param_tensor, 'ParamOut', self.param_name),
            (self.velocity_tensor, 'VelocityOut', self.velocity_name),
        ):
            self.check(np.array(tensor), self.outputs[out_key], self.place,
                       name)
# Esempio n. 20
    def check_with_place(self, place):
        """SGD with a SelectedRows gradient and a dense Param: only the rows
        present in the gradient are updated (param -= lr * grad)."""
        scope = core.Scope()

        # create and initialize Grad Variable
        height = 10
        rows = [0, 4, 7]
        row_numel = 12

        grad_selected_rows = scope.var('Grad').get_selected_rows()
        grad_selected_rows.set_height(height)
        grad_selected_rows.set_rows(rows)
        np_array = np.ones((len(rows), row_numel)).astype("float32")
        np_array[0, 0] = 2.0
        np_array[2, 8] = 4.0

        grad_tensor = grad_selected_rows.get_tensor()
        grad_tensor.set(np_array, place)

        # create and initialize Param Variable (dense, filled with 5.0)
        param = scope.var('Param').get_tensor()
        param_array = np.full((height, row_numel), 5.0).astype("float32")
        param.set(param_array, place)

        # create and initialize LearningRate Variable (lr = 2.0)
        lr = scope.var('LearningRate').get_tensor()
        lr_array = np.full((1), 2.0).astype("float32")
        lr.set(lr_array, place)

        # create and run sgd operator
        sgd_op = Operator(
            "sgd",
            Param='Param',
            Grad='Grad',
            ParamOut='Param',
            LearningRate='LearningRate')
        sgd_op.run(scope, place)

        # get and compare result
        result_array = np.array(param)

        # rows[0] = 0, 5.0 - 2.0 * 2.0
        self.assertAlmostEqual(1.0, result_array[rows[0], 0])
        # rows[0] = 0, 5.0 - 2.0 * 1.0
        self.assertAlmostEqual(3.0, result_array[rows[0], 2])
        # row 1 not in grad rows: 5.0 - 2.0 * 0.0
        self.assertAlmostEqual(5.0, result_array[1, 0])
        # rows[1] = 4, 5.0 - 2.0 * 1.0
        self.assertAlmostEqual(3.0, result_array[rows[1], 10])
        # row 5 not in grad rows: 5.0 - 2.0 * 0.0
        self.assertAlmostEqual(5.0, result_array[5, 8])
        # rows[2] = 7, 5.0 - 2.0 * 1.0
        self.assertAlmostEqual(3.0, result_array[rows[2], 1])
        # rows[2] = 7, 5.0 - 2.0 * 4.0
        self.assertAlmostEqual(-3.0, result_array[rows[2], 8])
# Esempio n. 21
    def check_with_place(self, place):
        """SGD with a SelectedRows gradient and a dense Param, with
        row_numel supplied by self.conf(): only the rows present in the
        gradient are updated (param -= lr * grad)."""
        scope = core.Scope()

        # create and initialize Grad Variable
        height = 10
        rows = [0, 4, 7]
        # self.conf() sets self.row_numel (and any subclass overrides)
        self.conf()

        grad_selected_rows = scope.var('Grad').get_selected_rows()
        grad_selected_rows.set_height(height)
        grad_selected_rows.set_rows(rows)
        np_array = np.ones((len(rows), self.row_numel)).astype("float32")
        np_array[0, 0] = 2.0
        np_array[2, 8] = 4.0

        grad_tensor = grad_selected_rows.get_tensor()
        grad_tensor.set(np_array, place)

        # create and initialize Param Variable (dense, filled with 5.0)
        param = scope.var('Param').get_tensor()
        param_array = np.full((height, self.row_numel), 5.0).astype("float32")
        param.set(param_array, place)

        # create and initialize LearningRate Variable (lr = 2.0)
        lr = scope.var('LearningRate').get_tensor()
        lr_array = np.full((1), 2.0).astype("float32")
        lr.set(lr_array, place)

        # create and run sgd operator
        sgd_op = Operator(
            "sgd",
            Param='Param',
            Grad='Grad',
            ParamOut='Param',
            LearningRate='LearningRate')
        sgd_op.run(scope, place)

        # get and compare result
        result_array = np.array(param)

        # rows[0] = 0, 5.0 - 2.0 * 2.0
        self.assertAlmostEqual(1.0, result_array[rows[0], 0])
        # rows[0] = 0, 5.0 - 2.0 * 1.0
        self.assertAlmostEqual(3.0, result_array[rows[0], 2])
        # row 1 not in grad rows: 5.0 - 2.0 * 0.0
        self.assertAlmostEqual(5.0, result_array[1, 0])
        # rows[1] = 4, 5.0 - 2.0 * 1.0
        self.assertAlmostEqual(3.0, result_array[rows[1], 10])
        # row 5 not in grad rows: 5.0 - 2.0 * 0.0
        self.assertAlmostEqual(5.0, result_array[5, 8])
        # rows[2] = 7, 5.0 - 2.0 * 1.0
        self.assertAlmostEqual(3.0, result_array[rows[2], 1])
        # rows[2] = 7, 5.0 - 2.0 * 4.0
        self.assertAlmostEqual(-3.0, result_array[rows[2], 8])
# Esempio n. 22
    def check_with_place(self, place):
        """Multiply the two prepared SelectedRows inputs and verify the
        result via self.check_result."""
        scope = core.Scope()
        self.prepare_input(scope, place)

        # start from an empty output: zero height, no rows
        result_rows = self.create_out_selected_row(scope)
        result_rows.set_height(0)
        result_rows.set_rows([])

        mul_op = Operator("elementwise_mul", X='X', Y='Y', Out='Out')
        mul_op.run(scope, place)
        self.check_result(result_rows)
# Esempio n. 23
    def check_with_tensor(self, place):
        """share_data on a dense tensor must expose the same values in Out."""
        scope = core.Scope()
        source = np.random.rand(2, 3, 5).astype("float32")

        # initialize input and output variables
        input_tensor = scope.var('X').get_tensor()
        input_tensor.set(source, place)
        shared = scope.var("Out").get_tensor()

        Operator("share_data", X="X", Out="Out").run(scope, place)
        self.assertTrue(np.allclose(source, shared))
    def check_with_place(self, place):
        """split_selected_rows slices a SelectedRows input along
        height_sections; row ids in each output are remapped to be relative
        to that section's start offset."""
        scope = core.Scope()
        # note: row id 20 with height 20 lands in the last section
        rows = [0, 5, 7, 4, 20]
        height = 20
        row_numel = 2

        # initialize input variable X
        x = scope.var('X').get_selected_rows()
        x.set_rows(rows)
        x.set_height(height)
        np_array = np.ones((len(rows), row_numel)).astype("float32")
        np_array[0, 0] = 2.0
        np_array[2, 1] = 4.0
        np_array[4, 1] = 8.0
        x_tensor = x.get_tensor()
        x_tensor.set(np_array, place)

        height_sections = [5, 5, 5, 5, 3]

        # initialize output variables [out0, ..., out4]
        outs_name = ["out%d" % i for i in range(len(height_sections))]
        outs = [
            scope.var(var_name).get_selected_rows() for var_name in outs_name
        ]

        # expected output selected rows (ids relative to each section)
        expected_out0_rows = [0, 4]
        expected_out1_rows = [0, 2]
        expected_out2_rows = []
        expected_out4_rows = [0]

        op = Operator(
            "split_selected_rows",
            X="X",
            Out=outs_name,
            height_sections=height_sections)

        op.run(scope, place)

        self.assertEqual(outs[0].rows(), expected_out0_rows)
        self.assertEqual(outs[1].rows(), expected_out1_rows)
        self.assertEqual(outs[2].rows(), expected_out2_rows)
        self.assertEqual(outs[4].rows(), expected_out4_rows)

        self.assertEqual(outs[0].height(), height_sections[0])
        self.assertEqual(outs[4].height(), height_sections[4])

        # spot-check the routed values
        self.assertAlmostEqual(2.0, np.array(outs[0].get_tensor())[0, 0])
        self.assertAlmostEqual(4.0, np.array(outs[1].get_tensor())[1, 1])
        self.assertAlmostEqual(8.0, np.array(outs[4].get_tensor())[0, 1])

        # sections 2 and 3 received no rows
        self.assertEqual(outs[2].numel(), 0)
        self.assertEqual(outs[3].numel(), 0)
    def test_get_set(self):
        """Append three beam steps and verify the sentences, scores and LoD
        produced by beam_search_decode."""
        # per-step LoD and candidate data, shared by ids and scores
        step_lods = [
            [[0, 3, 6], [0, 1, 2, 3, 4, 5, 6]],
            [[0, 3, 6], [0, 1, 1, 3, 5, 5, 6]],
            [[0, 3, 6], [0, 0, 1, 2, 3, 4, 5]],
        ]
        step_data = [
            [1, 2, 3, 4, 5, 6],
            [0, 1, 2, 3, 4, 5],
            [0, 1, 2, 3, 4],
        ]

        ids = self.scope.var("ids").get_lod_tensor_array()
        for lod, data in zip(step_lods, step_data):
            self.append_lod_tensor(ids, lod, np.array(data, dtype="int64"))

        scores = self.scope.var("scores").get_lod_tensor_array()
        for lod, data in zip(step_lods, step_data):
            self.append_lod_tensor(scores, lod,
                                   np.array(data, dtype="float64"))

        sentence_ids = self.scope.var("sentence_ids").get_tensor()
        sentence_scores = self.scope.var("sentence_scores").get_tensor()

        decode_op = Operator(
            "beam_search_decode",
            # inputs
            Ids="ids",
            Scores="scores",
            # outputs
            SentenceIds="sentence_ids",
            SentenceScores="sentence_scores")

        decode_op.run(self.scope, self.place)

        # both outputs share one LoD: 2 sources, 8 decoded sentences
        expected_lod = [[0, 4, 8], [0, 1, 3, 6, 9, 10, 13, 16, 19]]
        self.assertEqual(sentence_ids.lod(), expected_lod)
        self.assertEqual(sentence_scores.lod(), expected_lod)

        expected_data = np.array(
            [2, 1, 0, 3, 1, 0, 3, 2, 1, 5, 4, 3, 2, 4, 4, 3, 6, 5, 4], "int64")
        self.assertTrue(np.array_equal(np.array(sentence_ids), expected_data))
        self.assertTrue(
            np.array_equal(np.array(sentence_scores), expected_data))
Esempio n. 26
0
    def check_with_place(self, place):
        """Run where_index on self.cond_data and compare Out with the
        precomputed self.out_data."""
        scope = core.Scope()

        cond_tensor = scope.var('Condition').get_tensor()
        cond_tensor.set(self.cond_data, place)

        # Pre-fill Out with zeros of the expected shape.
        out_tensor = scope.var("Out").get_tensor()
        out_tensor.set(np.full(self.shape, 0).astype('int64'), place)

        where_op = Operator("where_index", Condition="Condition", Out="Out")
        where_op.run(scope, place)

        self.assertTrue((np.array(out_tensor) == self.out_data).all())
    def test_check_output(self):
        """Build inputs via the prepare_* helpers, run the lookup op, and
        compare Out against the fp32 reference lookup."""
        self.prepare_ids()
        self.prepare_w()
        out_tensor = self.scope.var('Out').get_tensor()

        lookup_op = Operator(self.op_type, W='W', Ids='Ids', Out='Out')
        lookup_op.run(self.scope, self.place)

        actual = np.array(out_tensor)
        expected = _lookup(self.w_fp32, self.ids, self.flat_ids, self.op_type)
        self._check_output(expected, actual)
Esempio n. 28
0
    def run_and_check(self):
        """Run the rmsprop op in-place on the scope tensors (Param, Moment,
        MeanSquare — and MeanGrad when centered) and verify every updated
        output against its numpy reference via self.check."""
        grad_name = self.grad_sr_name if self.is_sparse else self.grad_name

        op_kwargs = dict(
            Param=self.param_name,
            Grad=grad_name,
            MeanSquare=self.mean_square_name,
            Moment=self.moment_name,
            LearningRate=self.lr_name,
            # outputs alias the inputs: the update happens in place
            ParamOut=self.param_name,
            MeanSquareOut=self.mean_square_name,
            MomentOut=self.moment_name,
            epsilon=self.epsilon,
            decay=self.decay,
            momentum=self.momentum,
            centered=self.centered)

        if self.centered:
            op_kwargs['MeanGrad'] = self.mean_grad_name
            op_kwargs['MeanGradOut'] = self.mean_grad_name

        atol = 1e-6
        Operator('rmsprop', **op_kwargs).run(self.scope, self.place)

        for tensor, expected, name in (
            (self.mean_square_tensor, self.ms_out, self.mean_square_name),
            (self.moment_tensor, self.moment_out, self.moment_name),
            (self.param_tensor, self.param_out, self.param_name),
        ):
            self.check(np.array(tensor), expected, self.place, name, atol=atol)

        if self.centered:
            # MeanGrad uses the checker's default tolerance.
            self.check(
                np.array(self.mean_grad_tensor), self.mg_out, self.place,
                self.mean_grad_name)
 def check_with_place(self, place):
     """uniform_random into a SelectedRows output: the tensor must be
     [1000, 784] and its value histogram close to uniform."""
     scope = core.Scope()
     sr_out = scope.var("X").get_selected_rows()
     paddle.seed(10)
     uniform_op = Operator("uniform_random",
                           Out="X",
                           shape=[1000, 784],
                           min=-5.0,
                           max=10.0,
                           seed=10)
     uniform_op.run(scope, place)
     out_tensor = sr_out.get_tensor()
     self.assertEqual(out_tensor.shape(), [1000, 784])
     hist, prob = output_hist(np.array(out_tensor))
     self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.01),
                     "hist: " + str(hist))
Esempio n. 30
0
 def test_run(self):
     """Run beam_search on the pre-built scope and dump selected_ids.

     Fix: the original used Python 2 print statements, which are a
     SyntaxError under Python 3; they are now print() calls.
     """
     op = Operator(
         'beam_search',
         pre_ids="pre_ids",
         ids='ids',
         scores='scores',
         selected_ids='selected_ids',
         selected_scores='selected_scores',
         level=0,
         beam_size=2,
         end_id=0, )
     op.run(self.scope, core.CPUPlace())
     selected_ids = self.scope.find_var("selected_ids").get_tensor()
     print('selected_ids', np.array(selected_ids))
     print('lod', selected_ids.lod())
Esempio n. 31
0
    def check_with_place(self, place):
        """fill_constant writing into a SelectedRows output must yield a
        123x92 float32 tensor filled with 3.8."""
        scope = core.Scope()
        out_var = scope.var('Out').get_selected_rows()

        fill_op = Operator(
            "fill_constant", shape=[123, 92], value=3.8, Out='Out')
        fill_op.run(scope, place)

        actual = np.array(out_var.get_tensor())
        expected = np.full((123, 92), 3.8, 'float32')
        self.assertTrue(np.array_equal(actual, expected))
    def check_with_place(self, place):
        """split_selected_rows: split a SelectedRows X (height 20) into
        five outputs along height_sections and check each output's rows,
        height and marker values.

        Fix: Python 2 `xrange` (a NameError on Python 3) replaced by
        `range`.
        """
        scope = core.Scope()
        rows = [0, 5, 7, 4, 20]
        height = 20
        row_numel = 2

        # initialize input variable X; a few cells are marked with
        # distinctive values so they can be traced into the outputs
        x = scope.var('X').get_selected_rows()
        x.set_rows(rows)
        x.set_height(height)
        np_array = np.ones((len(rows), row_numel)).astype("float32")
        np_array[0, 0] = 2.0
        np_array[2, 1] = 4.0
        np_array[4, 1] = 8.0
        x_tensor = x.get_tensor()
        x_tensor.set(np_array, place)

        height_sections = [5, 5, 5, 5, 3]

        # initialize output variables [out0, ..., out4]
        outs_name = ["out%d" % i for i in range(len(height_sections))]
        outs = [
            scope.var(var_name).get_selected_rows() for var_name in outs_name
        ]

        # expected output selected rows (indices relative to each section)
        expected_out0_rows = [0, 4]
        expected_out1_rows = [0, 2]
        expected_out4_rows = [0]

        op = Operator(
            "split_selected_rows",
            X="X",
            Out=outs_name,
            height_sections=height_sections)

        op.run(scope, place)

        self.assertEqual(outs[0].rows(), expected_out0_rows)
        self.assertEqual(outs[1].rows(), expected_out1_rows)
        self.assertEqual(outs[4].rows(), expected_out4_rows)

        self.assertEqual(outs[0].height(), height_sections[0])
        self.assertEqual(outs[4].height(), height_sections[4])

        # the marked cells must land in the matching sections
        self.assertAlmostEqual(2.0, np.array(outs[0].get_tensor())[0, 0])
        self.assertAlmostEqual(4.0, np.array(outs[1].get_tensor())[1, 1])
        self.assertAlmostEqual(8.0, np.array(outs[4].get_tensor())[0, 1])
Esempio n. 33
0
def set_input(scope, op, inputs, place):
    """Write the values in `inputs` into the scope variables backing
    `op`'s declared input slots.

    ndarray values fill the variable's tensor; a (ndarray, lod) tuple
    additionally sets recursive sequence lengths; bare floats/ints are
    stored as scalar variables. float16 arrays are reinterpreted as
    uint16 before being handed to the tensor.
    """

    def _to_fluid_value(arr):
        # Tensors carry fp16 payloads as uint16; reinterpret the buffer.
        return arr.view(np.uint16) if arr.dtype == np.float16 else arr

    def _assign(var_name, value):
        if isinstance(value, (tuple, np.ndarray)):
            tensor = scope.find_var(var_name).get_tensor()
            if isinstance(value, tuple):
                tensor.set_recursive_sequence_lengths(value[1])
                value = value[0]
            tensor._set_dims(value.shape)
            tensor.set(_to_fluid_value(value), place)
        elif isinstance(value, float):
            scope.find_var(var_name).set_float(value)
        elif isinstance(value, int):
            scope.find_var(var_name).set_int(value)

    for in_name, is_duplicable in Operator.get_op_inputs(op.type()):
        if in_name not in inputs:
            continue
        if is_duplicable:
            # duplicable slots hold a list of (sub_name, value) items
            for item in inputs[in_name]:
                _assign(item[0], item[1])
        else:
            _assign(in_name, inputs[in_name])
Esempio n. 34
0
 def test_run(self):
     """Run beam_search on the pre-built scope and dump selected_ids.

     Fix: the original used Python 2 print statements, which are a
     SyntaxError under Python 3; they are now print() calls.
     """
     op = Operator(
         'beam_search',
         pre_ids="pre_ids",
         ids='ids',
         scores='scores',
         selected_ids='selected_ids',
         selected_scores='selected_scores',
         level=0,
         beam_size=2,
         end_id=0,
     )
     op.run(self.scope, core.CPUPlace())
     selected_ids = self.scope.find_var("selected_ids").get_tensor()
     print('selected_ids', np.array(selected_ids))
     print('lod', selected_ids.lod())
    def check_with_place(self, place):
        """Drive lookup_table_v2 through the helper-built scope variables
        and validate the gathered result via self.check_result."""
        scope = core.Scope()

        ids_array = self.prepare_ids(scope, place)
        self.prepare_w(scope, place)
        out_tensor = self.create_out_tensor(scope, place)

        lookup_op = Operator("lookup_table_v2", W='W', Ids='Ids', Out='Out')
        lookup_op.run(scope, place)

        self.check_result(ids_array, np.array(out_tensor))
    def check_with_place(self, place):
        """lookup_sparse_table: rows for known ids must equal id * ones;
        the row produced for the last id (100, outside W's rows) is
        checked against a uniform histogram instead of exact values."""
        scope = core.Scope()

        # Ids: the last entry (100) is not among W's rows.
        ids_tensor = scope.var("Ids").get_tensor()
        ids_array = np.array([0, 2, 3, 5, 100]).astype("int64")
        ids_tensor.set(ids_array, place)

        # W: 7 rows, row i filled with the value i.
        rows = [0, 1, 2, 3, 4, 5, 6]
        row_numel = 10000
        w_selected_rows = scope.var('W').get_selected_rows()
        w_selected_rows.set_height(len(rows))
        w_selected_rows.set_rows(rows)
        w_array = (np.ones((len(rows), row_numel)) *
                   np.arange(len(rows)).reshape(-1, 1)).astype("float32")
        w_selected_rows.get_tensor().set(w_array, place)

        out_tensor = scope.var('Out').get_tensor()

        lookup_op = Operator(
            "lookup_sparse_table",
            W='W',
            Ids='Ids',
            Out='Out',
            min=-5.0,
            max=10.0,
            seed=10)
        lookup_op.run(scope, place)

        result_array = np.array(out_tensor)
        # Only the first three ids are verified exactly ([:-2], as in the
        # original test).
        for idx, looked_up_id in enumerate(ids_array[:-2]):
            assert (looked_up_id == result_array[idx]).all()

        # The last row was freshly initialized by the op; check that its
        # histogram matches a uniform distribution.
        hist, prob = output_hist(result_array[-1])
        self.assertTrue(
            np.allclose(
                hist, prob, rtol=0, atol=0.01), "hist: " + str(hist))
Esempio n. 37
0
    def check_with_place(self, place):
        """uniform_random writing a [4, 784] SelectedRows tensor; check
        the shape and that the sample histogram is nearly uniform."""
        scope = core.Scope()
        sr_out = scope.var("X").get_selected_rows()

        uniform_op = Operator(
            "uniform_random",
            Out="X",
            shape=[4, 784],
            min=-5.0,
            max=10.0,
            seed=10)
        uniform_op.run(scope, place)

        out_tensor = sr_out.get_tensor()
        self.assertEqual(out_tensor.shape(), [4, 784])
        hist, prob = output_hist(np.array(out_tensor))
        self.assertTrue(
            np.allclose(hist, prob, rtol=0, atol=0.01),
            "hist: " + str(hist))
Esempio n. 38
0
    def check_with_place(self, place):
        """uniform_random with the output shape supplied through a
        ShapeTensor input instead of the `shape` attribute."""
        scope = core.Scope()
        sr_out = scope.var("X").get_selected_rows()

        # Shape comes from a tensor variable, not an attribute.
        shape_tensor = scope.var("Shape").get_tensor()
        shape_tensor.set(np.array([4, 784]).astype("int64"), place)

        uniform_op = Operator("uniform_random",
                              ShapeTensor="Shape",
                              Out="X",
                              min=-5.0,
                              max=10.0,
                              seed=10)
        uniform_op.run(scope, place)

        out_tensor = sr_out.get_tensor()
        self.assertEqual(out_tensor.shape(), [4, 784])
        hist, prob = output_hist(np.array(out_tensor))
        self.assertTrue(np.allclose(hist, prob, rtol=0, atol=0.01),
                        "hist: " + str(hist))
    def check_grad_with_place(self, place):
        """The gradient of split is a `sum` over the per-output grads:
        their rows are concatenated into X@GRAD and the marked values
        must carry through unchanged."""
        scope = core.Scope()
        height = 10
        row_numel = 2

        # attr forwarded to the sum op
        height_sections = [5, 5]

        def _make_grad(var_name, grad_rows, marker):
            # Build a SelectedRows grad of ones with one marked cell.
            sel = scope.var(var_name).get_selected_rows()
            sel.set_rows(grad_rows)
            sel.set_height(height)
            values = np.ones((len(grad_rows), row_numel)).astype("float32")
            r, c, v = marker
            values[r, c] = v
            sel.get_tensor().set(values, place)
            return grad_rows

        rows0 = _make_grad("out0@GRAD", [0, 5], (0, 0, 2.0))
        rows1 = _make_grad("out1@GRAD", [2, 0], (0, 1, 4.0))

        x_grad = scope.var("X@GRAD").get_selected_rows()

        grad_op = Operator(
            "sum",
            X=["out0@GRAD", "out1@GRAD"],
            Out="X@GRAD",
            height_sections=height_sections)
        grad_op.run(scope, place)

        self.assertEqual(x_grad.rows(), rows0 + rows1)
        self.assertEqual(x_grad.height(), height)

        x_grad_array = np.array(x_grad.get_tensor())
        self.assertAlmostEqual(2.0, x_grad_array[0, 0])
        self.assertAlmostEqual(4.0, x_grad_array[2, 1])
Esempio n. 40
0
def create_op(scope, op_type, inputs, outputs, attrs):
    """Build an Operator of `op_type`, creating a scope variable (with an
    empty tensor) for every declared input/output slot the caller
    supplied, and forwarding matching attrs. The op-role attr defaults
    to Forward when absent."""
    kwargs = dict()

    op_maker = core.op_proto_and_checker_maker
    op_role_attr_name = op_maker.kOpRoleAttrName()
    if op_role_attr_name not in attrs:
        attrs[op_role_attr_name] = int(op_maker.OpRole.Forward)

    def _register(slot_name, var_name):
        # Materialize the variable so the op can find it at run time.
        scope.var(var_name).get_tensor()
        kwargs[slot_name].append(var_name)

    def _collect(slot_specs, provided):
        # Inputs and outputs share the same (name, duplicable) handling.
        for slot_name, duplicable in slot_specs:
            if slot_name not in provided:
                continue
            kwargs[slot_name] = []
            if duplicable:
                for item in provided[slot_name]:
                    _register(slot_name, item[0])
            else:
                _register(slot_name, slot_name)

    _collect(Operator.get_op_inputs(op_type), inputs)
    _collect(Operator.get_op_outputs(op_type), outputs)

    for attr_name in Operator.get_op_attr_names(op_type):
        if attr_name in attrs:
            kwargs[attr_name] = attrs[attr_name]

    return Operator(op_type, **kwargs)
Esempio n. 41
0
    def check_output_with_place(self, place, atol):
        """Run the op and compare every declared output (including
        duplicable sub-outputs) against self.outputs within `atol`;
        when the expected value is a (data, lod) tuple, the LoD is
        checked too.

        Fix: the duplicable-branch AssertionError passed a tuple
        ("... %s ...", type) instead of a formatted message; it now
        %-formats the type into the message.
        """
        outs, fetch_list = self._calc_output(place)

        def find_actual(target_name):
            # Locate target_name in fetch_list; it must appear exactly once.
            found = [
                i for i, var in enumerate(fetch_list)
                if var.name == target_name
            ]
            self.assertTrue(
                len(found) == 1, "Found {} {}".format(
                    len(found), target_name))
            return found[0]

        for out_name, out_dup in Operator.get_op_outputs(self.op_type):
            if out_name not in self.outputs:
                continue

            if out_dup:
                sub_out = self.outputs[out_name]
                if not isinstance(sub_out, list):
                    raise AssertionError("sub_out type %s is not list" %
                                         type(sub_out))
                for item in sub_out:
                    sub_out_name, expect = item[0], item[1]
                    actual = outs[find_actual(sub_out_name)]
                    actual_t = np.array(actual)
                    expect_t = expect[0] \
                        if isinstance(expect, tuple) else expect
                    self.assertTrue(
                        np.allclose(
                            actual_t, expect_t, atol=atol),
                        "Output (" + sub_out_name + ") has diff at " +
                        str(place))
                    if isinstance(expect, tuple):
                        self.assertListEqual(
                            actual.lod(), expect[1], "Output (" + sub_out_name +
                            ") has different lod at " + str(place))
            else:
                actual = outs[find_actual(out_name)]
                actual_t = np.array(actual)
                expect = self.outputs[out_name]
                expect_t = expect[0] if isinstance(expect, tuple) else expect
                self.assertTrue(
                    np.allclose(
                        actual_t, expect_t, atol=atol),
                    "Output (" + out_name + ") has diff at " + str(place) +
                    str(actual_t) + "\n" + str(expect_t))
                if isinstance(expect, tuple):
                    self.assertListEqual(actual.lod(), expect[1],
                                         "Output (" + out_name +
                                         ") has different lod at " + str(place))
Esempio n. 42
0
    def check_with_place(self, place):
        """lookup_table with a SelectedRows Ids input: the op gathers W's
        rows named by Ids' row indices into a SelectedRows Out; result
        row i must equal rows[i] * ones (W row j is filled with j)."""
        scope = core.Scope()

        height = 10
        rows = [0, 4, 4, 7]  # index 4 is looked up twice
        row_numel = 12

        # W: dense [height, row_numel], row i filled with the value i.
        w_tensor = scope.var('W').get_tensor()
        w_values = np.full((height, row_numel), 1.0).astype("float32")
        for i in range(height):
            w_values[i] *= i
        w_tensor.set(w_values, place)

        # Ids as SelectedRows; its tensor is set to ones — the assertions
        # below depend only on the row indices.
        ids_selected_rows = scope.var('Ids').get_selected_rows()
        ids_selected_rows.set_height(len(rows))
        ids_selected_rows.set_rows(rows)
        ids_selected_rows.get_tensor().set(
            np.ones((len(rows), row_numel)).astype("float32"), place)

        out_selected_rows = scope.var('Out').get_selected_rows()

        concat_rows_op = Operator("lookup_table", W='W', Ids='Ids', Out='Out')
        concat_rows_op.run(scope, place)

        result_array = np.array(out_selected_rows.get_tensor())
        for idx, looked_up_row in enumerate(rows):
            assert (looked_up_row == result_array[idx]).all()
Esempio n. 43
0
def set_input(scope, op, inputs, place):
    """Write the values in `inputs` into the scope variables backing
    `op`'s declared input slots.

    ndarray values fill the variable's tensor; a (ndarray, lod) tuple
    additionally sets the tensor's LoD; bare floats/ints are stored as
    scalar variables.
    """

    def _assign(var_name, value):
        if isinstance(value, (tuple, np.ndarray)):
            tensor = scope.find_var(var_name).get_tensor()
            if isinstance(value, tuple):
                tensor.set_lod(value[1])
                value = value[0]
            tensor.set_dims(value.shape)
            tensor.set(value, place)
        elif isinstance(value, float):
            scope.find_var(var_name).set_float(value)
        elif isinstance(value, int):
            scope.find_var(var_name).set_int(value)

    for in_name, is_duplicable in Operator.get_op_inputs(op.type()):
        if in_name not in inputs:
            continue
        if is_duplicable:
            # duplicable slots hold a list of (sub_name, value) items
            for item in inputs[in_name]:
                _assign(item[0], item[1])
        else:
            _assign(in_name, inputs[in_name])
Esempio n. 44
0
    def check_with_place(self, place, data_layout, dtype, shape):
        """Inference-mode batch_norm check: build random X/Scale/Bias with
        zero mean and unit variance, run the op with is_test=True, and
        compare Y against the numpy reference within atol=1e-3.

        `shape` is either 2-D ([N, C]) or 4-D; for 4-D it is interpreted
        per `data_layout` ("NHWC" or "NCHW").
        """
        epsilon = 0.00001
        if len(shape) == 2:
            x_shape = shape
            c = x_shape[1]
        else:
            # shape arrives as [n, h, w, c]; reorder only for NCHW
            n, h, w, c = shape[0], shape[1], shape[2], shape[3]
            if data_layout == "NHWC":
                x_shape = [n, h, w, c]
            elif data_layout == "NCHW":
                x_shape = [n, c, h, w]
            else:
                raise ValueError("Unknown data layout.")
        scale_shape = [c]

        x_val = np.random.random_sample(x_shape).astype(dtype)
        scale_val = np.random.random_sample(scale_shape).astype(np.float32)
        bias_val = np.random.random_sample(scale_shape).astype(np.float32)

        mean = np.zeros(scale_shape).astype(np.float32)
        variance = np.ones(scale_shape).astype(np.float32)

        # numpy reference computed before the op mutates mean/variance
        y_out = _reference_testing(x_val, scale_val, bias_val, mean, variance,
                                   epsilon, data_layout).astype(dtype)

        scope = core.Scope()

        # create input
        x_tensor = create_or_get_tensor(scope, "x_val",
                                        OpTest.np_dtype_to_fluid_dtype(x_val),
                                        place)
        scale_tensor = create_or_get_tensor(
            scope, "scale_val",
            OpTest.np_dtype_to_fluid_dtype(scale_val), place)
        bias_tensor = create_or_get_tensor(
            scope, "bias_val", OpTest.np_dtype_to_fluid_dtype(bias_val), place)
        mean_tensor = create_or_get_tensor(scope, "mean",
                                           OpTest.np_dtype_to_fluid_dtype(mean),
                                           place)
        variance_tensor = create_or_get_tensor(
            scope, "variance", OpTest.np_dtype_to_fluid_dtype(variance), place)

        # create output
        y_tensor = create_or_get_tensor(scope, "y_out", None, place)
        saved_mean_tensor = create_or_get_tensor(scope, "saved_mean", None,
                                                 place)
        saved_variance_tensor = create_or_get_tensor(scope, "saved_variance",
                                                     None, place)
        # MeanOut/VarianceOut alias the input variables below, so the out
        # tensors are the very same objects as the in tensors
        mean_out_tensor = mean_tensor
        variance_out_tensor = variance_tensor

        batch_norm_op = Operator(
            "batch_norm",
            # inputs
            X="x_val",
            Scale="scale_val",
            Bias="bias_val",
            Mean="mean",
            Variance="variance",
            # outputs (MeanOut/VarianceOut reuse the input variables)
            Y="y_out",
            MeanOut="mean",
            VarianceOut="variance",
            SavedMean="saved_mean",
            SavedVariance="saved_variance",
            # attrs
            is_test=True,
            data_layout=data_layout,
            use_mkldnn=self.use_mkldnn,
            epsilon=epsilon)

        batch_norm_op.run(scope, place)

        # check inference result
        self.__assert_close(
            y_tensor,
            y_out,
            "inference output are different at " + str(place) + ", " +
            data_layout + ", " + str(np.dtype(dtype)) +
            str(np.array(y_tensor)) + str(y_out),
            atol=1e-3)
Esempio n. 45
0
    def check_with_place(self, place):
        """Sparse-gradient adagrad: Grad is a SelectedRows whose rows list
        duplicates index 4 ([0, 4, 7, 4]), so the op must merge duplicate
        rows before updating. Moment and Param are compared against
        hand-computed values (see inline notes).
        """
        scope = core.Scope()

        # create and initialize Grad Variable
        height = 10
        rows = [0, 4, 7, 4]  # row 4 appears twice -> its grads must be summed
        row_numel = 12

        grad_selected_rows = scope.var('Grad').get_selected_rows()
        grad_selected_rows.set_height(height)
        grad_selected_rows.set_rows(rows)
        np_array = np.ones((len(rows), row_numel)).astype("float32")
        # marked cells to trace specific updates below
        np_array[0, 0] = 2.0
        np_array[2, 8] = 4.0

        grad_tensor = grad_selected_rows.get_tensor()
        grad_tensor.set(np_array, place)

        # create and initialize Param Variable (all 5.0)
        param = scope.var('Param').get_tensor()
        param_array = np.full((height, row_numel), 5.0).astype("float32")
        param.set(param_array, place)

        # create and initialize LeraningRate Variable (scalar 2.0)
        lr = scope.var('LearningRate').get_tensor()
        lr_array = np.full((1), 2.0).astype("float32")
        lr.set(lr_array, place)

        # create and initialize moment Variable (all 2.0)
        moment = scope.var('Moment').get_tensor()
        moment_np_array = np.full((height, row_numel), 2.0).astype("float32")
        moment.set(moment_np_array, place)

        # create and run adagrad operator (in-place: ParamOut/MomentOut
        # alias Param/Moment)
        adagrad_op = Operator(
            "adagrad",
            Param='Param',
            Grad='Grad',
            ParamOut='Param',
            Moment='Moment',
            MomentOut='Moment',
            LearningRate='LearningRate',
            epsilon=2.0)

        adagrad_op.run(scope, place)

        # get and compare moment result: moment += grad^2 per updated row
        moment_result_array = np.array(moment)

        self.assertAlmostEqual(6.0, moment_result_array[rows[0], 0])
        self.assertAlmostEqual(3.0, moment_result_array[rows[0], 2])
        # row 1 was not in Grad's rows, so its moment is untouched
        self.assertAlmostEqual(2.0, moment_result_array[1, 0])
        # 2.0 + (1.0 + 1.0)^2  (duplicate row 4 merged before squaring)
        self.assertAlmostEqual(6.0, moment_result_array[rows[1], 10])
        self.assertAlmostEqual(6.0, moment_result_array[rows[3], 4])

        self.assertAlmostEqual(2.0, moment_result_array[5, 8])
        self.assertAlmostEqual(3.0, moment_result_array[rows[2], 1])
        self.assertAlmostEqual(18.0, moment_result_array[rows[2], 8])

        # get and compare param result
        result_array = np.array(param)

        # reference update: param -= lr * grad / (sqrt(moment) + epsilon)
        def get_out(param, lr, grad, m, epsilon):
            return param - lr * grad / (math.sqrt(m) + epsilon)

        self.assertAlmostEqual(
            get_out(5.0, 2.0, 2.0, 6.0, 2.0),
            result_array[rows[0], 0],
            places=5)
        self.assertAlmostEqual(
            get_out(5.0, 2.0, 1.0, 3.0, 2.0),
            result_array[rows[0], 2],
            places=5)
        self.assertAlmostEqual(
            get_out(5.0, 2.0, 0.0, 2.0, 2.0), result_array[1, 0], places=5)

        # grad_merge = 1.0 + 1.0
        # m = 6.0
        self.assertAlmostEqual(
            get_out(5.0, 2.0, 2.0, 6.0, 2.0),
            result_array[rows[1], 10],
            places=5)

        self.assertAlmostEqual(
            get_out(5.0, 2.0, 0.0, 2.0, 2.0), result_array[5, 8], places=5)
        self.assertAlmostEqual(
            get_out(5.0, 2.0, 1.0, 3.0, 2.0),
            result_array[rows[2], 1],
            places=5)
        self.assertAlmostEqual(
            get_out(5.0, 2.0, 4.0, 18.0, 2.0),
            result_array[rows[2], 8],
            places=5)