Example #1
    def test_rmsprop(self):
        places = [core.CPUPlace()]
        if core.is_compiled_with_cuda():
            places.append(core.CUDAPlace(0))

        size = (128, 320)
        for place in places:
            for centered in [False, True]:
                with fluid.scope_guard(core.Scope()):
                    self.check_with_place(place,
                                          is_sparse=False,
                                          centered=centered,
                                          size=size)

                with fluid.scope_guard(core.Scope()):
                    self.check_with_place(place,
                                          is_sparse=True,
                                          centered=centered,
                                          row_num=512,
                                          size=size)

                with fluid.scope_guard(core.Scope()):
                    self.check_with_place(place,
                                          is_sparse=True,
                                          centered=centered,
                                          row_num=60,
                                          size=size)
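Note: each check above runs inside fluid.scope_guard(core.Scope()) so that every configuration starts from a fresh variable namespace and no tensor state leaks between runs. A minimal sketch of that isolation pattern, assuming a working paddle.fluid install (the variable name 'tmp' is illustrative):

    import numpy as np
    import paddle.fluid as fluid
    from paddle.fluid import core

    with fluid.scope_guard(core.Scope()):
        # inside the guard, fluid.global_scope() is the temporary scope
        t = fluid.global_scope().var('tmp').get_tensor()
        t.set(np.ones((2, 2), dtype='float32'), core.CPUPlace())
    # once the guard exits, the temporary scope and 'tmp' are discarded
    assert fluid.global_scope().find_var('tmp') is None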
Example #2
    def test_rmsprop(self):
        places = [paddle.XPUPlace(0)]

        size = (128, 320)
        for place in places:
            for centered in [False]:
                with fluid.scope_guard(core.Scope()):
                    self.check_with_place(place,
                                          is_sparse=False,
                                          centered=centered,
                                          size=size)

                with fluid.scope_guard(core.Scope()):
                    self.check_with_place(place,
                                          is_sparse=True,
                                          centered=centered,
                                          row_num=512,
                                          size=size)

                with fluid.scope_guard(core.Scope()):
                    self.check_with_place(
                        place,
                        is_sparse=True,
                        centered=centered,
                        row_num=60,
                        size=size,
                    )
Example #3
    def check_with_place(self, place, inplace):
        self.check_input_and_optput(core.Scope(), place, inplace, True, True,
                                    True)
        self.check_input_and_optput(core.Scope(), place, inplace, False, True,
                                    True)
        self.check_input_and_optput(core.Scope(), place, inplace, False, False,
                                    True)
        self.check_input_and_optput(core.Scope(), place, inplace, False, False,
                                    False)
Example #4
    def check_with_place(self, place, inplace):
        self.height = 10
        self.row_numel = 12
        self.rows = [0, 1, 2, 3, 4, 5, 6]

        self.check_input_and_optput(core.Scope(), place, inplace, True, True,
                                    True)
        self.check_input_and_optput(core.Scope(), place, inplace, False, True,
                                    True)
        self.check_input_and_optput(core.Scope(), place, inplace, False, False,
                                    True)
        self.check_input_and_optput(core.Scope(), place, inplace, False, False,
                                    False)
Example #5
    def check_with_place(self, place):
        scope = core.Scope()
        self.setup(scope, place)

        op_args = dict()
        for key, np_array in self.dense_inputs.items():
            var = scope.var(key).get_tensor()
            var.set(np_array, place)
            op_args[key] = key
        for s in self.sparse_inputs:
            op_args[s] = s
        for s in self.outputs:
            var = scope.var(s).get_tensor()
            var.set(self.outputs[s], place)
            op_args[s] = s
        for k in self.attrs:
            op_args[k] = self.attrs[k]

        # create and run adam operator
        adam_op = Operator("adam", **op_args)
        adam_op.run(scope, place)

        for key, np_array in self.outputs.items():
            out_var = scope.var(key).get_tensor()
            actual = np.array(out_var)
            actual = actual.reshape([actual.size])
            np_array = np_array.reshape([np_array.size])
            for row_id in self.rows:
                for j in range(self.row_numel):
                    pos = row_id * self.row_numel + j
                    self.assertLess(
                        abs(actual[pos] - np_array[pos]) / abs(actual[pos]),
                        0.00001)
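The verification loop above is a relative-error check restricted to the rows touched by the sparse update. A hedged, vectorized equivalent of the same check using plain numpy (positions and tolerance taken from the loop):

    pos = [row_id * self.row_numel + j
           for row_id in self.rows for j in range(self.row_numel)]
    np.testing.assert_allclose(actual[pos], np_array[pos], rtol=1e-5)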
Example #6
    def check_grad_with_place(self,
                              place,
                              inputs_to_check,
                              output_names,
                              no_grad_set=None,
                              numeric_grad_delta=0.005,
                              in_place=False,
                              max_relative_error=0.005,
                              user_defined_grads=None,
                              check_dygraph=True,
                              transpose_input_list=None):
        # avoid a shared mutable default argument
        if transpose_input_list is None:
            transpose_input_list = []
        self.scope = core.Scope()
        op_inputs = self.inputs if hasattr(self, "inputs") else dict()
        op_outputs = self.outputs if hasattr(self, "outputs") else dict()
        op_attrs = self.attrs if hasattr(self, "attrs") else dict()

        self._check_grad_helper()

        cache_list = None
        if hasattr(self, "cache_name_list"):
            cache_list = self.cache_name_list
        self.op = create_op(self.scope,
                            self.op_type,
                            op_inputs,
                            op_outputs,
                            op_attrs,
                            cache_list=cache_list)

        if no_grad_set is None:
            no_grad_set = set()

        for input_to_check in inputs_to_check:
            set_input(self.scope, self.op, self.inputs, place)
            tensor_to_check = self.scope.find_var(input_to_check).get_tensor()
            tensor_size = six.moves.reduce(lambda a, b: a * b,
                                           tensor_to_check.shape(), 1)
            if tensor_size < 100:
                self.__class__.input_shape_is_large = False

        if not isinstance(output_names, list):
            output_names = [output_names]

        numeric_grads = user_defined_grads or [
            self.get_numeric_gradient(
                place,
                self.scope,
                self.op,
                self.inputs,
                input_to_check,
                output_names,
                delta=numeric_grad_delta,
                in_place=in_place,
                transpose_input_list=transpose_input_list)
            for input_to_check in inputs_to_check
        ]
        analytic_grads = self._get_gradient(inputs_to_check, place,
                                            output_names, no_grad_set)
        self._assert_is_close(numeric_grads, analytic_grads, inputs_to_check,
                              max_relative_error,
                              "Gradient Check On %s" % str(place))
Example #7
    def setUp(self):
        self.scope = core.Scope()
        self._create_ids()
        self._create_scores()
        self._create_pre_ids()
        self.scope.var('selected_ids')
        self.scope.var('selected_scores')
Example #8
    def test_get_set(self):
        scope = core.Scope()
        arr = scope.var('tmp_lod_tensor_array')
        tensor_array = arr.get_lod_tensor_array()
        self.assertEqual(0, len(tensor_array))
        cpu = core.CPUPlace()
        for i in range(10):
            t = core.LoDTensor()
            t.set(numpy.array([i], dtype='float32'), cpu)
            t.set_recursive_sequence_lengths([[1]])
            tensor_array.append(t)

        self.assertEqual(10, len(tensor_array))

        for i in range(10):
            t = tensor_array[i]
            self.assertEqual(numpy.array(t), numpy.array([i], dtype='float32'))
            self.assertEqual([[1]], t.recursive_sequence_lengths())

            t = core.LoDTensor()
            t.set(numpy.array([i + 10], dtype='float32'), cpu)
            t.set_recursive_sequence_lengths([[1]])
            tensor_array[i] = t
            t = tensor_array[i]
            self.assertEqual(numpy.array(t),
                             numpy.array([i + 10], dtype='float32'))
            self.assertEqual([[1]], t.recursive_sequence_lengths())
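set_recursive_sequence_lengths describes how the rows of a LoDTensor group into sequences; [[1]] above marks each one-row tensor as a single length-1 sequence. A short sketch of a less trivial layout, the same one Example #28 uses below:

    t = core.LoDTensor()
    t.set(numpy.arange(10).reshape(10, 1).astype('float32'), cpu)
    # ten rows split into three sequences of lengths 3, 6 and 1
    t.set_recursive_sequence_lengths([[3, 6, 1]])
    assert t.recursive_sequence_lengths() == [[3, 6, 1]]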
Example #9
    def get_numeric_grad(self, place, check_name):
        scope = core.Scope()
        self._check_grad_helper()
        op = create_op(scope, self.op_type, self.inputs, self.outputs,
                       self.attrs)
        return get_numeric_gradient(place, scope, op, self.inputs_fp32,
                                    check_name, ['Output'])
Example #10
    def test_standalone_executor_statistics(self):
        if os.getenv("FLAGS_static_executor_perfstat_filepath") is None:
            return

        paddle.seed(2020)
        main_program, startup_program, fetch_list = build_program()
        fetch_list = [x.name for x in fetch_list]

        p = core.Place()
        p.set_place(self.place)
        executor = StandaloneExecutor(p, startup_program.desc,
                                      main_program.desc, core.Scope())

        helper_profiler = profiler.Profiler(
            targets=[profiler.ProfilerTarget.CPU], scheduler=(1, 2))
        helper_profiler.start()
        for i in range(self.iter_n):
            executor.run({}, fetch_list)
            helper_profiler.step()
        helper_profiler.stop()

        perfstat_filepath = os.environ[
            'FLAGS_static_executor_perfstat_filepath']
        self.assertTrue(os.path.exists(perfstat_filepath))
        with open(perfstat_filepath, 'r') as load_f:
            stat_res = json.load(load_f)
            self.assertTrue(len(stat_res) > 0)

        os.remove(perfstat_filepath)
        shutil.rmtree('./profiler_log')
Example #11
    def check_with_place(self, place):
        self.config_test_case()
        scope = core.Scope()

        # set input
        x_selected_rows = scope.var('X').get_selected_rows()
        x_selected_rows.set_rows(self.grad_rows)
        x_tensor = x_selected_rows.get_tensor()
        x_np = np.random.random(self.grad_shape).astype("float32")
        x_np[np.abs(x_np) < self.max_relative_error] = 0.5
        x_tensor.set(x_np, place)

        # set output
        out_selected_rows = scope.var('Out').get_selected_rows()

        # run clip_by_norm_op
        clip_by_norm_op = fluid.op.Operator(
            "clip_by_norm", max_norm=self.max_norm, X='X', Out='Out')
        clip_by_norm_op.run(scope, place)

        # check output
        self.assertEqual(out_selected_rows.rows(), self.grad_clipped_rows)
        out_tensor = out_selected_rows.get_tensor()
        y_np = np.zeros(self.grad_clipped_shape)
        y_np[0] = np.sum(x_np[0:2])
        y_np[1] = x_np[2]
        y_np[2] = x_np[3]
        norm = np.sqrt(np.sum(np.square(y_np)))
        if norm > self.max_norm:
            output = self.max_norm * y_np / norm
        else:
            output = y_np
        self.assertTrue(
            np.allclose(
                np.array(out_tensor), output, atol=1e-5, equal_nan=False))
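SelectedRows, used throughout these examples, is Paddle's sparse row format: a list of row indices plus a dense tensor holding one value row per index, with a height giving the logical number of rows. A minimal construction sketch (the name 'MySparse' and the shapes are illustrative):

    sr = scope.var('MySparse').get_selected_rows()
    sr.set_height(10)                   # logical row count of the full matrix
    sr.set_rows([0, 4, 7])              # indices of the rows actually present
    values = np.ones((3, 12)).astype('float32')
    sr.get_tensor().set(values, place)  # one dense value row per index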
Example #12
    def build_program(self):
        place = fluid.CPUPlace()
        if fluid.core.is_compiled_with_cuda():
            place = fluid.CUDAPlace(0)
        main = fluid.Program()
        startup = fluid.Program()
        with fluid.program_guard(main, startup):
            image, label, self.loss = residual_block(2)
            eval_program = main.clone()
            opt = fluid.optimizer.SGD(learning_rate=0.001)
            opt.minimize(self.loss)
        self.scope = core.Scope()
        exe = fluid.Executor(place)
        exe.run(startup, scope=self.scope)
        self.eval_graph = GraphWrapper(program=eval_program,
                                       in_nodes={
                                           'image': image.name,
                                           'label': label.name
                                       },
                                       out_nodes={'loss': self.loss.name})
        self.train_graph = GraphWrapper(program=main,
                                        in_nodes={
                                            'image': image.name,
                                            'label': label.name
                                        },
                                        out_nodes={'loss': self.loss.name})
Example #13
    def main(self, tensor, expect_array, expect_lod, expect_max_len, level=0):
        place = self.place()
        program = Program()
        with program_guard(program):
            x = layers.data(name='x', shape=[10])
            x.persistable = True
            table = layers.lod_rank_table(x, level=level)
            max_len = layers.max_sequence_len(table)
            max_len.persistable = True
            array = layers.lod_tensor_to_array(x, table)
            array.persistable = True

            result = layers.array_to_lod_tensor(array, table)
            result.persistable = True
        exe = Executor(place)
        scope = core.Scope()
        exe.run(program, feed={'x': tensor}, scope=scope)
        var = scope.find_var(array.name)
        array = var.get_lod_tensor_array()
        if expect_array is not None and expect_lod is not None:
            self.check_array_same(array, expect_array, expect_lod)
        self.check_tensor_same(
            scope.find_var(result.name).get_tensor(), tensor)

        self.assertEqual(
            numpy.array(scope.find_var(max_len.name).get_tensor())[0],
            expect_max_len)
Example #14
    def check_with_place(self, place):
        scope = core.Scope()

        # create and initialize Grad Variable
        height = 10
        rows = [0, 4, 7]
        self.row_numel = 12

        x_selected_rows = scope.var('X1').get_selected_rows()
        x_selected_rows.set_height(height)
        x_selected_rows.set_rows(rows)
        np_array = np.ones((len(rows), self.row_numel)).astype("float32")
        np_array[0, 0] = 2.0
        np_array[2, 8] = 4.0

        x_tensor = x_selected_rows.get_tensor()
        x_tensor.set(np_array, place)

        out_selected_rows = scope.var('Out1').get_selected_rows()
        # create and run sqrt operator
        sqrt_op = Operator("sqrt", X='X1', Out='Out1')
        sqrt_op.run(scope, place)

        # get and compare result
        result_array = np.array(out_selected_rows.get_tensor())
        self.assertTrue(np.allclose(result_array, np.sqrt(np_array)))
Example #15
    def check_with_place(self, place):
        scope = core.Scope()

        # create and initialize Id Variable
        ids_tensor = scope.var('Ids').get_tensor()
        ids_array = np.array([[0], [4], [3], [5]]).astype("int64")
        ids_tensor.set(ids_array, place)

        # create and initialize W Variable
        rows = [0, 1, 2, 3, 4, 5, 6]
        row_numel = 12

        w_selected_rows = scope.var('W').get_selected_rows()
        w_selected_rows.set_height(len(rows))
        w_selected_rows.set_rows(rows)
        w_array = np.ones((len(rows), row_numel)).astype("float32")
        for i in range(len(rows)):
            w_array[i] *= i
        w_tensor = w_selected_rows.get_tensor()
        w_tensor.set(w_array, place)

        # create Out Variable
        out_tensor = scope.var('Out').get_tensor()

        # create and run lookup_table operator
        lookup_table = Operator("lookup_table", W='W', Ids='Ids', Out='Out')
        lookup_table.run(scope, place)

        # get result from Out
        result_array = np.array(out_tensor)
        # W row i holds the value i, so each looked-up row must equal its id
        for idx, row in enumerate(ids_array):
            assert (row[0] == result_array[idx]).all()
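Functionally, lookup_table is an embedding lookup: each id selects one row of W, so out[i] == W[ids[i]]. The assertion loop above is equivalent to this hedged numpy check:

    expected = w_array[ids_array.flatten()]
    assert (result_array == expected).all()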
Example #16
    def check_with_place(self, place):
        scope = core.Scope()
        x_rows = [0, 5, 5, 4, 19]
        height = 20
        row_numel = 2

        np_array = np.ones((len(x_rows), row_numel)).astype("float32")
        np_array[1, :] = 2.0
        np_array[2, :] = 3.0
        np_array[3, :] = 4.0

        # initialize input variable X
        x = scope.var('X').get_selected_rows()
        x.set_rows(x_rows)
        x.set_height(height)
        x_tensor = x.get_tensor()
        x_tensor.set(np_array, place)

        # initialize input variable Out
        out = scope.var("Out").get_tensor()

        op = Operator("get_tensor_from_selected_rows", X="X", Out="Out")

        op.run(scope, place)

        out_array = np.array(out)
        self.assertEqual((5, 2), out_array.shape)
        assert (out_array == np_array).all()
Example #17
    def check_with_selected_rows(self, place):
        scope = core.Scope()
        x_rows = [0, 1, 5, 4, 19]
        x_height = 20
        row_numel = 2
        np_array = np.ones((len(x_rows), row_numel)).astype("float32")

        # initialize input variable
        x = scope.var('X').get_selected_rows()
        x.set_rows(x_rows)
        x.set_height(x_height)
        x_tensor = x.get_tensor()
        x_tensor.set(np_array, place)

        # initialize the Out variable
        out = scope.var("Out").get_selected_rows()
        out_tensor = out.get_tensor()

        op = Operator("share_data", X="X", Out="Out")
        op.run(scope, place)

        out_height = out.height()
        out_rows = out.rows()
        self.assertTrue(np.allclose(np_array, out_tensor))
        self.assertEqual(x_height, out_height)
        self.assertEqual(x_rows, out_rows)
Example #18
    def check_with_place(self, place, inplace):
        scope = core.Scope()
        if inplace:
            self.create_lod_tensor(scope, place, "x1")
            self.create_selected_rows(scope, place, "x2", True)
            out = scope.var("x1").get_tensor()
            out_name = "x1"
        else:
            self.create_selected_rows(scope, place, "x1", True)
            self.create_lod_tensor(scope, place, "x2")
            out = scope.var("out").get_tensor()
            out_name = "out"

        # create and run sum operator
        sum_op = Operator("sum", X=["x1", "x2"], Out=out_name)
        sum_op.run(scope, place)

        result = np.ones((1, self.height)).astype(np.int32).tolist()[0]
        for ele in self.rows:
            result[ele] += 1

        out_t = np.array(out)
        self.assertEqual(out_t.shape[0], self.height)
        self.assertTrue(
            np.array_equal(
                out_t,
                self._get_array([i
                                 for i in range(self.height)], self.row_numel)
                * np.tile(
                    np.array(result).reshape(self.height, 1), self.row_numel)))
Example #19
    def check_with_place(self, place):
        scope = core.Scope()

        rows = [0, 1, 2, 3, 4, 5, 6]
        row_numel = 7

        w_selected_rows = scope.var('W').get_selected_rows()
        w_selected_rows.set_height(len(rows))
        w_selected_rows.set_rows(rows)
        w_array = np.ones((len(rows), row_numel)).astype("float32")
        for i in range(len(rows)):
            w_array[i] *= i
        w_tensor = w_selected_rows.get_tensor()
        w_tensor.set(w_array, place)

        # create Ids Variable to receive the split rows
        ids = scope.var("Ids").get_tensor()

        # create and run lookup_sparse_table_grad_split operator
        lookup_table = Operator("lookup_sparse_table_grad_split",
                                Grad='W',
                                Row={'Ids'},
                                Value={'W'},
                                is_entry=False,
                                tablename="sparse")
        lookup_table.run(scope, place)

        # get the split rows and values and print them for inspection
        rows_array = np.array(ids)
        print(rows_array)
        print("=" * 32)
        value = scope.find_var("W").get_tensor()
        value_array = np.array(value)
        print(value_array.shape)
        print(value_array)
Example #20
    def check_with_place(self, place):
        scope = core.Scope()

        # create and initialize Variable
        feature_len = 12
        rows = [0, 4, 4, 7]
        np_array = np.ones((len(rows), feature_len)).astype("float32")

        in_x = scope.var('X').get_selected_rows()
        in_x.set_height(len(rows))
        in_x.set_rows(rows)
        in_x_tensor = in_x.get_tensor()
        in_x_tensor.set(np_array, place)

        # create Out Variable
        out_tensor = scope.var('Out').get_tensor()

        # create and run extract_rows operator
        extract_rows_op = Operator("extract_rows", X='X', Out='Out')
        extract_rows_op.run(scope, place)

        # get result from Out
        result_array = np.array(out_tensor)
        result_array = [ele[0] for ele in result_array]
        assert result_array == rows
Example #21
    def check_with_place(self, place, lazy_mode):
        scope = core.Scope()
        self.setup(scope, place, lazy_mode)

        op_args = dict()
        op_args['lazy_mode'] = lazy_mode
        for key, np_array in self.dense_inputs.items():
            var = scope.var(key).get_tensor()
            var.set(np_array, place)
            op_args[key] = key
        for s in self.sparse_inputs:
            op_args[s] = s
        for s in self.outputs:
            var = scope.var(s).get_tensor()
            var.set(self.init_output, place)
            op_args[s] = s
        for k in self.attrs:
            op_args[k] = self.attrs[k]

        # create and run adam operator
        adam_op = Operator("adam", **op_args)
        adam_op.run(scope, place)

        for key, np_array in self.outputs.items():
            out_var = scope.var(key).get_tensor()
            actual = np.array(out_var)
            actual = actual.reshape([actual.size])
            np_array = np_array.reshape([np_array.size])

            for i in range(np_array.size):
                self.assertLess(abs(actual[i] - np_array[i]), 0.00001)
Example #22
    def check_with_place(self, place, in_name, out_name):
        scope = core.Scope()

        # create and initialize Grad Variable
        in_height = 10
        in_rows = [0, 4, 7]
        in_row_numel = 12
        scale = 2.0

        in_selected_rows = scope.var(in_name).get_selected_rows()
        in_selected_rows.set_height(in_height)
        in_selected_rows.set_rows(in_rows)
        in_array = np.random.random(
            (len(in_rows), in_row_numel)).astype("float32")

        in_tensor = in_selected_rows.get_tensor()
        in_tensor.set(in_array, place)

        # create the output Variable
        out_selected_rows = scope.var(out_name).get_selected_rows()
        out_tensor = out_selected_rows.get_tensor()
        out_tensor._set_dims(in_tensor._get_dims())

        # create and run scale operator
        scale_op = Operator("scale", X=in_name, Out=out_name, scale=scale)
        scale_op.run(scope, place)

        # get and compare result
        out_height = out_selected_rows.height()
        out_rows = out_selected_rows.rows()
        result_array = np.array(out_tensor)

        assert (in_array * scale == result_array).all()
        assert in_height == out_height
        assert in_rows == out_rows
Example #23
    def test_fetch_handler(self):
        place = core.CPUPlace()
        scope = core.Scope()

        table = np.random.random((3, 10)).astype("float32")

        prog = Program()
        block = prog.current_block()
        var_emb = block.create_var(name='emb', type=core.VarDesc.VarType.FP32)
        var_emb3 = block.create_var(name='emb3', type=core.VarDesc.VarType.FP32)

        class FH(fluid.executor.FetchHandler):
            def handler(self, fetch_dict):
                assert len(fetch_dict) == 1

        table_var = scope.var('emb').get_tensor()
        table_var.set(table, place)
        fh = FH(var_dict={'emb': var_emb}, period_secs=2)
        fm = fluid.trainer_factory.FetchHandlerMonitor(scope, fh)

        fm.start()
        time.sleep(3)
        fm.stop()

        default_fh = fluid.executor.FetchHandler(
            var_dict={'emb': var_emb,
                      'emb2': None,
                      'emb3': var_emb3},
            period_secs=1)
        default_fm = fluid.trainer_factory.FetchHandlerMonitor(scope,
                                                               default_fh)
        default_fm.start()
        time.sleep(5)
        default_fm.stop()
Example #24
    def test_network(self):
        if self.network is None or not core.is_compiled_with_cuda():
            return

        baseline_first_loss, baseline_last_loss = None, None
        for use_cuda in [True]:
            for use_python_mem_opt in [True, False]:
                print('network: {}, use_cuda: {}, use_python_mem_opt: {}, '
                      'use_ir_mem_opt: {}'.format(self.network.__name__,
                                                  use_cuda, use_python_mem_opt,
                                                  not use_python_mem_opt))
                with fluid.program_guard(fluid.Program(), fluid.Program()):
                    with fluid.scope_guard(core.Scope()):
                        if use_cuda is True and use_python_mem_opt is True:
                            baseline_first_loss, baseline_last_loss = self.check_network_convergence(
                                self.network,
                                use_cuda=use_cuda,
                                memory_opt=use_python_mem_opt)
                        else:
                            cur_first_loss, cur_last_loss = self.check_network_convergence(
                                self.network,
                                use_cuda=use_cuda,
                                memory_opt=use_python_mem_opt)

                            self.assertAlmostEqual(
                                np.mean(baseline_last_loss),
                                np.mean(cur_last_loss),
                                delta=1e-2)
                            self.assertAlmostEqual(
                                np.mean(baseline_first_loss),
                                np.mean(cur_first_loss),
                                delta=1e-2)
Example #25
    def check_with_place(self, place):
        scope = core.Scope()

        row_width = 12
        # create and initialize Grad Variable
        grad_height = 10
        grad_rows = [0, 4, 7]

        grad_selected_rows = scope.var('Grad').get_selected_rows()
        grad_selected_rows.set_height(grad_height)
        grad_selected_rows.set_rows(grad_rows)
        grad_array = np.ones((len(grad_rows), row_width)).astype("float32")
        grad_array[0, 0] = 2.0
        grad_array[2, 8] = 4.0

        grad_tensor = grad_selected_rows.get_tensor()
        grad_tensor.set(grad_array, place)

        # create and initialize Param Variable
        param_rows = [0, 1, 2, 3, 4, 5, 6, 7]

        w_selected_rows = scope.var('Param').get_selected_rows()
        w_selected_rows.set_height(len(param_rows))
        w_selected_rows.set_rows(param_rows)
        w_selected_rows.sync_index()
        w_array = np.ones((len(param_rows), row_width)).astype("float32")
        for i in range(len(param_rows)):
            w_array[i] *= i
        w_tensor = w_selected_rows.get_tensor()
        w_tensor.set(w_array, place)

        w_before_optimize = np.array(w_tensor)

        # create and initialize LearningRate Variable
        lr_value = 0.1
        lr = scope.var('LearningRate').get_tensor()
        lr_array = np.full((1), lr_value).astype("float32")
        lr.set(lr_array, place)

        # optimize with Python
        w_after_optimize = np.copy(w_before_optimize)
        for index, row_id in enumerate(grad_rows):
            w_after_optimize[row_id] = (w_before_optimize[row_id] -
                                        lr_value * grad_array[index])

        # create and run sgd operator
        sgd_op = Operator(
            "sgd",
            Param='Param',
            Grad='Grad',
            ParamOut='Param',
            LearningRate='LearningRate')
        sgd_op.run(scope, place)

        # get and compare result
        result_array = np.array(w_tensor)
        assert (result_array == w_after_optimize).all()
Example #26
    def _create_scope_vec(self):
        # Hold forward variables
        tmp_scope_vec = core.VarBase(core.VarDesc.VarType.FP32, [],
                                     "program_out_scope",
                                     core.VarDesc.VarType.STEP_SCOPES, True)

        inner_scope = core.Scope()
        tmp_scope_vec.value().set_scope(inner_scope)
        return tmp_scope_vec
Example #27
    def setUp(self):
        self.init_test()
        self.ids = np.random.randint(low=0, high=15,
                                     size=self.ids_shape).astype("int64")
        self.flat_ids = self.ids.flatten()
        self.w_fp32 = np.random.random((15, 32)).astype("float32")
        self.w_bf16 = convert_float_to_uint16(self.w_fp32)
        self.scope = core.Scope()
        self.place = core.CPUPlace()
Example #28
    def test_grad(self):
        place = core.CPUPlace()
        program = Program()
        with program_guard(program):
            x = layers.data(name='x',
                            shape=[1],
                            dtype='float32',
                            stop_gradient=False)
            y = layers.data(name='y',
                            shape=[1],
                            dtype='bool',
                            stop_gradient=False)

            level = 0

            out_true, out_false = split_lod_tensor(input=x,
                                                   mask=y,
                                                   level=level)
            out = merge_lod_tensor(in_true=out_true,
                                   in_false=out_false,
                                   mask=y,
                                   x=x,
                                   level=level)
            mean = layers.mean(out)

            append_backward(mean)

        tensor = core.LoDTensor()
        tensor.set(np.arange(10).reshape(10, 1).astype('float32'), place)
        tensor.set_recursive_sequence_lengths([[3, 6, 1]])

        mask_np = np.array([0, 1, 0]).astype('bool')
        mask_np = np.expand_dims(mask_np, axis=1)

        mask = core.LoDTensor()
        mask.set(mask_np, place)

        exe = Executor(place)
        scope = core.Scope()

        g_vars = program.global_block().var(x.name + "@GRAD")
        g_out = [
            item.sum() for item in map(
                np.array,
                exe.run(program,
                        feed={
                            'x': tensor,
                            'y': mask
                        },
                        fetch_list=[g_vars],
                        scope=scope,
                        return_numpy=False))
        ]

        g_out_sum = np.array(g_out).sum()

        self.assertAlmostEqual(1.0, g_out_sum, delta=0.1)
Example #29
    def setUp(self):
        self.scope = core.Scope()
        self._create_ids()
        self._create_pre_scores()
        self._create_scores()
        self._create_pre_ids()
        self.scope.var('selected_ids').get_tensor()
        self.scope.var('selected_scores').get_tensor()
        self.scope.var('parent_idx').get_tensor()
Example #30
    def check_with_place(self, place):
        scope = core.Scope()

        # create and initialize W Variable
        table_size = 10000
        row_numel = 8

        w_selected_rows = scope.var('W').get_selected_rows()
        w_selected_rows.set_height(table_size)
        w_array = np.ones((table_size, row_numel)).astype("float32")
        for i in range(table_size):
            w_array[i] *= i
        w_tensor = w_selected_rows.get_tensor()
        w_tensor.set(w_array, place)

        # create and initialize Id Variable
        ids = scope.var("Ids").get_tensor()
        ids_array1 = np.array([0, 2, 3, 2, 5, 0, 100]).astype("int64")
        ids.set(ids_array1, place)

        # create Out Variable
        out_tensor = scope.var('Out').get_tensor()

        # create and run lookup_sparse_table operator
        lookup_table = Operator("lookup_sparse_table",
                                W='W',
                                Ids='Ids',
                                Out='Out',
                                min=-5.0,
                                max=10.0,
                                seed=10)
        lookup_table.run(scope, place)

        # get result from Out
        result_array1 = np.array(out_tensor)
        # each output row must match the table slot assigned to its id
        assert (result_array1[0] == w_array[0]).all()
        assert (result_array1[1] == w_array[1]).all()
        assert (result_array1[2] == w_array[2]).all()
        assert (result_array1[3] == w_array[1]).all()
        assert (result_array1[4] == w_array[3]).all()
        assert (result_array1[5] == w_array[0]).all()
        assert (result_array1[6] == w_array[4]).all()

        # set new ids and run the operator again
        ids = scope.var("Ids").get_tensor()
        ids_array2 = np.array([4, 2, 3, 7, 100000]).astype("int64")
        ids.set(ids_array2, place)
        lookup_table.run(scope, place)

        result_array2 = np.array(out_tensor)
        assert (result_array2[0] == w_array[5]).all()
        assert (result_array2[1] == w_array[1]).all()
        assert (result_array2[2] == w_array[2]).all()
        assert (result_array2[3] == w_array[6]).all()
        assert (result_array2[4] == w_array[7]).all()
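The two rounds of assertions encode lookup_sparse_table's growth behavior: an id not yet in the table is appended in first-seen order, so ids map to slots 0, 1, 2, ... regardless of their numeric value. A hedged pure-Python sketch of that id-to-slot mapping, reproducing the expected rows above:

    slot = {}
    def lookup(ids):
        # assign the next free slot to each previously unseen id
        return [slot.setdefault(i, len(slot)) for i in ids]
    assert lookup([0, 2, 3, 2, 5, 0, 100]) == [0, 1, 2, 1, 3, 0, 4]
    assert lookup([4, 2, 3, 7, 100000]) == [5, 1, 2, 6, 7]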