Example 1
    def calc_dygraph_grad(self, place):
        self.program_desc, self.fwd_op_num = self.get_program_desc()
        self.attrs = self.prepare_attrs()
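        # attach a hash of the program desc as the op's 'program_id' attribute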
        self.attrs['program_id'] = _hash_with_id(self.program_desc)

        with fluid.dygraph.guard(place):
            # Step 1. run forward
            inputs, input_param_list = self.prepare_dygraph_input(place, True)
            outputs = self.prepare_dygraph_output()

            framework._dygraph_tracer().trace_op(type=self.op_type,
                                                 inputs=inputs,
                                                 outputs=outputs,
                                                 attrs=self.attrs)

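            # set the gradient variable type on each input parameter before running backward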
            for param in input_param_list:
                var_type = self._get_grad_vartype(param.name)
                if var_type is None:
                    continue
                param._set_grad_type(var_type)

            # Step 2. run backward
            # NOTE: the unittest only supports a single output for now
            actual_outs = outputs['Out']
            assert len(actual_outs) == 1
            actual_outs[0].backward()

            # Step 3. prepare grads
            grads = []
            for param in input_param_list:
                grad = param.gradient()
                grads.append(grad)
            return grads
Example 2
    def calc_dygraph_output(self, place):
        self.program_desc, self.fwd_op_num = self.get_program_desc()
        self.attrs = self.prepare_attrs()
        self.attrs['program_id'] = _hash_with_id(self.program_desc)

        with fluid.dygraph.guard(place):
            inputs = self.prepare_dygraph_input(place)
            outputs = self.prepare_dygraph_output()

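            # trace the op under test in dygraph mode and return its 'Out' outputs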
            framework._dygraph_tracer().trace_op(type=self.op_type,
                                                 inputs=inputs,
                                                 outputs=outputs,
                                                 attrs=self.attrs)
            return outputs['Out']
Example 3
    def test_eager(self):
        paddle.set_device('cpu')
        paddle.enable_static()
        # step 1: construct program
        x = paddle.static.data(shape=[2, 4], name='x')
        x.stop_gradient = False
        y = paddle.static.data(shape=[4, 2], name='y')
        y.stop_gradient = False
        out = paddle.matmul(x, y)

        main_program = paddle.static.default_main_program()
        program = _append_backward_desc(main_program, [out])

        paddle.disable_static('cpu')
        # step 2: call run_program in eager mode
        with _test_eager_guard():
            x_t = paddle.ones([2, 4])
            x_t.name = "x"
            x_t.stop_gradient = False
            y_t = paddle.ones([4, 2])
            y_t.name = "y"
            y_t.stop_gradient = False

            fake_var = paddle.zeros([1])
            fake_var.name = 'Fake_var'

            out_t = _create_out(out)

            scope = core.Scope()
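            # attrs are passed as a flat (name, value, ...) tuple; 'program_id' comes from _hash_with_id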
            attrs = ('global_block', program.desc.block(0), 'start_op_index', 0,
                     'end_op_index', main_program.desc.block(0).op_size(),
                     'is_test', False, 'program_id', _hash_with_id(program))

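            # call the run_program op directly through _C_ops, unpacking the attrs tuple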
            _C_ops.run_program([x_t, y_t], [fake_var], [out_t], [scope],
                               [fake_var], None, *attrs)

            loss = paddle.mean(out_t)
            loss.backward()

            self.assertTrue(np.array_equal(np.ones([2, 2]) * 4, out_t.numpy()))
            self.assertTrue(
                np.array_equal(np.ones([2, 4]) * 0.5, x_t.grad.numpy()))
            self.assertTrue(
                np.array_equal(np.ones([4, 2]) * 0.5, y_t.grad.numpy()))
Example 4
    def _train_pure_fp16_program_id(self):
        program_id = _hash_with_id(self._train_pure_fp16_program, self)
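        # register the build strategy under this id so the cached executor can pick it up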
        core._set_cached_executor_build_strategy(program_id,
                                                 self._build_strategy)

        return program_id
Example 5
    def _infer_program_id(self):
        return _hash_with_id(self._infer_program, self)
Example 6
    def prepare_attrs(self):
        return ('global_block', self.program_desc.block(0), 'start_op_index',
                0, 'end_op_index', self.fwd_op_num, 'program_id',
                _hash_with_id(self.program_desc, self))