Code example #1
    def calc_dygraph_grad(self, place):
        self.program_desc, self.fwd_op_num = self.get_program_desc()
        self.attrs = self.prepare_attrs()

        with fluid.dygraph.guard(place):
            # Step 1. run forward
            inputs, input_param_list = self.prepare_dygraph_input(place, True)
            outputs = self.prepare_dygraph_output()

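            # execute the whole program desc as a single op; results are
            # written in place into the pre-created output holders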
            _C_ops.run_program(inputs['X'], inputs['Params'], outputs['Out'],
                               outputs['OutScope'], outputs['DOut'], None,
                               *self.attrs)

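            # set each parameter's gradient variable type; parameters
            # without a recorded grad var type are skipped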
            for param in input_param_list:
                var_type = self._get_grad_vartype(param.name)
                if var_type is None:
                    continue
                param._set_grad_type(var_type)

            # Step 2. run backward
            # NOTE: the unittest only supports a single output for now
            actual_outs = outputs['Out']
            assert len(actual_outs) == 1
            actual_outs[0].backward()

            # Step 3. prepare grads
            grads = [param.gradient() for param in input_param_list]
            return grads
Code example #2
    def calc_dygraph_output(self, place):
        self.program_desc, self.fwd_op_num = self.get_program_desc()
        self.attrs = self.prepare_attrs()

        with fluid.dygraph.guard(place):
            inputs = self.prepare_dygraph_input(place)
            outputs = self.prepare_dygraph_output()

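            # forward-only variant of the call above: backward is never run,
            # so only the filled 'Out' holders are returned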
            _C_ops.run_program(inputs['X'], inputs['Params'], outputs['Out'],
                               outputs['OutScope'], outputs['DOut'], None,
                               *self.attrs)
            return outputs['Out']
Code example #3
    def __call__(self, inputs):
        in_vars, out_vars = self._prepare(inputs)

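        # run_program expects its attributes as a flat tuple of alternating
        # (name, value) pairs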
        attrs = ('global_block', self.program.desc.block(0), 'start_op_index',
                 0, 'end_op_index', self._get_end_op_index(), 'is_test',
                 not self.training, 'program_id', self.program_id)

        self._cast_fp16_if_pure_fp16(in_vars)

        _C_ops.run_program(self._valid_vars(in_vars),
                           self._valid_vars(self._params),
                           self._valid_vars(out_vars), self._tmp_scope_vec,
                           self._double_grads, *attrs)
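        # free the temporary scope when no grad is needed, then rebuild the
        # original nested output structure and strip empty placeholder values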
        self.drop_scope_if_no_grad()
        restored_nest_out = self._restore_out(out_vars)
        return self._remove_no_value(restored_nest_out)
Code example #4
    def test_eager(self):
        paddle.set_device('cpu')
        paddle.enable_static()
        # step 1: construct program
        x = paddle.static.data(shape=[2, 4], name='x')
        x.stop_gradient = False
        y = paddle.static.data(shape=[4, 2], name='y')
        y.stop_gradient = False
        out = paddle.matmul(x, y)

        main_program = paddle.static.default_main_program()
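        # append backward ops for `out` to the program desc so that
        # run_program can compute gradients in eager mode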
        program = _append_backward_desc(main_program, [out])

        paddle.disable_static('cpu')
        # step 2: call run_program in eager mode
        with _test_eager_guard():
            x_t = paddle.ones([2, 4])
            x_t.name = "x"
            x_t.stop_gradient = False
            y_t = paddle.ones([4, 2])
            y_t.name = "y"
            y_t.stop_gradient = False

            fake_var = paddle.zeros([1])
            fake_var.name = 'Fake_var'

            out_t = _create_out(out)

            scope = core.Scope()
            attrs = ('global_block', program.desc.block(0), 'start_op_index', 0,
                     'end_op_index', main_program.desc.block(0).op_size(),
                     'is_test', False, 'program_id', _hash_with_id(program))

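            # fake_var fills the parameter and double-grad slots, which are
            # empty for this program but still expected by run_program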
            _C_ops.run_program([x_t, y_t], [fake_var], [out_t], [scope],
                               [fake_var], None, *attrs)

            loss = paddle.mean(out_t)
            loss.backward()

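            # all-ones (2, 4) @ (4, 2) yields a (2, 2) of 4s; mean() sends a
            # gradient of 1/4 to each output element, so each input grad is 0.5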
            self.assertTrue(np.array_equal(np.ones([2, 2]) * 4, out_t.numpy()))
            self.assertTrue(
                np.array_equal(np.ones([2, 4]) * 0.5, x_t.grad.numpy()))
            self.assertTrue(
                np.array_equal(np.ones([4, 2]) * 0.5, y_t.grad.numpy()))