def test_dygraph(self):
    """Check paddle.remainder / the ``%`` operator against numpy in dygraph mode.

    Covers elementwise remainder for int64 and int32 tensors, plus float and
    float16 tensors where the result's sign follows the divisor (Python
    semantics).
    """
    with fluid.dygraph.guard():
        # BUG FIX: np.random.rand() yields floats in [0, 1), so casting to an
        # integer dtype produced all-zero arrays — the remainder was computed
        # mod 0 and the assertion was vacuous. randint gives real coverage.
        np_x = np.random.randint(1, 100, [22, 128, 3]).astype('int64')
        np_y = np.random.randint(1, 100, [22, 128, 3]).astype('int64')
        x = paddle.to_tensor(np_x)
        y = paddle.to_tensor(np_y)
        z = paddle.remainder(x, y)
        np_z = z.numpy()
        z_expected = np.mod(np_x, np_y)
        self.assertEqual((np_z == z_expected).all(), True)

        # Float remainder: result takes the divisor's sign.
        np_x = np.array([-3.3, 11.5, -2, 3.5])
        np_y = np.array([-1.2, 2., 3.3, -2.3])
        x = paddle.to_tensor(np_x)
        y = paddle.to_tensor(np_y)
        z = x % y
        z_expected = np.array([-0.9, 1.5, 1.3, -1.1])
        self.assertEqual(np.allclose(z_expected, z.numpy()), True)

        # Same fix as above for the int32 path.
        np_x = np.random.randint(1, 100, [22, 128, 3]).astype('int32')
        np_y = np.random.randint(1, 100, [22, 128, 3]).astype('int32')
        x = paddle.to_tensor(np_x)
        y = paddle.to_tensor(np_y)
        z = paddle.remainder(x, y)
        np_z = z.numpy()
        z_expected = np.mod(np_x, np_y)
        self.assertEqual((np_z == z_expected).all(), True)

        # float16 path: small integer-valued operands are exactly representable.
        np_x = np.array([-3, 11, -2, 3])
        np_y = np.array([-1, 2, 3, -2])
        x = paddle.to_tensor(np_x, dtype="float16")
        y = paddle.to_tensor(np_y, dtype="float16")
        z = x % y
        z_expected = np.array([0, 1, 1, -1])
        self.assertEqual(np.allclose(z_expected, z.numpy()), True)
# --- Example #2 ---
    def test_name(self):
        """A user-supplied ``name`` must be embedded in the output variable's name."""
        with fluid.program_guard(fluid.Program()):
            lhs = fluid.data(name="x", shape=[2, 3], dtype="int64")
            rhs = fluid.data(name="y", shape=[2, 3], dtype="int64")

            out = paddle.remainder(lhs, rhs, name='div_res')
            self.assertEqual(('div_res' in out.name), True)
    def test_dygraph(self):
        """Exercise integer and float remainder on the NPU in dygraph mode."""
        paddle.set_device('npu:0')
        with fluid.dygraph.guard():
            # Positive int64 operands via paddle.remainder, hand-computed answers.
            a_np = np.array([2, 3, 8, 7]).astype('int64')
            b_np = np.array([1, 5, 3, 3]).astype('int64')
            a = paddle.to_tensor(a_np)
            b = paddle.to_tensor(b_np)
            result = paddle.remainder(a, b)
            result_np = result.numpy()
            expected = np.array([0, 3, 2, 1])
            self.assertEqual((result_np == expected).all(), True)

            # Float remainder via the % operator: sign follows the divisor.
            a = paddle.to_tensor(np.array([-3.3, 11.5, -2, 3.5]))
            b = paddle.to_tensor(np.array([-1.2, 2., 3.3, -2.3]))
            result = a % b
            expected = np.array([-0.9, 1.5, 1.3, -1.1])
            self.assertEqual(np.allclose(expected, result.numpy()), True)

            # Mixed-sign int64 operands via the % operator.
            a = paddle.to_tensor(np.array([-3, 11, -2, 3]), dtype="int64")
            b = paddle.to_tensor(np.array([-1, 2, 3, -2]), dtype="int64")
            result = a % b
            expected = np.array([0, 1, 1, -1])
            self.assertEqual(np.allclose(expected, result.numpy()), True)
# --- Example #4 ---
    def _append_optimize_op(self, block, param_and_grad):
        """Append the slow-weight update ops for one ``(param, grad)`` pair.

        Appears to implement the Lookahead scheme (names reference ``lookahead``,
        ``self.k``, ``self.alpha``, and a "slow" accumulator — TODO confirm against
        the enclosing class): on the first global step the slow weights are seeded
        from the fast weights, and whenever the global step is a multiple of
        ``self.k`` both fast and slow weights are moved to
        ``alpha * fast + (1 - alpha) * slow``.

        NOTE(review): this builds static-graph ops, so statement order is
        load-bearing — reads of ``slow_var`` after a ``paddle.assign`` observe the
        assigned value at runtime. Do not reorder.

        Args:
            block: target program block (unused directly here; ops are appended
                via the functional API).
            param_and_grad: pair where ``[0]`` is the parameter variable.
        """
        # Constant helpers for the step comparisons below.
        one_var = paddle.ones(shape=[1], dtype='int32', name='lookahead_ones')
        zero_var = paddle.zeros(shape=[1],
                                dtype='int32',
                                name='lookahead_zeros')
        # Persistent scalar holding the sync period k.
        k_var = layers.create_global_var(
            name=unique_name.generate("lookahead_k"),
            shape=[1],
            value=self.k,
            dtype='int32',
            persistable=True)

        # global_step mod k — zero exactly on sync steps.
        mod = paddle.remainder(self._global_step_var, k_var)

        # cond_1 == 1.0 only on the very first step (used to seed slow weights).
        cond_1 = paddle.equal(self._global_step_var, one_var)
        cond_1 = paddle.cast(cond_1, dtype='float32')

        # cond_2 == 1.0 on every k-th step (used to gate the sync update).
        cond_2 = paddle.equal(mod, zero_var)
        cond_2 = paddle.cast(cond_2, dtype='float32')

        slow_var = self._get_accumulator(self._slow_str, param_and_grad[0])

        # Step 1: slow = param; otherwise slow stays unchanged.
        tmp_var = cond_1 * param_and_grad[0] + (1 - cond_1) * slow_var
        paddle.assign(tmp_var, slow_var)

        # Candidate synced value: alpha * fast + (1 - alpha) * slow.
        tmp_var = self.alpha * param_and_grad[0] + (1.0 -
                                                    self.alpha) * slow_var
        # On sync steps write the blended value into the parameter...
        tmp_var_1 = cond_2 * tmp_var + (1 - cond_2) * param_and_grad[0]
        paddle.assign(tmp_var_1, param_and_grad[0])

        # ...and into the slow weights; off sync steps both keep their values.
        tmp_var_1 = cond_2 * tmp_var + (1 - cond_2) * slow_var
        paddle.assign(tmp_var_1, slow_var)