Example No. 1
def init_dtype(self):
    if core.is_compiled_with_cuda():
        # CUDA build: double-precision weights and int64 ids.
        self.dtype = "float64"
        self.ids_dtype = "int64"
    elif core.is_compiled_with_npu():
        # NPU build: these kernels run in float32 with int32 ids.
        self.dtype = "float32"
        self.ids_dtype = "int32"
Example No. 2
def setUp(self):
    self.init_dtype()
    self.initcase()
    if core.is_compiled_with_npu():
        # Route the OpTest machinery to the NPU.
        self.__class__.use_npu = True
    elif core.is_compiled_with_cuda():
        # Mark that a float64 gradient check exists for this op.
        self.__class__.exist_fp64_check_grad = True
Example No. 3
def setUp(self):
    np.random.seed(0)
    self.shape = [3, 3]
    self.index_shape = [1, 3]
    self.index_np = np.array([[0, 1, 2]]).astype('int64')
    self.x_np = np.random.random(self.shape).astype(np.float32)
    self.place = [paddle.CPUPlace()]
    self.axis = 0
    if core.is_compiled_with_cuda():
        # Also run the test on the first GPU when one is available.
        self.place.append(paddle.CUDAPlace(0))
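These fixtures line up with NumPy's take_along_axis broadcasting rules: a [1, 3] int64 index gathers one element per column of the [3, 3] input along axis 0. A minimal sketch of the reference computation such a test would likely compare against, assuming the op under test mirrors numpy.take_along_axis (the assertion code itself is not part of this excerpt):

    import numpy as np

    np.random.seed(0)
    x_np = np.random.random([3, 3]).astype(np.float32)
    index_np = np.array([[0, 1, 2]]).astype('int64')

    # Pick x_np[index_np[0, j], j] for each column j; the index
    # broadcasts against x_np along the non-indexed axis.
    out_ref = np.take_along_axis(x_np, index_np, axis=0)
    print(out_ref.shape)  # (1, 3)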
Example No. 4
def setUp(self):
    np.random.seed(0)
    self.shape = [1, 3]
    self.index_shape = [1, 1]
    self.index_np = np.array([[0]]).astype('int64')
    self.x_np = np.random.random(self.shape).astype(np.float32)
    self.place = [paddle.CPUPlace()]
    self.axis = 0
    self.value_np = 99.0
    self.value_shape = [1]
    # Feed a deep copy so later in-place updates do not alter self.x_np.
    self.x_feed = copy.deepcopy(self.x_np)
    if core.is_compiled_with_cuda():
        self.place.append(paddle.CUDAPlace(0))
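Here a scalar value of 99.0 is scattered into the input at index [[0]] along axis 0, which is why the fixture keeps a pristine copy of the input. A minimal sketch of that reference step, assuming the op under test mirrors numpy.put_along_axis (the comparison code is not shown in this excerpt):

    import copy

    import numpy as np

    np.random.seed(0)
    x_np = np.random.random([1, 3]).astype(np.float32)
    index_np = np.array([[0]]).astype('int64')
    x_ref = copy.deepcopy(x_np)

    # Scatter 99.0 along axis 0 at the rows named by index_np;
    # the (1, 1) index broadcasts across all three columns here.
    np.put_along_axis(x_ref, index_np, 99.0, axis=0)
    print(x_ref[0, 0])  # 99.0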
Example No. 5
    def initcase(self):
        self.op_type = "c_embedding"
        table = np.random.random((17, 64)).astype(self.dtype)
        # Ids are drawn from [0, 34); only those falling inside
        # [start_index, end_index) hit this shard of the table.
        ids = np.random.randint(
            low=0, high=17 * 2, size=(2, 4)).astype(self.ids_dtype)
        self.start_index = 10
        # Force several ids to a value inside the local range.
        ids[0][1] = 12
        ids[0][2] = 12
        ids[1][2] = 12
        ids[1][3] = 12
        self.end_index = self.start_index + 17

        self.inputs = {'W': table, 'Ids': ids}
        np_out = get_c_embedding(self.start_index, self.end_index, table, ids)
        self.outputs = {'Out': np_out.reshape((2, 4, 64))}
        self.attrs = {'start_index': self.start_index}

        if core.is_compiled_with_npu():
            self.__class__.use_npu = True
        elif core.is_compiled_with_cuda():
            self.__class__.exist_fp64_check_grad = True
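The helper get_c_embedding is not part of this excerpt. A plausible reference implementation, judging from how it is called here (ids offset by start_index select rows of the local table shard, and out-of-range ids presumably map to zero vectors, as in a sharded embedding lookup):

    import numpy as np

    def get_c_embedding(start, end, table, ids):
        # Ids inside [start, end) select row (id - start) of this
        # shard of the table; all other ids yield zero vectors.
        index = ids.flatten()
        out_of_range = (index < start) | (index >= end)
        local = index - start
        local[out_of_range] = 0          # any valid row; zeroed below
        out = table[local].copy()
        out[out_of_range] = 0.0
        return out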
Example No. 6
        # (Excerpt: tail of a setUp that builds the expected outputs.)
        param_out, moment1_out, moment2_out = adamw_step(self.inputs,
                                                         self.attrs)

        self.outputs = {
            'Moment1Out': moment1_out,
            'Moment2Out': moment2_out,
            'ParamOut': param_out,
            'Beta1PowOut': np.array([beta1_pow]).astype("float32") * beta1,
            'Beta2PowOut': np.array([beta2_pow]).astype("float32") * beta2
        }

    def test_check_output(self):
        self.check_output()


@unittest.skipIf(not core.is_compiled_with_cuda(),
                 "core is not compiled with CUDA")
class TestAdamW2(OpTest):
    def setUp(self):
        '''Test AdamW Op with supplied attributes
        '''
        self.op_type = "adamw"
        param = np.random.uniform(-1, 1, (2, 2)).astype("float32")
        grad = np.random.uniform(-1, 1, (2, 2)).astype("float32")
        moment1 = np.random.uniform(-1, 1, (2, 2)).astype("float32")
        # The second moment is kept positive (sampled from [0, 1)).
        moment2 = np.random.random((2, 2)).astype("float32")

        learning_rate = 0.004
        beta1 = 0.78
        beta2 = 0.836
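adamw_step is a NumPy reference helper that is not shown in this excerpt. A minimal sketch of the decoupled-weight-decay Adam update it presumably implements; the dictionary key names and the coeff/epsilon attributes are assumptions inferred from the inputs/attrs dictionaries used above:

    import numpy as np

    def adamw_step(inputs, attrs):
        param, grad = inputs['Param'], inputs['Grad']
        m1, m2 = inputs['Moment1'], inputs['Moment2']
        lr = inputs['LearningRate']
        beta1_pow, beta2_pow = inputs['Beta1Pow'], inputs['Beta2Pow']
        beta1, beta2 = attrs['beta1'], attrs['beta2']
        eps, coeff = attrs['epsilon'], attrs.get('coeff', 0.01)

        # Decoupled weight decay: shrink the parameter before the
        # Adam step instead of folding the decay into the gradient.
        param = param * (1.0 - lr * coeff)

        m1_out = beta1 * m1 + (1.0 - beta1) * grad
        m2_out = beta2 * m2 + (1.0 - beta2) * np.square(grad)
        lr_t = lr * np.sqrt(1.0 - beta2_pow) / (1.0 - beta1_pow)
        param_out = param - lr_t * m1_out / (np.sqrt(m2_out) + eps)
        return param_out, m1_out, m2_out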
Example No. 7
def test_check_grad(self):
    # Gradient check for the embedding weight 'W' on whichever
    # accelerator this build supports.
    if core.is_compiled_with_cuda():
        self.check_grad_with_place(core.CUDAPlace(0), ['W'], 'Out')
    elif core.is_compiled_with_npu():
        self.check_grad_with_place(core.NPUPlace(0), ['W'], 'Out')
Example No. 8
def test_check_output(self):
    # Forward check on whichever accelerator this build supports.
    if core.is_compiled_with_cuda():
        self.check_output_with_place(core.CUDAPlace(0))
    elif core.is_compiled_with_npu():
        self.check_output_with_place(core.NPUPlace(0))
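Test classes like these are typically driven by the standard unittest entry point; a minimal runner sketch (module scaffolding assumed, not part of the excerpts above):

    if __name__ == "__main__":
        unittest.main()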