Example #1
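    # assumes: import jittor as jt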
    def test_other_rand(self):
        # Random samples matching the shape of an existing array
        a = jt.array([1.0, 2.0, 3.0])
        b = jt.rand_like(a)   # uniform samples in [0, 1)
        c = jt.randn_like(a)  # standard normal samples
        assert b.shape == c.shape
        assert b.shape == a.shape
        print(b, c)
        # randint(low, high, shape) draws integers from [low, high)
        assert jt.randint(10, 20, (2000, )).min() == 10
        assert jt.randint(10, 20, (2000, )).max() == 19
        # with a single bound, values are drawn from [0, 10)
        assert jt.randint(10, shape=(2000, )).max() == 9
        assert jt.randint_like(a, 10).shape == a.shape
Example #2
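        # assumes: import jittor as jt; import numpy as np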
        def check_gpu_with_cpu(T, C, N, S, S_min):
            jt.set_global_seed(1)

            # Random batch of input log-probabilities of size (T, N, C)
            input = jt.randn(T, N, C).log_softmax(2)

            # Random batch of targets (0 = blank, 1..C-1 = classes)
            target = jt.randint(low=1, high=C, shape=(N, S), dtype=jt.int)
            _input_jt = input  # keep a handle for the gradient check below

            # Every input sequence has full length T; target lengths vary in [S_min, S]
            input_lengths = jt.full((N, ), T, dtype=jt.int)
            target_lengths = jt.randint(low=S_min,
                                        high=S + 1,
                                        shape=(N, ),
                                        dtype=jt.int)
            # Per-sample CTC loss on CPU (reduction='none')
            loss = jt.ctc_loss(input,
                               target,
                               input_lengths,
                               target_lengths,
                               reduction='none')
            _loss_jt = loss  # keep a handle for the CPU/GPU comparison below

            loss_jt = loss.numpy()

            # Gradient of the CPU loss w.r.t. the input
            dinput_jt = jt.grad(_loss_jt, _input_jt)
            dinput_jt.sync()

            # Re-run the same computation on GPU and compare with the CPU result
            with jt.flag_scope(use_cuda=1):
                input = input.copy()
                target = target.copy()
                input_lengths = input_lengths.copy()
                target_lengths = target_lengths.copy()
                loss = jt.ctc_loss(input,
                                   target,
                                   input_lengths,
                                   target_lengths,
                                   reduction='none')
                grad = jt.grad(loss, input)
                # GPU forward and backward results must match the CPU reference
                np.testing.assert_allclose(_loss_jt.numpy(),
                                           loss.numpy(),
                                           atol=1e-5,
                                           rtol=1e-5)
                np.testing.assert_allclose(dinput_jt.numpy(),
                                           grad.numpy(),
                                           atol=1e-5,
                                           rtol=1e-5)
Example #3
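        # assumes: import jittor as jt; import numpy as np; import torch; import torch.nn as tnn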
        def check(T, C, N, S, S_min):
            jt.set_global_seed(0)

            # Random batch of input log-probabilities of size (T, N, C)
            input = jt.randn(T, N, C).log_softmax(2)

            # Random batch of targets (0 = blank, 1..C-1 = classes)
            target = jt.randint(low=1, high=C, shape=(N, S), dtype=jt.int)
            _input_jt = input  # keep a handle for the gradient check below

            # Every input sequence has full length T; target lengths vary in [S_min, S]
            input_lengths = jt.full((N, ), T, dtype=jt.int)
            target_lengths = jt.randint(low=S_min,
                                        high=S + 1,
                                        shape=(N, ),
                                        dtype=jt.int)
            # Per-sample CTC loss in Jittor (reduction='none')
            loss = jt.ctc_loss(input,
                               target,
                               input_lengths,
                               target_lengths,
                               reduction='none')
            _loss_jt = loss  # keep a handle for the gradient check below

            loss_jt = loss.numpy()

            # Convert the same data to PyTorch tensors for the reference computation
            input = torch.Tensor(input.numpy()).detach().requires_grad_()
            input_lengths = torch.LongTensor(input_lengths.numpy())
            target_lengths = torch.LongTensor(target_lengths.numpy())
            target = torch.LongTensor(target.numpy())
            # PyTorch reference loss, which must match the Jittor loss
            loss = tnn.CTCLoss(reduction='none')(input, target, input_lengths,
                                                 target_lengths)
            np.testing.assert_allclose(loss.detach().numpy(),
                                       loss_jt,
                                       rtol=1e-5,
                                       atol=1e-5)

            # Gradient of the Jittor loss w.r.t. the input
            dinput_jt = jt.grad(_loss_jt, _input_jt)
            dinput_jt.sync()

            # Backward through the PyTorch reference loss
            loss.sum().backward()