def test_crossentropy_softmax_and_gradient_z_sparse_viterbi():
    """
    Check NativeOp's sparse crossentropy-softmax against Theano's dense
    1-hot reference on a small fixed Viterbi alignment.

    NOTE(review): SOURCE contained two near-identical definitions of this
    test (the second silently shadowed the first); merged into one.
    NOTE(review): the original only printed nll1/nll2 without asserting —
    the equivalence check below is added; it assumes nll2 has shape
    (n_time, n_batch) flattening row-major to match nll1 — TODO confirm.
    """
    n_time = 3
    n_batch = 2
    n_dim = 5
    # Viterbi alignment: exactly one target class per (time, batch) position.
    alignment = np.array([[0, 1], [1, 2], [2, 3]], dtype="int32")
    # All-ones mask: every frame in every sequence is valid.
    mask = np.array([[1, 1], [1, 1], [1, 1]], dtype=f32)
    # Convert the dense alignment into the sparse representation
    # (targets, indices, weights, mask) expected by the native op.
    y_t, y_i, y_w, y_mask = NativeOp.onehot_to_sparse(alignment, mask)
    np.random.seed(123)  # deterministic logits for reproducibility
    z = np.random.randn(n_time, n_batch, n_dim).astype(f32)
    z_mask = np.array([[1, 1], [1, 1], [1, 1]], dtype=f32)
    # Reference: Theano's dense 1-hot crossentropy-softmax over the
    # flattened (time * batch, dim) logits.
    nll1, _pcx1 = T.nnet.crossentropy_softmax_1hot(
        x=T.as_tensor_variable(z).reshape((n_time * n_batch, n_dim)),
        y_idx=T.as_tensor_variable(alignment).reshape((n_time * n_batch,)))
    # Candidate: the native sparse implementation on the same inputs.
    nll2, _gradz2 = NativeOp.crossentropy_softmax_and_gradient_z_sparse(
        z, z_mask, y_t, y_i, y_w, y_mask)
    nll1 = nll1.eval()
    nll2 = nll2.eval()
    print("nll1:\n%r" % nll1)
    print("nll2:\n%r" % nll2)
    # With a full (all-ones) mask both paths compute the same softmax NLL
    # per frame, so the values must agree up to float32 precision.
    np.testing.assert_allclose(nll1, np.asarray(nll2).flatten(), rtol=1e-5)