Example No. 1
    def map_fn(self, splited_logits):
        # Greedy-decode every per-utterance probability matrix in the batch
        # and return the resulting list of transcription strings.
        from ctc_decoders import ctc_greedy_decoder

        decoded = []
        for value in splited_logits:
            decoded.append(
                ctc_greedy_decoder(probs_seq=value,
                                   vocabulary=self.vocab_array))
        return decoded
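A minimal usage sketch for Example No. 1 (assumed driver code, not taken from the original repository): split a batched softmax output into per-utterance probability matrices and greedy-decode each one. The vocabulary, shapes, and dummy data below are assumptions; the blank class is assumed to be the extra last column, since the ctc_decoders package typically expects len(vocabulary) + 1 probabilities per frame.

import numpy as np
from ctc_decoders import ctc_greedy_decoder  # same package Example No. 2 imports from

vocab_array = list("abcd' ")                               # assumed character vocabulary
batch_probs = np.random.rand(2, 30, len(vocab_array) + 1)  # (batch, time, vocab + blank)
batch_probs /= batch_probs.sum(axis=-1, keepdims=True)     # normalize rows to probabilities

splited_logits = list(batch_probs)  # list of per-utterance (time, vocab + blank) matrices
decoded = [
    ctc_greedy_decoder(probs_seq=probs, vocabulary=vocab_array)
    for probs in splited_logits
]
print(decoded)  # two short strings; meaningless here because the input is random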
Example No. 2
    def _perform_greedy(
        self,
        probs: np.ndarray,
    ):
        # Decode a single utterance's (time, num_classes) probability matrix
        # into a transcription string and return it as a tf.string tensor.
        # (numpy and tensorflow are expected as module-level imports np / tf.)
        from ctc_decoders import ctc_greedy_decoder

        decoded = ctc_greedy_decoder(
            probs, vocabulary=self.text_featurizer.non_blank_tokens)
        return tf.convert_to_tensor(decoded, dtype=tf.string)
Example No. 3
    def perform_greedy(self, probs: np.ndarray):
        # Same idea as Example No. 2, with the vocabulary taken from vocab_array.
        from ctc_decoders import ctc_greedy_decoder

        decoded = ctc_greedy_decoder(
            probs, vocabulary=self.text_featurizer.vocab_array)
        return tf.convert_to_tensor(decoded, dtype=tf.string)
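For Examples No. 2 and No. 3, a hedged single-utterance sketch (dummy data and an assumed vocabulary, not from the original repositories) showing the same decode-then-wrap pattern without the surrounding class:

import numpy as np
import tensorflow as tf
from ctc_decoders import ctc_greedy_decoder

vocab = ["a", "b", "c", "d", "'", " "]          # assumed non-blank vocabulary
probs = np.random.rand(20, len(vocab) + 1)      # (time, vocab + blank), dummy data
probs /= probs.sum(axis=-1, keepdims=True)      # rows sum to 1

decoded = ctc_greedy_decoder(probs, vocabulary=vocab)         # plain Python string
text_tensor = tf.convert_to_tensor(decoded, dtype=tf.string)  # as in the examples above
print(text_tensor.numpy().decode("utf-8"))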
Example No. 4
        0.04139363,
    ],
    [
        0.15882358,
        0.1235788,
        0.23376776,
        0.20510435,
        0.00279306,
        0.05294827,
        0.22298418,
    ],
]
greedy_result = ["ac'bdc", "b'da"]
beam_search_result = ['acdc', "b'a"]

# Greedy decoding returns a plain string, compared directly against the
# expected transcription.
assert ctc_greedy_decoder(np.array(probs_seq1), vocab_list) == greedy_result[0]

assert ctc_greedy_decoder(np.array(probs_seq2), vocab_list) == greedy_result[1]

beam_result1 = ctc_beam_search_decoder(
    probs_seq=np.array(probs_seq1),
    beam_size=beam_size,
    vocabulary=vocab_list,
)

beam_result2 = ctc_beam_search_decoder(
    probs_seq=np.array(probs_seq2),
    beam_size=beam_size,
    vocabulary=vocab_list,
)
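
As a hedged follow-up (not part of the original snippet): the ctc_decoders beam-search decoder is commonly described as returning its candidate hypotheses as (score, text) pairs sorted best-first, so the expected values above would typically be checked along these lines:

# Assumes (score, text) pairs sorted best-first; verify against the installed
# ctc_decoders version before relying on this.
assert beam_result1[0][1] == beam_search_result[0]  # 'acdc'
assert beam_result2[0][1] == beam_search_result[1]  # "b'a"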