import json

import tensorflow as tf
import tensorflow_addons as tf_ad

import data_utils

# NOTE: config, start_time, word_to_id, id_to_tag and the model module `x`
# are assumed to be set up earlier in the original script (config parsing
# and vocabulary loading).


def predict_line(text):
    # Map each character to its vocabulary id (0 for out-of-vocabulary
    # characters) and pad the single-sample batch.
    dataset = tf.keras.preprocessing.sequence.pad_sequences(
        [[word_to_id.get(char, 0) for char in text]], padding='post')
    logits, text_lens = model.predict(dataset)
    paths = []
    for logit, text_len in zip(logits, text_lens):
        # Viterbi-decode the emission scores with the learned CRF
        # transition matrix to get the most likely tag sequence.
        viterbi_path, _ = tf_ad.text.viterbi_decode(
            logit[:text_len], model.transition_params)
        paths.append(viterbi_path)
    # Pair each character with its decoded tag and group them into entities.
    entities_result = data_utils.format_result(
        list(text), [id_to_tag[tag_id] for tag_id in paths[0]])
    return entities_result
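# A minimal sketch of the Viterbi step used above, on made-up data (the
# 3x3 scores and transition matrix below are assumptions for illustration,
# not values from this project). tf_ad.text.viterbi_decode takes a
# [seq_len, num_tags] emission-score matrix and a [num_tags, num_tags]
# transition matrix, and returns the highest-scoring tag-id sequence
# together with its score.
def _viterbi_decode_demo():
    import numpy as np
    dummy_logits = np.array([[3.0, 0.1, 0.2],   # scores for 3 positions
                             [0.2, 2.5, 0.1],   # over 3 hypothetical tags
                             [0.1, 0.3, 2.0]])
    dummy_transitions = np.zeros((3, 3))        # neutral transition scores
    best_path, best_score = tf_ad.text.viterbi_decode(
        dummy_logits, dummy_transitions)
    print(best_path, best_score)                # [0, 1, 2] 7.5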
config.n_vocab = len(word_to_id)
time_dif = data_utils.get_time_dif(start_time)
print("Time usage:", time_dif)

# Rebuild the model and restore the latest trained checkpoint into it.
model = x.MyModel(config)
optimizer = tf.keras.optimizers.Adam(config.learning_rate)
ckpt = tf.train.Checkpoint(optimizer=optimizer, model=model)
ckpt.restore(tf.train.latest_checkpoint(config.save_path))

# Interactive loop: read a line, tag it, and print the extracted entities.
while True:
    text = input("input:")
    dataset = tf.keras.preprocessing.sequence.pad_sequences(
        [[word_to_id.get(char, 0) for char in text]], padding='post')
    print(dataset)
    logits, text_lens = model.predict(dataset)
    paths = []
    for logit, text_len in zip(logits, text_lens):
        viterbi_path, _ = tf_ad.text.viterbi_decode(
            logit[:text_len], model.transition_params)
        paths.append(viterbi_path)
    print(paths[0])
    print([id_to_tag[tag_id] for tag_id in paths[0]])
    entities_result = data_utils.format_result(
        list(text), [id_to_tag[tag_id] for tag_id in paths[0]])
    print(json.dumps(entities_result, indent=4, ensure_ascii=False))
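# For reference, a sketch of the padding behaviour relied on above, with
# made-up ids (not from this project's vocabulary): pad_sequences with
# padding='post' right-pads shorter sequences with 0.
#
#   tf.keras.preprocessing.sequence.pad_sequences(
#       [[1, 2, 3], [4, 5]], padding='post')
#   # -> [[1 2 3]
#   #     [4 5 0]]
#
# Note that 0 is also the id that word_to_id.get(char, 0) assigns to
# unknown characters, so padding and OOV characters share id 0.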