def demo():
    """Interactive demo: restore the latest TextCNN checkpoint and classify
    sentences typed on stdin until a blank (or whitespace-only) line is entered.
    """
    vocab, _ = read_dict()
    tag2label = {'0': 0, '1': 1}
    # Reverse mapping so numeric predictions can be printed as tag strings.
    int2tag = {label: tag for tag, label in tag2label.items()}

    ckpt_dir = os.path.join(MODEL_PATH, FLAGS.DEMO, 'checkpoints')
    ckpt_file = tf.train.latest_checkpoint(ckpt_dir)
    logger.info("load model from {}".format(ckpt_file))

    # NOTE(review): 'eopches' looks like a typo for 'epoches', but it has to
    # match TextCNN's constructor keyword — confirm against the class before
    # renaming it anywhere.
    textCNN = TextCNN(
        model_path=ckpt_file,
        vocab=vocab,
        tag2label=tag2label,
        eopches=FLAGS.epoches,
    )

    saver = tf.compat.v1.train.Saver()
    with tf.compat.v1.Session(config=cfg()) as sess:
        print('============= demo =============')
        saver.restore(sess, ckpt_file)
        while True:
            print('Please input your sentence:')
            inp = input()
            # Empty / whitespace-only input ends the session.
            if inp == '' or inp.isspace():
                print('See you next time!')
                break
            sentences = [inp.strip()]
            pred = textCNN.predict(sess, sentences)[0]
            probs = textCNN.predict_prob(sess, sentences)[0]
            print("\n{}".format(sentences))
            for idx, prob in enumerate(probs):
                print("\t{} -> {}".format(int2tag[idx], prob))
            print("\tTag: {}".format(int2tag[pred]))
# NOTE(review): the lines below are the tail of a `def args():` defined in an
# earlier chunk; they are indented to sit inside that function's body.
    parser.add_argument("--decay_rate", type=float, default=0.9, help="the decay rate for lr")
    parser.add_argument("--sequence_length", type=int, default=50, help="sequence length")
    parser.add_argument("--vocab_size", type=int, default=150346, help="the num of vocabs")
    parser.add_argument("--embed_size", type=int, default=200, help="embedding size")
    # NOTE(review): argparse `type=bool` converts any non-empty string to True,
    # so "--is_training False" still yields True — consider a store_true flag
    # or a str-to-bool converter (left unchanged here; args() is incomplete
    # in this chunk).
    parser.add_argument("--is_training", type=bool, default=True, help='training or not')
    parser.add_argument("--keep_prob", type=float, default=0.9, help='keep prob')
    parser.add_argument("--clip_gradients", type=float, default=5.0, help='clip gradients')
    parser.add_argument("--filter_sizes", type=list, default=[2, 3, 4], help='filter size')
    parser.add_argument("--num_filters", type=int, default=128, help='num filters')
    parser.add_argument('--mode', type=str, default='train', help='train|test|demo')
    parser.add_argument('--DEMO', type=str, default='tf_rnn', help='model for test and demo')
    # parse_known_args keeps unrecognized CLI tokens in the second tuple slot
    # instead of erroring out.
    return parser.parse_known_args()


# Module-level setup: parse the CLI flags once and load the vocabulary
# mappings shared by train/test/demo.
FLAGS, unparsed = args()
word2int, int2word = read_dict()
tag2label = {'0': 0, '1': 1}
# Reverse mapping: numeric label -> tag string, for printing predictions.
int2tag = {l: t for t, l in tag2label.items()}


def cfg():
    """Build a TF session config: grow GPU memory on demand and cap the
    per-process GPU memory fraction at 20%."""
    config = tf.compat.v1.ConfigProto()
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 0.2  # need ~700MB GPU memory
    return config


def train():
    # NOTE(review): `train` continues past the end of this chunk; only its
    # first statements are visible here.
    iter = -1  # presumably -1 means "use the whole corpus" — confirm in read_corpus
    iter_size = 20000
    # `train`/`dev` here shadow this function's own name inside its body.
    train, dev = read_corpus(random_state=1234, separator='\t', iter=iter, iter_size=iter_size)