Example #1
0
    def demo_backend(self, model, config, run_event):
        """Serve demo queries in a polling loop.

        Watches the global `query` list; when a question arrives, featurizes
        it (including ELMo character features via `Batcher`), runs the
        restored model, and publishes the answer span through the global
        `response`.
        """
        global query, response

        # Token/character vocabularies written during preprocessing.
        with open(config.word_dictionary, "r") as fh:
            word_dictionary = json.load(fh)
        with open(config.char_dictionary, "r") as fh:
            char_dictionary = json.load(fh)

        sess_config = tf.ConfigProto(allow_soft_placement=True)
        sess_config.gpu_options.allow_growth = True

        with model.graph.as_default():
            with tf.Session(config=sess_config) as sess:
                sess.run(tf.global_variables_initializer())
                # Restore the most recent checkpoint before serving.
                saver = tf.train.Saver()
                saver.restore(sess,
                              tf.train.latest_checkpoint(config.save_dir))
                if config.decay < 1.0:
                    # presumably swaps in averaged weights — confirm
                    # against model.assign_vars
                    sess.run(model.assign_vars)
                # Poll until the controlling thread clears the event.
                while run_event.is_set():
                    sleep(0.1)
                    if not query:
                        continue
                    cleaned = query[0].replace("''", '" ').replace("``", '" ')
                    context = word_tokenize(cleaned)
                    batcher = Batcher(config.elmo_vocab_file,
                                      config.cont_char_limit)
                    feats = convert_to_features(
                        config, query, word_dictionary, char_dictionary,
                        batcher)
                    tensor_names = ('context:0', 'context_char:0',
                                    'question:0', 'question_char:0',
                                    'context_cont:0', 'question_cont:0')
                    feed = {name: [feat]
                            for name, feat in zip(tensor_names, feats)}
                    yp1, yp2 = sess.run([model.yp1, model.yp2],
                                        feed_dict=feed)
                    yp2[0] += 1  # make the end index exclusive for slicing
                    response = " ".join(context[yp1[0]:yp2[0]])
                    query = []
Example #2
0
    def demo_backend(self, model, config, run_event):
        """Answer-extraction loop for the interactive demo.

        Watches the global `query` list; when a question arrives, runs the
        restored model and stores the predicted answer span in the global
        `response`.
        """
        global query, response

        # Load the token-to-id mappings produced at preprocessing time.
        with open(config.word_dictionary, "r") as fh:
            word_dictionary = json.load(fh)
        with open(config.char_dictionary, "r") as fh:
            char_dictionary = json.load(fh)

        sess_config = tf.ConfigProto(allow_soft_placement=True)
        sess_config.gpu_options.allow_growth = True

        with model.graph.as_default():
            with tf.Session(config=sess_config) as sess:
                sess.run(tf.global_variables_initializer())
                saver = tf.train.Saver()
                saver.restore(sess,
                              tf.train.latest_checkpoint(config.save_dir))
                if config.decay < 1.0:
                    # presumably applies averaged weights — confirm
                    # against model.assign_vars
                    sess.run(model.assign_vars)
                while run_event.is_set():
                    sleep(0.1)
                    if not query:
                        continue
                    normalized = query[0].replace("''", '" ').replace("``", '" ')
                    context = word_tokenize(normalized)
                    ctx, ctx_chars, ques, ques_chars = convert_to_features(
                        config, query, word_dictionary, char_dictionary)
                    feed = {
                        'context:0': [ctx],
                        'question:0': [ques],
                        'context_char:0': [ctx_chars],
                        'question_char:0': [ques_chars],
                    }
                    start, end = sess.run([model.yp1, model.yp2],
                                          feed_dict=feed)
                    end[0] += 1  # slice end is exclusive
                    response = " ".join(context[start[0]:end[0]])
                    query = []
 def answer():
     """Handle a form POST: featurize the submitted context and return the
     model's 10-way binary prediction as a string of '0'/'1' digits."""
     context = bottle.request.forms.get('context')
     c, q, context_num, batch_size = convert_to_features(config, context, token2idx)
     feed = {'context:0': c,
             'query:0': q,
             'batch_size:0': batch_size}
     raw = sess.run([model.pred], feed_dict=feed)
     # Sum the per-chunk predictions, then binarize each of the 10 slots.
     summed = np.array(raw).reshape([context_num, 10]).sum(axis=0)
     return ''.join(str(int(bool(v))) for v in summed)
Example #4
0
    def demo_backend(self, model, config, run_event):
        """Demo serving loop that also reports span-confidence scores.

        Polls the global `query`; for each question it predicts a start/end
        span, prints a sigmoid-squashed confidence for both boundaries, and
        publishes the answer text through the global `response`.
        """
        global query, response

        with open(config.word_dictionary, "r") as fh:
            word_dictionary = json.load(fh)
        with open(config.char_dictionary, "r") as fh:
            char_dictionary = json.load(fh)

        sess_config = tf.ConfigProto(allow_soft_placement=True)
        sess_config.gpu_options.allow_growth = True

        def _sigmoid(logit):
            # Logistic squashing of a raw logit into (0, 1).
            return 1 / (1 + math.exp(-logit))

        with model.graph.as_default():
            with tf.Session(config=sess_config) as sess:
                sess.run(tf.global_variables_initializer())
                saver = tf.train.Saver()
                saver.restore(sess,
                              tf.train.latest_checkpoint(config.save_dir))
                if config.decay < 1.0:
                    # presumably applies averaged weights — confirm
                    # against model.assign_vars
                    sess.run(model.assign_vars)
                while run_event.is_set():
                    sleep(0.1)
                    if not query:
                        continue
                    cleaned = query[0].replace("''", '" ').replace("``", '" ')
                    context = word_tokenize(cleaned)
                    c, ch, q, qh = convert_to_features(
                        config, query, word_dictionary, char_dictionary)
                    feed = {
                        'context:0': [c],
                        'question:0': [q],
                        'context_char:0': [ch],
                        'question_char:0': [qh],
                    }
                    yp1, yp2, logits1, logits2 = sess.run(
                        [model.yp1, model.yp2, model.logits1, model.logits2],
                        feed_dict=feed)
                    score1 = _sigmoid(logits1[0][yp1[0]])
                    score2 = _sigmoid(logits2[0][yp2[0]])
                    print("SV Confidence: ", score1, "EV Confidence: ",
                          score2)
                    yp2[0] += 1  # make the end index exclusive for slicing
                    response = " ".join(context[yp1[0]:yp2[0]])
                    query = []
 def get_pred(self, context):
     """Run the prediction model over each feature chunk of *context*.

     Each chunk produced by ``convert_to_features`` is fed through the
     session one at a time (batch size 1) alongside the precomputed query
     encoding held on the instance (``self.q_rnn``, ``self.q_mask``).

     Returns:
         The raw ``sess.run`` output (a one-element list) when there is a
         single chunk; for multiple chunks, the per-chunk outputs folded
         together with ``np.concatenate`` along axis 0.
         NOTE(review): if ``cs`` is empty this returns ``np.array`` — the
         function object itself, not an array. Looks unintended; confirm
         callers never hit the empty case.
     """
     keras.backend.clear_session()  # clear any previous Keras session state
     config = flags.FLAGS
     cs = convert_to_features(config, context, self.token2idx)
     # Placeholder; overwritten on the first loop iteration (see NOTE above).
     pred = np.array
     for i, c in enumerate(cs):
         fd = {
             'context:0': np.expand_dims(c, 0),  # add a batch dim of 1
             'pre_query:0': self.q_rnn,
             'pre_query_mask:0': self.q_mask,
             'batch_size:0': 1
         }
         p = self.sess.run([self.model_yunnan.pred_probability],
                           feed_dict=fd)
         if i == 0:
             # First chunk: keep the raw sess.run result (a list).
             pred = p
         else:
             # Subsequent chunks: accumulate along the leading axis.
             pred = np.concatenate((pred, p), axis=0)
     return pred