def infer(sess, model, decoder, source_sent, id_to_vocab, end_id, temp):
    """Perform inference, i.e. generate a paraphrase for the source sentence.

    Args:
        sess : TensorFlow session
        model : dict mapping tensor names to tensors
        decoder : 0 for greedy decoding, 1 for sampling
        source_sent : source sentence to generate a paraphrase for
        id_to_vocab : dict of vocabulary index to word
        end_id : the end-of-sentence token id
        temp : the sampling temperature to use when `decoder` is 1

    Returns:
        str : the generated paraphrase
    """
    # Tokenize the single source sentence and convert it to vocabulary ids.
    seq_source_words, seq_source_ids = preprocess_batch([source_sent])
    seq_source_len = [len(seq_source) for seq_source in seq_source_ids]

    feed_dict = {
        model['seq_source_ids']: seq_source_ids,
        model['seq_source_lengths']: seq_source_len,
        model['decoder_technique']: decoder,
        model['sampling_temperature']: temp
    }
    feeds = [
        model['predictions']
        #model['final_sequence_lengths']
    ]

    # feeds holds a single tensor, so the first [0] unwraps the run() result
    # and the second [0] selects the one sentence in the batch.
    predictions = sess.run(feeds, feed_dict)[0][0]
    print("PREDICTIONS ARE {}".format(predictions))
    return translate(predictions, decoder, id_to_vocab, end_id)
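
# A minimal usage sketch for the function above, assuming a TF1-style
# checkpoint; `build_model`, `load_vocab`, and the checkpoint path are
# hypothetical stand-ins, not part of the original code:
#
#     import tensorflow as tf
#
#     model = build_model()               # hypothetical: returns the tensor dict
#     id_to_vocab, end_id = load_vocab()  # hypothetical: vocab map and end token id
#
#     with tf.Session() as sess:
#         tf.train.Saver().restore(sess, 'model.ckpt')  # illustrative path
#         # decoder=1 selects sampling; 0.75 is the sampling temperature.
#         print(infer(sess, model, 1, 'how old are you', id_to_vocab, end_id, 0.75))
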
def infer(sess, args, model, id_to_vocab, end_id):
    """Perform inference on a model.

    This is intended to be interactive: a user runs it from the command
    line, enters a source sentence, and receives a paraphrase as output,
    continuously within a loop.

    Args:
        sess : TensorFlow session
        args : parsed ArgumentParser configuration
        model : a dictionary containing the model tensors
        id_to_vocab : dict of vocabulary index to word
        end_id : the end-of-sentence token id
    """
    from preprocess_data import preprocess_batch

    while True:
        source_sent = input("Enter source sentence: ")
        seq_source_words, seq_source_ids = preprocess_batch([source_sent])
        seq_source_len = [len(seq_source) for seq_source in seq_source_ids]

        # Map the decoder flag to the integer the graph expects; fail loudly
        # on an unrecognized value instead of leaving `decoder` unbound.
        if args.decoder == 'greedy':
            decoder = 0
        elif args.decoder == 'sample':
            decoder = 1
        else:
            raise ValueError("Unknown decoder: {}".format(args.decoder))

        feed_dict = {
            model['seq_source_ids']: seq_source_ids,
            model['seq_source_lengths']: seq_source_len,
            model['decoder_technique']: decoder,
            model['sampling_temperature']: args.sampling_temperature,
        }
        feeds = [model['predictions'], model['final_sequence_lengths']]
        predictions, final_sequence_lengths = sess.run(feeds, feed_dict)

        for sent_pred in predictions:
            # Strip the trailing end-of-sentence token before joining words.
            if sent_pred[-1] == end_id:
                sent_pred = sent_pred[0:-1]
            print("Paraphrase : {}".format(
                ' '.join([id_to_vocab[pred] for pred in sent_pred])))
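
# A sketch of the argument-parser wiring this interactive variant expects;
# the attribute names `decoder` and `sampling_temperature` come from the
# code above, while the choices and defaults are assumptions:
#
#     import argparse
#
#     parser = argparse.ArgumentParser()
#     parser.add_argument('--decoder', choices=['greedy', 'sample'], default='sample')
#     parser.add_argument('--sampling_temperature', type=float, default=0.75)
#     args = parser.parse_args()
#     # infer(sess, args, model, id_to_vocab, end_id) then loops on stdin.
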
def infer(self, decoder, source_sent, id_to_vocab, temp, how_many):
    """Perform inference, i.e. generate paraphrases for the source sentence.

    Args:
        decoder : 0 for greedy decoding, 1 for sampling
        source_sent : source sentence to generate paraphrases for
        id_to_vocab : dict of vocabulary index to word
        temp : the sampling temperature to use when `decoder` is 1
        how_many : number of paraphrase candidates to decode at once

    Returns:
        the generated paraphrase(s), as produced by `self.translate`
    """
    # Batch the same sentence `how_many` times so sampling yields several
    # candidate paraphrases in a single run.
    seq_source_words, seq_source_ids = preprocess_batch([source_sent] * how_many)
    seq_source_len = [len(seq_source) for seq_source in seq_source_ids]

    feed_dict = {
        self.model['seq_source_ids']: seq_source_ids,
        self.model['seq_source_lengths']: seq_source_len,
        self.model['decoder_technique']: decoder,
        self.model['sampling_temperature']: temp
    }
    feeds = [
        self.model['predictions']
        #self.model['final_sequence_lengths']
    ]

    # feeds holds a single tensor, so [0] unwraps the run() result to the
    # full batch of `how_many` predicted sequences.
    predictions = self.sess.run(feeds, feed_dict)[0]
    return self.translate(predictions, decoder, id_to_vocab, seq_source_words[0])
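
# Hypothetical usage of the method above, assuming a wrapper class (named
# `Paraphraser` here purely for illustration) that owns `self.sess`,
# `self.model`, and `self.translate`:
#
#     paraphraser = Paraphraser('model.ckpt')  # illustrative constructor
#     # Ask for 10 sampled candidates of the same source sentence.
#     candidates = paraphraser.infer(1, 'how old are you', id_to_vocab, 0.75, 10)
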