def AddEvaluation(self, task_context, batch_size, evaluation_max_steps=300, corpus_name=None, value=""): with tf.name_scope('evaluation'): n = self.evaluation n.update( self._AddBeamReader(task_context, batch_size, corpus_name, until_all_final=True, always_start_new_sentences=True, value=value)) self._BuildNetwork(list(n['features']), return_average=self._use_averaging) n.update( self._BuildSequence(batch_size, evaluation_max_steps, n['features'], n['state'], use_average=self._use_averaging)) n['eval_metrics'], n['documents'] = ( gen_parser_ops.beam_eval_output(n['state'])) return n
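# Usage sketch (illustrative, not from the original source). It assumes an
# already-constructed graph-builder instance, `parser`, that exposes the
# AddEvaluation method above; the task-context path and corpus name below are
# placeholders. Only AddEvaluation and its returned node names
# ('eval_metrics', 'documents') come from the code itself.
#
#   eval_nodes = parser.AddEvaluation('path/to/context.pbtxt',
#                                     batch_size=32,
#                                     corpus_name='tuning-set')
#   with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     # Each run pulls a batch through the beam reader and returns the
#     # accumulated eval metrics plus the annotated documents.
#     metrics, documents = sess.run(
#         [eval_nodes['eval_metrics'], eval_nodes['documents']])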