Example #1
0
 def _single_run(self, graph):
     """Score translations for every source node of *graph* and attach
     normalized scores to its translation edges.

     Calls the parent hook first (optional debug logging), then pulls
     the next per-segment counts from the ``self.counts`` iterator.
     """
     # called for optional debug logging
     GraphProcess._single_run(self, graph)
     # use the next() builtin instead of the Python-2-only .next() method
     seg_counts = next(self.counts)

     for u in graph.source_nodes_iter():
         self._add_normalized_scores(
             *self._score_translations(graph, u, seg_counts))
Example #2
0
 def _single_run(self, graph):
     """Copy pre-normalized base scores onto translation edges.

     Normalization is skipped because the base scores are already
     normalized; each edge's score attribute is set from its base
     score attribute (defaulting to 0.0 when absent).

     TODO: handle hypernodes
     """
     GraphProcess._single_run(self, graph)

     # skip normalization, because base scores are already normalized
     for source in graph.source_nodes_iter():
         # first pass to figure out which score attr is present
         base_score_attr = self._find_base_score_attr(graph, source)
         # second pass to add the best scores; distinct names avoid
         # rebinding the outer loop variable (the original shadowed it)
         for _u, _v, data in graph.trans_edges_iter(source):
             data[self.score_attr] = data.get(base_score_attr, 0.0)
Example #3
0
 def _single_run(self, graph):
     """Score and normalize translations for eligible source nodes.

     A source node is skipped when its lempos is absent from the
     lexicon, not present in the ambiguity map, or rejected by
     ``self.filter``.
     """
     # called for optional debug logging
     GraphProcess._single_run(self, graph)
     # use the next() builtin instead of the Python-2-only .next() method
     seg_counts = next(self.counts)

     # skip if source lempos is not in ambiguity map or is filtered
     for u in graph.source_nodes_iter():
         try:
             source_lempos = u" ".join(graph.node[u]["lex_lempos"])
         except KeyError:
             # narrowed from a bare except: only a missing node/attr
             # (i.e. source lempos not in lexicon, e.g. punctuation)
             # should be silently skipped
             continue

         if source_lempos not in self.ambig_map:
             continue

         if self.filter(graph, u):
             continue

         self._add_normalized_scores(
             *self._score_translations(graph, u, seg_counts))