def handle_cop_with_np(self, relations, index, context, engine, info):
    """Handle copular verbs with NP complements."""
    subjs = self.process_subj(relations, index, context, engine, info)

    cop_index = Relation.get_children_with_dep('cop', relations, index)[0]
    cop = engine.analyze(relations, cop_index, context + [index])

    auxs = self.process_auxs(relations, index, context, engine, info)

    verb = ' '.join([word for word in auxs + [cop] if word is not None])

    self.process_ignorables(relations, index, context, engine, info)

    this = NounPhraseRuleset.extract(self, relations, index, context, engine,
                                     info)

    # TODO: handle cc/conj and preconj.
    complms = this['return_list']

    prop_ids = []
    for subj in subjs['return_list']:
        for compl in complms:
            # engine.emit((verb, subj, relations[index].word))
            prop_id = engine.emit((verb, subj, compl), 'P')
            prop_ids.append(prop_id)

    self.subjs = subjs
    self.auxs = auxs

    return {'return_value': None, 'prop_ids': prop_ids, 'this': this}
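# --- Illustrative sketch (not part of the ruleset above) ---
# The nested loop in handle_cop_with_np emits one 'P' proposition per
# (subject, complement) pair. The toy engine below is hypothetical and only
# records what emit() receives; it is not the real engine API.
class _ToyEngine:
    def __init__(self):
        self.props = []

    def emit(self, content, prop_type):
        self.props.append((content, prop_type))
        return len(self.props)  # simple proposition id

_engine = _ToyEngine()
for _subj in ['Mary']:
    for _compl in ['a teacher', 'a volunteer']:
        _engine.emit(('is', _subj, _compl), 'P')

# _engine.props == [(('is', 'Mary', 'a teacher'), 'P'),
#                   (('is', 'Mary', 'a volunteer'), 'P')]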
def extract(self, relations, index, context, engine, info={}):
    """extract(relations, index, context, engine, info) -> list(str)

    OUTDATED

    This ruleset returns a list of strings, corresponding to the
        return_list value of NounPhraseRuleset.

    Examples:

        * Mary and John
            conj(Mary, John)
            -> return ['John']
    """
    if info['class'] == 'NP':
        logger.debug('ConjRuleset is processing node as NP')

        # TODO: Maybe just return the first element in the list.
        d = NounPhraseRuleset.extract(self, relations, index, context, engine)
        if d['ids_for_preconj'] == []:
            return d['return_list']
    elif info['class'] == 'VP':
        logger.debug('ConjRuleset is processing node as VP')

        d = VerbPhraseRuleset.extract(self, relations, index, context, engine,
                                      info)
        return d
def extract(self, relations, index, context, engine, info=None):
    """extract(relations, index, context, engine, info) -> list(str)

    OUTDATED

    This ruleset returns a list of strings, corresponding to the
        return_list value of NounPhraseRuleset.

    Examples:

        * Mary and John
            conj(Mary, John)
            -> return ['John']
    """
    if not info:
        info = {}

    if info['class'] == 'NP':
        logger.debug('ConjRuleset is processing node as NP')

        # TODO: Maybe just return the first element in the list.
        d = NounPhraseRuleset.extract(self, relations, index, context, engine)
        if d['ids_for_preconj'] == []:
            return d['return_list']
    elif info['class'] == 'VP':
        logger.debug('ConjRuleset is processing node as VP')

        d = VerbPhraseRuleset.extract(self, relations, index, context, engine,
                                      info)
        return d
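# --- Note on the default argument above (sketch, not project code) ---
# Using info=None plus an "if not info" guard avoids Python's mutable
# default pitfall: a default like info={} is created once at definition
# time and shared across all calls. The standalone demo() below is
# hypothetical and only illustrates that behaviour.
def demo(info={}):
    info['calls'] = info.get('calls', 0) + 1
    return info['calls']

assert demo() == 1
assert demo() == 2  # the same dict object was reused across calls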
def extract(self, relations, index, context, engine, info={}):
    if relations[index].tag in ('NN', 'NNS', 'NNP', 'NNPS'):
        this = NounPhraseRuleset.extract(self, relations, index, context,
                                         engine, info)
        for noun in this['return_list']:
            engine.emit((noun,), 'WHAT')
    elif relations[index].tag == 'JJ':
        this = AdjectivalPhraseRuleset.extract(self, relations, index,
                                               context, engine, info)
        for adj in this:
            engine.emit((adj,), 'WHAT')
    else:
        # In case something weird happens, we just emit the word.
        engine.emit((relations[index].word,), 'WHAT')
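# --- Illustrative sketch (not part of the ruleset above) ---
# The extract above branches on the node's POS tag: NN* nodes go through
# NounPhraseRuleset, JJ through AdjectivalPhraseRuleset, and anything else
# falls back to emitting the raw word as a 'WHAT' proposition. The minimal
# relation stub below is hypothetical and only exercises that fallback.
from collections import namedtuple

_ToyRelation = namedtuple('_ToyRelation', ['tag', 'word'])
_relations = [_ToyRelation(tag='UH', word='hello')]  # neither NN* nor JJ

if _relations[0].tag not in ('NN', 'NNS', 'NNP', 'NNPS', 'JJ'):
    _emitted = ((_relations[0].word,), 'WHAT')

# _emitted == (('hello',), 'WHAT')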
def handle_cop_with_np(self, relations, index, context, engine, info):
    """Handle copular verbs with NP complements."""
    subjs = self.process_subj(relations, index, context, engine, info)

    try:
        cop_index = Relation.get_children_with_dep('cop', relations,
                                                   index)[0]
        cop = engine.analyze(relations, cop_index, context + [index])
    except IndexError:
        return {'return_value': None, 'prop_ids': []}
        # print(relations[index])
        # raise

    auxs = self.process_auxs(relations, index, context, engine, info)

    verb = ' '.join([word for word in auxs + [cop] if word is not None])

    self.process_ignorables(relations, index, context, engine, info)

    this = NounPhraseRuleset.extract(self, relations, index, context, engine,
                                     info)
    if not this:
        return {'return_value': None, 'prop_ids': []}

    # TODO: handle cc/conj and preconj.
    complms = this['return_list']

    prop_ids = []
    for subj in subjs['return_list']:
        for compl in complms:
            # engine.emit((verb, subj, relations[index].word))
            prop_id = engine.emit((verb, subj, compl), 'P')
            prop_ids.append(prop_id)

    self.subjs = subjs
    self.auxs = auxs

    return {'return_value': None, 'prop_ids': prop_ids, 'this': this}
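# --- Note on the IndexError guard above (sketch, not project code) ---
# Relation.get_children_with_dep('cop', ...) returns the indices of matching
# children; when the node has no 'cop' child, indexing [0] on the empty
# result raises IndexError, and the handler now returns an empty proposition
# list instead of crashing. The helper below is hypothetical and only mirrors
# that control flow.
def _first_or_none(indices):
    try:
        return indices[0]
    except IndexError:
        return None

assert _first_or_none([7, 9]) == 7
assert _first_or_none([]) is None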