Example #1
    def run(self, key, ctx: Context):
        from sagas.nlu.ruleset_procs import cached_chunks
        from sagas.conf.conf import cf

        logger.debug(f".. check against {key}")
        if key not in ctx.indexes:
            return False

        # lemma = ctx.lemmas[key]
        sents = ctx.sents
        lang = ctx.lang
        chunks = cached_chunks(sents, lang, cf.engine(lang))
        doc = chunks['doc']
        ents = get_entities(sents)

        prt = ctx.indexes[key]
        indexes = get_children_index(doc, prt)
        idx_ent = {
            el['index']: el['entity']
            for el in get_entity_mapping(sents, doc, ents)
        }
        children_ents = [(idx, idx_ent.get(idx, '_')) for idx in indexes]

        result = self.test_ent in {e[1] for e in children_ents}
        if result:
            ctx.add_result(self.name(), 'default', key, idx_ent)
        return result
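Across these examples, cached_chunks is the shared entry point: it parses a sentence (with caching) and returns a dict of analysis artifacts. A minimal sketch of the assumed shape, inferred only from the snippets on this page:

from sagas.nlu.ruleset_procs import cached_chunks
from sagas.conf.conf import cf

sents, lang = 'I like to eat sweet corn.', 'en'
chunks = cached_chunks(sents, lang, cf.engine(lang))

doc = chunks['doc']                    # the parsed document (Examples #3, #13)
verb_domains = chunks['verb_domains']  # per-verb domain dicts (Examples #6, #19)
root_domains = chunks['root_domains']  # root-centric domains (Examples #8, #20)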
Example #2
def vis_domains(sents, lang, domain=None, engine=None, all_subsents=False):
    """
    >>> from sagas.kit.analysis_kit import vis_domains
    >>> sents='What do you think about the war?'
    >>> lang='en'
    >>> domain='subj_domains' # 'verb_domains', 'aux_domains'
    >>> vis_domains(sents, lang, domain)

    :param sents:
    :param lang:
    :param domain: one of 'subj_domains', 'verb_domains', 'aux_domains'; auto-detected when None
    :param engine: parsing engine; defaults to cf.engine(lang)
    :param all_subsents: if True, return one visual per matched domain
    :return:
    """
    from sagas.nlu.ruleset_procs import cached_chunks, get_main_domains
    from sagas.conf.conf import cf

    engine = cf.engine(lang) if engine is None else engine
    if domain is None:
        domain, domains = get_main_domains(sents, lang, engine)
    else:
        chunks = cached_chunks(sents, lang, engine)
        domains = chunks[domain]

    if len(domains) == 0:
        return None

    if not all_subsents:
        el = domains[0]
        return vis_domains_data(domain, el)
    else:
        return [vis_domains_data(domain, el) for el in domains]
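A hypothetical follow-up call, reusing the docstring's own sample (vis_domains_data is assumed to render a single domain):

vis = vis_domains('What do you think about the war?', 'en', 'subj_domains')
# with all_subsents=True, one visual is returned per matched domain
visuals = vis_domains('What do you think about the war?', 'en', 'subj_domains',
                      all_subsents=True)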
Example #3
def vis_doc(sents, lang):
    from sagas.nlu.ruleset_procs import cached_chunks
    from sagas.nlu.uni_remote_viz import display_doc_deps
    from sagas.conf.conf import cf

    chunks = cached_chunks(sents, lang, cf.engine(lang))
    return display_doc_deps(chunks['doc'], None)
Example #4
from typing import List, Set, Text, Union

def check_clause_sub(sents: Text, lang: Text, domain: Text, cla: Text,
                     rel: Text, cats: Union[Text, Set, List]):
    """
    >>> from sagas.nlu.inspector_clauses import check_clause_sub
    >>> check_clause_sub(sents, 'pt', 'verb_domains', 'obl', 'cop', {'be'})
    :param sents:
    :param lang:
    :param domain:
    :param cla:
    :param rel:
    :param cats:
    :return:
    """
    from sagas.nlu.uni_chunks import get_chunk
    from sagas.nlu.ruleset_procs import cached_chunks
    from sagas.conf.conf import cf

    # cla = 'obl', rel = 'cop', cat='be'
    chunks = cached_chunks(sents, lang, cf.engine(lang))
    result = get_chunk(chunks, domain, cla,
                       lambda w: {'rel': w.dependency_relation,
                                  'pos': w.upos.lower(),
                                  'word': f"{w.text}/{w.lemma}"})

    word = next((w['word'] for w in result if w['rel'] == rel), None)
    if word:
        if isinstance(cats, str):
            return check_chain(cats, word, '*', lang)
        return any(check_chain(cat, word, '*', lang) for cat in cats)
    return False
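A hedged usage sketch, reusing the Portuguese sample that appears again in Examples #6 and #19; whether the predicate actually holds depends on the parse and on check_chain's category data:

sents = 'Ela negou ser minha mãe.'  # "She denied being my mother."
if check_clause_sub(sents, 'pt', 'verb_domains', 'obl', 'cop', {'be'}):
    print('the obl clause carries a be-like copula')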
Example #5
    def run(self, key, ctx: Context):
        from jsonpath_ng import parse
        from sagas.nlu.inspector_wordnet import predicate
        from sagas.nlu.ruleset_procs import cached_chunks
        from sagas.conf.conf import cf

        lang = ctx.lang
        domain_name = f"{self.domains}_domains"  # like: 'verb_domains'
        parsers = [parse(normal_path(expr)) for expr in self.paths]
        results = []
        engine = cf.engine(lang) if self.engine is None else self.engine
        chunks = cached_chunks(ctx.sents, lang, engine)
        for chunk in chunks[domain_name]:
            json_data = chunk
            for idx, parser in enumerate(parsers):
                # print([(match.value, str(match.full_path)) for match in parser.find(json_data)])
                word = '/'.join(
                    [match.value for match in parser.find(json_data)])
                pred_r = predicate(self.kind, word, lang, self.pos)
                # tc.emp('yellow' if not pred_r else 'green', f".. {word} is {self.kind}: {pred_r}")
                logger.debug(f".. {word} is {self.kind}: {pred_r}")
                results.append(pred_r)
                if pred_r:
                    ctx.add_result(self.name(),
                                   'default',
                                   f"{self.domains}:{self.paths[idx]}", {
                                       'category': self.kind,
                                       'pos': self.pos,
                                       **word_values(word, lang)
                                   },
                                   delivery_type='sentence')

        logger.debug(f"{results}")
        return any(results) if self.match_method == 'any' else all(results)
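The jsonpath_ng pattern above (parse once, then join the matched values) is easy to check standalone; the sample data here is made up:

from jsonpath_ng import parse

data = {'xcomp': [{'text': 'eat', 'lemma': 'eat'}, {'text': 'corn', 'lemma': 'corn'}]}
parser = parse('xcomp[*].lemma')
word = '/'.join(m.value for m in parser.find(data))
assert word == 'eat/corn'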
Example #6
def test_class_matcher():
    from sagas.nlu.uni_chunks import get_chunk
    from sagas.nlu.ruleset_procs import cached_chunks
    from sagas.conf.conf import cf
    from pampy import match, _
    from dataclasses import dataclass

    @dataclass
    class WordData:
        index: int
        rel: str
        pos: str
        word: str

    # She denied being my mother
    sents = 'Ela negou ser minha mãe.'
    lang = 'pt'
    domain = 'verb_domains'
    chunks = cached_chunks(sents, lang, cf.engine(lang))

    cla = 'obl'
    ana = get_chunk(
        chunks, domain, cla, lambda w: WordData(index=w.index,
                                                rel=w.dependency_relation,
                                                pos=w.upos.lower(),
                                                word=f"{w.text}/{w.lemma}"))
    t_rs = []
    for word_data in ana:
        r = match(word_data,
                  WordData(_, _, 'aux', _), lambda *arg: f"aux: {arg[2]}",
                  WordData(_, 'obl', 'noun', _), lambda *arg: arg,
                  _, None)
        t_rs.append(r)
    assert t_rs == ['aux: ser/ser', None, (5, 'mãe/mãe')]
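The pampy pattern above is easier to read in isolation: each wildcard (_) in a dataclass pattern captures the corresponding field, and the captured values are passed to the action in order. A minimal, self-contained illustration:

from dataclasses import dataclass
from pampy import match, _

@dataclass
class WordData:
    index: int
    rel: str
    pos: str
    word: str

w = WordData(3, 'cop', 'aux', 'ser/ser')
# the three wildcards capture index, rel and word, so arg[2] is the word
r = match(w, WordData(_, _, 'aux', _), lambda *arg: f"aux: {arg[2]}", _, None)
assert r == 'aux: ser/ser'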
Example #7
    def get_domains(self, ctx:Context):
        from sagas.nlu.ruleset_procs import cached_chunks
        from sagas.conf.conf import cf

        # dn = lambda domain: f'{domain}_domains' if domain != 'predicts' else domain
        chunks = cached_chunks(ctx.sents, ctx.lang, cf.engine(ctx.lang))
        domains = chunks[ctx.domain_type]
        return domains
Example #8
    def root_tree(self):
        from sagas.nlu.nlu_tools import vis_tree
        from sagas.nlu.ruleset_procs import cached_chunks
        from sagas.conf.conf import cf

        chunks = cached_chunks(self.meta.sents,
                               source=self.meta.lang,
                               engine=self.meta.engine)
        tc.emp('cyan', f"✁ root tree {self.meta.engine} {'-' * 25}")
        ds = chunks['root_domains'][0]
        vis_tree(ds, self.meta.lang, trans=cf.is_enabled('trans_tree'))
Example #9
def is_noun_desc(ctx: Context, domain):
    sents, lang = ctx.sents, ctx.lang
    chunks = cached_chunks(sents, lang, cf.engine(lang))
    ds = chunks[domain][0]
    # the list-valued entries of a domain dict are its child components
    comps = [k for k, v in ds.items() if isinstance(v, list)]
    logger.debug(f'.. {comps}')
    # a noun descriptor: the head is a NOUN and every child is a modifier or punctuation
    return ds['upos'] == 'NOUN' and \
           all(c.endswith('mod') or c == 'punct' for c in comps)
Example #10
    def run(self, key, ctx: Context):
        from sagas.nlu.ruleset_procs import cached_chunks
        from sagas.conf.conf import cf

        chunks = cached_chunks(ctx.sents, ctx.lang, cf.engine(ctx.lang))
        index = next((x[1] for x in ctx.domains if x[0] == self.part), -1)
        if index != -1:
            rs = self.collect_children(chunks, ctx.lang, index + 1)
            if rs:
                ctx.add_result(self.name(), 'default', self.part, rs)
        return True
Example #11
    def analyse_domains(self, sents, lang, engine=None, domain=None):
        from sagas.nlu.ruleset_procs import cached_chunks, get_main_domains
        from sagas.conf.conf import cf

        engine = cf.engine(lang) if engine is None else engine
        if domain is None:
            domain, domains = get_main_domains(sents, lang, engine)
        else:
            chunks = cached_chunks(sents, lang, engine)
            domains = chunks[domain]
        return domains
Example #12
def get_feats_map(sents, lang, domain, path):
    from sagas.nlu.ruleset_procs import cached_chunks

    domain_name = f'{domain}_domains' if domain != 'predicts' else domain
    chunks = cached_chunks(sents, lang, cf.engine(lang))
    parser = parse(feats_for_path(path))
    results = []
    for chunk in chunks[domain_name]:
        vals = [match.value for match in parser.find(chunk)]
        if vals:
            results.extend([feats_map(val) for val in vals])
    return results
Example #13
    def doc(self, sents, lang='en', engine='stanza'):
        """
        $ python -m sagas.nlu.anal doc 'Nosotros estamos en la escuela.' es stanza
        $ python -m sagas.nlu.anal doc '우리는 사람들을 이해하고 싶어요.' ko

        :param sents:
        :param lang:
        :param engine:
        :return:
        """
        chunks = cached_chunks(sents, source=lang, engine=engine)
        return chunks['doc'].as_json
Example #14
def get_source(sents, lang, domain_type=None) -> Observable:
    from sagas.nlu.ruleset_procs import cached_chunks, get_main_domains
    from sagas.conf.conf import cf
    import rx

    engine = cf.engine(lang)
    if domain_type is None:
        domain_type, domains = get_main_domains(sents, lang, engine)
    else:
        chunks = cached_chunks(sents, lang, engine)
        domains = chunks[domain_type]
    table_rs = []
    for ds in domains:
        flat_table(ds, '', table_rs)
    return rx.of(*table_rs)
Example #15
def ex_chunk(key: Text, cnt: Text, comp: Text, ctx: cla_meta_intf, clo):
    from sagas.nlu.uni_chunks import get_chunk
    from sagas.nlu.ruleset_procs import cached_chunks
    from sagas.conf.conf import cf
    # get_chunk(f'verb_domains', 'xcomp/obj', lambda w: w.upos)
    chunks = cached_chunks(ctx.sents, ctx.lang, cf.engine(ctx.lang))
    domain, path = key.split(':')
    result = get_chunk(chunks,
                       f'{domain}_domains' if domain != 'predicts' else domain,
                       path,
                       clo=clo)
    logger.debug(f"extract chunk: {domain}, {path}, {result}")
    if len(result) > 0:
        ctx.add_result(extractor, comp, key, result)
        return True
    return False
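The 'domain:path' key splits into a domains table and a slash-separated relation path. A sketch of the equivalent direct call (sample sentence and output shape borrowed from Example #18):

from sagas.nlu.uni_chunks import get_chunk
from sagas.nlu.ruleset_procs import cached_chunks
from sagas.conf.conf import cf

sents, lang = 'I like to eat sweet corn.', 'en'
chunks = cached_chunks(sents, lang, cf.engine(lang))
# key 'verb:xcomp/obj' -> domain table 'verb_domains', path 'xcomp/obj'
rs = get_chunk(chunks, 'verb_domains', 'xcomp/obj',
               clo=lambda w: (w.text, w.upos.lower()))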
Example #16
    def run(self, key: Text, ctx: Context) -> bool:
        from sagas.nlu.predicts import predicate
        from sagas.nlu.operators import ud

        final_rs = []

        sents, lang = ctx.sents, ctx.lang
        chunks = cached_chunks(sents, lang, cf.engine(lang))
        domains = chunks[self.domain]
        for el in domains:
            # logger.debug(f"`{el['lemma']}` >> *{el['dc']['lemma']}*")
            # r1 = predicate(el, ud.__text('will') >> [ud.nsubj('what'), ud.dc_cat('weather')], lang)
            rs: List[Any] = predicate(el, self.checker, lang)
            # r2=predicate(el, ud.__cat('be') >> [ud.nsubj('what'), ud.dc_cat('animal/object')], lang)
            result = all(r[0] for r in rs)
            final_rs.append(result)
            logger.debug(f'{[r[0] for r in rs]}, {result}')
        return any(final_rs)
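The commented-out lines hint at how a checker is composed. A hypothetical instance built only from those comments; the ud operators and the >> chaining are taken verbatim from them and are not verified against the library:

from sagas.nlu.operators import ud

# head word of category 'be', with a 'what' subject and an animal/object complement
checker = ud.__cat('be') >> [ud.nsubj('what'), ud.dc_cat('animal/object')]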
Example #17
    def has_pos_in_part(part: Text, pos: Union[list, str]):
        from sagas.nlu.uni_chunks import get_chunk
        from sagas.nlu.ruleset_procs import cached_chunks
        from sagas.conf.conf import cf

        chunks = cached_chunks(ctx.sents, ctx.lang, cf.engine(ctx.lang))
        domain, path = part.split(':')
        result = get_chunk(
            chunks,
            f'{domain}_domains' if domain != 'predicts' else domain, path,
            lambda w: (w.upos.lower(), w.text))
        if isinstance(pos, str):
            pos = [pos]
        succ = False
        for el in result:
            if el[0] in pos:
                ctx.add_result(self.name(), f'has_pos_{"_or_".join(pos)}',
                               part, el[1])
                succ = True
        return succ
Example #18
    def chunks(self, sents, lang, domain, path):
        """
        $ python -m sagas.nlu.extractor_cli chunks 'I like to eat sweet corn.' en verb 'xcomp/obj'
        $ python -m sagas.nlu.extractor_cli chunks 'A casa tem dezenove quartos.' pt verb 'obj'
            ☇ [('dezenove', 'num'), ('quartos', 'noun')]

        :param sents:
        :param lang:
        :param domain:
        :param path:
        :return:
        """
        from sagas.nlu.uni_chunks import get_chunk
        from sagas.nlu.ruleset_procs import cached_chunks
        from sagas.conf.conf import cf

        chunks = cached_chunks(sents, lang, cf.engine(lang))
        result = get_chunk(
            chunks, f'{domain}_domains' if domain != 'predicts' else domain,
            path, lambda w: (w.text, w.upos.lower()))
        print(result)
Example #19
def test_chunk_matcher():
    from sagas.nlu.uni_chunks import get_chunk
    from sagas.nlu.ruleset_procs import cached_chunks
    from sagas.conf.conf import cf
    from pampy import match, _

    # She denied being my mother
    sents = 'Ela negou ser minha mãe.'
    lang = 'pt'
    domain = 'verb_domains'
    chunks = cached_chunks(sents, lang, cf.engine(lang))

    cla = 'obl'
    raw = get_chunk(
        chunks, domain, cla, lambda w: {
            'rel': w.dependency_relation,
            'pos': w.upos.lower(),
            'word': f"{w.text}/{w.lemma}"
        })
    rs = {e['rel']: e for e in raw}
    r = match(rs, {
        'cop': {
            'word': _
        },
        'obl': {
            'pos': 'noun',
            'word': _
        }
    }, lambda *arg: arg, _, "anything else")
    assert r == ('ser/ser', 'mãe/mãe')

    r = match(rs, {
        _: {
            'pos': 'aux'
        },
        'obl': {
            'pos': 'noun',
            'word': _
        }
    }, lambda *arg: arg, _, "anything else")
    assert r == ('cop', 'mãe/mãe')
Example #20
def domains_as_tree(sents, lang, engine='stanza', domain='root_domains'):
    # `treeing` and `importer` are assumed module-level helpers: `treeing`
    # reshapes a domain dict into a nested node dict, and `importer` turns
    # that dict into a tree (e.g. an anytree DictImporter).
    chunks = cached_chunks(sents, source=lang, engine=engine)
    root = chunks[domain]
    ds = treeing(root[0])
    return importer.import_(ds)
Example #21
def request_intents(sents, lang, domain):
    chunks = cached_chunks(sents, lang, cf.engine(lang))
    domains = chunks[domain]
    return get_intents(domains, lang)