Example #1
import os

# NB: the import paths below are assumed from the educe library layout;
# TRAIN_FOLDER, TEST_FOLDER and dump_disdep_files are module-level names
# from the surrounding code.
from educe.rst_dt.corpus import Reader
from educe.rst_dt.deptree import RstDepTree


def dump_dep_rstdt(corpus_dir, out_dir, nary_enc):
    """Convert and dump the RST-DT corpus as dependency trees."""
    # convert and dump RST trees from the train, then the test, section
    for folder in (TRAIN_FOLDER, TEST_FOLDER):
        section_dir = os.path.join(corpus_dir, folder)
        if not os.path.isdir(section_dir):
            raise ValueError('No such folder: {}'.format(section_dir))
        reader = Reader(section_dir)
        ctrees = reader.slurp()
        dtrees = {doc_name: RstDepTree.from_rst_tree(rst_tree,
                                                     nary_enc=nary_enc)
                  for doc_name, rst_tree in ctrees.items()}
        dump_disdep_files(dtrees.values(),
                          os.path.join(out_dir,
                                       os.path.basename(section_dir)))
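
A call-site sketch for reference; the paths below are placeholders, not part
of the source:

# hypothetical locations for the RST-DT 'data' folder and the output dir
dump_dep_rstdt('/path/to/rst_discourse_treebank/data',
               '/tmp/rst-dt-disdep',
               nary_enc='chain')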
Example #2
import pandas as pd

# NB: the import paths below are assumed from the educe library layout;
# RELMAP_112_18_FILE is a module-level constant defined elsewhere.
from educe.rst_dt.corpus import RstRelationConverter
from educe.rst_dt.deptree import RstDepTree
from educe.rst_dt.pseudo_relations import merge_same_units, rewrite_pseudo_rels


def read_deps(corpus,
              section='all',
              nary_enc='chain',
              rew_pseudo_rels=False,
              mrg_same_units=False):
    """Collect dependencies from the corpus.

    Parameters
    ----------
    corpus : dict from str to dict from FileId to RSTTree
        Corpus of RST c-trees indexed by {'train', 'test'} then FileId.
    section : str, one of {'train', 'test', 'all'}
        Section of interest in the RST-DT.
    nary_enc : str, one of {'tree', 'chain'}
        Encoding of n-ary relations used in the c-to-d conversion.
    rew_pseudo_rels : boolean, defaults to False
        If True, rewrite pseudo-relations; see
        `educe.rst_dt.pseudo_relations`.
    mrg_same_units : boolean, defaults to False
        If True, merge fragmented EDUs; see
        `educe.rst_dt.pseudo_relations`.

    Returns
    -------
    edu_df : pandas.DataFrame
        Table of EDUs read from the corpus.
    dep_df : pandas.DataFrame
        Table of dependencies read from the corpus.
    """
    # experimental: rewrite pseudo-relations
    if rew_pseudo_rels:
        for sec_name, sec_corpus in corpus.items():
            corpus[sec_name] = {
                doc_id: rewrite_pseudo_rels(doc_id, rst_ctree)
                for doc_id, rst_ctree in sec_corpus.items()
            }
    if mrg_same_units:
        for sec_name, sec_corpus in corpus.items():
            corpus[sec_name] = {
                doc_id: merge_same_units(doc_id, rst_ctree)
                for doc_id, rst_ctree in sec_corpus.items()
            }
    # convert to d-trees, collect dependencies
    edus = []
    deps = []
    for sec_name, sec_corpus in corpus.items():
        if section != 'all' and sec_name != section:
            continue  # restrict to the requested section
        for doc_id, rst_ctree in sorted(sec_corpus.items()):
            doc_name = doc_id.doc
            doc_text = rst_ctree.text()
            # DIRTY infer (approximate) sentence and paragraph indices
            # from newlines in the text (\n and \n\n)
            sent_idx = 0
            para_idx = 0
            # end DIRTY
            rst_dtree = RstDepTree.from_rst_tree(rst_ctree, nary_enc=nary_enc)
            for dep_idx, (edu, hd_idx, lbl, nuc, hd_order) in enumerate(
                    zip(rst_dtree.edus[1:], rst_dtree.heads[1:],
                        rst_dtree.labels[1:], rst_dtree.nucs[1:],
                        rst_dtree.ranks[1:]),
                    start=1):
                char_beg = edu.span.char_start
                char_end = edu.span.char_end
                edus.append((sec_name, doc_name, dep_idx, char_beg, char_end,
                             sent_idx, para_idx))
                deps.append((doc_name, dep_idx, hd_idx, lbl, nuc, hd_order))
                # DIRTY search for paragraph or sentence breaks in the
                # text of the EDU *plus the next three characters* (yerk)
                edu_txt_plus = doc_text[char_beg:char_end + 3]
                if '\n\n' in edu_txt_plus:
                    para_idx += 1
                    sent_idx += 1  # sometimes wrong; to be fixed
                elif '\n' in edu_txt_plus:
                    sent_idx += 1
                # end DIRTY
    # turn into DataFrame
    edu_df = pd.DataFrame(edus,
                          columns=[
                              'section', 'doc_name', 'dep_idx', 'char_beg',
                              'char_end', 'sent_idx', 'para_idx'
                          ])
    dep_df = pd.DataFrame(
        deps,
        columns=['doc_name', 'dep_idx', 'hd_idx', 'rel', 'nuc', 'hd_order'])
    # additional columns
    # * attachment length in EDUs
    dep_df['len_edu'] = dep_df['dep_idx'] - dep_df['hd_idx']
    dep_df['len_edu_abs'] = abs(dep_df['len_edu'])
    # * attachment length, in sentences and paragraphs
    if False:  # disabled: too slow for now, see the TODO below
        # TODO rewrite in a pandas-ic manner; my previous attempts have
        # failed but I think I got pretty close
        # NB: the current implementation is *extremely* slow: 155 seconds
        # on my laptop for the RST-DT, just for this (minor) computation
        len_sent = []
        len_para = []
        for _, row in dep_df[['doc_name', 'dep_idx', 'hd_idx']].iterrows():
            edu_dep = edu_df[(edu_df['doc_name'] == row['doc_name'])
                             & (edu_df['dep_idx'] == row['dep_idx'])]
            if row['hd_idx'] == 0:
                # {sent,para}_idx + 1 for dependents of the fake root
                lsent = edu_dep['sent_idx'].values[0] + 1
                lpara = edu_dep['para_idx'].values[0] + 1
            else:
                edu_hd = edu_df[(edu_df['doc_name'] == row['doc_name'])
                                & (edu_df['dep_idx'] == row['hd_idx'])]
                lsent = (edu_dep['sent_idx'].values[0] -
                         edu_hd['sent_idx'].values[0])
                lpara = (edu_dep['para_idx'].values[0] -
                         edu_hd['para_idx'].values[0])
            len_sent.append(lsent)
            len_para.append(lpara)
        dep_df['len_sent'] = pd.Series(len_sent)
        dep_df['len_sent_abs'] = abs(dep_df['len_sent'])
        dep_df['len_para'] = pd.Series(len_para)
        dep_df['len_para_abs'] = abs(dep_df['len_para'])
    # * class of relation (FIXME we need to handle interaction with
    #   rewrite_pseudo_rels)
    rel_conv = RstRelationConverter(RELMAP_112_18_FILE).convert_label
    dep_df['rel_class'] = dep_df['rel'].apply(rel_conv)
    # * boolean indicator for pseudo-relations; NB: the 'Style-' prefix
    # can only appear if rew_pseudo_rels is True (otherwise no occurrence)
    dep_df['pseudo_rel'] = (
        (dep_df['rel'].str.startswith('Style')) |
        (dep_df['rel'].str.endswith('Same-Unit')) |
        (dep_df['rel'].str.endswith('TextualOrganization')))
    return edu_df, dep_df
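
A minimal usage sketch, assuming `corpus` has already been loaded as a dict of
the form {'train': {FileId: RSTTree, ...}, 'test': {...}}; the aggregation at
the end is just one illustrative query:

edu_df, dep_df = read_deps(corpus, section='train', nary_enc='chain')
# e.g. mean attachment length (in EDUs) per coarse relation class
print(dep_df.groupby('rel_class')['len_edu_abs'].mean())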
Example #3
                                      doc_name + '.out.xml')
            core_reader = PreprocessingSource()
            core_reader.read(core_fname, suffix='')
            corenlp_doc = read_corenlp_result(None, core_reader)
            core_toks = corenlp_doc.tokens
            core_toks_beg = [x.span.char_start for x in core_toks]
            core_toks_end = [x.span.char_end for x in core_toks]

            # PTB stuff
            # * create DocumentPlus (adapted from educe.rst_dt.corpus)
            rst_context = rst_tree.label().context
            ptb_docp = DocumentPlus(key, doc_name, rst_context)
            # * attach EDUs (yerk)
            # FIXME we currently get them via an RstDepTree created from
            # the original RSTTree, so as to get the left padding EDU
            rst_dtree = RstDepTree.from_rst_tree(rst_tree)
            ptb_docp.edus = rst_dtree.edus
            # * setup a PtbParser (re-yerk)
            ptb_parser = PtbParser(PTB_DIR)
            ptb_parser.tokenize(ptb_docp)
            # get PTB toks; skip the left padding token
            ptb_toks = ptb_docp.tkd_tokens[1:]
            ptb_toks_beg = ptb_docp.toks_beg[1:]
            ptb_toks_end = ptb_docp.toks_end[1:]

            # compare!
            core2ptb_beg = np.searchsorted(ptb_toks_beg, core_toks_beg,
                                           side='left')
            core2ptb_end = np.searchsorted(ptb_toks_end, core_toks_end,
                                           side='right') - 1
            # TODO maybe use np.diff?
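
The two searchsorted calls align the token sequences by character offsets:
side='left' over the begin offsets finds, for each CoreNLP token, the first
PTB token starting at or after it, and side='right' minus one over the end
offsets finds the last PTB token ending at or before it, so each CoreNLP
token maps onto an inclusive range of PTB tokens. A self-contained sketch
with made-up offsets:

import numpy as np

# hypothetical character spans; PTB splits the second CoreNLP token in two
ptb_toks_beg = np.array([0, 4, 6, 10])
ptb_toks_end = np.array([3, 6, 9, 14])
core_toks_beg = [0, 4, 10]
core_toks_end = [3, 9, 14]

core2ptb_beg = np.searchsorted(ptb_toks_beg, core_toks_beg, side='left')
core2ptb_end = np.searchsorted(ptb_toks_end, core_toks_end, side='right') - 1
# core token i covers PTB tokens core2ptb_beg[i]..core2ptb_end[i] (inclusive)
# here: core2ptb_beg == [0, 1, 3] and core2ptb_end == [0, 2, 3]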