Example #1
File: __main__.py Project: ajenhl/tacl
def excise(args, parser):
    logger = colorlog.getLogger('tacl')
    tokenizer = utils.get_tokenizer(args)
    corpus = tacl.Corpus(args.corpus, tokenizer)
    with open(args.ngrams) as fh:
        ngrams = [line.strip() for line in fh.readlines()]
    # It is no issue if the output directory already exists; it is a
    # reasonable use case to create an excised corpus from multiple
    # excise operations.
    try:
        os.mkdir(args.output)
    except FileExistsError:
        pass
    for work in args.works:
        # It is worth warning about writing into existing work
        # directories, since that might be unintended. Do not prevent
        # it, however, since it is a reasonable use case.
        try:
            os.mkdir(os.path.join(args.output, work))
        except FileExistsError:
            logger.warning(constants.EXCISE_OVERWRITE_WORK_WARNING, work)
        for witness in corpus.get_witnesses(work):
            path = os.path.join(args.output, witness.get_filename())
            content = witness.excise(ngrams, args.replacement)
            with open(path, 'w') as fh:
                fh.write(content)
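Each handler in tacl's __main__.py takes the parsed args plus the parser itself. A minimal sketch of how such a handler is typically wired up with argparse sub-parsers (the wiring below is illustrative, not copied from tacl; the positional argument names are assumptions):

import argparse

def excise(args, parser):
    pass  # handler body as in the example above

def generate_parser():
    parser = argparse.ArgumentParser(prog='tacl')
    subparsers = parser.add_subparsers(title='subcommands')
    excise_parser = subparsers.add_parser('excise')
    excise_parser.add_argument('corpus')
    excise_parser.add_argument('ngrams')
    excise_parser.add_argument('replacement')
    excise_parser.add_argument('output')
    excise_parser.add_argument('works', nargs='+')
    # Record the handler on the sub-parser so main() can dispatch to it.
    excise_parser.set_defaults(func=excise)
    return parser

def main():
    parser = generate_parser()
    args = parser.parse_args()
    args.func(args, parser)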
Example #2
def highlight_text(args, parser):
    """Outputs the result of highlighting a text."""
    tokenizer = utils.get_tokenizer(args)
    corpus = utils.get_corpus(args)
    output_dir = os.path.abspath(args.output)
    if os.path.exists(output_dir):
        parser.exit(status=3,
                    message='Output directory already exists, '
                    'aborting.\n')
    os.makedirs(output_dir, exist_ok=True)
    if args.ngrams:
        if args.label is None or len(args.label) != len(args.ngrams):
            parser.error('There must be as many labels as there are files '
                         'of n-grams')
        report = tacl.NgramHighlightReport(corpus, tokenizer)
        ngrams = []
        for ngram_file in args.ngrams:
            ngrams.append(utils.get_ngrams(ngram_file))
        minus_ngrams = []
        if args.minus_ngrams:
            minus_ngrams = utils.get_ngrams(args.minus_ngrams)
        report.generate(args.output, args.base_name, ngrams, args.label,
                        minus_ngrams)
    else:
        report = tacl.ResultsHighlightReport(corpus, tokenizer)
        report.generate(args.output, args.base_name, args.results)
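The length check pairs each file of n-grams with one label, which implies both options accept multiple values. A self-contained sketch of that pairing check, assuming action='append' style options (the declarations are illustrative, not tacl's actual ones):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--ngrams', action='append', metavar='NGRAMS')
parser.add_argument('--label', action='append', metavar='LABEL')
args = parser.parse_args(['--ngrams', 'a.txt', '--ngrams', 'b.txt',
                          '--label', 'A', '--label', 'B'])
if args.label is None or len(args.label) != len(args.ngrams):
    parser.error('There must be as many labels as there are files '
                 'of n-grams')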
Example #3
def main():
    parser = argparse.ArgumentParser(description=DESCRIPTION,
                                     epilog=EPILOG,
                                     formatter_class=ParagraphFormatter)
    utils.add_db_arguments(parser)
    utils.add_tokenizer_argument(parser)
    utils.add_query_arguments(parser)
    parser.add_argument('parent',
                        help=PARENT_LABEL_HELP,
                        metavar='PARENT_LABEL')
    parser.add_argument('child', help=CHILD_LABEL_HELP, metavar='CHILD_LABEL')
    parser.add_argument('unrelated',
                        help=UNRELATED_LABEL_HELP,
                        metavar='UNRELATED_LABEL')
    parser.add_argument('max_works',
                        help=MAX_WORKS_HELP,
                        metavar='MAXIMUM',
                        type=int)
    parser.add_argument('output_dir',
                        help=OUTPUT_DIR_HELP,
                        metavar='DIRECTORY')
    args = parser.parse_args()
    catalogue = utils.get_catalogue(args)
    data_store = utils.get_data_store(args)
    tokenizer = utils.get_tokenizer(args)
    try:
        test = taclextra.paternity.PaternityTest(data_store, catalogue,
                                                 tokenizer, args.parent,
                                                 args.child, args.unrelated,
                                                 args.max_works,
                                                 args.output_dir)
        test.process()
    except Exception as e:
        parser.error(e)
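A note on the error handling: argparse's parser.error() writes the usage line and the message to stderr and exits with status 2, so the try/except turns any failure inside the test into a clean command-line error. Passing str(e), as some of the other handlers here do, is the tidier spelling, though error() formats the exception object itself just as well. A tiny demonstration:

import argparse

parser = argparse.ArgumentParser(prog='demo')
try:
    raise ValueError('catalogue is missing a label')
except Exception as e:
    parser.error(str(e))  # prints 'usage: demo ...' plus the message, exits 2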
Example #4
def lifetime_report(args, parser):
    """Generates a lifetime report."""
    catalogue = utils.get_catalogue(args)
    tokenizer = utils.get_tokenizer(args)
    results = tacl.Results(args.results, tokenizer)
    output_dir = os.path.abspath(args.output)
    os.makedirs(output_dir, exist_ok=True)
    report = tacl.LifetimeReport()
    report.generate(output_dir, catalogue, results, args.label)
Example #5
File: __main__.py Project: ajenhl/tacl
def align_results(args, parser):
    if args.results == '-':
        results = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8',
                                   newline='')
    else:
        results = open(args.results, 'r', encoding='utf-8', newline='')
    tokenizer = utils.get_tokenizer(args)
    corpus = tacl.Corpus(args.corpus, tokenizer)
    report = tacl.SequenceReport(corpus, tokenizer, results)
    report.generate(args.output, args.minimum)
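The '-' convention (read results from standard input when the filename is '-') recurs in several of these handlers; wrapping sys.stdin.buffer in a TextIOWrapper with newline='' yields a UTF-8 text stream that the csv module can consume safely. A reusable sketch of the pattern (the helper name is illustrative, not part of tacl):

import io
import sys

def open_results(path):
    """Return a UTF-8 text stream for path, or stdin when path is '-'."""
    if path == '-':
        return io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8',
                                newline='')
    return open(path, 'r', encoding='utf-8', newline='')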
Example #6
File: __main__.py Project: ajenhl/tacl
def normalise_corpus(args, parser):
    """Outputs a normalised version of a corpus."""
    corpus = utils.get_corpus(args)
    tokenizer = utils.get_tokenizer(args)
    output_dir = os.path.abspath(args.output)
    if os.path.exists(output_dir):
        parser.exit(status=3, message='Output directory already exists, '
                    'aborting.\n')
    mapping = normaliser.VariantMapping(args.mapping, tokenizer)
    corpus.normalise(mapping, output_dir)
Example #7
File: __main__.py Project: ajenhl/tacl
def ngram_diff(args, parser):
    """Outputs the results of performing a diff query."""
    store = utils.get_data_store(args)
    corpus = utils.get_corpus(args)
    catalogue = utils.get_catalogue(args)
    tokenizer = utils.get_tokenizer(args)
    store.validate(corpus, catalogue)
    if args.asymmetric:
        store.diff_asymmetric(catalogue, args.asymmetric, tokenizer,
                              sys.stdout)
    else:
        store.diff(catalogue, tokenizer, sys.stdout)
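store.validate(corpus, catalogue) presumably confirms that every catalogued witness has up-to-date n-grams in the database before the query runs, so a stale data store fails fast instead of producing misleading results. A hypothetical invocation (argument order and spelling illustrative):

    tacl diff cbeta.db corpus/ catalogue.txt > diff-results.csv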
Example #8
def main():
    parser = argparse.ArgumentParser(description=DESCRIPTION)
    utils.add_db_arguments(parser)
    utils.add_tokenizer_argument(parser)
    utils.add_query_arguments(parser)
    parser.add_argument('output', help=HELP_OUTPUT, metavar='DIRECTORY')
    args = parser.parse_args()
    data_store = utils.get_data_store(args)
    catalogue = utils.get_catalogue(args)
    tokenizer = utils.get_tokenizer(args)
    output_dir = os.path.abspath(args.output)
    reporter = lifetime.LifetimeReporter(data_store, catalogue, tokenizer,
                                         output_dir)
    reporter.process()
Example #9
def main():
    parser = generate_parser()
    args = parser.parse_args()
    if hasattr(args, 'verbose'):
        utils.configure_logging(args.verbose, logger)
    store = utils.get_data_store(args)
    corpus = utils.get_corpus(args)
    catalogue = utils.get_catalogue(args)
    tokenizer = utils.get_tokenizer(args)
    try:
        check_catalogue(catalogue, args.label)
    except Exception as e:
        parser.error(str(e))
    store.validate(corpus, catalogue)
    output_dir = os.path.abspath(args.output)
    if os.path.exists(output_dir):
        logger.warning('Output directory already exists; any results therein '
                       'will be reused rather than regenerated.')
    os.makedirs(output_dir, exist_ok=True)
    report = jitc.JitCReport(store, corpus, tokenizer)
    report.generate(output_dir, catalogue, args.label)
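The hasattr(args, 'verbose') guard lets the same main() run whether or not a verbosity option was registered on the parser. A minimal illustrative version of such a helper (the real utils.configure_logging may differ):

import logging

def configure_logging(verbose, logger):
    """Map repeated -v options to increasingly chatty log levels."""
    levels = {0: logging.WARNING, 1: logging.INFO}
    level = levels.get(verbose or 0, logging.DEBUG)
    handler = logging.StreamHandler()
    handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
    logger.addHandler(handler)
    logger.setLevel(level)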
Example #10
def main():
    parser = argparse.ArgumentParser(description=DESCRIPTION,
                                     epilog=EPILOG,
                                     formatter_class=ParagraphFormatter)
    parser.add_argument('--min_size',
                        default=1,
                        help=MINIMUM_HELP,
                        metavar='MINIMUM',
                        type=int)
    parser.add_argument('--max_size',
                        default=10,
                        help=MAXIMUM_HELP,
                        metavar='MAXIMUM',
                        type=int)
    utils.add_common_arguments(parser)
    utils.add_db_arguments(parser)
    utils.add_corpus_arguments(parser)
    utils.add_query_arguments(parser)
    parser.add_argument('output_dir',
                        help='Path to output directory',
                        metavar='DIRECTORY')
    parser.add_argument('tracker_path',
                        help='Path to tracking file',
                        metavar='TRACKING')
    args = parser.parse_args()
    logger = logging.getLogger('taclextra')
    if hasattr(args, 'verbose'):
        utils.configure_logging(args.verbose, logger)
    corpus = utils.get_corpus(args)
    if args.db == 'memory':
        data_store = None
    else:
        data_store = utils.get_data_store(args)
    tokenizer = utils.get_tokenizer(args)
    catalogue = utils.get_catalogue(args)
    pi = paired_intersector.PairedIntersector(data_store, corpus, tokenizer,
                                              catalogue, args.output_dir,
                                              args.tracker_path, args.min_size,
                                              args.max_size)
    pi.intersect_all()
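Note the special case on the database argument: when it is the literal string 'memory', no persistent data store is opened and PairedIntersector receives None, presumably so it can build a throwaway in-memory store for each pairwise intersection rather than reusing one on disk.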
Example #11
def generate_statistics(args, parser):
    corpus = utils.get_corpus(args)
    tokenizer = utils.get_tokenizer(args)
    report = tacl.StatisticsReport(corpus, tokenizer, args.results)
    report.generate_statistics()
    report.csv(sys.stdout)
Example #12
File: __main__.py Project: ajenhl/tacl
def results(args, parser):
    if args.results == '-':
        results_fh = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8',
                                      newline='')
    else:
        results_fh = open(args.results, 'r', encoding='utf-8', newline='')
    tokenizer = utils.get_tokenizer(args)
    results = tacl.Results(results_fh, tokenizer)
    if args.extend:
        corpus = tacl.Corpus(args.extend, tokenizer)
        results.extend(corpus)
    if args.bifurcated_extend:
        if not args.bifurcated_extend_size:
            parser.error('The bifurcated extend option requires that the '
                         '--max-be-count option also be supplied')
        corpus = tacl.Corpus(args.bifurcated_extend, tokenizer)
        results.bifurcated_extend(corpus, args.bifurcated_extend_size)
    if args.denormalised_corpus and args.denormalise_mapping:
        unnormalised_corpus = tacl.Corpus(args.denormalised_corpus, tokenizer)
        mapping = tacl.VariantMapping(args.denormalise_mapping, tokenizer)
        results.denormalise(unnormalised_corpus, mapping)
    elif args.denormalised_corpus or args.denormalise_mapping:
        parser.error('Both --denormalised-corpus and --denormalise-mapping '
                     'must be specified in order to denormalise results')
    if args.reduce:
        results.reduce()
    if args.reciprocal:
        results.reciprocal_remove()
    if args.excise:
        results.excise(args.excise)
    if args.zero_fill:
        corpus = tacl.Corpus(args.zero_fill, tokenizer)
        results.zero_fill(corpus)
    if args.ngrams:
        with open(args.ngrams, encoding='utf-8') as fh:
            ngrams = fh.read().split()
        results.prune_by_ngram(ngrams)
    label = args.label or None
    if args.min_works or args.max_works:
        results.prune_by_work_count(args.min_works, args.max_works, label)
    if args.min_size or args.max_size:
        results.prune_by_ngram_size(args.min_size, args.max_size)
    if args.min_count or args.max_count:
        results.prune_by_ngram_count(args.min_count, args.max_count, label)
    if args.min_count_work or args.max_count_work:
        results.prune_by_ngram_count_per_work(args.min_count_work,
                                              args.max_count_work, label)
    if args.remove:
        results.remove_label(args.remove)
    if args.relabel:
        catalogue = tacl.Catalogue()
        catalogue.load(args.relabel)
        results.relabel(catalogue)
    if args.sort:
        results.sort()
    # Run format-changing operations last.
    if args.add_label_count:
        results.add_label_count()
    if args.add_label_work_count:
        results.add_label_work_count()
    if args.group_by_ngram:
        catalogue = tacl.Catalogue()
        catalogue.load(args.group_by_ngram)
        results.group_by_ngram(catalogue.ordered_labels)
    if args.group_by_witness:
        results.group_by_witness()
    if args.collapse_witnesses:
        results.collapse_witnesses()
    results.csv(sys.stdout)
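The operations above run in a fixed order regardless of the order their options appear on the command line: extending and pruning first, then relabelling and sorting, with the format-changing operations (label counts, grouping, collapsing witnesses) last, since those change the CSV shape the earlier operations expect. A hypothetical invocation chaining two pruning steps (option spellings illustrative):

    tacl results --reduce --reciprocal results.csv > pruned.csv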
Example #13
def supplied_diff(args, parser):
    labels = args.labels
    results = args.supplied
    store = utils.get_data_store(args, must_exist=False)
    tokenizer = utils.get_tokenizer(args)
    store.diff_supplied(results, labels, tokenizer, sys.stdout)
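Here the data store is opened with must_exist=False: a supplied diff runs against user-supplied results files rather than n-grams generated earlier, so presumably the database file does not need to exist beforehand.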