def main():
    """Assemble the command-line interface for the non-projective CRF
    dependency parser and hand control to the shared ``parse`` driver.

    Three subcommands are registered under ``mode``: ``train``,
    ``evaluate`` and ``predict``.
    """
    parser = argparse.ArgumentParser(description='Create Non-projective CRF Dependency Parser.')
    parser.set_defaults(Parser=CRFNPDependencyParser)
    # Global flag shared by every subcommand.
    parser.add_argument('--mbr', action='store_true', help='whether to use MBR decoding')
    cmds = parser.add_subparsers(title='Commands', dest='mode')

    # --- train -----------------------------------------------------------
    sub = cmds.add_parser('train', help='Train a parser.')
    sub.add_argument('--feat', '-f', choices=['tag', 'char', 'bert'], help='choices of additional features')
    sub.add_argument('--build', '-b', action='store_true', help='whether to build the model first')
    sub.add_argument('--punct', action='store_true', help='whether to include punctuation')
    sub.add_argument('--max-len', type=int, help='max length of the sentences')
    sub.add_argument('--buckets', default=32, type=int, help='max num of buckets to use')
    sub.add_argument('--train', default='data/ptb/train.conllx', help='path to train file')
    sub.add_argument('--dev', default='data/ptb/dev.conllx', help='path to dev file')
    sub.add_argument('--test', default='data/ptb/test.conllx', help='path to test file')
    sub.add_argument('--embed', default='data/glove.6B.100d.txt', help='path to pretrained embeddings')
    sub.add_argument('--unk', default='unk', help='unk token in pretrained embeddings')
    sub.add_argument('--n-embed', default=100, type=int, help='dimension of embeddings')
    sub.add_argument('--bert', default='bert-base-cased', help='which bert model to use')

    # --- evaluate --------------------------------------------------------
    sub = cmds.add_parser('evaluate', help='Evaluate the specified parser and dataset.')
    sub.add_argument('--punct', action='store_true', help='whether to include punctuation')
    sub.add_argument('--buckets', default=8, type=int, help='max num of buckets to use')
    sub.add_argument('--data', default='data/ptb/test.conllx', help='path to dataset')

    # --- predict ---------------------------------------------------------
    sub = cmds.add_parser('predict', help='Use a trained parser to make predictions.')
    sub.add_argument('--prob', action='store_true', help='whether to output probs')
    sub.add_argument('--buckets', default=8, type=int, help='max num of buckets to use')
    sub.add_argument('--data', default='data/ptb/test.conllx', help='path to dataset')
    sub.add_argument('--pred', default='pred.conllx', help='path to predicted result')

    parse(parser)
def main():
    """Build the CLI for the biaffine semantic dependency parser and
    dispatch to the shared ``parse`` driver.

    Registers the ``train``, ``evaluate`` and ``predict`` subcommands
    under ``mode``.
    """
    parser = argparse.ArgumentParser(description='Create Biaffine Semantic Dependency Parser.')
    parser.set_defaults(Parser=BiaffineSemanticDependencyParser)
    cmds = parser.add_subparsers(title='Commands', dest='mode')

    # --- train -----------------------------------------------------------
    sub = cmds.add_parser('train', help='Train a parser.')
    sub.add_argument('--feat', '-f', choices=['tag', 'char', 'lemma', 'bert'], nargs='+', help='features to use')
    sub.add_argument('--build', '-b', action='store_true', help='whether to build the model first')
    sub.add_argument('--encoder', choices=['lstm', 'bert'], default='lstm', help='encoder to use')
    sub.add_argument('--max-len', type=int, help='max length of the sentences')
    sub.add_argument('--buckets', default=32, type=int, help='max num of buckets to use')
    sub.add_argument('--train', default='data/sdp/DM/train.conllu', help='path to train file')
    sub.add_argument('--dev', default='data/sdp/DM/dev.conllu', help='path to dev file')
    sub.add_argument('--test', default='data/sdp/DM/test.conllu', help='path to test file')
    sub.add_argument('--embed', default='data/glove.6B.100d.txt', help='path to pretrained embeddings')
    sub.add_argument('--unk', default='unk', help='unk token in pretrained embeddings')
    sub.add_argument('--n-embed', default=100, type=int, help='dimension of embeddings')
    sub.add_argument('--n-embed-proj', default=125, type=int, help='dimension of projected embeddings')
    sub.add_argument('--bert', default='bert-base-cased', help='which BERT model to use')

    # --- evaluate --------------------------------------------------------
    sub = cmds.add_parser('evaluate', help='Evaluate the specified parser and dataset.')
    sub.add_argument('--buckets', default=8, type=int, help='max num of buckets to use')
    sub.add_argument('--data', default='data/sdp/DM/test.conllu', help='path to dataset')

    # --- predict ---------------------------------------------------------
    sub = cmds.add_parser('predict', help='Use a trained parser to make predictions.')
    sub.add_argument('--buckets', default=8, type=int, help='max num of buckets to use')
    sub.add_argument('--data', default='data/sdp/DM/test.conllu', help='path to dataset')
    sub.add_argument('--pred', default='pred.conllu', help='path to predicted result')
    sub.add_argument('--prob', action='store_true', help='whether to output probs')

    parse(parser)
def main():
    """Build the CLI for the variational-inference semantic dependency
    parser and dispatch to the shared ``parse`` driver.

    Registers the ``train``, ``evaluate`` and ``predict`` subcommands
    under ``mode``.
    """
    parser = argparse.ArgumentParser(description='Create Semantic Dependency Parser using Variational Inference.')
    parser.set_defaults(Parser=VISemanticDependencyParser)
    subparsers = parser.add_subparsers(title='Commands', dest='mode')
    # train
    subparser = subparsers.add_parser('train', help='Train a parser.')
    # FIX: the help text contained a mojibake character ("useļ¼separated"),
    # a mis-encoded full-width comma; replaced with a plain ASCII comma.
    subparser.add_argument('--feat', '-f', default='tag,char,lemma', help='additional features to use, separated by commas.')
    subparser.add_argument('--build', '-b', action='store_true', help='whether to build the model first')
    subparser.add_argument('--max-len', type=int, help='max length of the sentences')
    subparser.add_argument('--buckets', default=32, type=int, help='max num of buckets to use')
    subparser.add_argument('--train', default='data/sdp/DM/train.conllu', help='path to train file')
    subparser.add_argument('--dev', default='data/sdp/DM/dev.conllu', help='path to dev file')
    subparser.add_argument('--test', default='data/sdp/DM/test.conllu', help='path to test file')
    subparser.add_argument('--embed', default='data/glove.6B.100d.txt', help='path to pretrained embeddings')
    subparser.add_argument('--unk', default='unk', help='unk token in pretrained embeddings')
    subparser.add_argument('--n-embed', default=100, type=int, help='dimension of embeddings')
    subparser.add_argument('--bert', default='bert-base-cased', help='which bert model to use')
    subparser.add_argument('--inference', default='mfvi', choices=['mfvi', 'lbp'], help='approximate inference methods')
    # evaluate
    subparser = subparsers.add_parser('evaluate', help='Evaluate the specified parser and dataset.')
    subparser.add_argument('--buckets', default=8, type=int, help='max num of buckets to use')
    subparser.add_argument('--data', default='data/sdp/DM/test.conllu', help='path to dataset')
    # predict
    subparser = subparsers.add_parser('predict', help='Use a trained parser to make predictions.')
    subparser.add_argument('--prob', action='store_true', help='whether to output probs')
    subparser.add_argument('--buckets', default=8, type=int, help='max num of buckets to use')
    subparser.add_argument('--data', default='data/sdp/DM/test.conllu', help='path to dataset')
    subparser.add_argument('--pred', default='pred.conllu', help='path to predicted result')
    parse(parser)
def main():
    """Build the CLI for the variational-inference dependency parser and
    dispatch to the shared ``parse`` driver.

    Besides the ``train``/``evaluate``/``predict`` subcommands, three
    flags (``--tree``, ``--proj``, ``--partial``) apply globally.
    """
    parser = argparse.ArgumentParser(description='Create Dependency Parser using Variational Inference.')
    # Flags shared by every subcommand.
    parser.add_argument('--tree', action='store_true', help='whether to ensure well-formedness')
    parser.add_argument('--proj', action='store_true', help='whether to projectivise the data')
    parser.add_argument('--partial', action='store_true', help='whether partial annotation is included')
    parser.set_defaults(Parser=VIDependencyParser)
    cmds = parser.add_subparsers(title='Commands', dest='mode')

    # --- train -----------------------------------------------------------
    sub = cmds.add_parser('train', help='Train a parser.')
    sub.add_argument('--feat', '-f', choices=['tag', 'char', 'bert'], nargs='+', help='features to use')
    sub.add_argument('--build', '-b', action='store_true', help='whether to build the model first')
    sub.add_argument('--encoder', choices=['lstm', 'bert'], default='lstm', help='encoder to use')
    sub.add_argument('--punct', action='store_true', help='whether to include punctuation')
    sub.add_argument('--max-len', type=int, help='max length of the sentences')
    sub.add_argument('--buckets', default=32, type=int, help='max num of buckets to use')
    sub.add_argument('--train', default='data/ptb/train.conllx', help='path to train file')
    sub.add_argument('--dev', default='data/ptb/dev.conllx', help='path to dev file')
    sub.add_argument('--test', default='data/ptb/test.conllx', help='path to test file')
    sub.add_argument('--embed', default='data/glove.6B.100d.txt', help='path to pretrained embeddings')
    sub.add_argument('--unk', default='unk', help='unk token in pretrained embeddings')
    sub.add_argument('--n-embed', default=100, type=int, help='dimension of embeddings')
    sub.add_argument('--bert', default='bert-base-cased', help='which BERT model to use')
    sub.add_argument('--inference', default='mfvi', choices=['mfvi', 'lbp'], help='approximate inference methods')

    # --- evaluate --------------------------------------------------------
    sub = cmds.add_parser('evaluate', help='Evaluate the specified parser and dataset.')
    sub.add_argument('--punct', action='store_true', help='whether to include punctuation')
    sub.add_argument('--buckets', default=8, type=int, help='max num of buckets to use')
    sub.add_argument('--data', default='data/ptb/test.conllx', help='path to dataset')

    # --- predict ---------------------------------------------------------
    sub = cmds.add_parser('predict', help='Use a trained parser to make predictions.')
    sub.add_argument('--buckets', default=8, type=int, help='max num of buckets to use')
    sub.add_argument('--data', default='data/ptb/test.conllx', help='path to dataset')
    sub.add_argument('--pred', default='pred.conllx', help='path to predicted result')
    sub.add_argument('--prob', action='store_true', help='whether to output probs')

    parse(parser)