def main(args=None):
    """Entry point: serve the Stanza selective parser with multiprocessing workers."""
    # 'fork' start method so worker processes inherit already-loaded state.
    torch.multiprocessing.set_start_method('fork')
    # Keep each worker single-threaded; parallelism comes from processes.
    torch.set_num_threads(1)
    torch.set_num_interop_threads(1)
    # Make sure the English Stanza models are present before serving.
    stanza.download('en')
    run_processor(StanzaSelectiveParser(), mp=True,
                  mp_context=torch.multiprocessing, args=args)
def main(args=None):
    """Entry point for the torchserve-backed sentences processor service."""
    parser = ArgumentParser(parents=[processor_parser()])
    parser.add_argument(
        '--torchserve-address',
        default='http://localhost:8080/predictions/sentences-bilstm',
        help="The endpoint for the torchserve deployment of the sentences model."
    )
    conf = parser.parse_args(args)
    # Fall back to INFO when the configured level name is unrecognized.
    level = getattr(logging, conf.log_level, logging.INFO)
    logging.basicConfig(level=level)
    run_processor(proc=SentencesProcessor(conf.torchserve_address),
                  namespace=conf)
def main(args=None):
    """Command-line entry point for the document-copy processor."""
    parser = argparse.ArgumentParser(
        description='Deploys a processor that copies one document to another.',
        parents=[mtap.processor_parser()])
    # NOTE(review): the Ellipsis default appears to act as an "unset" sentinel
    # for CopyDocument — confirm against its constructor.
    parser.add_argument('--index-names', nargs='*', metavar='INDEX_NAME',
                        default=..., help='')
    parser.add_argument('source_document_name', metavar='SOURCE_NAME',
                        help='Name of source document.')
    parser.add_argument('target_document_name', metavar='TARGET_NAME',
                        help='Name of target document.')
    ns = parser.parse_args(args)
    processor = CopyDocument(ns.source_document_name,
                             ns.target_document_name,
                             ns.index_names)
    mtap.run_processor(processor, options=ns)
def main(args=None):
    """Serve the negex triggers processor."""
    mtap.run_processor(NegexTriggersProcessor(), args=args)
def main(args=None):
    """Serve the deepen processor."""
    mtap.run_processor(DeepenProcessor(), args=args)
def main(args=None):
    """Serve the Stanza parser processor."""
    parser = StanzaParser()
    run_processor(parser, args=args)
def processor(conf):
    """Configure logging, ensure model data is present, and serve the processor."""
    logging.basicConfig(level=logging.INFO)
    # Download model/data files first if the configuration requests it.
    check_data(conf.download_data)
    run_processor(create_processor(conf), namespace=conf)
def main(args=None):
    """Serve the Stanza selective parser processor."""
    parser = StanzaSelectiveParser()
    run_processor(parser, args=args)
def main(args=None):
    """Entry point for the configurable wait processor."""
    parser = ArgumentParser(parents=[processor_parser()])
    # Wait duration forwarded to the processor; defaults to 50.
    parser.add_argument('--wait', default=50, type=int)
    ns = parser.parse_args(args)
    run_processor(WaitProcessor(ns.wait), namespace=ns)
def main(args=None):
    """Serve the negex triggers processor with multiprocessing enabled."""
    proc = NegexTriggersProcessor()
    mtap.run_processor(proc, mp=True, args=args)
def main(args=None):
    """Serve the one-sentence-per-line sentences processor."""
    mtap.run_processor(OnePerLineSentencesProcessor(), args=args)
def run_serializer_processor(args):
    """Launch a SerializationProcessor using the serializer named in args.

    Raises KeyError if args.serializer is not a registered serializer name.
    """
    serializer = _serializers[args.serializer]
    processor = SerializationProcessor(serializer, args.output_dir)
    run_processor(processor, args=args)
# Copyright 2019 Regents of the University of Minnesota.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tutorial hello world processor."""
import mtap
from mtap.processing import DocumentProcessor


@mtap.processor('hello')
class HelloProcessor(DocumentProcessor):
    """Labels the whole document with a greeting built from its text."""

    def process_document(self, document, params):
        # One 'hello' label spanning the entire document text.
        with document.get_labeler('hello') as labeler:
            text = document.text
            labeler(0, len(text), response='Hello ' + text + '!')


if __name__ == '__main__':
    mtap.run_processor(HelloProcessor())
# NOTE(review): this fragment opens mid-decorator — the `@mtap.processor(...`
# call that these `outputs=` keyword arguments belong to starts outside the
# visible source.
outputs=[
    labels('mtap.examples.letter_counts',
           properties=[label_property('letter', data_type='str'),
                       label_property('count', data_type='int')])
])
class ExampleProcessor(mtap.DocumentProcessor):
    """Does some labeling of the counts of the letter 'a' and 'b' in a
    document.
    """

    def process_document(self,
                         document: mtap.Document,
                         params: Dict[str, Any]) -> Optional[Dict[str, Any]]:
        # Work is gated on the caller-supplied 'do_work' flag; when it is
        # false the method implicitly returns None.
        if params['do_work']:
            # Time only the fetch of the document text under 'fetch_time'.
            # NOTE(review): stopwatch scope reconstructed from collapsed
            # source — confirm the counting lines are outside the `with`.
            with self.started_stopwatch('fetch_time'):
                text = document.text
            a_count = text.count('a')
            b_count = text.count('b')
            # Emit one label per letter, each spanning the whole document.
            with document.get_labeler('mtap.examples.letter_counts') as label_letter_count:
                label_letter_count(start_index=0, end_index=len(document.text),
                                   letter='a', count=a_count)
                label_letter_count(start_index=0, end_index=len(document.text),
                                   letter='b', count=b_count)
            return {'answer': 42}


if __name__ == '__main__':
    mtap.run_processor(ExampleProcessor(), mp=True)
def processor(conf):
    """Ensure model data is present, then serve the configured processor."""
    # Download model/data files first if the configuration requests it.
    check_data(conf.download_data)
    run_processor(create_processor(conf), namespace=conf)
def processor(conf):
    """Serve the configured processor with fork-based torch multiprocessing."""
    # Local renamed from `processor` to avoid shadowing this function's name.
    proc = create_processor(conf)
    run_processor(proc, options=conf, mp=True,
                  mp_context=torch.multiprocessing)
# references can be a map of strings to labels with document.get_labeler('map_references') as label_map_references: label_map_references(0, 4, ref={ 'a': referenced[0], 'b': referenced[1], 'c': referenced[2], 'd': referenced[3] }) # references can be a list of labels with document.get_labeler('list_references') as label_list_references: label_list_references(0, 2, ref=[referenced[0], referenced[1]]) label_list_references(2, 3, ref=[referenced[2], referenced[3]]) # references can be direct with document.get_labeler('references') as label_references: label_references(0, 2, a=referenced[0], b=referenced[1]) label_references(2, 3, a=referenced[2], b=referenced[3]) # referenced labels don't need to be added via "addLabels" or "Labeler.close" before label # indices that reference them. # The Document will delay uploading any label indices to the server until they are. document.add_labels('referenced', referenced) if __name__ == '__main__': mtap.run_processor(ReferencesExampleProcessor())
def main(args=None):
    """Serve the deepen processor with multiprocessing enabled."""
    proc = DeepenProcessor()
    mtap.run_processor(proc, args=args, mp=True)