def exec_list_thread(self, lid, *all_gid):
    # Mark the task list as started and flag all manually triggered tasks.
    with DBContext('default') as session:
        session.query(TaskList.list_id).filter(
            TaskList.list_id == lid).update({TaskList.schedule: 'start'})
        session.query(TaskSched).filter(
            TaskSched.list_id == lid,
            TaskSched.trigger == 'hand').update({TaskSched.task_status: '5'})
        session.commit()
    threads = []
    # Fetch all IPs: each element is a one-item tuple coming from the
    # group_by query, so unpack the group id before spawning a worker.
    for i in all_gid:
        i = i[0]
        if i:
            threads.append(
                multiprocessing.Process(target=self.my_run, args=(lid, i,)))
    Logger.info('starting %d task group processes' % len(threads))
    # Start the worker processes.
    for start_t in threads:
        try:
            start_t.start()
        except UnboundLocalError:
            print('error')
    # Block until every group has finished.
    for join_t in threads:
        join_t.join()
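# A sketch, not from the source, of the two ORM models the code above and
# below queries; the column names are grounded in those queries, while the
# table names and column types are assumptions (SQLAlchemy 1.4+).
from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class TaskList(Base):
    __tablename__ = 'task_list'  # assumed table name
    list_id = Column(Integer, primary_key=True)
    schedule = Column(String(32))  # lifecycle: 'ready' -> 'start' -> 'OK'

class TaskSched(Base):
    __tablename__ = 'task_sched'  # assumed table name
    id = Column(Integer, primary_key=True)
    list_id = Column(Integer)
    task_group = Column(Integer)  # one worker process is spawned per group
    trigger = Column(String(32))  # 'hand' marks manually triggered tasks
    task_status = Column(String(8))  # set to '5' when a hand task is queued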
def call_back(self, ch, method, properties, body):
    try:
        Logger.info('get message')
        self.on_message(body)
        if not self.__no_ack:
            ch.basic_ack(delivery_tag=method.delivery_tag)
    # The bare except is deliberate: it also catches the SystemExit raised
    # by on_message(), so the delivery is nacked instead of the consumer
    # dying with an unacknowledged message.
    except:  # noqa: E722
        Logger.error(traceback.format_exc())
        if not self.__no_ack:
            ch.basic_nack(delivery_tag=method.delivery_tag)
def start_server(self):
    """
    Start the tornado server.
    :return:
    """
    try:
        print('process id: %(progid)s' % dict(progid=options.progid))
        print('server address: %(addr)s:%(port)d'
              % dict(addr=options.addr, port=options.port))
        print('web server started successfully.')
        self.io_loop.start()
    except KeyboardInterrupt:
        self.io_loop.stop()
    except Exception:
        import traceback
        Logger.error(traceback.format_exc())
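# The options used in start_server() come from tornado.options; a minimal
# sketch of the definitions this module presumably makes elsewhere. The
# option names are grounded in the prints above, the defaults are assumptions.
from tornado.options import define

define('progid', default='web-server', help='process identifier')
define('addr', default='0.0.0.0', help='listen address')
define('port', default=8888, type=int, help='listen port')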
def publish_message(self, body, durable=True):
    self.__channel.exchange_declare(exchange=self.__exchange,
                                    exchange_type=self.__exchange_type)
    if self.__queue_name:
        result = self.__channel.queue_declare(queue=self.__queue_name)
    else:
        # Exclusive queue: the broker names it and drops it with the
        # connection.
        result = self.__channel.queue_declare(exclusive=True)
    self.__channel.queue_bind(exchange=self.__exchange,
                              queue=result.method.queue)
    if durable:
        # delivery_mode=2 marks the message as persistent.
        properties = pika.BasicProperties(delivery_mode=2)
        self.__channel.basic_publish(exchange=self.__exchange,
                                     routing_key=self.__routing_key,
                                     body=body,
                                     properties=properties)
    else:
        self.__channel.basic_publish(exchange=self.__exchange,
                                     routing_key=self.__routing_key,
                                     body=body)
    Logger.info('Published message %s successfully.' % body)
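# Usage sketch for publish_message(); `TaskPublisher` is a hypothetical name
# for the class these methods belong to, and the broker parameters are
# assumptions. on_message() below parses the body with int(body), so the
# payload should be an int-like string.
def _example_publish(list_id):
    publisher = TaskPublisher(exchange='task', exchange_type='direct',
                              routing_key='task', queue_name='task_queue')
    publisher.publish_message(str(list_id))  # persistent delivery by default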
def start_consuming(self):
    channel = self.create_channel()
    channel.exchange_declare(exchange=self.__exchange,
                             exchange_type=self.__exchange_type)
    if self.__queue_name:
        result = channel.queue_declare(queue=self.__queue_name, durable=True)
    else:
        result = channel.queue_declare(exclusive=True)
    channel.queue_bind(exchange=self.__exchange,
                       queue=result.method.queue,
                       routing_key=self.__routing_key)
    channel.basic_qos(prefetch_count=1)
    channel.basic_consume(self.call_back,
                          queue=result.method.queue,
                          no_ack=self.__no_ack)
    Logger.info('[*]Queue %s started.' % result.method.queue)
    channel.start_consuming()
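# Matching consumer wiring sketch; `TaskConsumer` and the broker parameters
# are assumptions. start_consuming() blocks and hands every delivery to
# call_back() above, one message at a time thanks to prefetch_count=1.
def _example_consume():
    consumer = TaskConsumer(exchange='task', exchange_type='direct',
                            routing_key='task', queue_name='task_queue',
                            no_ack=False)
    consumer.start_consuming()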
def on_message(self, body):
    time.sleep(2)
    try:
        args = int(body)
    except (ValueError, UnboundLocalError):
        Logger.error('[*]body type error, must be int, body: (%s)'
                     % str(body, encoding='utf-8'))
        args = 0
    print('flow_id is ', args)
    if isinstance(args, int):
        flow_id = args
        with DBContext('readonly') as session:
            # Check that the list id exists and has not been executed yet.
            is_exist = session.query(TaskList.list_id).filter(
                TaskList.list_id == flow_id,
                TaskList.schedule == 'ready').first()
            if is_exist:
                all_group = session.query(TaskSched.task_group).filter(
                    TaskSched.list_id == flow_id).group_by(
                    TaskSched.task_group).all()
                # Execute the task groups in separate processes.
                self.exec_list_thread(flow_id, *all_group)
                # Record the result and update the status.
                with DBContext('default') as session:
                    session.query(TaskList.list_id).filter(
                        TaskList.list_id == flow_id).update(
                        {TaskList.schedule: 'OK'})
                    session.commit()
                Logger.info('list {0} end of task'.format(flow_id))
            else:
                with DBContext('readonly') as session:
                    exist = session.query(TaskList.list_id).filter(
                        TaskList.list_id == flow_id,
                        or_(TaskList.schedule == 'OK',
                            TaskList.schedule == 'start')).first()
                if exist:
                    Logger.info('task list id {0} is already started '
                                'or OK !!!'.format(body))
                else:
                    Logger.error('task list id {0} is not ready !!!'
                                 .format(body))
                    time.sleep(8)
                    raise SystemExit(-2)
    else:
        Logger.error('[*]body type error, must be int, body: (%s)'
                     % str(body, encoding='utf-8'))
        --offset    ...     0           Skip first N tokens from GC.
        --tagparser (optional)          Name of a tagparser class if your
                                        trainer needs it.
        --confs     config.json         Address to file with configurations.
        ...Plus additional parameters needed for the trainer you chose.
        """  # noqa E122
    )
    raise SystemExit

predef = Predefinator(
    fp=open(
        argv.get("--confs", default="config.json"), encoding="utf-8"
    )
)

logger = Logger(
    fp=open(argv.get("--logfile", default="xpostrainlog.md"), mode="a+"),
    stream=sys.stdout
)

logger.output("Loading...")

from libs.db import DB  # noqa E402

db = DB(
    host=argv.get("--dbhost", default="atlas"), dbname="syntextua"
)

if argv.has("--tagparser"):
    argv.bundle["--tagparser"] = predef.inited(argv.get("--tagparser"))
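# Hypothetical invocation of the trainer above (the script name is a guess;
# the flags and defaults are the ones documented in the usage text):
#
#   python xpostrain.py --dbhost atlas --confs config.json \
#       --tagparser SomeTagParser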
    Expected parameters:
        Name        Default         Description
        --dbhost    ... atlas       DB which will be used.
        --logfile   ... ccrlog.md   File to write full logs in.
        --limit     ... 0           Limit of sentences to be processed. Set
                                    to '0' to set it to infinite.
        --offset    ... 0           Skip first N sentences from the UD file
                                    you've specified.
        --confs     config.json     Address to file with configurations.
        """  # noqa E122
    )
    raise SystemExit

logger = Logger(
    fp=open(
        argv.get("--logfile", default="ccrlog.md"), mode="a+",
        encoding="utf-8"
    ),
    stream=sys.stdout
)

logger.output("Loading...")

from libs.db import DB  # noqa E402

predef = Predefinator(
    fp=open(
        argv.get("--confs", default="config.json"), encoding="utf-8"
    )
)
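# Hypothetical invocation (script name is a guess; flags as documented above):
#
#   python ccr.py --dbhost atlas --logfile ccrlog.md --limit 100 --offset 50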
        --confs     config.json     Path to .json file with configurations.
        --limit     ... 0           Limit of tokens to be processed. Set to
                                    '0' to set it to infinite.
        --offset    ... 0           Skip first N tokens from the UD file
                                    you've specified.
        """  # noqa E122
    )
    raise SystemExit

predef = Predefinator(
    fp=open(argv.get("--confs", default="config.json"), encoding="utf-8")
)

logger = Logger(
    fp=open(argv.get("--logfile", default="amalog.md"), mode="a+",
            encoding="utf-8"),
    stream=sys.stdout
)

logger.output("Loading...")

from libs.db import DB  # noqa E402

db = DB(host=argv.get("--dbhost", default="atlas"))

analyzer = predef.inited(
    "XPOSRecognitionAnalyzer",
    # int(9e999) would raise OverflowError (9e999 parses as float inf), so
    # parse the flag first and treat a missing or zero --limit as unlimited,
    # matching the "'0' means infinite" rule in the usage text above.
    limit=int(argv.get("--limit", default=0)) or float("inf"),
    recognizer=predef.inited(
        "MorphologyRecognizer",
        collection=lambda name: db.cli.get_collection(name)
    )
)
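# Hypothetical invocation (script name is a guess; flags as documented above):
#
#   python ama.py --confs config.json --limit 1000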