def main(): """ 主函数 """ conf_file = '' # 用户指定一个配置文件 if len(sys.argv) > 1: conf_file = sys.argv[1] init_config(conf_file) server = get_configs('server') alias = safe_get_host('server', 'alias') active_config = server.get('active_config', 'false') # 动态注册task for module in server['modules'].split(): try: importlib.import_module(module) except ImportError: raise Exception('Not imported the %s module' % module) # 连结服务器 ztq_core.setup_redis('default', host=server['host'], port=int(server['port']), db=int(server['db'])) # 开启一个命令线程 command_thread = CommandThread(worker_name=alias) sys.stdout.write('Starting server in PID %s\n'%os.getpid()) worker_state = ztq_core.get_worker_state() if active_config.lower() == 'true' and command_thread.worker_name in worker_state: # 如果服务器有这个机器的配置信息,需要自动启动工作线程 queue = ztq_core.get_worker_config() if command_thread.worker_name in queue: set_job_threads(queue[command_thread.worker_name]) elif get_configs('queues'): # 把worker监视队列的情况上报到服务器 queue_config = ztq_core.get_queue_config() # 如果配置有queues,自动启动线程监视 job_threads = {} queue_config = ztq_core.get_queue_config() for queue_name, sleeps in get_configs('queues').items(): queue_config[queue_name] = {'title': queue_name} # for ztq_app job_threads[queue_name] = [ {'interval': int(sleep)} for sleep in sleeps.split(',') ] queue_config[queue_name] = {'name':queue_name, 'title':queue_name, 'widget': 5} init_job_threads(job_threads) loggers = get_configs('log') initlog( loggers.get('key', 'ztq_worker'), loggers.get('handler_file'), loggers.get('level', 'ERROR'), ) # 不是以线程启动 command_thread.run()
def main(config): """ 主函数 config: {'server': {host:, port:, db:} } """ server = config["server"] # 动态注册task for module in server["modules"].split(): try: __import__(module) except ImportError: modules = module.split(".") __import__(modules[0], globals(), locals(), modules[1]) # 连结服务器 redis_host = server["host"] redis_port = int(server["port"]) redis_db = int(server["db"]) ztq_core.setup_redis("default", host=redis_host, port=redis_port, db=redis_db) # 开启一个命令线程 alias = server.get("alias", "") if not alias: alias = get_ip() server["alias"] = alias command_thread = CommandThread(worker_name=alias) sys.stdout.write("Starting server in PID %s\n" % os.getpid()) worker_state = ztq_core.get_worker_state() active_config = server.get("active_config", "false") if active_config.lower() == "true" and command_thread.worker_name in worker_state: # 如果服务器有这个机器的配置信息,需要自动启动工作线程 queue = ztq_core.get_worker_config() if command_thread.worker_name in queue: set_job_threads(queue[command_thread.worker_name]) elif config["queues"]: # 把worker监视队列的情况上报到服务器 queue_config = ztq_core.get_queue_config() # 如果配置有queues,自动启动线程监视 job_threads = {} for queue_name, sleeps in config["queues"].items(): job_threads[queue_name] = [{"interval": int(sleep)} for sleep in sleeps.split(",")] if not queue_config.get(queue_name, []): queue_config[queue_name] = {"name": queue_name, "title": queue_name, "widget": 5} init_job_threads(job_threads) loggers = config["log"] initlog(loggers.get("key", "ztq_worker"), loggers.get("handler_file"), loggers.get("level", "ERROR")) # 不是以线程启动 command_thread.run()
def main(global_config, redis_host='127.0.0.1', redis_port='6379',
         redis_db='0', frs_root='frs', init_dispatcher_config='true',
         frs_cache='frscache', addon_config=None, work_enable=True, **settings):
    """ This function returns a Pyramid WSGI application. """
    # Initialize the Redis connection
    ztq_core.setup_redis('default', redis_host, port=int(redis_port), db=int(redis_db))

    # Initialize the weight data; skip if the weight configuration already exists
    if init_dispatcher_config.lower() == 'true':
        # init_dispatcher_config exists because the console may be started while the service
        # is not running; reading Redis data here would keep the console from coming up
        dispatcher_config = ztq_core.get_dispatcher_config()
        if not dispatcher_config:
            dispatcher_config = weight = {'queue_weight': {}, 'worker_weight': {}}
            ztq_core.set_dispatcher_config(weight)
        queue_weight = dispatcher_config['queue_weight']
        if not queue_weight:
            queues_list = ztq_core.get_queue_config()
            for queue_name, queue_config in queues_list.items():
                queue_weight[queue_name] = queue_config.get('weight', 0)
            ztq_core.set_dispatcher_config(dispatcher_config)

    # Start the backend service
    # Initialize the fts_web configuration
    settings = dict(settings)
    settings.setdefault('jinja2.directories', 'ztq_console:templates')

    config = Configurator(settings=settings)
    config.begin()
    config.add_renderer('.html', pyramid_jinja2.renderer_factory)
    config.add_static_view('static', 'ztq_console:static')
    config.scan('ztq_console.views')
    config.add_route('worker', '/worker/{id}', view='ztq_console.views.config_worker')
    config.add_route('end_thread', '/worker/{id}/{thread}/{pid}',
                     view='ztq_console.views.stop_working_job')
    config.add_route('taskqueue', '/taskqueues/{id}')
    config.add_route('taskqueues_config', '/taskqueues/{id}/config',
                     view='ztq_console.views.config_queue')
    config.add_route('taskqueue_action', '/taskqueues_action/{id}')
    config.add_route('errorqueues_job', '/errorqueues/{id}/job',
                     view='ztq_console.views.error_jobs_handler')
    config.add_route('workerlog', '/workerlog/{page}')
    config.add_route('syslog', '/syslog/{page}')
    config.add_route('errorlog', '/errorlog/{page}')
    config.add_route('errorqueue', '/errorqueue/{id}/{page}')
    config.add_route('redo_all_error_for_queue', '/redo_all_error_for_queue/{id}')
    config.add_route('del_all_error_for_queue', '/del_all_error_for_queue/{id}')
    if addon_config is not None:
        addon_config(config)
    config.end()
    return config.make_wsgi_app()

def get_taskqueues_list():
    # Yield one status entry per queue
    dispatcher_config = ztq_core.get_dispatcher_config()
    queue_weight = dispatcher_config['queue_weight']
    queues_list = ztq_core.get_queue_config()

    # Sort the queues by error count, descending
    sort_queue_name = {}
    for queue_name, queue_config in queues_list.items():
        sort_queue_name[queue_name] = len(ztq_core.get_error_queue(queue_name))

    for queue_name in sorted(sort_queue_name, key=lambda x: sort_queue_name[x], reverse=True):
        task_queue = {}
        task_queue['name'] = queue_name
        #task_queue['tags'] = queue_config.get('tags', ())
        queue = ztq_core.get_task_queue(queue_name)

        # Number of tasks / number of errors
        task_queue['length'] = len(queue)
        task_queue['error_length'] = sort_queue_name[queue_name]

        # Timestamp of the first task
        task_queue['error_end'] = task_queue['first'] = ''
        first_job = queue[0]
        first_job = ztq_core.get_task_hash(queue_name).get(first_job)
        if first_job:
            task_queue['first'] = datetime.datetime.fromtimestamp(
                first_job['runtime'].get('create', 0))

        # Timestamp of the most recent error
        error_first_job = ztq_core.get_error_queue(queue_name)[0]
        error_first_job = ztq_core.get_error_hash(queue_name).get(error_first_job)
        if error_first_job:
            task_queue['error_end'] = datetime.datetime.fromtimestamp(
                error_first_job['runtime'].get('create', 0))

        task_queue['weight'] = queue_weight.get(queue_name, 0)

        # Collect the worker thread configuration
        workers_config = ztq_core.get_worker_config()
        task_queue['from_right'] = True
        task_queue['workers'] = []  # accumulate entries across all workers, not just the last one
        for worker_name, worker_config in workers_config.items():
            for config in worker_config.get(queue_name, []):
                task_queue['workers'].append([worker_name + ':', config['interval']])
                if 'from_right' in config:
                    task_queue['from_right'] = config['from_right']

        task_queue['buffer_length'] = len(ztq_core.get_buffer_queue(queue_name))

        yield task_queue

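# A hedged usage sketch (assumption, not from the original source): get_taskqueues_list()
# is a generator, so the console can consume it lazily, e.g. to print one summary line per queue.
for tq in get_taskqueues_list():
    print('%s: %s tasks, %s errors, weight %s' % (
        tq['name'], tq['length'], tq['error_length'], tq['weight']))
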
def task_queues(request):
    """ View the running state of the task queues

    Returns: the running state of every atomic queue
    """
    task_job_length = 0
    error_job_length = 0
    # Add up the total length of the atomic, original and error queues
    queues_list = ztq_core.get_queue_config()
    for queue_name, queue_config in queues_list.items():
        task_job_length += len(ztq_core.get_task_queue(queue_name))
        error_job_length += len(ztq_core.get_error_queue(queue_name))

    task_queues = utils.get_taskqueues_list()

    return {'task_queues': task_queues,
            'task_job_length': task_job_length,
            'error_job_length': error_job_length,
            }

def get_all_error_jobs(sindex=0, eindex=-1):
    queues_list = ztq_core.get_queue_config()
    index = 0
    count = eindex - sindex
    for queue_name in queues_list.keys():
        error_len = len(ztq_core.get_error_queue(queue_name))
        if error_len == 0:
            continue

        # Work out where to start in this queue
        index += error_len
        if index < sindex:
            continue
        start_index = 0 if sindex - (index - error_len) < 0 else sindex - (index - error_len)

        yield get_error_queue_jobs(queue_name, start_index, count + start_index)

        # Check whether enough jobs have been produced to stop
        count -= error_len - start_index
        if count < 0:
            break

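# A hedged usage sketch: get_all_error_jobs() yields one get_error_queue_jobs(...) result per
# queue. Assuming each result is an iterable of job records (not confirmed in this file),
# a flat listing of roughly the first 20 error jobs could look like this.
for queue_jobs in get_all_error_jobs(0, 20):
    for job in queue_jobs:
        print(job)
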
def main(config, thread=False):
    """ Main entry point.

    config: {'server': {host:, port:, db:} }
    """
    server = config['server']

    # Dynamically register the task modules
    for module in server['modules'].split():
        try:
            __import__(module)
        except ImportError:
            modules = module.split('.')
            __import__(modules[0], globals(), locals(), modules[1])

    # Connect to the Redis server
    redis_host = server['host']
    redis_port = int(server['port'])
    redis_db = int(server['db'])
    ztq_core.setup_redis('default', host=redis_host, port=redis_port, db=redis_db)

    # Start a command thread
    alias = server.get('alias', '')
    if not alias:
        alias = get_ip()
        server['alias'] = alias
    command_thread = CommandThread(worker_name=alias)
    sys.stdout.write('Starting server in PID %s\n' % os.getpid())

    worker_state = ztq_core.get_worker_state()
    active_config = server.get('active_config', 'false')

    # Work out which queues should be started from the online (server-side) configuration
    active_queue_config = {}
    if active_config.lower() == 'true' and command_thread.worker_name in worker_state:
        # The server has configuration for this machine: start its worker threads automatically
        worker_config = ztq_core.get_worker_config()
        active_queue_config = worker_config.get(command_thread.worker_name, {})

    # Queues started from the local configuration
    local_queue_config = {}
    if config['queues']:
        # Report the queues this worker watches back to the server
        queue_config = ztq_core.get_queue_config()
        # If queues are configured locally, start the monitoring threads automatically
        for queue_name, sleeps in config['queues'].items():
            # The online configuration is applied later
            if queue_name in active_queue_config:
                continue
            local_queue_config[queue_name] = [{'interval': int(sleep)}
                                              for sleep in sleeps.split(',')]
            if not queue_config.get(queue_name, []):
                queue_config[queue_name] = {'name': queue_name,
                                            'title': queue_name,
                                            'widget': 5}

    # Merge the online and local configurations
    active_queue_config.update(local_queue_config)
    init_job_threads(active_queue_config)

    loggers = config['log']
    initlog(
        loggers.get('key', 'ztq_worker'),
        loggers.get('handler_file'),
        loggers.get('level', 'ERROR'),
    )

    # Either run as a daemon thread, or block in the current thread
    if thread:
        command_thread.setDaemon(True)
        command_thread.start()
    else:
        command_thread.run()

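# A hedged sketch: with thread=True the command thread is started as a daemon, so this
# main() variant returns immediately and the worker can be embedded in another process.
# The config dict is assumed to have the same layout as example_config in the earlier sketch.
main(example_config, thread=True)   # non-blocking: the command loop runs in a daemon thread
# main(example_config)              # blocking: the command loop runs in the current thread
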
def main(config): """ 主函数 config: {'server': {host:, port:, db:} } """ server = config['server'] # 动态注册task for module in server['modules'].split(): try: __import__(module) except ImportError: modules = module.split('.') __import__(modules[0], globals(), locals(), modules[1]) # 连结服务器 redis_host = server['host'] redis_port = int(server['port']) redis_db = int(server['db']) ztq_core.setup_redis('default', host=redis_host, port=redis_port, db=redis_db) # 开启一个命令线程 alias = server.get('alias', '') if not alias: alias = get_ip() server['alias'] = alias command_thread = CommandThread(worker_name=alias) sys.stdout.write('Starting server in PID %s\n'%os.getpid()) worker_state = ztq_core.get_worker_state() active_config = server.get('active_config', 'false') # 计算那些是需要根据线上配置启动的队列 active_queue_config = {} if active_config.lower() == 'true' and command_thread.worker_name in worker_state: # 如果服务器有这个机器的配置信息,需要自动启动工作线程 worker_config = ztq_core.get_worker_config() active_queue_config = worker_config.get(command_thread.worker_name, {}) # 根据本地配置,启动的队列 local_queue_config = {} if config['queues']: # 把worker监视队列的情况上报到服务器 queue_config = ztq_core.get_queue_config() # 如果配置有queues,自动启动线程监视 for queue_name, sleeps in config['queues'].items(): # 线上配置稍后再设置 if queue_name in active_queue_config: continue local_queue_config[queue_name] = [ {'interval': int(sleep)} for sleep in sleeps.split(',') ] if not queue_config.get(queue_name, []): queue_config[queue_name] = {'name':queue_name, 'title':queue_name, 'widget': 5} # 合并线上和线下的配置 active_queue_config.update(local_queue_config) init_job_threads(active_queue_config) loggers = config['log'] initlog( loggers.get('key', 'ztq_worker'), loggers.get('handler_file'), loggers.get('level', 'ERROR'), ) # 不是以线程启动 command_thread.run()
def main(global_config, frs_root='frs', init_dispatcher_config='true',
         frs_cache='frscache', addon_config=None, work_enable=True, **settings):
    """ This function returns a Pyramid WSGI application. """
    # Whether to use sentinel
    enable_sentinel = settings.get('enable_sentinel', 'false').lower() == 'true'
    # If sentinel is enabled, the redis host, port and db settings all refer to sentinel
    if enable_sentinel:
        # List of sentinel hosts
        hosts = settings.get('sentinel_hosts', None)
        assert(hosts)
        # All sentinel service names
        services = settings.get('sentinel_names', None)
        assert(services)
        # Database to use
        db = int(settings.get('sentinel_db', '0'))
        assert(db >= 0)

        services = services.split(',')
        ztq_core.setup_sentinel('default',
                                map(lambda x: (x[0], int(x[1])),
                                    [host.split(':') for host in hosts.split(',')]),
                                services, db=db)
        # With sentinel enabled, the server list consists of all services
        MENU_CONFIG['servers'] = services
        MENU_CONFIG['current_redis'] = services[0]
        MENU_CONFIG['enable_sentinel'] = True
    else:
        # Initialize the servers
        # 'servers' format: name:host:port:db:title, ...
        servers = settings.get('servers', None)
        # 'servers' is a required setting; it replaces the old
        # redis_host, redis_port and redis_db settings
        assert(servers)
        for server in servers.split(','):
            texts = server.split(':')
            # A single server entry must have 4 or 5 fields
            assert(len(texts) >= 4 and len(texts) <= 5)
            # Add it to the list of servers to manage
            MENU_CONFIG['servers'].append({
                'name': texts[0],
                'host': texts[1],
                'port': int(texts[2]),
                'db': int(texts[3]),
                'title': texts[4] if len(texts) == 5 else texts[0],
            })
        # Use the first server in the list as the default server
        current_redis = MENU_CONFIG['servers'][0]
        MENU_CONFIG['current_redis'] = current_redis['name']
        # Initialize the Redis connection
        ztq_core.setup_redis('default', current_redis['host'],
                             current_redis['port'], current_redis['db'])
        MENU_CONFIG['enable_sentinel'] = False

    # Initialize the weight data; skip if the weight configuration already exists
    if init_dispatcher_config.lower() == 'true':
        # init_dispatcher_config exists because the console may be started while the service
        # is not running; reading Redis data here would keep the console from coming up
        dispatcher_config = ztq_core.get_dispatcher_config()
        if not dispatcher_config:
            dispatcher_config = weight = {'queue_weight': {}, 'worker_weight': {}}
            ztq_core.set_dispatcher_config(weight)
        queue_weight = dispatcher_config['queue_weight']
        if not queue_weight:
            queues_list = ztq_core.get_queue_config()
            for queue_name, queue_config in queues_list.items():
                queue_weight[queue_name] = queue_config.get('weight', 0)
            ztq_core.set_dispatcher_config(dispatcher_config)

    # Start the backend service
    # Initialize the fts_web configuration
    authn_policy = AuthTktAuthenticationPolicy('sosecret', callback=groupfinder, hashalg='sha512')
    authz_policy = ACLAuthorizationPolicy()

    settings = dict(settings)
    settings.setdefault('jinja2.directories', 'ztq_console:templates')

    config = Configurator(settings=settings,
                          root_factory='ztq_console.utils.models.RootFactory')
    config.set_authentication_policy(authn_policy)
    config.set_authorization_policy(authz_policy)
    config.begin()
    config.add_renderer('.html', pyramid_jinja2.renderer_factory)
    config.add_static_view('static', 'ztq_console:static')
    config.scan('ztq_console.views')
    config.add_route('login', '/login')
    config.add_route('logout', '/logout')
    config.add_route('password', '/password')
    config.add_route('worker', '/worker/{id}', view='ztq_console.views.config_worker')
    config.add_route('end_thread', '/worker/{id}/{thread}/{pid}',
                     view='ztq_console.views.stop_working_job')
    config.add_route('taskqueue', '/taskqueues/{id}')
    config.add_route('taskqueues_config', '/taskqueues/{id}/config',
                     view='ztq_console.views.config_queue')
    config.add_route('taskqueue_action', '/taskqueues_action/{id}')
    config.add_route('errorqueues_job', '/errorqueues/{id}/job',
                     view='ztq_console.views.error_jobs_handler')
    config.add_route('workerlog', '/workerlog/{page}')
    config.add_route('syslog', '/syslog/{page}')
    config.add_route('errorlog', '/errorlog/{page}')
    config.add_route('errorqueue', '/errorqueue/{id}/{page}')
    config.add_route('redo_all_error_for_queue', '/redo_all_error_for_queue/{id}')
    config.add_route('del_all_error_for_queue', '/del_all_error_for_queue/{id}')
    if addon_config is not None:
        addon_config(config)
    config.end()
    return config.make_wsgi_app()

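# A hedged usage sketch: the factory above is a PasteDeploy-style app factory that reads its
# connection info from **settings. Building the WSGI app directly might look like this; the
# 'servers' entry follows the name:host:port:db:title format parsed above, and the host and
# title values are illustrative placeholders.
app = main(
    {},                                     # global_config, unused here
    servers='main:127.0.0.1:6379:0:Main',   # required when sentinel is disabled
    enable_sentinel='false',
    init_dispatcher_config='true',
)
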
def main(): """ 主函数 """ conf_file = '' # 用户指定一个配置文件 if len(sys.argv) > 1: conf_file = sys.argv[1] init_config(conf_file) server = get_configs('server') alias = safe_get_host('server', 'alias') active_config = server.get('active_config', 'false') # 动态注册task for module in server['modules'].split(): try: __import__(module) except ImportError: raise Exception('Not imported the %s module' % module) # 连结服务器 ztq_core.setup_redis('default', host=server['host'], port=int(server['port']), db=int(server['db'])) # 开启一个命令线程 command_thread = CommandThread(worker_name=alias) sys.stdout.write('Starting server in PID %s\n' % os.getpid()) worker_state = ztq_core.get_worker_state() if active_config.lower( ) == 'true' and command_thread.worker_name in worker_state: # 如果服务器有这个机器的配置信息,需要自动启动工作线程 queue = ztq_core.get_worker_config() if command_thread.worker_name in queue: set_job_threads(queue[command_thread.worker_name]) elif get_configs('queues'): # 把worker监视队列的情况上报到服务器 queue_config = ztq_core.get_queue_config() # 如果配置有queues,自动启动线程监视 job_threads = {} for queue_name, sleeps in get_configs('queues').items(): job_threads[queue_name] = [{ 'interval': int(sleep) } for sleep in sleeps.split(',')] if not queue_config.get(queue_name, []): queue_config[queue_name] = { 'name': queue_name, 'title': queue_name, 'widget': 5 } init_job_threads(job_threads) loggers = get_configs('log') initlog( loggers.get('key', 'ztq_worker'), loggers.get('handler_file'), loggers.get('level', 'ERROR'), ) # 不是以线程启动 command_thread.run()
def main(config): """ 主函数 config: {'server': {host:, port:, db:} } """ server = config['server'] module_path = server['module_path'] sys.path.append(module_path) # 动态注册task for module in server['modules'].split(): try: __import__(module) except ImportError: modules = module.split('.') __import__(modules[0], globals(), locals(), modules[1]) # 连结服务器 redis_host = server['host'] redis_port = int(server['port']) redis_db = int(server['db']) # 是否启用 sentinel enable_sentinel = server['enable_sentinel'].lower() == 'true' # 对应的 sentinel service_name sentinel_name = server['sentinel_name'] if enable_sentinel: # 当启用 sentinel 时 # 配置的host, port, db等信息变为了sentinel的主机信息 ztq_core.setup_sentinel('default', [(redis_host, redis_port)], [sentinel_name], db = redis_db) else: ztq_core.setup_redis('default', host=redis_host, port=redis_port, db=redis_db) # 开启一个命令线程 alias = server.get('alias', '') if not alias: alias = get_ip() server['alias'] = alias command_thread = CommandThread(worker_name=alias) sys.stdout.write('Starting server in PID %s\n'%os.getpid()) worker_state = ztq_core.get_worker_state() active_config = server.get('active_config', 'false') if active_config.lower() == 'true' and command_thread.worker_name in worker_state: # 如果服务器有这个机器的配置信息,需要自动启动工作线程 queue = ztq_core.get_worker_config() if command_thread.worker_name in queue: set_job_threads(queue[command_thread.worker_name]) elif config['queues']: # 把worker监视队列的情况上报到服务器 queue_config = ztq_core.get_queue_config() # 如果配置有queues,自动启动线程监视 job_threads = {} for queue_name, sleeps in config['queues'].items(): job_threads[queue_name] = [ {'interval': int(sleep)} for sleep in sleeps.split(',') ] if not queue_config.get(queue_name, []): queue_config[queue_name] = {'name':queue_name, 'title':queue_name, 'widget': 5} init_job_threads(job_threads) loggers = config['log'] initlog( loggers.get('key', 'ztq_worker'), loggers.get('handler_file'), loggers.get('level', 'ERROR'), ) # 不是以线程启动 command_thread.run()