def __getattr__(self, name):
    """Fetch a component by name.

    :parameter:
      - `name`: component name
    """
    if name in self._blacklist:
        recorder('WARN', '{attr} in blacklist'.format(attr=name))
        raise AttributeError

    # Prefer a component cached in the buffer pool.
    component = self._components.get(name)
    if component:
        self.recorder(
            'DEBUG',
            '{obj} get component from components cache {name} {com}'.
            format(obj=self, name=name, com=component))
        return component

    # Cache miss: ask the manager for an idle component.
    component = fastweb.manager.Manager.get_component(name, self)
    if not component:
        self.recorder(
            'ERROR',
            "can't acquire idle component <{name}>".format(name=name))
        raise ComponentError

    self._components[name] = component
    self.recorder(
        'DEBUG',
        '{obj} get component from manager {name} {com}'.format(
            obj=self, name=name, com=component))
    return component
def setup():
    """Asynchronously install connection components (coroutine).

    Runs during initialisation so that accurate error messages are raised
    as early as possible.
    """
    recorder('DEBUG', 'asynchronous connection component manager setup start')

    from fastweb.setting.default_connection_component import ASYN_CONN_COMPONENTS

    if AsynConnManager.configer:
        for (cpre, cls, default_size) in ASYN_CONN_COMPONENTS:
            # All configured sections matching this component prefix.
            components = AsynConnManager.configer.get_components(cpre)
            for name, value in list(components.items()):
                config = AsynConnManager.configer.configs[name]
                # Fall back to the per-component default pool size.
                size = config.get('size', default_size)
                awake = config.get('awake')
                maxconnections = config.get('maxconnections')
                pool = AsynConnectionPool(cls, config, size, name,
                                          awake=awake,
                                          maxconnections=maxconnections)
                # Wait for the pool to be fully created before registering it.
                yield pool.create()
                Manager._pools[value['object']] = pool
    recorder('DEBUG', 'asynchronous manager setup successful')
def stop(self, entire=False):
    """Stop this process, or every tracked process when *entire* is set."""
    if not entire:
        self.terminate()
        recorder('INFO', '{proc} stop'.format(proc=self))
        return
    # Tear down every process this object tracks.
    for process in self._fprocesses:
        process.terminate()
    recorder('INFO', 'entire processes stop')
def _convert_attrs(att, t, va):
    """Convert a setting value to its declared type.

    :parameter:
      - `att`: attribute name
      - `t`: target type (used as a conversion callable)
      - `va`: raw value to convert

    Stores the converted value back into ``self.setting``; raises
    ``ConfigurationError`` when conversion fails.

    NOTE(review): reads ``self`` from the enclosing scope, so this is only
    valid as a helper nested inside a method.
    """
    try:
        va = t(va)
        self.setting[att] = va
    except (ValueError, TypeError) as e:
        # FIX: dropped the unused `name=self.name` keyword the original
        # passed to format() — the template never references {name}.
        recorder('ERROR', "<{attr}> can't convert to {tp} ({e})".format(attr=att, tp=t, e=e))
        raise ConfigurationError
def start(self):
    """Start the micro service.

    Builds one thrift server: every method in the generated ``Processor``
    is wrapped so each remote call gets a fresh handler instance,
    request-id propagation, timing and logging.

    :parameter:
      - `handler`: list of AbLogic
    """

    def process_proxy(processor):
        for _func_name, _func in list(processor._processMap.items()):
            # BUG FIX: bind the loop variables as default arguments.
            # Without this, every generated `anonymous` closes over the
            # *last* (_func_name, _func) pair of the loop (late-binding
            # closures), so all RPC methods would dispatch to one function.
            def anonymous(p, seq, ipo, opo,
                          _func_name=_func_name, _func=_func):
                oproc = getattr(module, 'Processor')(handler=self._handlers())
                # Adopt an externally supplied request id only if it is
                # long enough to look like a real one.
                oproc._handler.requestid = seq if len(
                    str(seq)) > 8 else oproc._handler.requestid
                oproc._handler.recorder(
                    'IMPORTANT',
                    '{obj}\nremote call [{name}]'.format(obj=self,
                                                         name=_func_name))
                with timing('ms', 8) as t:
                    _func(oproc, seq, ipo, opo)
                oproc._handler.release()
                oproc._handler.recorder(
                    'IMPORTANT',
                    '{obj}\nremote call [{name}] success -- {t}'.format(
                        obj=self, name=_func_name, t=t))

            processor._processMap[_func_name] = anonymous

    # Merge all handlers into a single processor.
    module = load_module(self._thrift_module)
    processor = getattr(module, 'Processor')(handler=None)
    process_proxy(processor)

    transport = TSocket.TServerSocket(port=self._port)
    tfactory = TTransport.TFramedTransportFactory()
    pfactory = TCompactProtocol.TCompactProtocolFactory()
    server = TServer.TThreadPoolServer(processor, transport, tfactory,
                                       pfactory, daemon=self._daemon)
    server.setNumThreads(self.size)

    try:
        if self._active:
            recorder(
                'INFO',
                '{svr} start at <{port}> threadpool size <{size}>'.format(
                    svr=self, port=self._port, size=self.size))
            server.serve()
    except KeyboardInterrupt:
        recorder(
            'INFO',
            '{svr} stop at <{port}>'.format(svr=self, port=self._port))
def _parse_idl(self, idl):
    """Parse a thrift IDL file and return its definitions.

    :parameter:
      - `idl`: idl file path
    """
    try:
        return self._idl_parser.parse_file(idl).definitions
    except ThriftParserError:
        # Log the offending file, then let the caller see the parse error.
        recorder('ERROR', 'thrift file {idl} format error'.format(idl=idl))
        raise
def _scale(self, thread):
    # Grow the connection pool from a worker thread: a private IOLoop is
    # created so the asynchronous `create` coroutine can be driven to
    # completion here without touching the main loop.
    recorder(
        'WARN',
        '{thread} {obj} scale connection pool start'.format(thread=thread,
                                                            obj=self))
    scale_loop = ioloop.IOLoop(make_current=True)
    scale_loop.run_sync(self.create)
    recorder(
        'WARN',
        '{thread} {obj} scale connection pool successful'.format(
            thread=thread, obj=self))
    # NOTE(review): run_sync() has already run and stopped the loop; this
    # extra start() blocks the calling thread on an idle loop — confirm
    # this is intended (e.g. to keep the thread alive for callbacks).
    scale_loop.start()
def return_connection(self, connection):
    """Return a connection to the pool.

    :parameter:
      - `connection`: the connection being returned
    """
    # Re-queue the connection and track it as idle again.
    self._pool.put_nowait(connection)
    self._unused_pool.append(connection)
    recorder(
        'DEBUG',
        '<{name}> return connection {conn}, total connections {count}'.
        format(name=self._name, conn=connection, count=self._pool.qsize()))
def add_connection(self):
    """Add one connection to the pool (coroutine).

    Creates a new connection asynchronously, queues it and records it in
    the idle list.  Raises ``PoolError`` when the pool is already full.

    NOTE(review): unlike the synchronous twin, this coroutine does not
    return the new connection — confirm callers don't need it.
    """
    connection = yield self._create_connection()
    try:
        self._pool.put_nowait(connection)
        self._unused_pool.append(connection)
    except Full:
        recorder(
            'ERROR',
            '<{name}> connection pool is full'.format(name=self._name))
        raise PoolError
def on_rescue():
    # Rescue callback: ping every idle connection so dead ones are
    # detected and repaired via the `on_reconnect` future callback.
    recorder(
        'INFO',
        '<{name}> rescue connection start'.format(name=self._name))
    for conn in self._unused_pool:
        if conn:
            future = conn.ping()
            ioloop.IOLoop.current().add_future(future, on_reconnect)
    recorder(
        'INFO',
        '<{name}> rescue connection successful'.format(
            name=self._name))
    # NOTE(review): re-arms the rescue cycle.  The collapsed source makes
    # it ambiguous whether this line belongs inside this callback or to
    # the enclosing scope — confirm against the original layout.
    self.rescue()
def _create_hub_package(self, idls, language, hub_path, hub_suffix):
    """Create thrift stub code.

    :return:
      - `hub_module_name`: name of the generated stub package
      - `service_module_pathes`: python module path of the stub generated
        for every thrift service
      - `services`: the parsed service definitions
    """
    cwd = os.getcwd()

    # `-` is not allowed in a package name — it couldn't be imported.
    hub_module_name = 'fastweb_thrift_{hub_package}'.format(
        hub_package=hub_suffix)
    hub_path = os.path.join(hub_path, hub_module_name)
    hub_abspath = os.path.join(cwd, hub_path)

    # Create the stub directory; ignore it if it already exists.
    try:
        os.mkdir(hub_abspath)
        recorder('INFO', 'create dir {path}'.format(path=hub_abspath))
    except OSError:
        pass

    services = []
    service_module_pathes = []

    for idl in iglob(idls):
        # Validate + parse the idl file.
        program = self._parse_idl(idl)

        if len(program) > 1:
            recorder(
                'ERROR',
                'each thrift file can only hold one service, you define {num} service in {idl}'
                .format(num=len(program), idl=idl))
            raise FastwebException

        # BUG FIX: rstrip('.thrift') strips any trailing run of the
        # characters {., t, h, r, i, f} (e.g. 'chat.thrift' -> 'cha');
        # it does not remove a suffix.  Cut the extension explicitly.
        basename = idl.split('/')[-1]
        if basename.endswith('.thrift'):
            service_package_name = basename[:-len('.thrift')]
        else:
            service_package_name = basename

        # Generate stub code through the thrift compiler.
        command = 'thrift --gen {language} -out {out} {idl} '.format(
            language=language, idl=idl, out=hub_path)
        self.call_subprocess(command)

        for service in program:
            service_module_path = '{hub}.{package}.{module}'.format(
                hub=hub_module_name,
                package=service_package_name,
                module=service.name)
            service_module_pathes.append(service_module_path)
            services.append(service)

    return hub_module_name, service_module_pathes, services
def _rescue(self, thread):
    """Synchronously rescue connections.

    Currently a full sweep: ping every live idle connection.
    """
    recorder(
        'INFO',
        '{thread} <{name}> rescue connection start'.format(
            thread=thread, name=self._name))
    # filter(None, ...) skips slots holding no live connection object.
    for conn in filter(None, self._unused_pool):
        conn.ping()
    recorder(
        'INFO',
        '{thread} <{name}> rescue connection successful'.format(
            thread=thread, name=self._name))
def load_component(self, layout, backend='ini', **setting):
    """Load the component manager.

    May be called multiple times.

    :parameter:
      - `layout`: the calling layer — web, service, task
      - `backend`: configuration backend, currently only ini
      - `setting`: settings required by that backend
    """
    layout = layout.lower()
    configer = ConfigurationParser(backend, **setting)

    # Components that require a managed connection pool.
    recorder('INFO', 'load connection component start')
    with timing('ms', 10) as t:
        if layout in ['service']:
            fastweb.manager.SyncConnManager.setup(configer)
        elif layout in ['web']:
            # Web layer is asynchronous: drive the coroutine setup on the
            # current IOLoop.
            fastweb.manager.AsynConnManager.configer = configer
            ioloop.IOLoop.current().run_sync(fastweb.manager.AsynConnManager.setup)
    recorder('INFO', 'load connection component successful -- {time}'.format(time=t))

    # Components that do not need a managed connection pool.
    recorder('INFO', 'load component start')
    with timing('ms', 10) as t:
        fastweb.manager.Manager.setup(layout, configer)
    recorder('INFO', 'load component successful -- {time}'.format(time=t))

    self.component_configers.append(configer)
    return configer
def load_recorder(self, application_log_path=DEFAULT_APP_LOG_PATH,
                  system_log_path=DEFAULT_SYS_LOG_PATH,
                  logging_setting=None,
                  application_level='DEBUG', system_level='DEBUG',
                  logging_colormap=None):
    """Load the logging objects.

    Must be loaded first: everything else uses `recorder`.  Each server
    loads it once by default; users without special needs can skip it.

    :parameter:
      - `application_log_path`: application log path
      - `system_log_path`: system log path (defaults alongside the app log)
      - `logging_setting`: custom logging configuration; by default the
        config comes from fastweb.setting.default_logging and must define
        `application_recorder` and `system_recorder`
      - `application_level`: application log output level
      - `system_level`: system log output level
      - `logging_colormap`: log color mapping
    """
    if not logging_setting:
        from fastweb.setting.default_logging import DEFAULT_LOGGING_SETTING
        logging_setting = DEFAULT_LOGGING_SETTING
        # Point the default handlers at the requested file paths.
        logging_setting['handlers']['application_file_time_handler']['filename'] = application_log_path
        logging_setting['handlers']['system_file_size_handler']['filename'] = system_log_path

    if application_level:
        check_logging_level(application_level)
        logging_setting['loggers']['application_recorder']['level'] = application_level

    if system_level:
        check_logging_level(system_level)
        logging_setting['loggers']['system_recorder']['level'] = system_level

    setup_logging(logging_setting)
    self.system_recorder = getLogger('system_recorder')
    self.application_recorder = getLogger('application_recorder')

    if logging_colormap:
        set_record_color(logging_colormap)

    # Mark logging as configured so servers don't reload it.
    self.bRecorder = True
    recorder('INFO', 'load recorder configuration\n{conf}\n\n'
                     'application log: {app_path} [{app_level}]\n'
                     'system log: {sys_path} [{sys_level}]'.format(conf=json.dumps(logging_setting, indent=4),
                                                                   app_path=application_log_path,
                                                                   app_level=application_level,
                                                                   sys_path=system_log_path,
                                                                   sys_level=system_level))
def _check_setting(eattr, setting): """check backend setting :parameter: - `eattr`: essential attribute - `setting`: setting """ for attr in eattr: v = setting.get(attr) if not v: recorder('CRITICAL', 'configuration backend setting error! ' 'right options {options}'.format(options=eattr)) raise ParameterError
def create(self):
    """Create the connection pool asynchronously (coroutine)."""
    recorder(
        'DEBUG',
        'asynchronous connection pool create start <{name}>\n{setting}'.
        format(name=self._name, setting=json.dumps(self._setting, indent=4)))
    # Fill the pool one connection at a time up to the configured size.
    for _ in range(self._size):
        yield self.add_connection()
    # Arm the periodic rescue cycle once the pool is populated.
    self.rescue()
    recorder(
        'DEBUG',
        'asynchronous connection pool create successful <{name}>'.format(
            name=self._name))
def add_connection(self):
    """Synchronously add one connection to the pool.

    :return: the newly created connection
    :raises PoolError: when the pool is already full
    """
    connection = self._create_connection()
    # BUG FIX: the original acquired `_tlock` and skipped the release when
    # put_nowait raised Full, leaving the lock held forever and
    # deadlocking every later caller.  `with` releases it on all paths.
    with self._tlock:
        try:
            self._pool.put_nowait(connection)
            self._unused_pool.append(connection)
        except Full:
            recorder(
                'ERROR',
                '<{name}> connection pool is full'.format(name=self._name))
            raise PoolError
    return connection
def load_configuration(self, backend='ini', **setting):
    """Load the configuration file.

    :parameter:
      - `backend`: configuration backend, currently only ini
      - `setting`: settings required by that backend
    """
    parser = ConfigurationParser(backend, **setting)
    self.configer = parser
    self.configs = parser.configs
    recorder('INFO', 'load configuration\nbackend:\t{backend}\n'
                     'setting:\t{setting}\nconfiguration:\t{config}'.format(backend=backend,
                                                                            setting=setting,
                                                                            config=self.configs))
def load_errcode(self, errcode=None):
    """Load the system error codes.

    :parameter:
      - `errcode`: custom error codes; falls back to the framework default
    """
    if not errcode:
        # No custom table supplied — use the framework default.
        from fastweb.setting.default_errcode import ERRCODE
        errcode = ERRCODE
    self.errcode = errcode
    recorder('INFO', 'load errcode\n{errcode}'.format(errcode=json.dumps(self.errcode, indent=4)))
    return self.errcode
def connect(self):
    """Open a synchronous thrift connection and build the RPC client.

    :return: self (fluent interface)
    :raises ConfigurationError: when the thrift module cannot be loaded
    :raises RpcError: when the transport cannot be opened
    """
    if isinstance(self.thrift_module, six.string_types):
        module = load_module(self.thrift_module)
    else:
        self.recorder(
            'ERROR',
            '{obj} module [{module}] load error'.format(
                obj=self, module=self.thrift_module))
        raise ConfigurationError

    try:
        self.recorder('INFO', '{obj} connect start'.format(obj=self))
        self._transport = TSocket.TSocket(self.host, self.port)
        self._transport = TTransport.TFramedTransport(self._transport)
        protocol = TCompactProtocol.TCompactProtocol(self._transport)
        self._client = getattr(module, 'Client')(protocol)
        self._transport.open()
        self.recorder('INFO', '{obj} connect successful'.format(obj=self))
    except TTransport.TTransportException as e:
        # BUG FIX: the fallback branch referenced an undefined name `ex`,
        # so when self.recorder was unset a NameError masked the real
        # transport error.
        msg = '{obj} connect error ({e})'.format(obj=self, e=e)
        if self.recorder:
            self.recorder('ERROR', msg)
        else:
            recorder('ERROR', msg)
        raise RpcError

    return self
def start_web_server(port, handlers, **settings):
    """Start the web server."""
    # Make sure logging is configured before anything else logs.
    if not app.bRecorder:
        app.load_recorder()

    application = web.Application(handlers, **settings)
    http_server = httpserver.HTTPServer(application,
                                        xheaders=settings.get('xheaders'))
    http_server.listen(port)
    recorder('INFO', 'server start on {port}'.format(port=port))

    try:
        ioloop.IOLoop.current().start()
    except KeyboardInterrupt:
        # Ctrl-C: stop the loop and tear down framework threads.
        ioloop.IOLoop.current().stop()
        # NOTE(review): if FThread.stop is an ordinary instance method,
        # this passes 0 as `self` — confirm it is a class/static method.
        FThread.stop(0)
        recorder('INFO', 'server stop on {port}'.format(port=port))
def _connect(self):
    """Open the asynchronous thrift transport (coroutine).

    :raises RpcError: when the transport cannot be opened
    """
    try:
        yield self._transport.open()
    except TTransport.TTransportException as e:
        # BUG FIX: the fallback branch referenced an undefined name `ex`,
        # so when self.recorder was unset a NameError masked the real
        # transport error.
        msg = '{obj} connect error ({e})'.format(obj=self, e=e)
        if self.recorder:
            self.recorder('ERROR', msg)
        else:
            recorder('ERROR', msg)
        raise RpcError
def return_component(name, component):
    """Return a component to its manager.

    :parameter:
      - `name`: component name
      - `component`: the component instance being returned
    """
    pool = Manager._pools.get(name)
    if not pool:
        recorder(
            'CRITICAL',
            'please check configuration\n{conf}\n{name}'.format(
                conf=json.dumps(fastweb.loader.app.configs), name=name))
        raise ManagerError
    # Pooled components go back to their pool; singletons are just idled.
    if isinstance(pool, ConnectionPool):
        pool.return_connection(component)
    component.set_idle()
def setup(layout, configer):
    """Install the default components for the given layout."""
    recorder('DEBUG', 'default component manager setup start')

    from fastweb.setting.default_component import COMPONENTS

    # Pick the component set matching the calling layer.
    if layout in ['web']:
        from fastweb.setting.default_component import ASYN_COMPONENTS
        components = COMPONENTS + ASYN_COMPONENTS
    elif layout in ['service', 'task']:
        from fastweb.setting.default_component import SYNC_COMPONENTS
        components = COMPONENTS + SYNC_COMPONENTS
    else:
        recorder('ERROR', 'layout error {layout}'.format(layout=layout))
        raise ManagerError

    if configer:
        for (cpre, cls) in components:
            # BUG FIX: the original rebound the name `components` here,
            # shadowing the list being iterated; use a distinct name.
            sections = configer.get_components(cpre)
            for name, value in list(sections.items()):
                config = configer.configs[name]
                config['_name'] = value['object']
                com = cls(config)
                # NOTE(review): this sets a *class* attribute, so every
                # instance of `cls` shares the last name assigned;
                # `com.name = value['object']` looks intended — confirm.
                cls.name = value['object']
                Manager._pools[value['object']] = com
                Manager._classified_pools[cpre].append(com)

    recorder(
        'DEBUG',
        'manager setup successful\n{pool}'.format(pool=Manager._pools))
def _check_attrs(self):
    """Validate component attributes against the declared schemas.

    ``self.eattr`` maps essential attribute names to their types;
    ``self.oattr`` maps optional ones.  A missing essential attribute
    raises ``ConfigurationError``; present values are type-converted
    in place.
    """

    def _convert_attrs(att, t, va):
        """Convert one setting value to its declared type."""
        try:
            va = t(va)
            self.setting[att] = va
        except (ValueError, TypeError) as e:
            recorder('ERROR', "<{attr}> can't convert to {tp} ({e})".format(attr=att, tp=t, name=self.name, e=e))
            raise ConfigurationError

    def _add_attr(att, va):
        """Expose the setting as an instance attribute.

        BUG FIX: the original tested the outer loop variable `attr`
        (a closure capture) instead of the parameter `att`; since both
        branches performed the identical setattr, the dead condition is
        removed entirely.
        """
        setattr(self, att, va)

    # Essential attributes: must exist, then convert.
    for attr, tp in self.eattr.items():
        v = self._setting.get(attr)
        _add_attr(attr, v)
        if v:
            _convert_attrs(attr, tp, v)
        else:
            recorder('ERROR', '<{attr}> is essential attribute of <{obj}>'.format(attr=attr, obj=self.__class__))
            raise ConfigurationError

    # Optional attributes: convert only when provided.
    for attr, tp in self.oattr.items():
        v = self._setting.get(attr)
        _add_attr(attr, v)
        if v:
            _convert_attrs(attr, tp, v)
def get_component(name, owner):
    """Acquire a component through the manager.

    A ManagerError usually means a broken configuration or a programming
    error; it should be handled immediately rather than run past.

    :parameter:
      - `name`: component name
    """
    pool = Manager._pools.get(name)
    if not pool:
        recorder(
            'CRITICAL',
            'get component ({name}) error,please check configuration\n{conf}'
            .format(conf=json.dumps(fastweb.loader.app.configs), name=name))
        raise ManagerError
    # Pools hand out a connection; non-pool entries are the component.
    if isinstance(pool, ConnectionPool):
        component = pool.get_connection()
    else:
        component = pool
    component.set_used(owner)
    return component
def get_connection(self):
    """Fetch an idle connection, growing the pool when it runs dry.

    :return: a pooled connection
    """
    # BUG FIX: the original acquired `_tlock` and skipped the release when
    # get_nowait raised Empty, leaving the lock held forever.  The `with`
    # block releases it on every path.
    try:
        with self._tlock:
            connection = self._pool.get_nowait()
            self._unused_pool.remove(connection)
    except Empty:
        # Pool exhausted: create a fresh connection, then retry so the
        # bookkeeping above runs for the connection actually handed out.
        connection = self.add_connection()
        recorder(
            'WARN',
            '<{name}> connection pool is empty,create a new connection {conn}'
            .format(name=self._name, conn=connection))
        return self.get_connection()

    recorder(
        'DEBUG',
        '{obj} get connection {conn} {id}, left connections {count}'.
        format(obj=self, conn=connection, id=id(connection),
               count=self._pool.qsize()))
    return connection
def get_connection(self): """获取连接""" # TODO:连接池扩展机制问题 try: connection = self._pool.get(block=True) self._unused_pool.remove(connection) if self._pool.qsize() < 2: self.scale_connections() except Empty: recorder( 'CRITICAL', '<{name}> connection pool is empty,please use service to separate your database operation' .format(name=self._name)) raise PoolError recorder( 'DEBUG', '{obj} get connection {conn} {id}, left connections {count}'. format(obj=self, conn=connection, id=id(connection), count=self._pool.qsize())) return connection
def _create_config_file(service_module_pathes, handler_python_path, program):
    """Create the fastweb configuration file text.

    :parameter:
      - `service_module_pathes`: stub module path per thrift service
      - `handler_python_path`: module path of the generated handlers
      - `program`: parsed service definitions (parallel to the paths)
    :return: the generated ini-style configuration text
    """
    thrift_config_template = ''
    module_len = len(service_module_pathes)

    # BUG FIX: the original loop variable shadowed the parameter
    # `service_module_pathes`, and the separator test `i != module_len`
    # was always true, appending trailing blank lines after the last
    # section as well.
    for i, module_path in enumerate(service_module_pathes):
        if i == 0:
            thrift_config_template += '; fastthrift gen template\n\n'
        thrift_config_template += '[service:{service}]\n' \
                                  'port =\n' \
                                  'thrift_module = {hub}\n' \
                                  'handlers = {handler}\n' \
                                  'active = yes'.format(hub=module_path,
                                                        handler=handler_python_path,
                                                        service=program[i].name)
        # Blank-line separator between sections, but not after the last.
        if i != module_len - 1:
            thrift_config_template += '\n\n\n'

    recorder('INFO', 'fasthrift gen config')
    recorder('CRITICAL', '{config}'.format(config=thrift_config_template))
    return thrift_config_template
def start_task_worker():
    """Start the task workers.

    Each application runs in its own process; defining more applications
    than CPU cores is not recommended.
    """
    if not app.bRecorder:
        app.load_recorder()

    # Tamper with the command line to set each application's node name;
    # any -n argument from the real command line is superseded.
    tasks = Manager.get_classified_components('worker')
    for idx, task in enumerate(tasks):
        # BUG FIX: `argv = sys.argv` aliased (did not copy) the real argv,
        # so each iteration appended another `-n <name>` pair to the same
        # list — mutating sys.argv and handing stale names to later
        # workers.  Copy per worker instead.
        argv = list(sys.argv)
        argv.append('-n')
        argv.append('fastweb@celery@{app}@{idx}'.format(app=task.name,
                                                        idx=idx))
        p = Process(target=task.application.start, args=(argv, ))
        recorder(
            'INFO',
            'fastweb@celery@{app}@{idx} worker start'.format(app=task.name,
                                                             idx=idx))
        p.start()