def bench_apply(loops):
    """Benchmark ``ThreadPool.apply`` dispatch overhead.

    Issues ``loops * N`` blocking no-op applies through a single-thread
    pool and returns the elapsed wall-clock seconds.
    """
    pool = ThreadPool(1)
    total_calls = loops * N
    start = perf.perf_counter()
    for _ in xrange(total_calls):
        pool.apply(noop)
    pool.join()
    pool.kill()
    return perf.perf_counter() - start
class PortDao(object):
    """Data-access object for port configurations stored in a config file.

    The whole file is read once at construction and queries are served from
    an in-memory cache (``self._cached_port_conf_list``).  All file I/O goes
    through a single-thread pool so access to the file is serialized.
    """

    def __init__(self, config_file):
        # STRING() is a project helper; presumably coerces to str -- TODO confirm.
        self.conf_file = STRING(config_file)
        self._threadpool = ThreadPool(1)
        self._cached_port_conf_list = []
        self._load_conf()

    def _load_conf(self):
        # Populate the cache from disk; a missing file leaves it empty.
        if os.path.isfile(self.conf_file):
            config = self._threadpool.apply(
                Config.from_file,
                (self.conf_file, conf_file_schema))
            self._cached_port_conf_list = config.conf

    def get_port_conf_list(self):
        """Return a deep copy of every cached port config."""
        return copy.deepcopy(self._cached_port_conf_list)

    def get_port_conf(self, port_name):
        """Return a deep copy of the config whose "port_name" matches.

        Raises:
            StreamSwitchError: 404 if no such port exists.
        """
        for port_config in self._cached_port_conf_list:
            if port_config.get("port_name") == port_name:
                break
        else:
            # BUGFIX: the message used a bare '%' placeholder (ValueError at
            # format time) and referenced the nonexistent attribute
            # ``self.self_conf_file`` (AttributeError).
            raise StreamSwitchError(
                "Port (%s) Not Exist in config file(%s)"
                % (port_name, self.conf_file), 404)
        return copy.deepcopy(port_config)

    def update_port_conf(self, port_name, new_port_config):
        """Validate `new_port_config`, merge it into the cached entry for
        `port_name`, and persist the full list back to the config file.

        Raises:
            StreamSwitchError: 404 if no such port exists.
        """
        new_port_config = new_port_config_schema.validate(new_port_config)
        for port_config in self._cached_port_conf_list:
            if port_config.get("port_name") == port_name:
                break
        else:
            # BUGFIX: same broken format string / attribute as above.
            raise StreamSwitchError(
                "Port (%s) Not Exist in config file(%s)"
                % (port_name, self.conf_file), 404)
        port_config.update(new_port_config)
        # Write a snapshot so concurrent cache mutation can't corrupt the file.
        save_port_conf_list = copy.deepcopy(self._cached_port_conf_list)
        self._threadpool.apply(
            Config.to_file, (self.conf_file, save_port_conf_list))
class Middleware(object):
    """Central hub of the middlewared daemon.

    Owns the service/schema registries, the websocket client table, the job
    queue, the hook registry, and the gevent servers started by ``run()``.
    """

    def __init__(self):
        self.logger_name = logger.Logger('middlewared')
        self.logger = self.logger_name.getLogger()
        self.rollbar = logger.Rollbar()
        self.__jobs = JobsQueue(self)
        self.__schemas = {}
        self.__services = {}
        self.__wsclients = {}
        self.__hooks = defaultdict(list)
        self.__server_threads = []
        self.__init_services()
        self.__plugins_load()
        # Native-thread pool used by threaded() so blocking work does not
        # stall the gevent event loop.
        self.__threadpool = ThreadPool(5)

    def __init_services(self):
        # Core service is always present, independent of the plugins dir.
        from middlewared.service import CoreService
        self.add_service(CoreService(self))

    def __plugins_load(self):
        """Import every .py file in the plugins dir and register its
        Service subclasses; then resolve all schema references."""
        from middlewared.service import Service, CRUDService, ConfigService
        plugins_dir = os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            'plugins',
        )
        self.logger.debug('Loading plugins from {0}'.format(plugins_dir))
        if not os.path.exists(plugins_dir):
            raise ValueError('plugins dir not found')
        for f in os.listdir(plugins_dir):
            if not f.endswith('.py'):
                continue
            f = f[:-3]
            fp, pathname, description = imp.find_module(f, [plugins_dir])
            try:
                mod = imp.load_module(f, fp, pathname, description)
            finally:
                if fp:
                    fp.close()
            for attr in dir(mod):
                attr = getattr(mod, attr)
                if not inspect.isclass(attr):
                    continue
                # Skip the base classes themselves; only concrete
                # subclasses are real services.
                if attr in (Service, CRUDService, ConfigService):
                    continue
                if issubclass(attr, Service):
                    self.add_service(attr(self))
            if hasattr(mod, 'setup'):
                mod.setup(self)
        # Now that all plugins have been loaded we can resolve all method
        # params to make sure every schema is patched and references match.
        from middlewared.schema import resolver  # Lazy import so namespace match
        to_resolve = []
        for service in list(self.__services.values()):
            for attr in dir(service):
                to_resolve.append(getattr(service, attr))
        while len(to_resolve) > 0:
            # BUGFIX: reset the progress counter each pass.  Previously it
            # was initialized once before the loop, so a stall after the
            # first successful pass could never trigger the error and the
            # loop would spin forever.
            resolved = 0
            for method in list(to_resolve):
                try:
                    resolver(self, method)
                except ValueError:
                    # Unresolved reference; retry on the next pass.
                    pass
                else:
                    to_resolve.remove(method)
                    resolved += 1
            if resolved == 0:
                raise ValueError("Not all could be resolved")
        self.logger.debug('All plugins loaded')

    def register_wsclient(self, client):
        self.__wsclients[client.sessionid] = client

    def unregister_wsclient(self, client):
        self.__wsclients.pop(client.sessionid)

    def register_hook(self, name, method, sync=True):
        """
        Register a hook under `name`.

        The given `method` will be called whenever using call_hook.

        Args:
            name(str): name of the hook, e.g. service.hook_name
            method(callable): method to be called
            sync(bool): whether the method should be called in a sync way
        """
        self.__hooks[name].append({
            'method': method,
            'sync': sync,
        })

    def call_hook(self, name, *args, **kwargs):
        """
        Call all hooks registered under `name` passing *args and **kwargs.

        Args:
            name(str): name of the hook, e.g. service.hook_name
        """
        for hook in self.__hooks[name]:
            try:
                if hook['sync']:
                    hook['method'](*args, **kwargs)
                else:
                    gevent.spawn(hook['method'], *args, **kwargs)
            # BUGFIX: was a bare except, which also swallows
            # SystemExit/KeyboardInterrupt.
            except Exception:
                self.logger.error('Failed to run hook {}:{}(*{}, **{})'.format(
                    name, hook['method'], args, kwargs), exc_info=True)

    def add_service(self, service):
        self.__services[service._config.namespace] = service

    def get_service(self, name):
        return self.__services[name]

    def get_services(self):
        return self.__services

    def add_schema(self, schema):
        if schema.name in self.__schemas:
            raise ValueError('Schema "{0}" is already registered'.format(
                schema.name))
        self.__schemas[schema.name] = schema

    def get_schema(self, name):
        return self.__schemas.get(name)

    def get_jobs(self):
        return self.__jobs

    def threaded(self, method, *args, **kwargs):
        """
        Runs method in a native thread using gevent.ThreadPool.

        This prevents a CPU intensive or non-greenlet friendly method
        to block the event loop indefinitely.
        """
        return self.__threadpool.apply(method, args, kwargs)

    def _call(self, name, methodobj, params, app=None):
        """Invoke `methodobj`; returns a Job instead of the result when the
        method is decorated as a @job."""
        args = []
        if hasattr(methodobj, '_pass_app'):
            args.append(app)
        # If the method is marked as a @job we need to create a new
        # entry to keep track of its state.
        job_options = getattr(methodobj, '_job', None)
        if job_options:
            # Create a job instance with required args
            job = Job(self, name, methodobj, args, job_options)
            # Add the job to the queue.
            # At this point an `id` is assigned to the job.
            self.__jobs.add(job)
        else:
            job = None
        args.extend(params)
        if job:
            return job
        else:
            return methodobj(*args)

    def call_method(self, app, message):
        """Call method from websocket"""
        params = message.get('params') or []
        service, method_name = message['method'].rsplit('.', 1)
        methodobj = getattr(self.get_service(service), method_name)
        if not app.authenticated and not hasattr(methodobj, '_no_auth_required'):
            app.send_error(message, 'Not authenticated')
            return
        return self._call(message['method'], methodobj, params, app=app)

    def call(self, name, *params):
        service, method = name.rsplit('.', 1)
        methodobj = getattr(self.get_service(service), method)
        return self._call(name, methodobj, params)

    def send_event(self, name, event_type, **kwargs):
        assert event_type in ('ADDED', 'CHANGED', 'REMOVED')
        for sessionid, wsclient in self.__wsclients.items():
            try:
                wsclient.send_event(name, event_type, **kwargs)
            # BUGFIX: was a bare except; keep best-effort semantics but do
            # not trap SystemExit/KeyboardInterrupt.
            except Exception:
                self.logger.warn('Failed to send event {} to {}'.format(
                    name, sessionid), exc_info=True)

    def pdb(self):
        import pdb
        pdb.set_trace()

    def green_monitor(self):
        """
        Start point method for setting up greenlet trace for finding
        out blocked green threads.
        """
        self._green_hub = gevent.hub.get_hub()
        self._green_active = None
        self._green_counter = 0
        greenlet.settrace(self._green_callback)
        # Use the un-monkeypatched thread primitives so the monitor runs in
        # a real OS thread, outside the gevent hub.
        monkey.get_original('_thread', 'start_new_thread')(self._green_monitor_thread, ())
        self._green_main_threadid = monkey.get_original(
            '_thread', 'get_ident')()

    def _green_callback(self, event, args):
        """
        This method is called for several events in the greenlet.
        We use this to keep track of how many switches have happened.
        """
        if event == 'switch':
            origin, target = args
            self._green_active = target
            self._green_counter += 1

    def _green_monitor_thread(self):
        sleep = monkey.get_original('time', 'sleep')
        while True:
            # Check every 2 seconds for blocked green threads.
            # This could be a knob in the future.
            sleep(2)
            # If there have been no greenlet switches since last time we
            # checked it means we are likely stuck in the same green thread
            # for more time than we would like to!
            if self._green_counter == 0:
                active = self._green_active
                # greenlet hub is OK since its the thread waiting for IO.
                if active not in (None, self._green_hub):
                    frame = sys._current_frames()[self._green_main_threadid]
                    stack = traceback.format_stack(frame)
                    err_log = ["Green thread seems blocked:\n"] + stack
                    self.logger.warn(''.join(err_log))
            # A race condition may happen here but its fairly rare.
            self._green_counter = 0

    def run(self):
        """Start all servers and block until killed."""
        self.green_monitor()
        gevent.signal(signal.SIGTERM, self.kill)
        gevent.signal(signal.SIGUSR1, self.pdb)
        Application.middleware = self
        wsserver = WebSocketServer(('127.0.0.1', 6000), Resource(OrderedDict([
            ('/websocket', Application),
        ])))
        restful_api = RESTfulAPI(self)
        apidocs_app.middleware = self
        apidocsserver = WSGIServer(('127.0.0.1', 8001), apidocs_app)
        restserver = WSGIServer(('127.0.0.1', 8002), restful_api.get_app())
        fileserver = WSGIServer(('127.0.0.1', 8003), FileApplication(self))
        self.__server_threads = [
            gevent.spawn(wsserver.serve_forever),
            gevent.spawn(apidocsserver.serve_forever),
            gevent.spawn(restserver.serve_forever),
            gevent.spawn(fileserver.serve_forever),
            gevent.spawn(self.__jobs.run),
        ]
        self.logger.debug('Accepting connections')
        gevent.joinall(self.__server_threads)

    def kill(self):
        self.logger.info('Killall server threads')
        gevent.killall(self.__server_threads)
        sys.exit(0)
#!/usr/bin/python
# -*- coding: utf8 -*-

# apply(func, args=None, kwds=None)
#
# Rough equivalent of the apply() builtin function, blocking until
# the result is ready and returning it.
#
# The func will usually, but not always, be run in a way that
# allows the current greenlet to switch out (for example, in a new
# greenlet or thread, depending on implementation).  But if the current
# greenlet or thread is already one that was spawned by this pool, the
# pool may choose to immediately run the func synchronously.
from gevent.threadpool import ThreadPool


def echo(value):
    print(value)


pool = ThreadPool(10)
for idx in range(50):
    # Each apply() blocks until the worker thread has run echo(idx),
    # so the numbers are printed in order.
    pool.apply(echo, args=(idx,))
class BaseReader(c_void_p):
    """Handle to a physical reader device, carried as a C void pointer.

    The object can be created even if the underlying port cannot be
    opened; it will try to fix itself afterwards (see ``reopen``).
    Use ``is_open()`` to check the current status.
    """

    def __str__(self):
        # Show the native handle value in hex; -1 marks a null handle.
        return '<%X>' % (self.value if self.value else -1)

    __repr__ = __str__

    def __init__(self, path=None, baud=None, parity=None, impl=None,
                 explicit_error=False):
        '''
        Reader object can be created even if required port cannot be opened.
        It will try to fix itself afterwards. To check current Reader status
        use 'is_open' method.
        '''
        self.pool = ThreadPool(1)
        self._is_open = False
        if not path:
            # No explicit port: fall back to the first configured reader.
            kw = config.reader_path[0]
            path, baud, parity, impl = (kw['path'],
                                        kw.get('baud', DEFAULT_BAUD),
                                        kw.get('parity', DEFAULT_PARITY),
                                        kw.get('impl', config.default_impl))
        self.path = path
        # BUGFIX/idiom: compare to None with identity, not equality.
        self.baud = baud if baud is not None else DEFAULT_BAUD
        self.parity = parity if parity is not None else DEFAULT_PARITY
        self.impl = impl if impl is not None else config.default_impl
        try:
            self.open()
        except ReaderError:
            if explicit_error:
                raise
            # Best effort: leave the reader closed and let reopen() fix it.
            print('Cannot open Reader on {0}. Will try to fix afterwards...'.format(self.path))

    def is_open(self):
        return self._is_open

    @staticmethod
    def execute_with_context(context, callback, args, kwds):
        # Run callback inside the given context manager (usually a reader,
        # whose __exit__ captures any exception into exc_info).
        with context:
            return callback(*args, **kwds)

    def apply(self, callback, args=None, kwds=None):
        """Run `callback(*args, **kwds)` on the reader's worker thread,
        wrapped in this reader's context manager."""
        if args is None:
            args = ()
        if kwds is None:
            kwds = {}
        return self.pool.apply(self.execute_with_context,
                               args=(self, callback, args, kwds))

    def __enter__(self):
        self.exc_info = (None, None, None)
        return self

    def __exit__(self, type, value, traceback):
        # Capture the exception (if any) instead of propagating it; callers
        # inspect self.exc_info afterwards.
        self.exc_info = (type, value, traceback)
        return True

    def open(self):
        'Opens reader on a given port and raises ReaderError otherwise.'
        if DEBUG:
            print('Reader.open', (self.path, self.baud, self.parity, self.impl))
        if not self._is_open:
            # reader_open returns a truthy value on failure -- TODO confirm.
            if reader_open(self.path, self.baud, self.parity, self.impl, self):
                raise ReaderError()
            self._is_open = True

    def close(self):
        'Closes current reader connection if it was open before.'
        if self._is_open:
            reader_close(self)
            self._is_open = False

    def reopen(self):
        print('reopen')
        self.close()
        self.open()