def _start_workers(self):
    if self.is_multiprocess:
        kwargs = {}
        if self.request_queue is None:
            kwargs['socket'] = self._socket
        else:
            # pass the shared queues as picklable state tuples;
            # the subprocess re-assembles them with init_queue
            kwargs['request_queue'] = (self.request_queue,
                                       self.request_queue.qsize,
                                       self.request_queue.item_pushed,
                                       self.request_queue.item_popped)
            kwargs['done_queue'] = (self.done_queue,
                                    self.done_queue.qsize,
                                    self.done_queue.item_pushed,
                                    self.done_queue.item_popped)
        kwargs['sentinel'] = self.sentinel

        # check if running in a replicated environment
        from porcupine.db import _db
        rep_mgr = _db.get_replication_manager()
        if rep_mgr is not None:
            kwargs['local'] = rep_mgr.local_site
            kwargs['master'] = rep_mgr.master

        # start worker processes
        for i in range(self.worker_processes):
            pname = '%s server process %d' % (self.name, i + 1)
            pconn, cconn = multiprocessing.Pipe()
            p = SubProcess(pname, self.worker_threads, self.thread_class,
                           cconn, **kwargs)
            p.start()
            self.pipes.append(pconn)
            self.worker_pool.append(p)

        if self.request_queue is not None:
            # start task dispatcher thread(s)
            for i in range(8):
                t = Thread(target=self._task_dispatch,
                           name='%s task dispatcher %d' % (self.name, i + 1))
                t.start()
                self.task_dispatchers.append(t)
    else:
        for i in range(self.worker_threads):
            tname = '%s server thread %d' % (self.name, i + 1)
            t = self.thread_class(target=self._thread_loop, name=tname)
            t.start()
            self.worker_pool.append(t)
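# Hedged sketch (an assumption, not the framework's actual code): the queue
# state tuples built above are presumably re-assembled on the child side by
# the init_queue helper called in run() below. Its real implementation lives
# elsewhere in the code base; the signature here is inferred from how the
# tuples are constructed.
def _example_init_queue(q, qsize, item_pushed, item_popped):
    # re-attach the shared primitives that do not survive pickling as-is
    q.qsize = qsize
    q.item_pushed = item_pushed
    q.item_popped = item_popped
    return q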
def _manage(self):
    while True:
        command = self.connection.recv()
        if command == self.sentinel:
            break
        # commands arrive either bare or as a (command, params) pair
        params = None
        if isinstance(command, (list, tuple)):
            command, params = command
        if command == 'DB_LOCK':
            self.lock_db()
        elif command == 'DB_UNLOCK':
            self.unlock_db()
        elif command == 'DB_OPEN':
            self.add_runtime_service('db')
        elif command == 'DB_CLOSE':
            self.remove_runtime_service('db')
        elif command == 'NEW_MASTER':
            from porcupine.db import _db
            _db.get_replication_manager().master = params
        elif command == 'RELOAD_PACKAGE':
            try:
                mod = sys.modules[params]
                misc.reload_module_tree(mod)
            except KeyError:
                pass
        elif command == 'RELOAD_MODULE':
            try:
                mod = sys.modules[params]
                misc.reload_module(mod)
            except KeyError:
                pass
        elif command == 'ADD_PUBDIR':
            from porcupine.config import pubdirs
            dir_name, directory = params
            pubdirs.dirs[dir_name] = directory
        elif command == 'REMOVE_PUBDIR':
            from porcupine.config import pubdirs
            try:
                del pubdirs.dirs[params]
            except KeyError:
                pass
        # acknowledge the command
        self.connection.send(True)
    self.connection.send(None)
    self.is_alive = False
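# Hedged usage sketch: the parent end of the Pipe created in _start_workers
# is assumed to drive _manage by sending a command (or a (command, params)
# tuple) and reading back the True acknowledgement; sending the sentinel
# ends the loop, which replies None and clears is_alive. The helper and the
# module name below are illustrative only.
def _example_drive_manage(pconn, sentinel):
    pconn.send(('RELOAD_MODULE', 'porcupine.config'))  # hypothetical target
    assert pconn.recv() is True   # command acknowledged
    pconn.send(sentinel)          # ask the subprocess loop to exit
    assert pconn.recv() is None   # final reply sent after the loop breaks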
def handle_request(self, rh, raw_request=None):
    if raw_request is None:
        raw_request = loads(rh.input_buffer)
    response = context.response = HttpResponse()
    request = context.request = HttpRequest(raw_request)
    item = None
    registration = None

    # get the session id
    session_id = None
    cookies_enabled = True
    path_info = request.serverVariables['PATH_INFO']

    # detect if the session id is injected in the URL
    session_match = re.match(self._sid_pattern, path_info)
    if session_match:
        path_info = path_info.replace(session_match.group(), '', 1) or '/'
        request.serverVariables['PATH_INFO'] = path_info
        session_id = session_match.group(1)
        cookies_enabled = False
    # otherwise check the session cookie
    elif '_sid' in request.cookies:
        session_id = request.cookies['_sid'].value
        cookies_enabled = True

    try:
        try:
            path_tokens = path_info.split('/')
            if len(path_tokens) > 1:
                dir_name = path_tokens[1]
                web_app = pubdirs.dirs.get(dir_name, None)
            else:
                web_app = None

            if web_app is None:
                # create a snapshot transaction for reads
                context._trans = _db.get_transaction(snapshot=True)
                # try to get the requested object from the db
                item = _db.get_item(path_info)
                if item is not None and not item._isDeleted:
                    self._fetch_session(session_id, cookies_enabled)
                    self._dispatch_method(item)
                else:
                    raise exceptions.NotFound(
                        'The resource "%s" does not exist' % path_info)
            else:
                # request to a published directory
                self._fetch_session(session_id, cookies_enabled)
                # remove the blank entry & app name to get the requested path
                dir_path = '/'.join(path_tokens[2:])
                registration = web_app.get_registration(
                    dir_path,
                    request.serverVariables['REQUEST_METHOD'],
                    request.serverVariables['HTTP_USER_AGENT'],
                    request.get_lang())
                if not registration:
                    raise exceptions.NotFound(
                        'The resource "%s" does not exist' % path_info)

                # apply pre-processing filters
                for flt, flt_args in registration.filters:
                    if flt.type == 'pre':
                        flt.apply(context, item, registration, **flt_args)

                rtype = registration.type
                if rtype == 1:
                    # psp page
                    # create a snapshot transaction for reads
                    context._trans = _db.get_transaction(snapshot=True)
                    ServerPage.execute(context, registration.context)
                elif rtype == 0:
                    # static file
                    f_name = registration.context
                    if_none_match = request.HTTP_IF_NONE_MATCH
                    if if_none_match is not None and if_none_match == \
                            '"%s"' % misc.generate_file_etag(f_name):
                        response._code = 304
                    else:
                        response.load_from_file(f_name)
                        if not any(f[0].mutates_output
                                   for f in registration.filters
                                   if f[0].type == 'post'):
                            response.set_header(
                                'ETag',
                                '"%s"' % misc.generate_file_etag(f_name))
                    if registration.encoding:
                        response.charset = registration.encoding
        except exceptions.ResponseEnd:
            pass

        if registration is not None and response._code == 200:
            # do we have a caching directive?
            if registration.max_age:
                response.set_expiration(registration.max_age)
            # apply post-processing filters
            for flt, flt_args in registration.filters:
                if flt.type == 'post':
                    flt.apply(context, item, registration, **flt_args)
    except exceptions.InternalRedirect as e:
        redirect_parts = e.args[0].split('?')
        raw_request['env']['PATH_INFO'] = redirect_parts[0]
        if len(redirect_parts) == 2:
            raw_request['env']['QUERY_STRING'] = redirect_parts[1]
        else:
            raw_request['env']['QUERY_STRING'] = ''
        self.handle_request(rh, raw_request)
    except exceptions.DBReadOnly:
        context._teardown()
        # proxy the request to the master site
        rep_mgr = _db.get_replication_manager()
        if rep_mgr.master is not None:
            master_addr = rep_mgr.master.req_address
            master_request = BaseRequest(rh.input_buffer)
            try:
                master_response = master_request.get_response(master_addr)
            except Exception:
                pass
            else:
                rh.write_buffer(master_response)
                return
        e = exceptions.InternalServerError('Database is in read-only mode')
        e.output_traceback = False
        e.emit(context, item)
    except exceptions.PorcupineException as e:
        e.emit(context, item)
    except Exception:
        e = exceptions.InternalServerError()
        e.emit(context, item)
    context._teardown()
    settings['requestinterfaces'][request.interface](rh, response)
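# Hedged illustration of the conditional-GET branch above: the real validator
# comes from misc.generate_file_etag, whose algorithm is not shown in this
# module, so the size/mtime scheme below is an assumption for demonstration.
import os

def _example_file_etag(path):
    # derive a simple validator from file size and modification time
    st = os.stat(path)
    return '%x-%x' % (st.st_size, int(st.st_mtime))

def _example_is_not_modified(if_none_match, path):
    # mirrors the quoted-ETag comparison used for the 304 short-circuit
    return if_none_match == '"%s"' % _example_file_etag(path)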
def run(self):
    # start runtime services
    BaseService.start(self)

    # set the initial site master
    if self.master is not None:
        from porcupine.db import _db
        rep_mgr = _db.get_replication_manager()
        rep_mgr.master = self.master
        rep_mgr.local_site = self.local

    # start server
    if self.socket is not None:
        socket_map = {}
        self.request_queue = queue.Queue(self.worker_threads * 2)
        self.done_queue = None
        server = Dispatcher(self.request_queue, None, socket_map)
        # activate the server socket
        server.set_socket(self.socket, socket_map)
        # start the asyncore loop
        asyn_thread = Thread(target=self._async_loop,
                             args=(socket_map, ),
                             name='%s asyncore thread' % self.name)
        asyn_thread.start()
    else:
        # create a queue for inactive RequestHandlerProxy objects,
        # i.e. those already served
        self.rhproxy_queue = queue.Queue(0)
        # patch the shared queues
        self.request_queue = init_queue(*self.request_queue)
        self.done_queue = init_queue(*self.done_queue)

    thread_pool = []
    for i in range(self.worker_threads):
        tname = '%s thread %d' % (self.name, i + 1)
        t = self.thread_class(target=self._thread_loop, name=tname)
        thread_pool.append(t)

    # start management thread
    mt = Thread(target=self._manage,
                name='%s management thread' % self.name)
    thread_pool.append(mt)

    # start threads
    for t in thread_pool:
        t.start()

    try:
        while self.is_alive:
            time.sleep(8.0)
    except (KeyboardInterrupt, IOError):
        pass

    if self.socket is not None:
        # wake up the worker threads with one sentinel each
        for i in range(self.worker_threads):
            self.request_queue.put(self.sentinel)

    # join threads
    for t in thread_pool:
        t.join()

    if self.socket is not None:
        # join asyncore thread
        asyncore.close_all(socket_map)
        asyn_thread.join()

    # shutdown runtime services
    BaseService.shutdown(self)
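# Hedged sketch of the shutdown handshake used above: one sentinel per
# worker is queued so that every blocked _thread_loop wakes up and exits
# before the join. The names below are illustrative, not the service's
# actual API.
def _example_shutdown(request_queue, workers, sentinel):
    for _ in workers:
        request_queue.put(sentinel)   # one wake-up per blocked worker
    for t in workers:
        t.join()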
def execute_command(self, cmd, request):
    try:
        # DB maintenance commands
        if cmd == 'DB_BACKUP':
            output_file = request.data
            if not os.path.isdir(os.path.dirname(output_file)):
                raise IOError
            services.lock_db()
            try:
                _db.backup(output_file)
            finally:
                services.unlock_db()
            return (0, 'Database backup completed successfully.')
        elif cmd == 'DB_RESTORE':
            backup_set = request.data
            if not os.path.exists(backup_set):
                raise IOError
            services.lock_db()
            services.close_db()
            try:
                _db.restore(backup_set)
            finally:
                services.open_db()
                services.unlock_db()
            return (0, 'Database restore completed successfully.')
        elif cmd == 'DB_SHRINK':
            log_count = _db.shrink()
            if log_count:
                return (0, 'Successfully removed %d log files.' % log_count)
            else:
                return (0, 'No log files removed.')

        # package management commands
        elif cmd == 'PACKAGE':
            ini_file = request.data
            my_pkg = None
            try:
                my_pkg = Package(ini_file=ini_file)
                my_pkg.create()
            finally:
                if my_pkg is not None:
                    my_pkg.close()
            return (0, 'The package "%s-%s.ppf" was created successfully.'
                       % (my_pkg.name, my_pkg.version))
        elif cmd == 'INSTALL':
            # some scripts might require a security context
            from porcupine import context
            ppf_file = request.data
            my_pkg = None
            try:
                my_pkg = Package(package_file=ppf_file)
                # install as system
                context.user = _db.get_item('system')
                my_pkg.install()
            finally:
                if my_pkg is not None:
                    my_pkg.close()
                context.user = None
            return (0, 'The package "%s" was installed successfully.'
                       % ppf_file)
        elif cmd == 'UNINSTALL':
            # some scripts might require a security context
            from porcupine import context
            ppf_file = request.data
            my_pkg = None
            try:
                my_pkg = Package(package_file=ppf_file)
                # uninstall as system
                context.user = _db.get_item('system')
                my_pkg.uninstall()
            finally:
                if my_pkg is not None:
                    my_pkg.close()
                context.user = None
            return (0, 'The package "%s" was uninstalled successfully.'
                       % ppf_file)

        # replication commands
        elif cmd == 'SITE_INFO':
            rep_mgr = _db.get_replication_manager()
            if rep_mgr is not None:
                site_list = list(rep_mgr.get_site_list().values())
                site_list.append(rep_mgr.local_site.address + (1, ))
                info = ['{0:25}{1:10}{2:6}'.format('SITE', 'STATUS',
                                                   'MASTER'),
                        '-' * 41]
                for site in site_list:
                    site_address = site[:2]
                    if site[2] == 1:
                        s = 'ONLINE'
                    else:
                        s = 'OFFLINE'
                    if rep_mgr.master and \
                            rep_mgr.master.address == site_address:
                        m = 'X'
                    else:
                        m = ''
                    # stringify the address tuple so the width spec applies
                    info.append('{0:25}{1:10}{2:6}'.format(
                        str(site_address), s, m))
                info.append('')
                info.append('Total sites: %d' % len(site_list))
                return (0, '\n'.join(info))
            else:
                raise NotImplementedError
        elif cmd == 'REP_JOIN_SITE':
            rep_mgr = _db.get_replication_manager()
            if rep_mgr is not None:
                site = request.data
                # values() is a view in Python 3; make it a list
                # before concatenating
                site_list = list(rep_mgr.sites.values()) + \
                    [rep_mgr.local_site]
                rep_mgr.broadcast(MgtMessage('REP_ADD_REMOTE_SITE', site))
                rep_mgr.add_remote_site(site)
                return (0, [rep_mgr.master, site_list])
            else:
                raise NotImplementedError
        elif cmd == 'REP_ADD_REMOTE_SITE':
            rep_mgr = _db.get_replication_manager()
            if rep_mgr is not None:
                site = request.data
                rep_mgr.add_remote_site(site)
                return (0, None)
            else:
                raise NotImplementedError
        elif cmd == 'REP_NEW_MASTER':
            rep_mgr = _db.get_replication_manager()
            if rep_mgr is not None:
                master = request.data
                rep_mgr.master = master
                services.notify(('NEW_MASTER', master))
                return (0, None)
            else:
                raise NotImplementedError

        # other
        elif cmd == 'RELOAD':
            mod = misc.get_rto_by_name(request.data)
            misc.reload_module_tree(mod)
            services.notify(('RELOAD_PACKAGE', request.data))
            return (0, 'Reloaded module tree "%s"' % request.data)

        # unknown command
        else:
            logger.warning(
                'Management service received unknown command: %s' % cmd)
            return (-1, 'Unknown command.')
    except IOError:
        return (-1, 'Invalid file path.')
    except NotImplementedError:
        return (-1, 'Unsupported command.')
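# Hedged usage sketch: every branch above returns a (code, message) pair in
# which 0 signals success and -1 failure. A hypothetical caller might unpack
# replies like this; the helper is illustrative, not part of the service.
def _example_handle_reply(reply):
    code, message = reply
    if code != 0:
        raise RuntimeError(message)
    return message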