class WebServer(object):
    """Full web server: serves both archives and retrieves, wired to every
    data reader and data writer node in the cluster.

    Construction opens database connections, creates all zeromq
    clients/servers (as gevent greenlets), and builds the WSGI
    application; nothing runs until start() is called.
    """

    def __init__(self):
        self._log = logging.getLogger("WebServer")
        authenticator = SqlAuthenticator()

        # direct database connections: central database plus this node's
        # local database
        self._central_connection = get_central_connection()
        self._cluster_row = get_cluster_row(self._central_connection)
        self._node_local_connection = get_node_local_connection()
        self._unified_id_factory = UnifiedIDFactory(
            self._central_connection,
            _get_shard_id(self._central_connection, self._cluster_row.id)
        )
        # matches replies coming back over the PULL server to the
        # greenlets waiting for them
        self._deliverator = Deliverator()

        self._zeromq_context = zmq.Context()

        self._pull_server = GreenletPULLServer(
            self._zeromq_context,
            _web_server_pipeline_address,
            self._deliverator
        )
        self._pull_server.link_exception(self._unhandled_greenlet_exception)

        # message sent to data readers and writers telling them the server
        # is (re)starting, thereby invalidating any archives or retrieves
        # that are in progress for this node
        timestamp = create_timestamp()
        start_message = {
            "message-type"      : "web-server-start",
            "priority"          : create_priority(),
            "unified-id"        : self._unified_id_factory.next(),
            "timestamp-repr"    : repr(timestamp),
            "source-node-name"  : _local_node_name,
        }

        # one resilient client per data writer node; each announces the
        # (re)start on connect
        self._data_writer_clients = list()
        for node_name, address in zip(_node_names, _data_writer_addresses):
            resilient_client = GreenletResilientClient(
                self._zeromq_context,
                node_name,
                address,
                _client_tag,
                _web_server_pipeline_address,
                self._deliverator,
                connect_messages=[start_message, ]
            )
            resilient_client.link_exception(self._unhandled_greenlet_exception)
            self._data_writer_clients.append(resilient_client)

        # one resilient client per data reader node, each wrapped in a
        # DataReader facade
        self._data_reader_clients = list()
        self._data_readers = list()
        for node_name, address in zip(_node_names, _data_reader_addresses):
            resilient_client = GreenletResilientClient(
                self._zeromq_context,
                node_name,
                address,
                _client_tag,
                _web_server_pipeline_address,
                self._deliverator,
                connect_messages=[start_message, ]
            )
            resilient_client.link_exception(self._unhandled_greenlet_exception)
            self._data_reader_clients.append(resilient_client)
            data_reader = DataReader(node_name, resilient_client)
            self._data_readers.append(data_reader)

        # space accounting: DEALER for request/reply, PUSH for
        # fire-and-forget accounting detail
        self._space_accounting_dealer_client = GreenletDealerClient(
            self._zeromq_context,
            _local_node_name,
            _space_accounting_server_address
        )
        self._space_accounting_dealer_client.link_exception(
            self._unhandled_greenlet_exception
        )

        push_client = GreenletPUSHClient(
            self._zeromq_context,
            _local_node_name,
            _space_accounting_pipeline_address,
        )

        self._accounting_client = SpaceAccountingClient(
            _local_node_name,
            self._space_accounting_dealer_client,
            push_client
        )

        self._event_push_client = EventPushClient(
            self._zeromq_context,
            "web-server"
        )

        self._watcher = Watcher(
            _stats,
            self._data_reader_clients,
            self._data_writer_clients,
            self._event_push_client
        )

        id_translator_keys_path = os.path.join(
            _repository_path, "id_translator_keys.pkl"
        )
        # pickle data is binary: open in "rb" (text mode breaks on
        # Python 3 and on Windows)
        with open(id_translator_keys_path, "rb") as input_file:
            id_translator_keys = pickle.load(input_file)

        self._id_translator = InternalIDTranslator(
            id_translator_keys["key"],
            id_translator_keys["hmac_key"],
            id_translator_keys["iv_key"],
            id_translator_keys["hmac_size"]
        )

        self.application = Application(
            self._central_connection,
            self._node_local_connection,
            self._cluster_row,
            self._unified_id_factory,
            self._id_translator,
            self._data_writer_clients,
            self._data_readers,
            authenticator,
            self._accounting_client,
            self._event_push_client,
            _stats
        )
        self.wsgi_server = WSGIServer(
            (_web_server_host, _web_server_port),
            application=self.application,
            backlog=_wsgi_backlog
        )

    def start(self):
        """Start all greenlets, then the WSGI server."""
        self._space_accounting_dealer_client.start()
        self._pull_server.start()
        self._watcher.start()
        for client in self._data_writer_clients:
            client.start()
        for client in self._data_reader_clients:
            client.start()
        self.wsgi_server.start()

    def stop(self):
        """Stop the WSGI server, kill and join all greenlets, then close
        zeromq and the database connections."""
        self._log.info("stopping wsgi web server")
        self.wsgi_server.stop()
        self._accounting_client.close()
        self._log.debug("killing greenlets")
        self._space_accounting_dealer_client.kill()
        self._pull_server.kill()
        self._watcher.kill()
        for client in self._data_writer_clients:
            client.kill()
        for client in self._data_reader_clients:
            client.kill()
        self._log.debug("joining greenlets")
        self._space_accounting_dealer_client.join()
        self._pull_server.join()
        self._watcher.join()
        for client in self._data_writer_clients:
            client.join()
        for client in self._data_reader_clients:
            client.join()
        self._log.debug("closing zmq")
        self._event_push_client.close()
        self._zeromq_context.term()
        self._log.info("closing database connections")
        self._central_connection.close()
        self._node_local_connection.close()

    def _unhandled_greenlet_exception(self, greenlet_object):
        """Log and publish any exception that escapes a linked greenlet."""
        try:
            greenlet_object.get()
        except Exception:
            self._log.exception(str(greenlet_object))
            exctype, value = sys.exc_info()[:2]
            self._event_push_client.exception(
                "unhandled_greenlet_exception",
                str(value),
                exctype=exctype.__name__
            )
class WebPublicReaderServer(object):
    """Public read-only web server.

    Uses a gdbpool database interaction pool (instead of direct
    connections) and pushes per-request operational stats to redis
    through a queue drained by OperationalStatsRedisSink.
    """

    def __init__(self, halt_event):
        self._log = logging.getLogger("WebServer")
        memcached_client = create_memcached_client()

        # pooled database access: the central pool, plus a pool for this
        # node's local database
        self._interaction_pool = \
            gdbpool.interaction_pool.DBInteractionPool(
                get_central_database_dsn(),
                pool_name=_central_pool_name,
                pool_size=_central_database_pool_size,
                do_log=True)

        self._interaction_pool.add_pool(
            dsn=get_node_local_database_dsn(),
            pool_name=_local_node_name,
            pool_size=_local_database_pool_size)

        # Ticket #25: must run database operation in a greenlet
        greenlet = gevent.Greenlet.spawn(_get_cluster_row,
                                         self._interaction_pool)
        greenlet.join()
        self._cluster_row = greenlet.get()

        authenticator = \
            InteractionPoolAuthenticator(memcached_client,
                                         self._interaction_pool)

        self._zeromq_context = zmq.Context()

        # space accounting: DEALER for request/reply, PUSH for
        # fire-and-forget accounting detail
        self._space_accounting_dealer_client = GreenletDealerClient(
            self._zeromq_context,
            _local_node_name,
            _space_accounting_server_address
        )
        self._space_accounting_dealer_client.link_exception(
            self._unhandled_greenlet_exception
        )

        push_client = GreenletPUSHClient(
            self._zeromq_context,
            _local_node_name,
            _space_accounting_pipeline_address,
        )

        self._accounting_client = SpaceAccountingClient(
            _local_node_name,
            self._space_accounting_dealer_client,
            push_client
        )

        self._event_push_client = EventPushClient(
            self._zeromq_context,
            "web-server"
        )

        # key material for translating internal ids to/from opaque
        # public ids; path can be overridden by environment variable
        id_translator_keys_path = os.environ.get(
            "NIMBUS_IO_ID_TRANSLATION_KEYS",
            os.path.join(_repository_path, "id_translator_keys.pkl"))
        # pickle data is binary: open in "rb" (text mode breaks on
        # Python 3 and on Windows)
        with open(id_translator_keys_path, "rb") as input_file:
            id_translator_keys = pickle.load(input_file)

        self._id_translator = InternalIDTranslator(
            id_translator_keys["key"],
            id_translator_keys["hmac_key"],
            id_translator_keys["iv_key"],
            id_translator_keys["hmac_size"]
        )

        redis_queue = gevent.queue.Queue()
        self._redis_sink = OperationalStatsRedisSink(halt_event,
                                                     redis_queue,
                                                     _local_node_name)
        self._redis_sink.link_exception(self._unhandled_greenlet_exception)

        self.application = Application(
            self._interaction_pool,
            self._cluster_row,
            self._id_translator,
            authenticator,
            self._accounting_client,
            self._event_push_client,
            redis_queue
        )
        self.wsgi_server = WSGIServer(
            (_web_public_reader_host, _web_public_reader_port),
            application=self.application,
            backlog=_wsgi_backlog,
            log=sys.stdout
        )

    def start(self):
        """Start the support greenlets, then the WSGI server."""
        self._space_accounting_dealer_client.start()
        self._redis_sink.start()
        self.wsgi_server.start()

    def stop(self):
        """Stop the WSGI server, kill and join the greenlets, then close
        zeromq."""
        self._log.info("stopping wsgi web server")
        self.wsgi_server.stop()
        self._accounting_client.close()
        self._log.debug("killing greenlets")
        self._space_accounting_dealer_client.kill()
        # kill the redis sink with the other greenlets (it was previously
        # killed after the join phase and never joined)
        self._redis_sink.kill()
        self._log.debug("joining greenlets")
        self._space_accounting_dealer_client.join()
        self._redis_sink.join()
        self._log.debug("closing zmq")
        self._event_push_client.close()
        self._zeromq_context.term()

    def _unhandled_greenlet_exception(self, greenlet_object):
        """Log and publish any exception that escapes a linked greenlet."""
        try:
            greenlet_object.get()
        except Exception:
            self._log.exception(str(greenlet_object))
            exctype, value = sys.exc_info()[:2]
            self._event_push_client.exception(
                "unhandled_greenlet_exception",
                str(value),
                exctype=exctype.__name__
            )
class WebPublicReaderServer(object):
    """Public read-only web server.

    Uses a gdbpool database interaction pool (instead of direct
    connections) and pushes per-request operational stats to redis
    through a queue drained by OperationalStatsRedisSink.
    """

    def __init__(self, halt_event):
        self._log = logging.getLogger("WebServer")
        memcached_client = create_memcached_client()

        # pooled database access: the central pool, plus a pool for this
        # node's local database
        self._interaction_pool = \
            gdbpool.interaction_pool.DBInteractionPool(
                get_central_database_dsn(),
                pool_name=_central_pool_name,
                pool_size=_central_database_pool_size,
                do_log=True)

        self._interaction_pool.add_pool(
            dsn=get_node_local_database_dsn(),
            pool_name=_local_node_name,
            pool_size=_local_database_pool_size)

        # Ticket #25: must run database operation in a greenlet
        greenlet = gevent.Greenlet.spawn(_get_cluster_row,
                                         self._interaction_pool)
        greenlet.join()
        self._cluster_row = greenlet.get()

        authenticator = \
            InteractionPoolAuthenticator(memcached_client,
                                         self._interaction_pool)

        self._zeromq_context = zmq.Context()

        # space accounting: DEALER for request/reply, PUSH for
        # fire-and-forget accounting detail
        self._space_accounting_dealer_client = GreenletDealerClient(
            self._zeromq_context,
            _local_node_name,
            _space_accounting_server_address)
        self._space_accounting_dealer_client.link_exception(
            self._unhandled_greenlet_exception)

        push_client = GreenletPUSHClient(
            self._zeromq_context,
            _local_node_name,
            _space_accounting_pipeline_address,
        )

        self._accounting_client = SpaceAccountingClient(
            _local_node_name,
            self._space_accounting_dealer_client,
            push_client)

        self._event_push_client = EventPushClient(self._zeromq_context,
                                                  "web-server")

        # key material for translating internal ids to/from opaque
        # public ids; path can be overridden by environment variable
        id_translator_keys_path = os.environ.get(
            "NIMBUS_IO_ID_TRANSLATION_KEYS",
            os.path.join(_repository_path, "id_translator_keys.pkl"))
        # pickle data is binary: open in "rb" (text mode breaks on
        # Python 3 and on Windows)
        with open(id_translator_keys_path, "rb") as input_file:
            id_translator_keys = pickle.load(input_file)

        self._id_translator = InternalIDTranslator(
            id_translator_keys["key"],
            id_translator_keys["hmac_key"],
            id_translator_keys["iv_key"],
            id_translator_keys["hmac_size"])

        redis_queue = gevent.queue.Queue()
        self._redis_sink = OperationalStatsRedisSink(halt_event,
                                                     redis_queue,
                                                     _local_node_name)
        self._redis_sink.link_exception(self._unhandled_greenlet_exception)

        self.application = Application(self._interaction_pool,
                                       self._cluster_row,
                                       self._id_translator,
                                       authenticator,
                                       self._accounting_client,
                                       self._event_push_client,
                                       redis_queue)
        self.wsgi_server = WSGIServer(
            (_web_public_reader_host, _web_public_reader_port),
            application=self.application,
            backlog=_wsgi_backlog,
            log=sys.stdout)

    def start(self):
        """Start the support greenlets, then the WSGI server."""
        self._space_accounting_dealer_client.start()
        self._redis_sink.start()
        self.wsgi_server.start()

    def stop(self):
        """Stop the WSGI server, kill and join the greenlets, then close
        zeromq."""
        self._log.info("stopping wsgi web server")
        self.wsgi_server.stop()
        self._accounting_client.close()
        self._log.debug("killing greenlets")
        self._space_accounting_dealer_client.kill()
        # kill the redis sink with the other greenlets (it was previously
        # killed after the join phase and never joined)
        self._redis_sink.kill()
        self._log.debug("joining greenlets")
        self._space_accounting_dealer_client.join()
        self._redis_sink.join()
        self._log.debug("closing zmq")
        self._event_push_client.close()
        self._zeromq_context.term()

    def _unhandled_greenlet_exception(self, greenlet_object):
        """Log and publish any exception that escapes a linked greenlet."""
        try:
            greenlet_object.get()
        except Exception:
            self._log.exception(str(greenlet_object))
            exctype, value = sys.exc_info()[:2]
            self._event_push_client.exception("unhandled_greenlet_exception",
                                              str(value),
                                              exctype=exctype.__name__)
class WebInternalReader(object):
    """Internal read-only web server.

    Connects a resilient zeromq client to every data reader node and
    serves retrieves through direct central/node-local database
    connections; construction wires everything up, start() runs it.
    """

    def __init__(self):
        self._log = logging.getLogger("WebInternalReader")
        memcached_client = memcache.Client(_memcached_nodes)

        # direct database connections: central database plus this node's
        # local database
        self._central_connection = get_central_connection()
        self._cluster_row = get_cluster_row(self._central_connection)
        self._node_local_connection = get_node_local_connection()
        # matches replies coming back over the PULL server to the
        # greenlets waiting for them
        self._deliverator = Deliverator()

        self._zeromq_context = zmq.Context()

        self._pull_server = GreenletPULLServer(
            self._zeromq_context, 
            _web_internal_reader_pipeline_address,
            self._deliverator
        )
        self._pull_server.link_exception(self._unhandled_greenlet_exception)

        # one resilient client per data reader node, each wrapped in a
        # DataReader facade; no connect messages are sent here (the
        # restart announcement goes out via the event push client below)
        self._data_reader_clients = list()
        self._data_readers = list()
        for node_name, address in zip(_node_names, _data_reader_addresses):
            resilient_client = GreenletResilientClient(
                self._zeromq_context, 
                node_name,
                address,
                _client_tag,
                _web_internal_reader_pipeline_address,
                self._deliverator,
                connect_messages=[]
            )
            resilient_client.link_exception(self._unhandled_greenlet_exception)
            self._data_reader_clients.append(resilient_client)
            data_reader = DataReader(
                node_name, resilient_client
            )
            self._data_readers.append(data_reader)

        # space accounting: DEALER for request/reply, PUSH for
        # fire-and-forget accounting detail
        self._space_accounting_dealer_client = GreenletDealerClient(
            self._zeromq_context, 
            _local_node_name, 
            _space_accounting_server_address
        )
        self._space_accounting_dealer_client.link_exception(
            self._unhandled_greenlet_exception
        )

        push_client = GreenletPUSHClient(
            self._zeromq_context, 
            _local_node_name, 
            _space_accounting_pipeline_address,
        )

        self._accounting_client = SpaceAccountingClient(
            _local_node_name,
            self._space_accounting_dealer_client,
            push_client
        )

        self._event_push_client = EventPushClient(
            self._zeromq_context,
            "web-internal-reader"
        )

        # message sent to data readers telling them the server
        # is (re)starting, thereby invalidating any archives or retrieves
        # that are in progress for this node
        timestamp = create_timestamp()
        self._event_push_client.info("web-reader-start",
                                     "web reader (re)start",
                                     timestamp_repr=repr(timestamp),
                                     source_node_name=_local_node_name)

        self._watcher = Watcher(
            _stats, 
            self._data_reader_clients,
            self._event_push_client
        )

        self.application = Application(
            memcached_client,
            self._central_connection,
            self._node_local_connection,
            self._cluster_row,
            self._data_readers,
            self._accounting_client,
            self._event_push_client,
            _stats
        )
        self.wsgi_server = WSGIServer(
            (_web_internal_reader_host, _web_internal_reader_port),
            application=self.application,
            backlog=_wsgi_backlog
        )

    def start(self):
        """Start all greenlets, then the WSGI server."""
        self._space_accounting_dealer_client.start()
        self._pull_server.start()
        self._watcher.start()
        for client in self._data_reader_clients:
            client.start()
        self.wsgi_server.start()

    def stop(self):
        """Stop the WSGI server, kill and join all greenlets, then close
        zeromq and the database connections."""
        self._log.info("stopping wsgi web server")
        self.wsgi_server.stop()
        self._accounting_client.close()
        self._log.debug("killing greenlets")
        self._space_accounting_dealer_client.kill()
        self._pull_server.kill()
        self._watcher.kill()
        for client in self._data_reader_clients:
            client.kill()
        self._log.debug("joining greenlets")
        self._space_accounting_dealer_client.join()
        self._pull_server.join()
        self._watcher.join()
        for client in self._data_reader_clients:
            client.join()
        self._log.debug("closing zmq")
        self._event_push_client.close()
        self._zeromq_context.term()
        self._log.info("closing database connections")
        self._central_connection.close()
        self._node_local_connection.close()

    def _unhandled_greenlet_exception(self, greenlet_object):
        """Log any exception that escapes a linked greenlet.

        NOTE(review): unlike the other server classes in this file, this
        one only logs and does not publish the exception through the
        event push client.
        """
        try:
            greenlet_object.get()
        except Exception:
            self._log.exception(str(greenlet_object))
class WebWriter(object):
    """Write-facing web server: archives data to every data writer node.

    Uses a gdbpool interaction pool for database access and pushes
    per-request operational stats to redis through a queue drained by
    OperationalStatsRedisSink.
    """

    def __init__(self, halt_event):
        self._log = logging.getLogger("WebWriter")
        memcached_client = memcache.Client(_memcached_nodes)

        self._interaction_pool = gdbpool.interaction_pool.DBInteractionPool(
            get_central_database_dsn(),
            pool_name=_central_pool_name,
            pool_size=_database_pool_size,
            do_log=True)

        authenticator = InteractionPoolAuthenticator(memcached_client,
                                                     self._interaction_pool)

        # Ticket #25: must run database operation in a greenlet
        greenlet = gevent.Greenlet.spawn(_get_cluster_row_and_node_row,
                                         self._interaction_pool)
        greenlet.join()
        self._cluster_row, node_row = greenlet.get()

        self._unified_id_factory = UnifiedIDFactory(node_row.id)

        # matches replies coming back over the PULL server to the
        # greenlets waiting for them
        self._deliverator = Deliverator()

        self._zeromq_context = zmq.Context()

        self._pull_server = GreenletPULLServer(
            self._zeromq_context,
            _web_writer_pipeliner_address,
            self._deliverator
        )
        self._pull_server.link_exception(self._unhandled_greenlet_exception)

        # one resilient client per data writer node
        self._data_writer_clients = list()
        for node_name, address in zip(_node_names, _data_writer_addresses):
            resilient_client = GreenletResilientClient(
                self._zeromq_context,
                node_name,
                address,
                _client_tag,
                _web_writer_pipeliner_address,
                self._deliverator,
                connect_messages=[]
            )
            resilient_client.link_exception(self._unhandled_greenlet_exception)
            self._data_writer_clients.append(resilient_client)

        # space accounting: DEALER for request/reply, PUSH for
        # fire-and-forget accounting detail
        self._space_accounting_dealer_client = GreenletDealerClient(
            self._zeromq_context,
            _local_node_name,
            _space_accounting_server_address
        )
        self._space_accounting_dealer_client.link_exception(
            self._unhandled_greenlet_exception
        )

        push_client = GreenletPUSHClient(
            self._zeromq_context,
            _local_node_name,
            _space_accounting_pipeline_address,
        )

        self._accounting_client = SpaceAccountingClient(
            _local_node_name,
            self._space_accounting_dealer_client,
            push_client
        )

        self._event_push_client = EventPushClient(
            self._zeromq_context,
            "web-server"
        )

        # message sent to data writers telling them the server
        # is (re)starting, thereby invalidating any archives
        # that are in progress for this node
        unified_id = self._unified_id_factory.next()
        timestamp = create_timestamp()
        self._event_push_client.info("web-writer-start",
                                     "web writer (re)start",
                                     unified_id=unified_id,
                                     timestamp_repr=repr(timestamp),
                                     source_node_name=_local_node_name)

        # key material for translating internal ids to/from opaque
        # public ids; path can be overridden by environment variable
        id_translator_keys_path = os.environ.get(
            "NIMBUS_IO_ID_TRANSLATION_KEYS",
            os.path.join(_repository_path, "id_translator_keys.pkl"))
        # pickle data is binary: open in "rb" (text mode breaks on
        # Python 3 and on Windows)
        with open(id_translator_keys_path, "rb") as input_file:
            id_translator_keys = pickle.load(input_file)

        self._id_translator = InternalIDTranslator(
            id_translator_keys["key"],
            id_translator_keys["hmac_key"],
            id_translator_keys["iv_key"],
            id_translator_keys["hmac_size"]
        )

        redis_queue = gevent.queue.Queue()
        self._redis_sink = OperationalStatsRedisSink(halt_event,
                                                     redis_queue,
                                                     _local_node_name)
        self._redis_sink.link_exception(self._unhandled_greenlet_exception)

        self.application = Application(
            self._cluster_row,
            self._unified_id_factory,
            self._id_translator,
            self._data_writer_clients,
            authenticator,
            self._accounting_client,
            self._event_push_client,
            redis_queue
        )
        self.wsgi_server = WSGIServer(
            (_web_writer_host, _web_writer_port),
            application=self.application,
            backlog=_wsgi_backlog
        )

    def start(self):
        """Start all greenlets, then the WSGI server."""
        self._space_accounting_dealer_client.start()
        self._pull_server.start()
        for client in self._data_writer_clients:
            client.start()
        self._redis_sink.start()
        self.wsgi_server.start()

    def stop(self):
        """Stop the WSGI server, kill and join all greenlets, then close
        zeromq."""
        self._log.info("stopping wsgi web server")
        self.wsgi_server.stop()
        self._accounting_client.close()
        self._log.debug("killing greenlets")
        self._space_accounting_dealer_client.kill()
        self._pull_server.kill()
        for client in self._data_writer_clients:
            client.kill()
        self._redis_sink.kill()
        self._log.debug("joining greenlets")
        self._space_accounting_dealer_client.join()
        self._pull_server.join()
        for client in self._data_writer_clients:
            client.join()
        # was self._redis_sink.kill() (copy/paste error): this is the
        # join phase, and the sink was already killed above
        self._redis_sink.join()
        self._log.debug("closing zmq")
        self._event_push_client.close()
        self._zeromq_context.term()

    def _unhandled_greenlet_exception(self, greenlet_object):
        """Log and publish any exception that escapes a linked greenlet."""
        try:
            greenlet_object.get()
        except Exception:
            self._log.exception(str(greenlet_object))
            exctype, value = sys.exc_info()[:2]
            self._event_push_client.exception(
                "unhandled_greenlet_exception",
                str(value),
                exctype=exctype.__name__
            )
class WebWriter(object):
    """Write-facing web server: archives data to every data writer node.

    Uses a gdbpool interaction pool for database access and pushes
    per-request operational stats to redis through a queue drained by
    OperationalStatsRedisSink.
    """

    def __init__(self, halt_event):
        self._log = logging.getLogger("WebWriter")
        memcached_client = memcache.Client(_memcached_nodes)

        self._interaction_pool = gdbpool.interaction_pool.DBInteractionPool(
            get_central_database_dsn(),
            pool_name=_central_pool_name,
            pool_size=_database_pool_size,
            do_log=True)

        authenticator = InteractionPoolAuthenticator(memcached_client,
                                                     self._interaction_pool)

        # Ticket #25: must run database operation in a greenlet
        greenlet = gevent.Greenlet.spawn(_get_cluster_row_and_node_row,
                                         self._interaction_pool)
        greenlet.join()
        self._cluster_row, node_row = greenlet.get()

        self._unified_id_factory = UnifiedIDFactory(node_row.id)

        # matches replies coming back over the PULL server to the
        # greenlets waiting for them
        self._deliverator = Deliverator()

        self._zeromq_context = zmq.Context()

        self._pull_server = GreenletPULLServer(self._zeromq_context,
                                               _web_writer_pipeliner_address,
                                               self._deliverator)
        self._pull_server.link_exception(self._unhandled_greenlet_exception)

        # one resilient client per data writer node
        self._data_writer_clients = list()
        for node_name, address in zip(_node_names, _data_writer_addresses):
            resilient_client = GreenletResilientClient(
                self._zeromq_context,
                node_name,
                address,
                _client_tag,
                _web_writer_pipeliner_address,
                self._deliverator,
                connect_messages=[])
            resilient_client.link_exception(self._unhandled_greenlet_exception)
            self._data_writer_clients.append(resilient_client)

        # space accounting: DEALER for request/reply, PUSH for
        # fire-and-forget accounting detail
        self._space_accounting_dealer_client = GreenletDealerClient(
            self._zeromq_context,
            _local_node_name,
            _space_accounting_server_address)
        self._space_accounting_dealer_client.link_exception(
            self._unhandled_greenlet_exception)

        push_client = GreenletPUSHClient(
            self._zeromq_context,
            _local_node_name,
            _space_accounting_pipeline_address,
        )

        self._accounting_client = SpaceAccountingClient(
            _local_node_name,
            self._space_accounting_dealer_client,
            push_client)

        self._event_push_client = EventPushClient(self._zeromq_context,
                                                  "web-server")

        # message sent to data writers telling them the server
        # is (re)starting, thereby invalidating any archives
        # that are in progress for this node
        unified_id = self._unified_id_factory.next()
        timestamp = create_timestamp()
        self._event_push_client.info("web-writer-start",
                                     "web writer (re)start",
                                     unified_id=unified_id,
                                     timestamp_repr=repr(timestamp),
                                     source_node_name=_local_node_name)

        # key material for translating internal ids to/from opaque
        # public ids; path can be overridden by environment variable
        id_translator_keys_path = os.environ.get(
            "NIMBUS_IO_ID_TRANSLATION_KEYS",
            os.path.join(_repository_path, "id_translator_keys.pkl"))
        # pickle data is binary: open in "rb" (text mode breaks on
        # Python 3 and on Windows)
        with open(id_translator_keys_path, "rb") as input_file:
            id_translator_keys = pickle.load(input_file)

        self._id_translator = InternalIDTranslator(
            id_translator_keys["key"],
            id_translator_keys["hmac_key"],
            id_translator_keys["iv_key"],
            id_translator_keys["hmac_size"])

        redis_queue = gevent.queue.Queue()
        self._redis_sink = OperationalStatsRedisSink(halt_event,
                                                     redis_queue,
                                                     _local_node_name)
        self._redis_sink.link_exception(self._unhandled_greenlet_exception)

        self.application = Application(self._cluster_row,
                                       self._unified_id_factory,
                                       self._id_translator,
                                       self._data_writer_clients,
                                       authenticator,
                                       self._accounting_client,
                                       self._event_push_client,
                                       redis_queue)
        self.wsgi_server = WSGIServer((_web_writer_host, _web_writer_port),
                                      application=self.application,
                                      backlog=_wsgi_backlog)

    def start(self):
        """Start all greenlets, then the WSGI server."""
        self._space_accounting_dealer_client.start()
        self._pull_server.start()
        for client in self._data_writer_clients:
            client.start()
        self._redis_sink.start()
        self.wsgi_server.start()

    def stop(self):
        """Stop the WSGI server, kill and join all greenlets, then close
        zeromq."""
        self._log.info("stopping wsgi web server")
        self.wsgi_server.stop()
        self._accounting_client.close()
        self._log.debug("killing greenlets")
        self._space_accounting_dealer_client.kill()
        self._pull_server.kill()
        for client in self._data_writer_clients:
            client.kill()
        self._redis_sink.kill()
        self._log.debug("joining greenlets")
        self._space_accounting_dealer_client.join()
        self._pull_server.join()
        for client in self._data_writer_clients:
            client.join()
        # was self._redis_sink.kill() (copy/paste error): this is the
        # join phase, and the sink was already killed above
        self._redis_sink.join()
        self._log.debug("closing zmq")
        self._event_push_client.close()
        self._zeromq_context.term()

    def _unhandled_greenlet_exception(self, greenlet_object):
        """Log and publish any exception that escapes a linked greenlet."""
        try:
            greenlet_object.get()
        except Exception:
            self._log.exception(str(greenlet_object))
            exctype, value = sys.exc_info()[:2]
            self._event_push_client.exception("unhandled_greenlet_exception",
                                              str(value),
                                              exctype=exctype.__name__)
class WebInternalReader(object):
    """Internal read-only web server.

    Connects a resilient zeromq client to every data reader node and
    serves retrieves through direct central/node-local database
    connections; construction wires everything up, start() runs it.
    """

    def __init__(self):
        self._log = logging.getLogger("WebInternalReader")
        memcached_client = memcache.Client(_memcached_nodes)

        # direct database connections: central database plus this node's
        # local database
        self._central_connection = get_central_connection()
        self._cluster_row = get_cluster_row(self._central_connection)
        self._node_local_connection = get_node_local_connection()
        # matches replies coming back over the PULL server to the
        # greenlets waiting for them
        self._deliverator = Deliverator()

        self._zeromq_context = zmq.Context()

        self._pull_server = GreenletPULLServer(
            self._zeromq_context,
            _web_internal_reader_pipeline_address,
            self._deliverator)
        self._pull_server.link_exception(self._unhandled_greenlet_exception)

        # one resilient client per data reader node, each wrapped in a
        # DataReader facade; no connect messages are sent here (the
        # restart announcement goes out via the event push client below)
        self._data_reader_clients = list()
        self._data_readers = list()
        for node_name, address in zip(_node_names, _data_reader_addresses):
            resilient_client = GreenletResilientClient(
                self._zeromq_context,
                node_name,
                address,
                _client_tag,
                _web_internal_reader_pipeline_address,
                self._deliverator,
                connect_messages=[])
            resilient_client.link_exception(self._unhandled_greenlet_exception)
            self._data_reader_clients.append(resilient_client)
            data_reader = DataReader(node_name, resilient_client)
            self._data_readers.append(data_reader)

        # space accounting: DEALER for request/reply, PUSH for
        # fire-and-forget accounting detail
        self._space_accounting_dealer_client = GreenletDealerClient(
            self._zeromq_context,
            _local_node_name,
            _space_accounting_server_address)
        self._space_accounting_dealer_client.link_exception(
            self._unhandled_greenlet_exception)

        push_client = GreenletPUSHClient(
            self._zeromq_context,
            _local_node_name,
            _space_accounting_pipeline_address,
        )

        self._accounting_client = SpaceAccountingClient(
            _local_node_name,
            self._space_accounting_dealer_client,
            push_client)

        self._event_push_client = EventPushClient(self._zeromq_context,
                                                  "web-internal-reader")

        # message sent to data readers telling them the server
        # is (re)starting, thereby invalidating any archives or retrieves
        # that are in progress for this node
        timestamp = create_timestamp()
        self._event_push_client.info("web-reader-start",
                                     "web reader (re)start",
                                     timestamp_repr=repr(timestamp),
                                     source_node_name=_local_node_name)

        self._watcher = Watcher(_stats,
                                self._data_reader_clients,
                                self._event_push_client)

        self.application = Application(memcached_client,
                                       self._central_connection,
                                       self._node_local_connection,
                                       self._cluster_row,
                                       self._data_readers,
                                       self._accounting_client,
                                       self._event_push_client,
                                       _stats)
        self.wsgi_server = WSGIServer(
            (_web_internal_reader_host, _web_internal_reader_port),
            application=self.application,
            backlog=_wsgi_backlog)

    def start(self):
        """Start all greenlets, then the WSGI server."""
        self._space_accounting_dealer_client.start()
        self._pull_server.start()
        self._watcher.start()
        for client in self._data_reader_clients:
            client.start()
        self.wsgi_server.start()

    def stop(self):
        """Stop the WSGI server, kill and join all greenlets, then close
        zeromq and the database connections."""
        self._log.info("stopping wsgi web server")
        self.wsgi_server.stop()
        self._accounting_client.close()
        self._log.debug("killing greenlets")
        self._space_accounting_dealer_client.kill()
        self._pull_server.kill()
        self._watcher.kill()
        for client in self._data_reader_clients:
            client.kill()
        self._log.debug("joining greenlets")
        self._space_accounting_dealer_client.join()
        self._pull_server.join()
        self._watcher.join()
        for client in self._data_reader_clients:
            client.join()
        self._log.debug("closing zmq")
        self._event_push_client.close()
        self._zeromq_context.term()
        self._log.info("closing database connections")
        self._central_connection.close()
        self._node_local_connection.close()

    def _unhandled_greenlet_exception(self, greenlet_object):
        """Log any exception that escapes a linked greenlet.

        NOTE(review): unlike the other server classes in this file, this
        one only logs and does not publish the exception through the
        event push client.
        """
        try:
            greenlet_object.get()
        except Exception:
            self._log.exception(str(greenlet_object))