def __init__(self, interaction_pool, redis_queue, collection_id,
             versioned, key, version_id, slice_offset, slice_size,
             user_request_id):
    """Prepare a retrieval: record the request parameters, then look up
    the key rows describing the stored data for this key/version/slice."""
    self._log = logging.getLogger("Retriever")
    self._memcached_client = create_memcached_client()

    # what we are retrieving, and from where
    self._interaction_pool = interaction_pool
    self._redis_queue = redis_queue
    self._collection_id = collection_id
    self._versioned = versioned
    self._key = key
    self._version_id = version_id
    self._slice_offset = slice_offset
    self._slice_size = slice_size

    # progress bookkeeping for the retrieve in flight
    self.total_file_size = 0
    self.user_request_id = user_request_id
    self._sequence = 0
    # if we are looking for a specified slice, we can stop when
    # we find the last block
    self._last_block_in_slice_retrieved = False

    # reads the request attributes assigned above
    self._key_rows = self._fetch_key_rows_from_database()
def __init__(self, interaction_pool, redis_queue, collection_id,
             versioned, key, version_id, slice_offset, slice_size,
             user_request_id):
    """Record the retrieval request and load its key rows."""
    self._log = logging.getLogger("Retriever")
    self._memcached_client = create_memcached_client()

    # stash the request parameters as private attributes
    for attr_name, attr_value in (
            ("_interaction_pool", interaction_pool),
            ("_redis_queue", redis_queue),
            ("_collection_id", collection_id),
            ("_versioned", versioned),
            ("_key", key),
            ("_version_id", version_id),
            ("_slice_offset", slice_offset),
            ("_slice_size", slice_size)):
        setattr(self, attr_name, attr_value)

    self._key_rows = self._fetch_key_rows_from_database()

    self.total_file_size = 0
    self.user_request_id = user_request_id
    self._sequence = 0
    # if we are looking for a specified slice, we can stop when
    # we find the last block
    self._last_block_in_slice_retrieved = False
def __init__(self, halt_event):
    """Wire together the web server.

    Builds the central and node-local database interaction pools, fetches
    the cluster row (in a greenlet, per Ticket #25), creates the
    space-accounting clients, loads the internal id-translator keys,
    starts the redis stats sink, and constructs the WSGI application
    and server.

    halt_event -- event object handed to the redis sink so it can shut
                  down cleanly (semantics defined by the caller)
    """
    self._log = logging.getLogger("WebServer")
    memcached_client = create_memcached_client()

    self._interaction_pool = gdbpool.interaction_pool.DBInteractionPool(
        get_central_database_dsn(),
        pool_name=_central_pool_name,
        pool_size=_central_database_pool_size,
        do_log=True)
    self._interaction_pool.add_pool(
        dsn=get_node_local_database_dsn(),
        pool_name=_local_node_name,
        pool_size=_local_database_pool_size)

    # Ticket #25: must run database operation in a greenlet
    greenlet = gevent.Greenlet.spawn(_get_cluster_row,
                                     self._interaction_pool)
    greenlet.join()
    self._cluster_row = greenlet.get()

    authenticator = InteractionPoolAuthenticator(memcached_client,
                                                 self._interaction_pool)

    self._zeromq_context = zmq.Context()

    self._space_accounting_dealer_client = GreenletDealerClient(
        self._zeromq_context,
        _local_node_name,
        _space_accounting_server_address)
    self._space_accounting_dealer_client.link_exception(
        self._unhandled_greenlet_exception)

    push_client = GreenletPUSHClient(
        self._zeromq_context,
        _local_node_name,
        _space_accounting_pipeline_address)

    self._accounting_client = SpaceAccountingClient(
        _local_node_name,
        self._space_accounting_dealer_client,
        push_client)

    self._event_push_client = EventPushClient(self._zeromq_context,
                                              "web-server")

    id_translator_keys_path = os.environ.get(
        "NIMBUS_IO_ID_TRANSLATION_KEYS",
        os.path.join(_repository_path, "id_translator_keys.pkl"))
    # BUGFIX: pickle data must be read in binary mode; text mode ("r")
    # fails under Python 3 and risks byte translation on some platforms.
    # NOTE(review): pickle.load on this locally-managed key file is
    # acceptable only as long as the file is trusted.
    with open(id_translator_keys_path, "rb") as input_file:
        id_translator_keys = pickle.load(input_file)

    self._id_translator = InternalIDTranslator(
        id_translator_keys["key"],
        id_translator_keys["hmac_key"],
        id_translator_keys["iv_key"],
        id_translator_keys["hmac_size"])

    redis_queue = gevent.queue.Queue()
    self._redis_sink = OperationalStatsRedisSink(halt_event,
                                                 redis_queue,
                                                 _local_node_name)
    self._redis_sink.link_exception(self._unhandled_greenlet_exception)

    self.application = Application(
        self._interaction_pool,
        self._cluster_row,
        self._id_translator,
        authenticator,
        self._accounting_client,
        self._event_push_client,
        redis_queue)

    self.wsgi_server = WSGIServer(
        (_web_public_reader_host, _web_public_reader_port),
        application=self.application,
        backlog=_wsgi_backlog,
        log=sys.stdout)
def __init__(self, halt_event):
    """Assemble the web server's collaborators and the WSGI stack.

    Sets up the database interaction pools, retrieves the cluster row
    inside a greenlet (Ticket #25), builds the space-accounting dealer
    and PUSH clients, loads the id-translation keys from disk, starts
    the operational-stats redis sink, and finally creates the
    Application and WSGIServer.

    halt_event -- shutdown event passed through to the redis sink
    """
    self._log = logging.getLogger("WebServer")
    memcached_client = create_memcached_client()

    self._interaction_pool = gdbpool.interaction_pool.DBInteractionPool(
        get_central_database_dsn(),
        pool_name=_central_pool_name,
        pool_size=_central_database_pool_size,
        do_log=True)
    self._interaction_pool.add_pool(
        dsn=get_node_local_database_dsn(),
        pool_name=_local_node_name,
        pool_size=_local_database_pool_size)

    # Ticket #25: must run database operation in a greenlet
    greenlet = gevent.Greenlet.spawn(_get_cluster_row,
                                     self._interaction_pool)
    greenlet.join()
    self._cluster_row = greenlet.get()

    authenticator = InteractionPoolAuthenticator(memcached_client,
                                                 self._interaction_pool)

    self._zeromq_context = zmq.Context()

    self._space_accounting_dealer_client = GreenletDealerClient(
        self._zeromq_context,
        _local_node_name,
        _space_accounting_server_address)
    self._space_accounting_dealer_client.link_exception(
        self._unhandled_greenlet_exception)

    push_client = GreenletPUSHClient(
        self._zeromq_context,
        _local_node_name,
        _space_accounting_pipeline_address)

    self._accounting_client = SpaceAccountingClient(
        _local_node_name,
        self._space_accounting_dealer_client,
        push_client)

    self._event_push_client = EventPushClient(self._zeromq_context,
                                              "web-server")

    id_translator_keys_path = os.environ.get(
        "NIMBUS_IO_ID_TRANSLATION_KEYS",
        os.path.join(_repository_path, "id_translator_keys.pkl"))
    # BUGFIX: a pickle stream is binary data, so the file must be opened
    # "rb" — text mode ("r") breaks under Python 3 and can mangle bytes
    # on some platforms.
    # NOTE(review): only safe while this key file remains trusted input.
    with open(id_translator_keys_path, "rb") as input_file:
        id_translator_keys = pickle.load(input_file)

    self._id_translator = InternalIDTranslator(
        id_translator_keys["key"],
        id_translator_keys["hmac_key"],
        id_translator_keys["iv_key"],
        id_translator_keys["hmac_size"])

    redis_queue = gevent.queue.Queue()
    self._redis_sink = OperationalStatsRedisSink(halt_event,
                                                 redis_queue,
                                                 _local_node_name)
    self._redis_sink.link_exception(self._unhandled_greenlet_exception)

    self.application = Application(
        self._interaction_pool,
        self._cluster_row,
        self._id_translator,
        authenticator,
        self._accounting_client,
        self._event_push_client,
        redis_queue)

    self.wsgi_server = WSGIServer(
        (_web_public_reader_host, _web_public_reader_port),
        application=self.application,
        backlog=_wsgi_backlog,
        log=sys.stdout)