Example #1
0
 def setUp(self):
     self._halt_event = Event()
     self._redis_queue = gevent.queue.Queue()
     self._redis_sink = OperationalStatsRedisSink(self._halt_event,
                                                  self._redis_queue,
                                                  _node_name)
     self._redis_sink.link_exception(_unhandled_greenlet_exception)
     self._redis_sink.start()
     self._redis_connection = create_redis_connection()
 def setUp(self):
     self._halt_event = Event()
     self._redis_queue = gevent.queue.Queue()
     self._redis_sink = OperationalStatsRedisSink(self._halt_event, self._redis_queue, _node_name)
     self._redis_sink.link_exception(_unhandled_greenlet_exception)
     self._redis_sink.start()
     self._redis_connection = create_redis_connection()
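Both setUp variants link a module-level _unhandled_greenlet_exception callback to the sink greenlet. A minimal sketch of such a callback, assuming it only needs to log the failure (the actual handler used by these examples is not shown):

import logging

# hypothetical handler: gevent's link_exception invokes it with the failed greenlet
def _unhandled_greenlet_exception(greenlet_object):
    log = logging.getLogger("_unhandled_greenlet_exception")
    log.error("unhandled greenlet exception %r" % (greenlet_object.exception, ))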
    def __init__(self, halt_event):
        self._log = logging.getLogger("WebServer")
        memcached_client = create_memcached_client()

        self._interaction_pool = \
            gdbpool.interaction_pool.DBInteractionPool(
                get_central_database_dsn(), 
                pool_name=_central_pool_name,
                pool_size=_central_database_pool_size, 
                do_log=True)

        self._interaction_pool.add_pool(
            dsn=get_node_local_database_dsn(), 
            pool_name=_local_node_name,
            pool_size=_local_database_pool_size) 

        # Ticket #25: must run database operation in a greenlet
        greenlet =  gevent.Greenlet.spawn(_get_cluster_row, 
                                           self._interaction_pool)
        greenlet.join()
        self._cluster_row = greenlet.get()

        authenticator = \
            InteractionPoolAuthenticator(memcached_client, 
                                         self._interaction_pool)

        self._zeromq_context = zmq.Context()

        self._space_accounting_dealer_client = GreenletDealerClient(
            self._zeromq_context, 
            _local_node_name, 
            _space_accounting_server_address
        )
        self._space_accounting_dealer_client.link_exception(
            self._unhandled_greenlet_exception
        )

        push_client = GreenletPUSHClient(
            self._zeromq_context, 
            _local_node_name, 
            _space_accounting_pipeline_address,
        )

        self._accounting_client = SpaceAccountingClient(
            _local_node_name,
            self._space_accounting_dealer_client,
            push_client
        )

        self._event_push_client = EventPushClient(
            self._zeromq_context,
            "web-server"
        )

        id_translator_keys_path = os.environ.get(
            "NIMBUS_IO_ID_TRANSLATION_KEYS", 
            os.path.join(_repository_path, "id_translator_keys.pkl"))
        with open(id_translator_keys_path, "r") as input_file:
            id_translator_keys = pickle.load(input_file)

        self._id_translator = InternalIDTranslator(
            id_translator_keys["key"],
            id_translator_keys["hmac_key"], 
            id_translator_keys["iv_key"],
            id_translator_keys["hmac_size"]
        )

        redis_queue = gevent.queue.Queue()

        self._redis_sink = OperationalStatsRedisSink(halt_event, 
                                                     redis_queue,
                                                     _local_node_name)
        self._redis_sink.link_exception(self._unhandled_greenlet_exception)

        self.application = Application(
            self._interaction_pool,
            self._cluster_row,
            self._id_translator,
            authenticator,
            self._accounting_client,
            self._event_push_client,
            redis_queue
        )
        self.wsgi_server = WSGIServer(
            (_web_public_reader_host, _web_public_reader_port), 
            application=self.application,
            backlog=_wsgi_backlog,
            log=sys.stdout
        )
Example #4
0
    def __init__(self, halt_event):
        self._log = logging.getLogger("WebServer")
        memcached_client = create_memcached_client()

        self._interaction_pool = \
            gdbpool.interaction_pool.DBInteractionPool(
                get_central_database_dsn(),
                pool_name=_central_pool_name,
                pool_size=_central_database_pool_size,
                do_log=True)

        self._interaction_pool.add_pool(dsn=get_node_local_database_dsn(),
                                        pool_name=_local_node_name,
                                        pool_size=_local_database_pool_size)

        # Ticket #25: must run database operation in a greenlet
        greenlet = gevent.Greenlet.spawn(_get_cluster_row,
                                         self._interaction_pool)
        greenlet.join()
        self._cluster_row = greenlet.get()

        authenticator = \
            InteractionPoolAuthenticator(memcached_client,
                                         self._interaction_pool)

        self._zeromq_context = zmq.Context()

        self._space_accounting_dealer_client = GreenletDealerClient(
            self._zeromq_context, _local_node_name,
            _space_accounting_server_address)
        self._space_accounting_dealer_client.link_exception(
            self._unhandled_greenlet_exception)

        push_client = GreenletPUSHClient(
            self._zeromq_context,
            _local_node_name,
            _space_accounting_pipeline_address,
        )

        self._accounting_client = SpaceAccountingClient(
            _local_node_name, self._space_accounting_dealer_client,
            push_client)

        self._event_push_client = EventPushClient(self._zeromq_context,
                                                  "web-server")

        id_translator_keys_path = os.environ.get(
            "NIMBUS_IO_ID_TRANSLATION_KEYS",
            os.path.join(_repository_path, "id_translator_keys.pkl"))
        with open(id_translator_keys_path, "r") as input_file:
            id_translator_keys = pickle.load(input_file)

        self._id_translator = InternalIDTranslator(
            id_translator_keys["key"], id_translator_keys["hmac_key"],
            id_translator_keys["iv_key"], id_translator_keys["hmac_size"])

        redis_queue = gevent.queue.Queue()

        self._redis_sink = OperationalStatsRedisSink(halt_event, redis_queue,
                                                     _local_node_name)
        self._redis_sink.link_exception(self._unhandled_greenlet_exception)

        self.application = Application(self._interaction_pool,
                                       self._cluster_row, self._id_translator,
                                       authenticator, self._accounting_client,
                                       self._event_push_client, redis_queue)
        self.wsgi_server = WSGIServer(
            (_web_public_reader_host, _web_public_reader_port),
            application=self.application,
            backlog=_wsgi_backlog,
            log=sys.stdout)
Example #5
0
class TestStatsAccumulator(unittest.TestCase):
    """
    test StatsAccumulator
    """
    def setUp(self):
        self._halt_event = Event()
        self._redis_queue = gevent.queue.Queue()
        self._redis_sink = OperationalStatsRedisSink(self._halt_event,
                                                     self._redis_queue,
                                                     _node_name)
        self._redis_sink.link_exception(_unhandled_greenlet_exception)
        self._redis_sink.start()
        self._redis_connection = create_redis_connection()

    def tearDown(self):
        log = logging.getLogger("teardown")
        log.info("1")
        if hasattr(self, "_halt_event"):
            self._halt_event.set()
        log.info("2")
        if hasattr(self, "_redis_sink"):
            self._redis_sink.join()
        log.info("3")
        if hasattr(self, "_redis_queue"):
            delattr(self, "_redis_queue")
        log.info("4")
        if hasattr(self, "_halt_event"):
            delattr(self, "_halt_event")
        log.info("5")
        if hasattr(self, "_redis_sink"):
            delattr(self, "_redis_sink")
        log.info("6")
        if hasattr(self, "_redis_connection"):
            for key in self._redis_connection.keys(compute_search_key(_node_name)):
                self._redis_connection.delete(key)
            delattr(self, "_redis_connection")
        log.info("7")

    def test_greenlet_creation(self):
        """
        test that we can create, and stop, the greenlet
        """
        keys = self._redis_connection.keys()
        print >> sys.stderr, "keys =", str(keys)

        # give the greenlet some time to start
        self._halt_event.wait(1)

    def test_single_increment(self):
        """
        test that we can increment a key once
        """
        partial_key = "get_request"
        queue_entry = redis_queue_entry_tuple(timestamp=datetime.utcnow(),
                                              collection_id=42,
                                              value=12345)

        # feed an entry to the queue
        self._redis_queue.put((partial_key, queue_entry), )

        # give the greenlet some time to start
        self._halt_event.wait(1)

        # verify that the key got incremented
        expected_key = compute_key(_node_name, queue_entry.timestamp,
                                   partial_key)
        hash_dict = self._redis_connection.hgetall(expected_key)
        items = hash_dict.items()
        self.assertEqual(len(items), 1)
        collection_id_bytes, count_bytes = items[0]
        collection_id = int(collection_id_bytes)
        count = int(count_bytes)

        self.assertEqual(collection_id, queue_entry.collection_id)
        self.assertEqual(count, queue_entry.value)

    def test_multiple_increment(self):
        """
        test that we can increment a key multiple times in a time interval
        """
        partial_key = "get_request"

        # create a base time, rounded off to the nearest minute
        current_time = datetime.utcnow()
        base_time = datetime(current_time.year, current_time.month,
                             current_time.day, current_time.hour,
                             current_time.minute)

        test_range = range(10)
        for index in test_range:
            timestamp = base_time + timedelta(seconds=index)
            queue_entry = redis_queue_entry_tuple(timestamp=timestamp,
                                                  collection_id=42,
                                                  value=index)

            self._redis_queue.put((partial_key, queue_entry), )

        # give the greenlet and redis some time to operate
        self._halt_event.wait(1)

        # verify that the key got incremented
        expected_key = compute_key(_node_name, queue_entry.timestamp,
                                   partial_key)
        expected_value = sum(test_range)
        hash_value = self._redis_connection.hget(expected_key,
                                                 queue_entry.collection_id)
        self.assertEqual(int(hash_value), expected_value)
class TestStatsAccumulator(unittest.TestCase):
    """
    test StatsAccumulator
    """

    def setUp(self):
        self._halt_event = Event()
        self._redis_queue = gevent.queue.Queue()
        self._redis_sink = OperationalStatsRedisSink(self._halt_event, self._redis_queue, _node_name)
        self._redis_sink.link_exception(_unhandled_greenlet_exception)
        self._redis_sink.start()
        self._redis_connection = create_redis_connection()

    def tearDown(self):
        log = logging.getLogger("teardown")
        log.info("1")
        if hasattr(self, "_halt_event"):
            self._halt_event.set()
        log.info("2")
        if hasattr(self, "_redis_sink"):
            self._redis_sink.join()
        log.info("3")
        if hasattr(self, "_redis_queue"):
            delattr(self, "_redis_queue")
        log.info("4")
        if hasattr(self, "_halt_event"):
            delattr(self, "_halt_event")
        log.info("5")
        if hasattr(self, "_redis_sink"):
            delattr(self, "_redis_sink")
        log.info("6")
        if hasattr(self, "_redis_connection"):
            for key in self._redis_connection.keys(compute_search_key(_node_name)):
                self._redis_connection.delete(key)
            delattr(self, "_redis_connection")
        log.info("7")

    def test_greenlet_creation(self):
        """
        test that we can create, and stop, the greenlet
        """
        keys = self._redis_connection.keys()
        print >>sys.stderr, "keys =", str(keys)

        # give the greenlet some time to start
        self._halt_event.wait(1)

    def test_single_increment(self):
        """
        test that we can increment a key once
        """
        partial_key = "get_request"
        queue_entry = redis_queue_entry_tuple(timestamp=datetime.utcnow(), collection_id=42, value=12345)

        # feed an entry to the queue
        self._redis_queue.put((partial_key, queue_entry))

        # give the greenlet some time to start
        self._halt_event.wait(1)

        # verify that the key got incremented
        expected_key = compute_key(_node_name, queue_entry.timestamp, partial_key)
        hash_dict = self._redis_connection.hgetall(expected_key)
        items = hash_dict.items()
        self.assertEqual(len(items), 1)
        collection_id_bytes, count_bytes = items[0]
        collection_id = int(collection_id_bytes)
        count = int(count_bytes)

        self.assertEqual(collection_id, queue_entry.collection_id)
        self.assertEqual(count, queue_entry.value)

    def test_multiple_increment(self):
        """
        test that we can increment a key multiple times in a time interval
        """
        partial_key = "get_request"

        # create a base time, rounded off to the nearest minute
        current_time = datetime.utcnow()
        base_time = datetime(
            current_time.year, current_time.month, current_time.day, current_time.hour, current_time.minute
        )

        test_range = range(10)
        for index in test_range:
            timestamp = base_time + timedelta(seconds=index)
            queue_entry = redis_queue_entry_tuple(timestamp=timestamp, collection_id=42, value=index)

            self._redis_queue.put((partial_key, queue_entry))

        # give the greenlet and redis some time to operate
        self._halt_event.wait(1)

        # verify that the key got incremented
        expected_key = compute_key(_node_name, queue_entry.timestamp, partial_key)
        expected_value = sum(test_range)
        hash_value = self._redis_connection.hget(expected_key, queue_entry.collection_id)
        self.assertEqual(int(hash_value), expected_value)
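The tests above assume compute_key and compute_search_key agree on a per-node, per-minute key layout. A rough sketch of those helpers under that assumption (signatures taken from the calls above; the key format itself is illustrative only):

# hypothetical key helpers; the format used by the real OperationalStatsRedisSink may differ
def compute_key(node_name, timestamp, partial_key):
    # one Redis hash per node, per minute, per stat name
    return "nimbus.io.%s.%s.%s" % (
        node_name, timestamp.strftime("%Y%m%d%H%M"), partial_key)

def compute_search_key(node_name):
    # glob pattern matching every key written for this node (used by tearDown)
    return "nimbus.io.%s.*" % (node_name, )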
    def __init__(self, halt_event):
        self._log = logging.getLogger("WebWriter")
        memcached_client = memcache.Client(_memcached_nodes)

        self._interaction_pool = gdbpool.interaction_pool.DBInteractionPool(
            get_central_database_dsn(), 
            pool_name=_central_pool_name,
            pool_size=_database_pool_size, 
            do_log=True)

        authenticator = InteractionPoolAuthenticator(memcached_client, 
                                                     self._interaction_pool)

        # Ticket #25: must run database operation in a greenlet
        greenlet =  gevent.Greenlet.spawn(_get_cluster_row_and_node_row, 
                                           self._interaction_pool)
        greenlet.join()
        self._cluster_row, node_row = greenlet.get()

        self._unified_id_factory = UnifiedIDFactory(node_row.id)

        self._deliverator = Deliverator()

        self._zeromq_context = zmq.Context()

        self._pull_server = GreenletPULLServer(
            self._zeromq_context, 
            _web_writer_pipeliner_address,
            self._deliverator
        )
        self._pull_server.link_exception(self._unhandled_greenlet_exception)

        self._data_writer_clients = list()
        for node_name, address in zip(_node_names, _data_writer_addresses):
            resilient_client = GreenletResilientClient(
                self._zeromq_context, 
                node_name,
                address,
                _client_tag,
                _web_writer_pipeliner_address,
                self._deliverator,
                connect_messages=[]
            )
            resilient_client.link_exception(self._unhandled_greenlet_exception)
            self._data_writer_clients.append(resilient_client)

        self._space_accounting_dealer_client = GreenletDealerClient(
            self._zeromq_context, 
            _local_node_name, 
            _space_accounting_server_address
        )
        self._space_accounting_dealer_client.link_exception(
            self._unhandled_greenlet_exception
        )

        push_client = GreenletPUSHClient(
            self._zeromq_context, 
            _local_node_name, 
            _space_accounting_pipeline_address,
        )

        self._accounting_client = SpaceAccountingClient(
            _local_node_name,
            self._space_accounting_dealer_client,
            push_client
        )

        self._event_push_client = EventPushClient(
            self._zeromq_context,
            "web-server"
        )

        # message sent to data writers telling them the server
        # is (re)starting, thereby invalidating any archives
        # that are in progress for this node
        unified_id = self._unified_id_factory.next()
        timestamp = create_timestamp()
        self._event_push_client.info("web-writer-start",
                                     "web writer (re)start",
                                     unified_id=unified_id,
                                     timestamp_repr=repr(timestamp),
                                     source_node_name=_local_node_name)

        id_translator_keys_path = os.environ.get(
            "NIMBUS_IO_ID_TRANSLATION_KEYS", 
            os.path.join(_repository_path, "id_translator_keys.pkl"))
        with open(id_translator_keys_path, "r") as input_file:
            id_translator_keys = pickle.load(input_file)

        self._id_translator = InternalIDTranslator(
            id_translator_keys["key"],
            id_translator_keys["hmac_key"], 
            id_translator_keys["iv_key"],
            id_translator_keys["hmac_size"]
        )

        redis_queue = gevent.queue.Queue()

        self._redis_sink = OperationalStatsRedisSink(halt_event, 
                                                     redis_queue,
                                                     _local_node_name)
        self._redis_sink.link_exception(self._unhandled_greenlet_exception)

        self.application = Application(
            self._cluster_row,
            self._unified_id_factory,
            self._id_translator,
            self._data_writer_clients,
            authenticator,
            self._accounting_client,
            self._event_push_client,
            redis_queue
        )
        self.wsgi_server = WSGIServer((_web_writer_host, _web_writer_port), 
                                      application=self.application,
                                      backlog=_wsgi_backlog
        )
Example #8
0
    def __init__(self, halt_event):
        self._log = logging.getLogger("WebWriter")
        memcached_client = memcache.Client(_memcached_nodes)

        self._interaction_pool = gdbpool.interaction_pool.DBInteractionPool(
            get_central_database_dsn(),
            pool_name=_central_pool_name,
            pool_size=_database_pool_size,
            do_log=True)

        authenticator = InteractionPoolAuthenticator(memcached_client,
                                                     self._interaction_pool)

        # Ticket #25: must run database operation in a greenlet
        greenlet = gevent.Greenlet.spawn(_get_cluster_row_and_node_row,
                                         self._interaction_pool)
        greenlet.join()
        self._cluster_row, node_row = greenlet.get()

        self._unified_id_factory = UnifiedIDFactory(node_row.id)

        self._deliverator = Deliverator()

        self._zeromq_context = zmq.Context()

        self._pull_server = GreenletPULLServer(self._zeromq_context,
                                               _web_writer_pipeliner_address,
                                               self._deliverator)
        self._pull_server.link_exception(self._unhandled_greenlet_exception)

        self._data_writer_clients = list()
        for node_name, address in zip(_node_names, _data_writer_addresses):
            resilient_client = GreenletResilientClient(
                self._zeromq_context,
                node_name,
                address,
                _client_tag,
                _web_writer_pipeliner_address,
                self._deliverator,
                connect_messages=[])
            resilient_client.link_exception(self._unhandled_greenlet_exception)
            self._data_writer_clients.append(resilient_client)

        self._space_accounting_dealer_client = GreenletDealerClient(
            self._zeromq_context, _local_node_name,
            _space_accounting_server_address)
        self._space_accounting_dealer_client.link_exception(
            self._unhandled_greenlet_exception)

        push_client = GreenletPUSHClient(
            self._zeromq_context,
            _local_node_name,
            _space_accounting_pipeline_address,
        )

        self._accounting_client = SpaceAccountingClient(
            _local_node_name, self._space_accounting_dealer_client,
            push_client)

        self._event_push_client = EventPushClient(self._zeromq_context,
                                                  "web-server")

        # message sent to data writers telling them the server
        # is (re)starting, thereby invalidating any archives
        # that are in progress for this node
        unified_id = self._unified_id_factory.next()
        timestamp = create_timestamp()
        self._event_push_client.info("web-writer-start",
                                     "web writer (re)start",
                                     unified_id=unified_id,
                                     timestamp_repr=repr(timestamp),
                                     source_node_name=_local_node_name)

        id_translator_keys_path = os.environ.get(
            "NIMBUS_IO_ID_TRANSLATION_KEYS",
            os.path.join(_repository_path, "id_translator_keys.pkl"))
        with open(id_translator_keys_path, "r") as input_file:
            id_translator_keys = pickle.load(input_file)

        self._id_translator = InternalIDTranslator(
            id_translator_keys["key"], id_translator_keys["hmac_key"],
            id_translator_keys["iv_key"], id_translator_keys["hmac_size"])

        redis_queue = gevent.queue.Queue()

        self._redis_sink = OperationalStatsRedisSink(halt_event, redis_queue,
                                                     _local_node_name)
        self._redis_sink.link_exception(self._unhandled_greenlet_exception)

        self.application = Application(self._cluster_row,
                                       self._unified_id_factory,
                                       self._id_translator,
                                       self._data_writer_clients,
                                       authenticator, self._accounting_client,
                                       self._event_push_client, redis_queue)
        self.wsgi_server = WSGIServer((_web_writer_host, _web_writer_port),
                                      application=self.application,
                                      backlog=_wsgi_backlog)
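A minimal driver sketch for the constructor above, assuming the enclosing class is named WebWriter (only its __init__ appears here) and that the caller is responsible for starting the redis sink and the WSGI server:

# hypothetical startup/shutdown sequence; not part of the original examples
from gevent.event import Event

halt_event = Event()
server = WebWriter(halt_event)
server._redis_sink.start()      # __init__ creates the sink greenlet but does not start it
server.wsgi_server.start()      # gevent WSGIServer: begin accepting connections
try:
    halt_event.wait()           # block until shutdown is requested
finally:
    server.wsgi_server.stop()
    server._redis_sink.join()   # the sink greenlet exits once halt_event is set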