Example #1
    def setUp(self):
        self.tearDown()
        os.makedirs(_repository_path)
        self._key_generator = generate_key()

        self._database_connection = get_node_local_connection()

        self._event_publisher_process = start_event_publisher(
            _local_node_name, 
            _event_publisher_pull_address,
            _event_publisher_pub_address
        )
        poll_result = poll_process(self._event_publisher_process)
        self.assertEqual(poll_result, None)

        self._data_writer_process = start_data_writer(
            _cluster_name,
            _local_node_name, 
            _data_writer_address,
            _event_publisher_pull_address,
            _repository_path
        )
        poll_result = poll_process(self._data_writer_process)
        self.assertEqual(poll_result, None)

        self._data_reader_process = start_data_reader(
            _local_node_name, 
            _data_reader_address,
            _event_publisher_pull_address,
            _repository_path
        )
        poll_result = poll_process(self._data_reader_process)
        self.assertEqual(poll_result, None)

    def test_usage(self):
        """test SpaceUsage"""
        total_bytes_added = 42 * 1024 * 1024 * 1000
        total_bytes_removed = 21 * 1024 * 1024 * 50
        total_bytes_retrieved = 66 * 1024 * 1024 * 25
        request_id = uuid.uuid1().hex

        poll_result = poll_process(self._space_accounting_server_process)
        self.assertEqual(poll_result, None)

        send_to_pipeline(
            _space_accounting_server_address,
            _detail_generator(total_bytes_added, total_bytes_removed, total_bytes_retrieved),
        )

        request = {"message-type": "space-usage-request", "collection-id": _collection_id}
        reply = send_request_and_get_reply(_space_accounting_server_address, request)
        self.assertEqual(reply["collection-id"], _collection_id)
        self.assertEqual(reply["message-type"], "space-usage-reply")
        self.assertEqual(reply["result"], "success")
        self.assertEqual(reply["bytes-added"], total_bytes_added, (reply["bytes-added"], total_bytes_added))
        self.assertEqual(reply["bytes-removed"], total_bytes_removed, (reply["bytes-removed"], total_bytes_removed))
        self.assertEqual(
            reply["bytes-retrieved"], total_bytes_retrieved, (reply["bytes-retrieved"], total_bytes_retrieved)
        )
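
Every setUp in these examples begins with self.tearDown(), so the fixture must tolerate being torn down before anything was started. The matching tearDown is not shown in this example; a minimal sketch, assuming the terminate_process helper that Example #10 uses and the attribute names from the setUp above (shutil would need to be imported), might look like:

    def tearDown(self):
        # close the node-local database connection if setUp got that far
        if getattr(self, "_database_connection", None) is not None:
            self._database_connection.close()
            self._database_connection = None

        # terminate the worker processes in reverse start order
        for attribute_name in ["_data_reader_process",
                               "_data_writer_process",
                               "_event_publisher_process"]:
            process = getattr(self, attribute_name, None)
            if process is not None:
                terminate_process(process)
                setattr(self, attribute_name, None)

        # remove the repository so setUp can recreate it
        if os.path.exists(_repository_path):
            shutil.rmtree(_repository_path)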
Example #3
    def setUp(self):
        self.tearDown()
        self._key_generator = generate_key()

        self._event_publisher_process = start_event_publisher(
            _local_node_name, _event_publisher_pull_address,
            _event_publisher_pub_address)
        poll_result = poll_process(self._event_publisher_process)
        self.assertEqual(poll_result, None)
Example #4
    def setUp(self):
        self.tearDown()
        self._key_generator = generate_key()

        self._event_publisher_process = start_event_publisher(
            _local_node_name, 
            _event_publisher_pull_address,
            _event_publisher_pub_address
        )
        poll_result = poll_process(self._event_publisher_process)
        self.assertEqual(poll_result, None)
Example #5
    def setUp(self):
        initialize_logging(_log_path)
        self.tearDown()

        # clear out any old stats
        space_accounting_database = SpaceAccountingDatabase()
        space_accounting_database.clear_collection_stats(_collection_id)
        space_accounting_database.commit()

        self._space_accounting_server_process = start_space_accounting_server(
            _local_node_name, _space_accounting_server_address, _space_accounting_pipeline_address
        )
        poll_result = poll_process(self._space_accounting_server_process)
        self.assertEqual(poll_result, None)
Example #6
    def setUp(self):
        self.tearDown()
        os.makedirs(_repository_path)
        self._key_generator = generate_key()

        self._database_connection = get_node_local_connection()

        self._event_publisher_process = start_event_publisher(
            _local_node_name, _event_publisher_pull_address,
            _event_publisher_pub_address)
        poll_result = poll_process(self._event_publisher_process)
        self.assertEqual(poll_result, None)

        self._data_writer_process = start_data_writer(
            _cluster_name, _local_node_name, _data_writer_address,
            _event_publisher_pull_address, _repository_path)
        poll_result = poll_process(self._data_writer_process)
        self.assertEqual(poll_result, None)

        self._data_reader_process = start_data_reader(
            _local_node_name, _data_reader_address,
            _event_publisher_pull_address, _repository_path)
        poll_result = poll_process(self._data_reader_process)
        self.assertEqual(poll_result, None)
Example #7
    def setUp(self):
        initialize_logging(_log_path)
        self.tearDown()

        # clear out any old stats
        space_accounting_database = SpaceAccountingDatabase()
        space_accounting_database.clear_collection_stats(_collection_id)
        space_accounting_database.commit()

        self._space_accounting_server_process = \
            start_space_accounting_server(
                _local_node_name,
                _space_accounting_server_address,
                _space_accounting_pipeline_address
            )
        poll_result = poll_process(self._space_accounting_server_process)
        self.assertEqual(poll_result, None)
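
This fixture also leads with self.tearDown(). The matching tearDown is not shown; a minimal sketch, again assuming the terminate_process helper from Example #10:

    def tearDown(self):
        if getattr(self, "_space_accounting_server_process", None) is not None:
            terminate_process(self._space_accounting_server_process)
            self._space_accounting_server_process = None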
Example #8
    def test_usage(self):
        """test SpaceUsage"""
        total_bytes_added = 42 * 1024 * 1024 * 1000
        total_bytes_removed = 21 * 1024 * 1024 * 50
        total_bytes_retrieved = 66 * 1024 * 1024 * 25
        request_id = uuid.uuid1().hex

        poll_result = poll_process(self._space_accounting_server_process)
        self.assertEqual(poll_result, None)

        send_to_pipeline(
            _space_accounting_server_address,
            _detail_generator(total_bytes_added, total_bytes_removed,
                              total_bytes_retrieved))

        request = {
            "message-type": "space-usage-request",
            "collection-id": _collection_id,
        }
        reply = send_request_and_get_reply(_space_accounting_server_address,
                                           request)
        self.assertEqual(reply["collection-id"], _collection_id)
        self.assertEqual(reply["message-type"], "space-usage-reply")
        self.assertEqual(reply["result"], "success")
        self.assertEqual(reply["bytes-added"], total_bytes_added, (
            reply["bytes-added"],
            total_bytes_added,
        ))
        self.assertEqual(reply["bytes-removed"], total_bytes_removed, (
            reply["bytes-removed"],
            total_bytes_removed,
        ))
        self.assertEqual(reply["bytes-retrieved"], total_bytes_retrieved, (
            reply["bytes-retrieved"],
            total_bytes_retrieved,
        ))
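
The _detail_generator helper is not shown in any of these examples; all test_usage requires is that the detail messages it yields sum to the three totals asserted against the space-usage-reply. A purely hypothetical sketch (the "event" and "value" field names are assumptions, not the real nimbus.io wire format):

def _detail_generator(total_added, total_removed, total_retrieved):
    # split each running total into four detail events whose sums
    # match what test_usage asserts in the reply
    for event, total in [("bytes_added", total_added),
                         ("bytes_removed", total_removed),
                         ("bytes_retrieved", total_retrieved)]:
        chunk = total // 4
        for _ in range(3):
            yield {"collection-id": _collection_id,  # hypothetical layout
                   "event": event,
                   "value": chunk}
        yield {"collection-id": _collection_id,
               "event": event,
               "value": total - 3 * chunk}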
Example #9
    def setUp(self):
        if not hasattr(self, "_log"):
            self._log = logging.getLogger("TestHandoffServer")

        self.tearDown()
        database_connection = get_central_connection()
        cluster_row = get_cluster_row(database_connection)
        node_rows = get_node_rows(database_connection, cluster_row.id)
        database_connection.close()

        self._key_generator = generate_key()

        self._event_publisher_processes = list()
        self._data_writer_processes = list()
        self._data_reader_processes = list()
        self._handoff_server_processes = list()

        for i in xrange(_node_count):
            node_name = _generate_node_name(i)
            repository_path = _repository_path(node_name)
            os.makedirs(repository_path)

            process = start_event_publisher(node_name,
                                            _event_publisher_pull_addresses[i],
                                            _event_publisher_pub_addresses[i])
            poll_result = poll_process(process)
            self.assertEqual(poll_result, None)
            self._event_publisher_processes.append(process)
            time.sleep(1.0)

            process = start_data_writer(_cluster_name, node_name,
                                        _data_writer_addresses[i],
                                        _event_publisher_pull_addresses[i],
                                        repository_path)
            poll_result = poll_process(process)
            self.assertEqual(poll_result, None)
            self._data_writer_processes.append(process)
            time.sleep(1.0)

            process = start_data_reader(node_name, _data_reader_addresses[i],
                                        _event_publisher_pull_addresses[i],
                                        repository_path)
            poll_result = poll_process(process)
            self.assertEqual(poll_result, None)
            self._data_reader_processes.append(process)
            time.sleep(1.0)

            process = start_handoff_server(
                _cluster_name, node_name, _handoff_server_addresses,
                _handoff_server_pipeline_addresses[i], _data_reader_addresses,
                _data_writer_addresses, _event_publisher_pull_addresses[i],
                _repository_path(node_name))
            poll_result = poll_process(process)
            self.assertEqual(poll_result, None)
            self._handoff_server_processes.append(process)
            time.sleep(1.0)

        self._context = zmq.Context()
        self._pollster = GreenletZeroMQPollster()
        self._deliverator = Deliverator()

        self._pull_server = GreenletPULLServer(self._context, _client_address,
                                               self._deliverator)
        self._pull_server.register(self._pollster)

        backup_nodes = random.sample(node_rows[1:], 2)
        self._log.debug("backup nodes = %s" % ([n.name
                                                for n in backup_nodes], ))

        self._resilient_clients = list()
        for node_row, address in zip(node_rows, _data_writer_addresses):
            if node_row not in backup_nodes:
                continue
            resilient_client = GreenletResilientClient(
                self._context,
                self._pollster,
                node_row.name,
                address,
                _local_node_name,
                _client_address,
                self._deliverator,
            )
            self._resilient_clients.append(resilient_client)
        self._log.debug("%s resilient clients" %
                        (len(self._resilient_clients), ))

        self._data_writer_handoff_client = DataWriterHandoffClient(
            node_rows[0].name, self._resilient_clients)

        self._pollster.start()
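
This multi-node fixture starts four processes per node plus a greenlet pollster, so its tearDown (not shown) has more to unwind. A rough sketch, assuming terminate_process again and assuming the pollster supports kill() the way gevent greenlets do:

    def tearDown(self):
        # stop polling before the sockets go away
        if getattr(self, "_pollster", None) is not None:
            self._pollster.kill()
            self._pollster = None

        # terminate every spawned process, most recently started first
        for list_name in ["_handoff_server_processes",
                          "_data_reader_processes",
                          "_data_writer_processes",
                          "_event_publisher_processes"]:
            for process in getattr(self, list_name, list()):
                terminate_process(process)
            setattr(self, list_name, list())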
Example #10
class TestDataWriter(unittest.TestCase):
    """test message handling in data writer"""
    def setUp(self):
        self.tearDown()
        self._key_generator = generate_key()

        self._event_publisher_process = start_event_publisher(
            _local_node_name, _event_publisher_pull_address,
            _event_publisher_pub_address)
        poll_result = poll_process(self._event_publisher_process)
        self.assertEqual(poll_result, None)

        data_writer_type = os.environ["NIMBUSIO_DATA_WRITER"]
        if data_writer_type == "python":
            self._data_writer_process = start_data_writer(
                _cluster_name,
                _local_node_name,
                _data_writer_address,
                _anti_entropy_address,
                _event_publisher_pull_address,
                _event_publisher_pub_address,
                _repository_path,
                central_db_pw=os.environ["NIMBUSIO_CENTRAL_USER_PASSWORD"],
                node_db_pw=os.environ["NIMBUSIO_NODE_USER_PASSWORD"])
            poll_result = poll_process(self._data_writer_process)
            self.assertEqual(poll_result, None)

    def tearDown(self):
        if hasattr(self, "_data_writer_process") \
        and self._data_writer_process is not None:
            terminate_process(self._data_writer_process)
            self._data_writer_process = None

        if hasattr(self, "_event_publisher_process") \
        and self._event_publisher_process is not None:
            terminate_process(self._event_publisher_process)
            self._event_publisher_process = None

    def test_archive_key_entire(self):
        """test archiving all data for a key in a single message"""
        file_size = 10 * 64 * 1024
        content_item = random_string(file_size)
        user_request_id = uuid.uuid1().hex
        collection_id = 1001
        key = self._key_generator.next()
        archive_priority = create_priority()
        timestamp = create_timestamp()
        segment_num = 2

        file_adler32 = zlib.adler32(content_item)
        file_md5 = hashlib.md5(content_item)

        unified_id_factory = UnifiedIDFactory(1)
        unified_id = unified_id_factory.next()

        message = {
            "message-type": "archive-key-entire",
            "priority": archive_priority,
            "user-request-id": user_request_id,
            "collection-id": collection_id,
            "key": key,
            "unified-id": unified_id,
            "timestamp-repr": repr(timestamp),
            "conjoined-part": 0,
            "segment-num": segment_num,
            "segment-size": file_size,
            "zfec-padding-size": 4,
            "segment-md5-digest": b64encode(file_md5.digest()),
            "segment-adler32": file_adler32,
            "file-size": file_size,
            "file-adler32": file_adler32,
            "file-hash": b64encode(file_md5.digest()),
            "source-node-name": _local_node_name,
            "handoff-node-name": None,
        }
        reply = send_request_and_get_reply(_local_node_name,
                                           _data_writer_address,
                                           _local_node_name,
                                           _client_address,
                                           message,
                                           data=content_item)
        self.assertEqual(reply["message-type"], "archive-key-final-reply")
        self.assertEqual(reply["user-request-id"], user_request_id)
        self.assertEqual(reply["result"], "success", reply["error-message"])

    def xxxtest_archive_key_entire_with_meta(self):
        """
        test archiving a key in a single message, including metadata
        """
        file_size = 10 * 64 * 1024
        content_item = random_string(file_size)
        user_request_id = uuid.uuid1().hex
        collection_id = 1001
        key = self._key_generator.next()
        archive_priority = create_priority()
        timestamp = create_timestamp()
        segment_num = 2

        meta_key = "".join([nimbus_meta_prefix, "test_key"])
        meta_value = "pork"

        file_adler32 = zlib.adler32(content_item)
        file_md5 = hashlib.md5(content_item)

        unified_id_factory = UnifiedIDFactory(1)
        unified_id = unified_id_factory.next()

        message = {
            "message-type": "archive-key-entire",
            "priority": archive_priority,
            "user-request-id": user_request_id,
            "collection-id": collection_id,
            "key": key,
            "unified-id": unified_id,
            "timestamp-repr": repr(timestamp),
            "conjoined-part": 0,
            "segment-num": segment_num,
            "segment-size": file_size,
            "zfec-padding-size": 4,
            "segment-adler32": file_adler32,
            "segment-md5-digest": b64encode(file_md5.digest()),
            "file-size": file_size,
            "file-adler32": file_adler32,
            "file-hash": b64encode(file_md5.digest()),
            "source-node-name": _local_node_name,
            "handoff-node-name": None,
            meta_key: meta_value
        }
        reply = send_request_and_get_reply(_local_node_name,
                                           _data_writer_address,
                                           _local_node_name,
                                           _client_address,
                                           message,
                                           data=content_item)
        self.assertEqual(reply["message-type"], "archive-key-final-reply")
        self.assertEqual(reply["user-request-id"], user_request_id)
        self.assertEqual(reply["result"], "success", reply["error-message"])

    def xxxtest_large_archive(self):
        """
        test archiving a file that needs more than one message.
        For example, a 10 MB file: each node would get 10 120 KB
        zfec shares.
        """
        slice_size = 1024 * 1024
        slice_count = 10
        total_size = slice_size * slice_count
        test_data = random_string(total_size)

        user_request_id = uuid.uuid1().hex

        collection_id = 1001
        archive_priority = create_priority()
        timestamp = create_timestamp()
        key = self._key_generator.next()
        segment_num = 4
        sequence_num = 0

        file_adler32 = zlib.adler32(test_data)
        file_md5 = hashlib.md5(test_data)

        slice_start = 0
        slice_end = slice_size

        segment_adler32 = zlib.adler32(test_data[slice_start:slice_end])
        segment_md5 = hashlib.md5(test_data[slice_start:slice_end])

        unified_id_factory = UnifiedIDFactory(1)
        unified_id = unified_id_factory.next()

        message = {
            "message-type": "archive-key-start",
            "priority": archive_priority,
            "user-request-id": user_request_id,
            "collection-id": collection_id,
            "key": key,
            "unified-id": unified_id,
            "timestamp-repr": repr(timestamp),
            "conjoined-part": 0,
            "segment-num": segment_num,
            "segment-size": len(test_data[slice_start:slice_end]),
            "zfec-padding-size": 4,
            "segment-md5-digest": b64encode(segment_md5.digest()),
            "segment-adler32": segment_adler32,
            "sequence-num": sequence_num,
            "source-node-name": _local_node_name,
            "handoff-node-name": None,
        }
        reply = send_request_and_get_reply(
            _local_node_name,
            _data_writer_address,
            _local_node_name,
            _client_address,
            message,
            data=test_data[slice_start:slice_end])
        self.assertEqual(reply["message-type"], "archive-key-start-reply")
        self.assertEqual(reply["user-request-id"], user_request_id)
        self.assertEqual(reply["result"], "success", reply["error-message"])

        for _ in range(slice_count - 2):
            sequence_num += 1
            slice_start += slice_size
            slice_end += slice_size

            segment_adler32 = zlib.adler32(test_data[slice_start:slice_end])
            segment_md5 = hashlib.md5(test_data[slice_start:slice_end])

            message_id = uuid.uuid1().hex
            message = {
                "message-type": "archive-key-next",
                "priority": archive_priority,
                "user-request-id": user_request_id,
                "collection-id": collection_id,
                "key": key,
                "unified-id": unified_id,
                "timestamp-repr": repr(timestamp),
                "conjoined-part": 0,
                "segment-num": segment_num,
                "segment-size": len(test_data[slice_start:slice_end]),
                "zfec-padding-size": 4,
                "segment-md5-digest": b64encode(segment_md5.digest()),
                "segment-adler32": segment_adler32,
                "sequence-num": sequence_num,
                "source-node-name": _local_node_name,
                "handoff-node-name": None,
            }
            reply = send_request_and_get_reply(
                _local_node_name,
                _data_writer_address,
                _local_node_name,
                _client_address,
                message,
                data=test_data[slice_start:slice_end])
            self.assertEqual(reply["message-type"], "archive-key-next-reply")
            self.assertEqual(reply["user-request-id"], user_request_id)
            self.assertEqual(reply["result"], "success",
                             reply["error-message"])

        sequence_num += 1
        slice_start += slice_size
        slice_end += slice_size

        segment_adler32 = zlib.adler32(test_data[slice_start:slice_end])
        segment_md5 = hashlib.md5(test_data[slice_start:slice_end])

        message = {
            "message-type": "archive-key-final",
            "priority": archive_priority,
            "user-request-id": user_request_id,
            "collection-id": collection_id,
            "key": key,
            "unified-id": unified_id,
            "timestamp-repr": repr(timestamp),
            "conjoined-part": 0,
            "segment-num": segment_num,
            "segment-size": len(test_data[slice_start:slice_end]),
            "zfec-padding-size": 4,
            "segment-md5-digest": b64encode(segment_md5.digest()),
            "segment-adler32": segment_adler32,
            "sequence-num": sequence_num,
            "file-size": total_size,
            "file-adler32": file_adler32,
            "file-hash": b64encode(file_md5.digest()),
            "source-node-name": _local_node_name,
            "handoff-node-name": None,
        }
        reply = send_request_and_get_reply(
            _local_node_name,
            _data_writer_address,
            _local_node_name,
            _client_address,
            message,
            data=test_data[slice_start:slice_end])

        self.assertEqual(reply["message-type"], "archive-key-final-reply")
        self.assertEqual(reply["user-request-id"], user_request_id)
        self.assertEqual(reply["result"], "success", reply["error-message"])

    def _destroy(self, collection_id, key, unified_id_to_delete, timestamp,
                 segment_num):
        user_request_id = uuid.uuid1().hex
        archive_priority = create_priority()

        unified_id_factory = UnifiedIDFactory(1)
        unified_id = unified_id_factory.next()

        message = {
            "message-type": "destroy-key",
            "priority": archive_priority,
            "user-request-id": user_request_id,
            "collection-id": collection_id,
            "key": key,
            "unified-id-to-delete": unified_id_to_delete,
            "unified-id": unified_id,
            "timestamp-repr": repr(timestamp),
            "segment-num": segment_num,
            "source-node-name": _local_node_name,
            "handoff-node-name": None,
        }
        reply = send_request_and_get_reply(_local_node_name,
                                           _data_writer_address,
                                           _local_node_name, _client_address,
                                           message)
        self.assertEqual(reply["message-type"], "destroy-key-reply")
        self.assertEqual(reply["user-request-id"], user_request_id)

        return reply

    def xxxtest_destroy_nonexistent_key(self):
        """test destroying a key that does not exist, with no complications"""
        unified_id_factory = UnifiedIDFactory(1)
        unified_id = unified_id_factory.next()

        collection_id = 1001
        key = self._key_generator.next()
        segment_num = 4
        timestamp = create_timestamp()
        reply = self._destroy(collection_id, key, unified_id, timestamp,
                              segment_num)
        self.assertEqual(reply["result"], "success", reply["error-message"])

    def xxxtest_simple_destroy(self):
        """test destroying a key that exists, with no complicatons"""
        file_size = 10 * 64 * 1024
        content_item = random_string(file_size)
        message_id = uuid.uuid1().hex
        collection_id = 1001
        key = self._key_generator.next()
        archive_priority = create_priority()
        archive_timestamp = create_timestamp()
        destroy_timestamp = archive_timestamp + timedelta(seconds=1)
        segment_num = 2

        file_adler32 = zlib.adler32(content_item)
        file_md5 = hashlib.md5(content_item)

        message = {
            "message-type": "archive-key-entire",
            "message-id": message_id,
            "priority": archive_priority,
            "collection-id": collection_id,
            "key": key,
            "timestamp-repr": repr(archive_timestamp),
            "segment-num": segment_num,
            "segment-size": file_size,
            "segment-adler32": file_adler32,
            "segment-md5-digest": b64encode(file_md5.digest()),
            "file-size": file_size,
            "file-adler32": file_adler32,
            "file-hash": b64encode(file_md5.digest()),
            "handoff-node-name": None,
        }
        reply = send_request_and_get_reply(_local_node_name,
                                           _data_writer_address,
                                           _local_node_name,
                                           _client_address,
                                           message,
                                           data=content_item)
        self.assertEqual(reply["message-id"], message_id)
        self.assertEqual(reply["message-type"], "archive-key-final-reply")
        self.assertEqual(reply["result"], "success")

        # _destroy needs the unified id of the version being deleted
        unified_id_factory = UnifiedIDFactory(1)
        unified_id = unified_id_factory.next()
        reply = self._destroy(collection_id, key, unified_id,
                              destroy_timestamp, segment_num)
        self.assertEqual(reply["result"], "success", reply["error-message"])

    def xxxtest_destroy_tombstone(self):
        """test destroying a key that has already been destroyed"""
        file_size = 10 * 64 * 1024
        content_item = random_string(file_size)
        message_id = uuid.uuid1().hex
        collection_id = 1001
        key = self._key_generator.next()
        archive_priority = create_priority()
        archive_timestamp = create_timestamp()
        destroy_1_timestamp = archive_timestamp + timedelta(seconds=1)
        destroy_2_timestamp = destroy_1_timestamp + timedelta(seconds=1)
        segment_num = 2

        file_adler32 = zlib.adler32(content_item)
        file_md5 = hashlib.md5(content_item)

        message = {
            "message-type": "archive-key-entire",
            "message-id": message_id,
            "priority": archive_priority,
            "collection-id": collection_id,
            "key": key,
            "timestamp-repr": repr(archive_timestamp),
            "segment-num": segment_num,
            "segment-size": file_size,
            "segment-adler32": file_adler32,
            "segment-md5-digest": b64encode(file_md5.digest()),
            "file-size": file_size,
            "file-adler32": file_adler32,
            "file-hash": b64encode(file_md5.digest()),
            "handoff-node-name": None,
        }
        reply = send_request_and_get_reply(_local_node_name,
                                           _data_writer_address,
                                           _local_node_name,
                                           _client_address,
                                           message,
                                           data=content_item)
        self.assertEqual(reply["message-id"], message_id)
        self.assertEqual(reply["message-type"], "archive-key-final-reply")
        self.assertEqual(reply["result"], "success")

        # _destroy needs the unified id of the version being deleted
        unified_id_factory = UnifiedIDFactory(1)
        unified_id = unified_id_factory.next()

        reply = self._destroy(collection_id, key, unified_id,
                              destroy_1_timestamp, segment_num)
        self.assertEqual(reply["result"], "success", reply["error-message"])

        reply = self._destroy(collection_id, key, unified_id,
                              destroy_2_timestamp, segment_num)
        self.assertEqual(reply["result"], "success", reply["error-message"])

    def xxxtest_old_destroy(self):
        """
        test destroying a key that exists, but is newer than the destroy
        message
        """
        file_size = 10 * 64 * 1024
        content_item = random_string(file_size)
        message_id = uuid.uuid1().hex
        collection_id = 1001
        key = self._key_generator.next()
        archive_priority = create_priority()
        archive_timestamp = create_timestamp()
        destroy_timestamp = archive_timestamp - timedelta(seconds=1)
        segment_num = 2

        file_adler32 = zlib.adler32(content_item)
        file_md5 = hashlib.md5(content_item)

        message = {
            "message-type": "archive-key-entire",
            "message-id": message_id,
            "priority": archive_priority,
            "collection-id": collection_id,
            "key": key,
            "timestamp-repr": repr(archive_timestamp),
            "segment-num": segment_num,
            "segment-size": file_size,
            "segment-adler32": file_adler32,
            "segment-md5-digest": b64encode(file_md5.digest()),
            "file-size": file_size,
            "file-adler32": file_adler32,
            "file-hash": b64encode(file_md5.digest()),
            "handoff-node-name": None,
        }
        reply = send_request_and_get_reply(_local_node_name,
                                           _data_writer_address,
                                           _local_node_name,
                                           _client_address,
                                           message,
                                           data=content_item)
        self.assertEqual(reply["message-id"], message_id)
        self.assertEqual(reply["message-type"], "archive-key-final-reply")
        self.assertEqual(reply["result"], "success")

        # _destroy needs the unified id of the version being deleted
        unified_id_factory = UnifiedIDFactory(1)
        unified_id = unified_id_factory.next()
        reply = self._destroy(collection_id, key, unified_id,
                              destroy_timestamp, segment_num)
        self.assertEqual(reply["result"], "success", reply["error-message"])
Example #11
    def setUp(self):
        if not hasattr(self, "_log"):
            self._log = logging.getLogger("TestHandoffServer")

        self.tearDown()
        database_connection = get_central_connection()
        cluster_row = get_cluster_row(database_connection)
        node_rows = get_node_rows(database_connection, cluster_row.id)
        database_connection.close()

        self._key_generator = generate_key()

        self._event_publisher_processes = list()
        self._data_writer_processes = list()
        self._data_reader_processes = list()
        self._handoff_server_processes = list()

        for i in xrange(_node_count):
            node_name = _generate_node_name(i)
            repository_path = _repository_path(node_name)
            os.makedirs(repository_path)
            
            process = start_event_publisher(
                node_name, 
                _event_publisher_pull_addresses[i],
                _event_publisher_pub_addresses[i]
            )
            poll_result = poll_process(process)
            self.assertEqual(poll_result, None)
            self._event_publisher_processes.append(process)
            time.sleep(1.0)

            process = start_data_writer(
                _cluster_name,
                node_name, 
                _data_writer_addresses[i],
                _event_publisher_pull_addresses[i],
                repository_path
            )
            poll_result = poll_process(process)
            self.assertEqual(poll_result, None)
            self._data_writer_processes.append(process)
            time.sleep(1.0)

            process = start_data_reader(
                node_name, 
                _data_reader_addresses[i],
                _event_publisher_pull_addresses[i], 
                repository_path
            )
            poll_result = poll_process(process)
            self.assertEqual(poll_result, None)
            self._data_reader_processes.append(process)
            time.sleep(1.0)

            process = start_handoff_server(
                _cluster_name,
                node_name, 
                _handoff_server_addresses,
                _handoff_server_pipeline_addresses[i],
                _data_reader_addresses,
                _data_writer_addresses,
                _event_publisher_pull_addresses[i], 
                _repository_path(node_name)
            )
            poll_result = poll_process(process)
            self.assertEqual(poll_result, None)
            self._handoff_server_processes.append(process)
            time.sleep(1.0)

        self._context = zmq.Context()
        self._pollster = GreenletZeroMQPollster()
        self._deliverator = Deliverator()

        self._pull_server = GreenletPULLServer(
            self._context, 
            _client_address,
            self._deliverator
        )
        self._pull_server.register(self._pollster)

        backup_nodes = random.sample(node_rows[1:], 2)
        self._log.debug("backup nodes = %s" % (
            [n.name for n in backup_nodes], 
        ))

        self._resilient_clients = list()        
        for node_row, address in zip(node_rows, _data_writer_addresses):
            if node_row not in backup_nodes:
                continue
            resilient_client = GreenletResilientClient(
                self._context,
                self._pollster,
                node_row.name,
                address,
                _local_node_name,
                _client_address,
                self._deliverator,
            )
            self._resilient_clients.append(resilient_client)
        self._log.debug("%s resilient clients" % (
            len(self._resilient_clients), 
        ))

        self._data_writer_handoff_client = DataWriterHandoffClient(
            node_rows[0].name,
            self._resilient_clients
        )

        self._pollster.start()