def test_05_getCellList(self):
    """Check getCellList() on a partition holding cells in every state.

    DISCARDED cells must never be stored or returned; with
    readable=True, OUT_OF_DATE cells must be filtered out as well.
    """
    num_partitions = 5
    num_replicas = 2
    pt = PartitionTable(num_partitions, num_replicas)
    # Populate partition 0 with one cell per state: readable
    # (UP_TO_DATE, FEEDING), unreadable (OUT_OF_DATE), and one that
    # must be dropped entirely (DISCARDED).
    uuid1 = self.getStorageUUID()
    server1 = ("127.0.0.1", 19001)
    sn1 = StorageNode(Mock(), server1, uuid1)
    pt.setCell(0, sn1, CellStates.UP_TO_DATE)
    uuid2 = self.getStorageUUID()
    server2 = ("127.0.0.2", 19001)
    sn2 = StorageNode(Mock(), server2, uuid2)
    pt.setCell(0, sn2, CellStates.OUT_OF_DATE)
    uuid3 = self.getStorageUUID()
    server3 = ("127.0.0.3", 19001)
    sn3 = StorageNode(Mock(), server3, uuid3)
    pt.setCell(0, sn3, CellStates.FEEDING)
    uuid4 = self.getStorageUUID()
    server4 = ("127.0.0.4", 19001)
    sn4 = StorageNode(Mock(), server4, uuid4)
    pt.setCell(0, sn4, CellStates.DISCARDED)  # won't be added
    # now check the result
    self.assertEqual(len(pt.partition_list[0]), 3)
    for offset in xrange(num_partitions):
        if offset == 0:
            # all nodes except the discarded one
            all_cell = pt.getCellList(0)
            # NOTE: use a distinct comprehension variable ('cell') so
            # the loop index is not clobbered — list comprehension
            # variables leak into the enclosing scope in Python 2.
            all_nodes = [cell.getNode() for cell in all_cell]
            self.assertEqual(len(all_cell), 3)
            self.assertIn(sn1, all_nodes)
            self.assertIn(sn2, all_nodes)
            self.assertIn(sn3, all_nodes)
            self.assertNotIn(sn4, all_nodes)
            # readable nodes: OUT_OF_DATE is excluded too
            all_cell = pt.getCellList(0, readable=True)
            all_nodes = [cell.getNode() for cell in all_cell]
            self.assertEqual(len(all_cell), 2)
            self.assertIn(sn1, all_nodes)
            self.assertNotIn(sn2, all_nodes)
            self.assertIn(sn3, all_nodes)
            self.assertNotIn(sn4, all_nodes)
        else:
            # Other partitions were never populated and must be empty.
            # (Was hard-coded to partition 1; check each partition.)
            self.assertEqual(len(pt.getCellList(offset, False)), 0)
            self.assertEqual(len(pt.getCellList(offset, True)), 0)
def test_05_getCellList(self):
    """Check getCellList() on a partition holding cells in every state.

    DISCARDED cells must never be stored or returned; with
    readable=True, OUT_OF_DATE cells must be filtered out as well.
    """
    num_partitions = 5
    num_replicas = 2
    pt = PartitionTable(num_partitions, num_replicas)
    # Populate partition 0 with one cell per state: readable
    # (UP_TO_DATE, FEEDING), unreadable (OUT_OF_DATE), and one that
    # must be dropped entirely (DISCARDED).
    uuid1 = self.getStorageUUID()
    server1 = ("127.0.0.1", 19001)
    sn1 = self.createStorage(server1, uuid1)
    pt._setCell(0, sn1, CellStates.UP_TO_DATE)
    uuid2 = self.getStorageUUID()
    server2 = ("127.0.0.2", 19001)
    sn2 = self.createStorage(server2, uuid2)
    pt._setCell(0, sn2, CellStates.OUT_OF_DATE)
    uuid3 = self.getStorageUUID()
    server3 = ("127.0.0.3", 19001)
    sn3 = self.createStorage(server3, uuid3)
    pt._setCell(0, sn3, CellStates.FEEDING)
    uuid4 = self.getStorageUUID()
    server4 = ("127.0.0.4", 19001)
    sn4 = self.createStorage(server4, uuid4)
    pt._setCell(0, sn4, CellStates.DISCARDED)  # won't be added
    # now check the result
    self.assertEqual(len(pt.partition_list[0]), 3)
    for offset in xrange(num_partitions):
        if offset == 0:
            # all nodes except the discarded one
            all_cell = pt.getCellList(0)
            # NOTE: use a distinct comprehension variable ('cell') so
            # the loop index is not clobbered — list comprehension
            # variables leak into the enclosing scope in Python 2.
            all_nodes = [cell.getNode() for cell in all_cell]
            self.assertEqual(len(all_cell), 3)
            self.assertIn(sn1, all_nodes)
            self.assertIn(sn2, all_nodes)
            self.assertIn(sn3, all_nodes)
            self.assertNotIn(sn4, all_nodes)
            # readable nodes: OUT_OF_DATE is excluded too
            all_cell = pt.getCellList(0, readable=True)
            all_nodes = [cell.getNode() for cell in all_cell]
            self.assertEqual(len(all_cell), 2)
            self.assertIn(sn1, all_nodes)
            self.assertNotIn(sn2, all_nodes)
            self.assertIn(sn3, all_nodes)
            self.assertNotIn(sn4, all_nodes)
        else:
            # Other partitions were never populated and must be empty.
            # (Was hard-coded to partition 1; check each partition.)
            self.assertEqual(len(pt.getCellList(offset, False)), 0)
            self.assertEqual(len(pt.getCellList(offset, True)), 0)
class Application(BaseApplication):
    """The admin node application.

    NOTE(review): the original docstring said "storage node", but this
    class installs AdminEventHandler and identifies as NodeTypes.ADMIN,
    so it is clearly the admin node application.

    It listens for administration clients, maintains a connection to
    the primary master, and mirrors the cluster state and partition
    table it receives from it.
    """

    def __init__(self, config):
        """Initialize from *config* and register the known masters.

        The partition table is created lazily, once the number of
        partitions/replicas is known from the primary master.
        """
        super(Application, self).__init__(
            config.getSSL(), config.getDynamicMasterList())
        for address in config.getMasters():
            self.nm.createMaster(address=address)
        self.name = config.getCluster()
        self.server = config.getBind()
        logging.debug('IP address is %s, port is %d', *self.server)
        # The partition table is initialized after getting the number of
        # partitions.
        self.pt = None
        self.uuid = config.getUUID()
        self.request_handler = MasterRequestEventHandler(self)
        self.master_event_handler = MasterEventHandler(self)
        self.cluster_state = None
        self.reset()
        registerLiveDebugger(on_log=self.log)

    def close(self):
        """Drop the listening connection and close the application."""
        self.listening_conn = None
        super(Application, self).close()

    def reset(self):
        """Forget any state tied to the current primary master."""
        self.bootstrapped = False
        self.master_conn = None
        self.master_node = None

    def log(self):
        """Dump event manager, node manager and partition table state."""
        self.em.log()
        self.nm.log()
        if self.pt is not None:
            self.pt.log()

    def run(self):
        """Run the main loop, dumping state before re-raising on crash."""
        try:
            self._run()
        except Exception:
            logging.exception('Pre-mortem data:')
            self.log()
            logging.flush()
            raise

    def _run(self):
        """Make sure that the status is sane and start a loop."""
        if not self.name:
            # Parenthesized form: the 'raise E, msg' statement is
            # Python-2-only syntax.
            raise RuntimeError('cluster name must be non-empty')
        # Make a listening port.
        handler = AdminEventHandler(self)
        self.listening_conn = ListeningConnection(self, handler, self.server)
        while self.cluster_state != ClusterStates.STOPPING:
            self.connectToPrimary()
            try:
                while True:
                    self.em.poll(1)
            except PrimaryFailure:
                # Loop back and reconnect to a (new) primary master.
                logging.error('primary master is down')
        self.listening_conn.close()
        # Drain remaining events before exiting.
        while not self.em.isIdle():
            self.em.poll(1)

    def connectToPrimary(self):
        """Find a primary master node, and connect to it.

        If a primary master node is not elected or ready, repeat
        the attempt of a connection periodically.

        Note that I do not accept any connection from non-master nodes
        at this stage.
        """
        self.cluster_state = None
        # search, find, connect and identify to the primary master
        bootstrap = BootstrapManager(self, NodeTypes.ADMIN, self.server)
        self.master_node, self.master_conn, num_partitions, num_replicas = \
            bootstrap.getPrimaryConnection()
        if self.pt is None:
            self.pt = PartitionTable(num_partitions, num_replicas)
        elif self.pt.getPartitions() != num_partitions:
            # XXX: shouldn't we recover instead of raising ?
            raise RuntimeError('the number of partitions is inconsistent')
        elif self.pt.getReplicas() != num_replicas:
            # XXX: shouldn't we recover instead of raising ?
            raise RuntimeError('the number of replicas is inconsistent')
        # passive handler: the master pushes state updates from now on
        self.master_conn.setHandler(self.master_event_handler)
        self.master_conn.ask(Packets.AskClusterState())
        self.master_conn.ask(Packets.AskPartitionTable())

    def sendPartitionTable(self, conn, min_offset, max_offset, uuid):
        """Answer *conn* with the partition table rows in
        [min_offset, max_offset), optionally filtered by cell *uuid*.

        max_offset == 0 means "up to the last partition". An
        out-of-range offset is answered with a ProtocolError.
        """
        # we have a pt
        self.pt.log()
        row_list = []
        if max_offset == 0:
            max_offset = self.pt.getPartitions()
        try:
            for offset in xrange(min_offset, max_offset):
                row = []
                try:
                    for cell in self.pt.getCellList(offset):
                        if uuid is None or cell.getUUID() == uuid:
                            row.append((cell.getUUID(), cell.getState()))
                except TypeError:
                    # NOTE(review): silently tolerated; presumably the
                    # row may be uninitialized — confirm before removing.
                    pass
                row_list.append((offset, row))
        except IndexError:
            conn.send(Errors.ProtocolError('invalid partition table offset'))
        else:
            conn.answer(Packets.AnswerPartitionList(self.pt.getID(),
                                                    row_list))
class Application(BaseApplication):
    """The admin node application.

    NOTE(review): the original docstring said "storage node", but this
    class installs AdminEventHandler and identifies as NodeTypes.ADMIN,
    so it is clearly the admin node application.

    It listens for administration clients, maintains a connection to
    the primary master, and mirrors the cluster state and partition
    table it receives from it.
    """

    def __init__(self, config):
        """Initialize from *config* and register the known masters.

        The partition table is created lazily, once the number of
        partitions/replicas is known from the primary master.
        """
        super(Application, self).__init__(
            config.getSSL(), config.getDynamicMasterList())
        for address in config.getMasters():
            self.nm.createMaster(address=address)
        self.name = config.getCluster()
        self.server = config.getBind()
        logging.debug('IP address is %s, port is %d', *self.server)
        # The partition table is initialized after getting the number of
        # partitions.
        self.pt = None
        self.uuid = config.getUUID()
        self.request_handler = MasterRequestEventHandler(self)
        self.master_event_handler = MasterEventHandler(self)
        self.cluster_state = None
        self.reset()
        registerLiveDebugger(on_log=self.log)

    def close(self):
        """Drop the listening connection and close the application."""
        self.listening_conn = None
        super(Application, self).close()

    def reset(self):
        """Forget any state tied to the current primary master."""
        self.bootstrapped = False
        self.master_conn = None
        self.master_node = None

    def log(self):
        """Dump event manager, node manager and partition table state."""
        self.em.log()
        self.nm.log()
        if self.pt is not None:
            self.pt.log()

    def run(self):
        """Run the main loop, dumping state before re-raising on crash."""
        try:
            self._run()
        except Exception:
            logging.exception('Pre-mortem data:')
            self.log()
            logging.flush()
            raise

    def _run(self):
        """Make sure that the status is sane and start a loop."""
        if not self.name:
            # Parenthesized form: the 'raise E, msg' statement is
            # Python-2-only syntax.
            raise RuntimeError('cluster name must be non-empty')
        # Make a listening port.
        handler = AdminEventHandler(self)
        self.listening_conn = ListeningConnection(self, handler, self.server)
        while self.cluster_state != ClusterStates.STOPPING:
            self.connectToPrimary()
            try:
                while True:
                    self.em.poll(1)
            except PrimaryFailure:
                # Loop back and reconnect to a (new) primary master.
                logging.error('primary master is down')
        self.listening_conn.close()
        # Drain remaining events before exiting.
        while not self.em.isIdle():
            self.em.poll(1)

    def connectToPrimary(self):
        """Find a primary master node, and connect to it.

        If a primary master node is not elected or ready, repeat
        the attempt of a connection periodically.

        Note that I do not accept any connection from non-master nodes
        at this stage.
        """
        self.cluster_state = None
        # search, find, connect and identify to the primary master
        bootstrap = BootstrapManager(self, self.name, NodeTypes.ADMIN,
                                     self.uuid, self.server)
        data = bootstrap.getPrimaryConnection()
        (node, conn, uuid, num_partitions, num_replicas) = data
        self.master_node = node
        self.master_conn = conn
        # The master may have assigned us a (new) UUID.
        self.uuid = uuid
        if self.pt is None:
            self.pt = PartitionTable(num_partitions, num_replicas)
        elif self.pt.getPartitions() != num_partitions:
            # XXX: shouldn't we recover instead of raising ?
            raise RuntimeError('the number of partitions is inconsistent')
        elif self.pt.getReplicas() != num_replicas:
            # XXX: shouldn't we recover instead of raising ?
            raise RuntimeError('the number of replicas is inconsistent')
        # passive handler: the master pushes state updates from now on
        self.master_conn.setHandler(self.master_event_handler)
        self.master_conn.ask(Packets.AskClusterState())
        self.master_conn.ask(Packets.AskNodeInformation())
        self.master_conn.ask(Packets.AskPartitionTable())

    def sendPartitionTable(self, conn, min_offset, max_offset, uuid):
        """Answer *conn* with the partition table rows in
        [min_offset, max_offset), optionally filtered by cell *uuid*.

        max_offset == 0 means "up to the last partition". An
        out-of-range offset is notified with a ProtocolError.
        """
        # we have a pt
        self.pt.log()
        row_list = []
        if max_offset == 0:
            max_offset = self.pt.getPartitions()
        try:
            for offset in xrange(min_offset, max_offset):
                row = []
                try:
                    for cell in self.pt.getCellList(offset):
                        if uuid is None or cell.getUUID() == uuid:
                            row.append((cell.getUUID(), cell.getState()))
                except TypeError:
                    # NOTE(review): silently tolerated; presumably the
                    # row may be uninitialized — confirm before removing.
                    pass
                row_list.append((offset, row))
        except IndexError:
            conn.notify(Errors.ProtocolError(
                'invalid partition table offset'))
        else:
            conn.answer(Packets.AnswerPartitionList(self.pt.getID(),
                                                    row_list))