def test_06_clear(self):
    # add some nodes
    num_partitions = 5
    num_replicas = 2
    pt = PartitionTable(num_partitions, num_replicas)
    # add two kinds of nodes, usable and unusable
    uuid1 = self.getStorageUUID()
    server1 = ("127.0.0.1", 19001)
    sn1 = self.createStorage(server1, uuid1)
    pt._setCell(0, sn1, CellStates.UP_TO_DATE)
    uuid2 = self.getStorageUUID()
    server2 = ("127.0.0.2", 19001)
    sn2 = self.createStorage(server2, uuid2)
    pt._setCell(1, sn2, CellStates.OUT_OF_DATE)
    uuid3 = self.getStorageUUID()
    server3 = ("127.0.0.3", 19001)
    sn3 = self.createStorage(server3, uuid3)
    pt._setCell(2, sn3, CellStates.FEEDING)
    # now check the result
    self.assertEqual(len(pt.partition_list[0]), 1)
    self.assertEqual(len(pt.partition_list[1]), 1)
    self.assertEqual(len(pt.partition_list[2]), 1)
    pt.clear()
    partition_list = pt.partition_list
    self.assertEqual(len(partition_list), num_partitions)
    for x in xrange(num_partitions):
        part = partition_list[x]
        self.assertTrue(isinstance(part, list))
        self.assertEqual(len(part), 0)
    self.assertEqual(len(pt.count_dict), 0)
def connectToPrimary(self):
    """Find a primary master node, and connect to it.

    If a primary master node is not elected or ready, repeat
    the attempt of a connection periodically.

    Note that I do not accept any connection from non-master nodes
    at this stage.
    """
    self.cluster_state = None
    # search, find, connect and identify to the primary master
    bootstrap = BootstrapManager(self, NodeTypes.ADMIN, self.server)
    self.master_node, self.master_conn, num_partitions, num_replicas = \
        bootstrap.getPrimaryConnection()

    if self.pt is None:
        self.pt = PartitionTable(num_partitions, num_replicas)
    elif self.pt.getPartitions() != num_partitions:
        # XXX: shouldn't we recover instead of raising ?
        raise RuntimeError('the number of partitions is inconsistent')
    elif self.pt.getReplicas() != num_replicas:
        # XXX: shouldn't we recover instead of raising ?
        raise RuntimeError('the number of replicas is inconsistent')

    # passive handler
    self.master_conn.setHandler(self.master_event_handler)
    self.master_conn.ask(Packets.AskClusterState())
    self.master_conn.ask(Packets.AskPartitionTable())
def test_06_clear(self):
    # add some nodes
    num_partitions = 5
    num_replicas = 2
    pt = PartitionTable(num_partitions, num_replicas)
    # add two kinds of nodes, usable and unusable
    uuid1 = self.getStorageUUID()
    server1 = ("127.0.0.1", 19001)
    sn1 = StorageNode(Mock(), server1, uuid1)
    pt.setCell(0, sn1, CellStates.UP_TO_DATE)
    uuid2 = self.getStorageUUID()
    server2 = ("127.0.0.2", 19001)
    sn2 = StorageNode(Mock(), server2, uuid2)
    pt.setCell(1, sn2, CellStates.OUT_OF_DATE)
    uuid3 = self.getStorageUUID()
    server3 = ("127.0.0.3", 19001)
    sn3 = StorageNode(Mock(), server3, uuid3)
    pt.setCell(2, sn3, CellStates.FEEDING)
    # now check the result
    self.assertEqual(len(pt.partition_list[0]), 1)
    self.assertEqual(len(pt.partition_list[1]), 1)
    self.assertEqual(len(pt.partition_list[2]), 1)
    pt.clear()
    partition_list = pt.partition_list
    self.assertEqual(len(partition_list), num_partitions)
    for x in xrange(num_partitions):
        part = partition_list[x]
        self.assertTrue(isinstance(part, list))
        self.assertEqual(len(part), 0)
    self.assertEqual(len(pt.count_dict), 0)
def test_08_filled(self):
    num_partitions = 5
    num_replicas = 2
    pt = PartitionTable(num_partitions, num_replicas)
    self.assertEqual(pt.np, num_partitions)
    self.assertEqual(pt.num_filled_rows, 0)
    self.assertFalse(pt.filled())
    # adding a node in all partitions
    uuid1 = self.getStorageUUID()
    server1 = ("127.0.0.1", 19001)
    sn1 = StorageNode(Mock(), server1, uuid1)
    for x in xrange(num_partitions):
        pt.setCell(x, sn1, CellStates.UP_TO_DATE)
    self.assertEqual(pt.num_filled_rows, num_partitions)
    self.assertTrue(pt.filled())
def test_09_answerPartitionTable(self):
    # send a table
    conn = self.getClientConnection()
    self.app.pt = PartitionTable(3, 2)
    node_1 = self.getStorageUUID()
    node_2 = self.getStorageUUID()
    node_3 = self.getStorageUUID()
    self.app.uuid = node_1
    # SN already knows all nodes
    self.app.nm.createStorage(uuid=node_1)
    self.app.nm.createStorage(uuid=node_2)
    self.app.nm.createStorage(uuid=node_3)
    self.assertFalse(list(self.app.dm.getPartitionTable()))
    row_list = [(0, ((node_1, CellStates.UP_TO_DATE),
                     (node_2, CellStates.UP_TO_DATE))),
                (1, ((node_3, CellStates.UP_TO_DATE),
                     (node_1, CellStates.UP_TO_DATE))),
                (2, ((node_2, CellStates.UP_TO_DATE),
                     (node_3, CellStates.UP_TO_DATE)))]
    self.assertFalse(self.app.pt.filled())
    # send a complete new table and ack
    self.verification.sendPartitionTable(conn, 2, row_list)
    self.assertTrue(self.app.pt.filled())
    self.assertEqual(self.app.pt.getID(), 2)
    self.assertTrue(list(self.app.dm.getPartitionTable()))
def connectToPrimary(self):
    """Find a primary master node, and connect to it.

    If a primary master node is not elected or ready, repeat
    the attempt of a connection periodically.

    Note that I do not accept any connection from non-master nodes
    at this stage.
    """
    self.cluster_state = None
    # search, find, connect and identify to the primary master
    bootstrap = BootstrapManager(self, self.name, NodeTypes.ADMIN,
            self.uuid, self.server)
    data = bootstrap.getPrimaryConnection()
    (node, conn, uuid, num_partitions, num_replicas) = data
    self.master_node = node
    self.master_conn = conn
    self.uuid = uuid

    if self.pt is None:
        self.pt = PartitionTable(num_partitions, num_replicas)
    elif self.pt.getPartitions() != num_partitions:
        # XXX: shouldn't we recover instead of raising ?
        raise RuntimeError('the number of partitions is inconsistent')
    elif self.pt.getReplicas() != num_replicas:
        # XXX: shouldn't we recover instead of raising ?
        raise RuntimeError('the number of replicas is inconsistent')

    # passive handler
    self.master_conn.setHandler(self.master_event_handler)
    self.master_conn.ask(Packets.AskClusterState())
    self.master_conn.ask(Packets.AskNodeInformation())
    self.master_conn.ask(Packets.AskPartitionTable())
def loadPartitionTable(self):
    """Load a partition table from the database."""
    ptid = self.dm.getPTID()
    if ptid is None:
        self.pt = PartitionTable(0, 0)
        return
    row_list = []
    for offset, uuid, state in self.dm.getPartitionTable():
        while len(row_list) <= offset:
            row_list.append([])
        # register unknown nodes
        if self.nm.getByUUID(uuid) is None:
            self.nm.createStorage(uuid=uuid)
        row_list[offset].append((uuid, CellStates[state]))
    self.pt = object.__new__(PartitionTable)
    self.pt.load(ptid, self.dm.getNumReplicas(), row_list, self.nm)
def test_14_notifyPartitionChanges2(self):
    # cases :
    uuid1, uuid2, uuid3 = [self.getStorageUUID() for i in range(3)]
    cells = (
        (0, uuid1, CellStates.UP_TO_DATE),
        (1, uuid2, CellStates.DISCARDED),
        (2, uuid3, CellStates.OUT_OF_DATE),
    )
    # context
    conn = self.getMasterConnection()
    app = self.app
    # register nodes
    app.nm.createStorage(uuid=uuid1)
    app.nm.createStorage(uuid=uuid2)
    app.nm.createStorage(uuid=uuid3)
    ptid1, ptid2 = (1, 2)
    self.assertNotEqual(ptid1, ptid2)
    app.pt = PartitionTable(3, 1)
    app.dm = Mock({})
    app.replicator = Mock({})
    self.operation.notifyPartitionChanges(conn, ptid2, cells)
    # ptid set
    self.assertEqual(app.pt.getID(), ptid2)
    # dm call
    calls = self.app.dm.mockGetNamedCalls('changePartitionTable')
    self.assertEqual(len(calls), 1)
    calls[0].checkArgs(ptid2, cells)
def setUp(self):
    NeoUnitTestBase.setUp(self)
    config = self.getStorageConfiguration(master_number=1)
    self.app = Application(config)
    self.app.name = 'NEO'
    self.app.ready = True
    self.app.pt = PartitionTable(4, 1)
    self.identification = IdentificationHandler(self.app)
def test_04_removeCell(self):
    num_partitions = 5
    num_replicas = 2
    pt = PartitionTable(num_partitions, num_replicas)
    uuid1 = self.getStorageUUID()
    server1 = ("127.0.0.1", 19001)
    sn1 = self.createStorage(server1, uuid1)
    for x in xrange(num_partitions):
        self.assertEqual(len(pt.partition_list[x]), 0)
    # add a cell to an empty row
    self.assertFalse(pt.count_dict.has_key(sn1))
    pt._setCell(0, sn1, CellStates.UP_TO_DATE)
    self.assertTrue(pt.count_dict.has_key(sn1))
    self.assertEqual(pt.count_dict[sn1], 1)
    for x in xrange(num_partitions):
        if x == 0:
            self.assertEqual(len(pt.partition_list[x]), 1)
        else:
            self.assertEqual(len(pt.partition_list[x]), 0)
    # remove it
    pt.removeCell(0, sn1)
    self.assertEqual(pt.count_dict[sn1], 0)
    for x in xrange(num_partitions):
        self.assertEqual(len(pt.partition_list[x]), 0)
    # add a feeding cell
    pt._setCell(0, sn1, CellStates.FEEDING)
    self.assertTrue(pt.count_dict.has_key(sn1))
    self.assertEqual(pt.count_dict[sn1], 0)
    for x in xrange(num_partitions):
        if x == 0:
            self.assertEqual(len(pt.partition_list[x]), 1)
        else:
            self.assertEqual(len(pt.partition_list[x]), 0)
    # remove it
    pt.removeCell(0, sn1)
    self.assertEqual(pt.count_dict[sn1], 0)
    for x in xrange(num_partitions):
        self.assertEqual(len(pt.partition_list[x]), 0)
def _acceptIdentification(self, node, uuid, num_partitions,
        num_replicas, your_uuid, primary, known_master_list):
    app = self.app

    # Register new master nodes.
    found = False
    conn_address = node.getAddress()
    for node_address, node_uuid in known_master_list:
        if node_address == conn_address:
            assert uuid == node_uuid, (dump(uuid), dump(node_uuid))
            found = True
        n = app.nm.getByAddress(node_address)
        if n is None:
            n = app.nm.createMaster(address=node_address)
        if node_uuid is not None and n.getUUID() != node_uuid:
            n.setUUID(node_uuid)
    assert found, (node, dump(uuid), known_master_list)

    conn = node.getConnection()
    if primary is not None:
        primary_node = app.nm.getByAddress(primary)
        if primary_node is None:
            # I don't know such a node. Probably this information
            # is old. So ignore it.
            logging.warning('Unknown primary master: %s. Ignoring.',
                            primary)
            return
        else:
            if app.trying_master_node is not primary_node:
                app.trying_master_node = None
                conn.close()
            app.primary_master_node = primary_node
    else:
        if app.primary_master_node is not None:
            # The primary master node is not a primary master node
            # any longer.
            app.primary_master_node = None
        app.trying_master_node = None
        conn.close()
        return

    # the master must give an UUID
    if your_uuid is None:
        raise ProtocolError('No UUID supplied')
    app.uuid = your_uuid
    logging.info('Got an UUID: %s', dump(app.uuid))

    # Always create partition table
    app.pt = PartitionTable(num_partitions, num_replicas)
def setUp(self):
    NeoUnitTestBase.setUp(self)
    self.prepareDatabase(number=1)
    # create an application object
    config = self.getStorageConfiguration(master_number=1)
    self.app = Application(config)
    self.verification = InitializationHandler(self.app)
    # define some variable to simulate client and storage node
    self.master_port = 10010
    self.storage_port = 10020
    self.client_port = 11011
    self.num_partitions = 1009
    self.num_replicas = 2
    self.app.operational = False
    self.app.load_lock_dict = {}
    self.app.pt = PartitionTable(self.num_partitions, self.num_replicas)
def test_09_hasOffset(self):
    num_partitions = 5
    num_replicas = 2
    pt = PartitionTable(num_partitions, num_replicas)
    # add a node
    uuid1 = self.getStorageUUID()
    server1 = ("127.0.0.1", 19001)
    sn1 = self.createStorage(server1, uuid1)
    pt._setCell(0, sn1, CellStates.UP_TO_DATE)
    # now test
    self.assertTrue(pt.hasOffset(0))
    self.assertFalse(pt.hasOffset(1))
    # unknown partition
    self.assertFalse(pt.hasOffset(50))
def test_04_removeCell(self):
    num_partitions = 5
    num_replicas = 2
    pt = PartitionTable(num_partitions, num_replicas)
    uuid1 = self.getStorageUUID()
    server1 = ("127.0.0.1", 19001)
    sn1 = StorageNode(Mock(), server1, uuid1)
    for x in xrange(num_partitions):
        self.assertEqual(len(pt.partition_list[x]), 0)
    # add a cell to an empty row
    self.assertFalse(pt.count_dict.has_key(sn1))
    pt.setCell(0, sn1, CellStates.UP_TO_DATE)
    self.assertTrue(pt.count_dict.has_key(sn1))
    self.assertEqual(pt.count_dict[sn1], 1)
    for x in xrange(num_partitions):
        if x == 0:
            self.assertEqual(len(pt.partition_list[x]), 1)
        else:
            self.assertEqual(len(pt.partition_list[x]), 0)
    # remove it
    pt.removeCell(0, sn1)
    self.assertEqual(pt.count_dict[sn1], 0)
    for x in xrange(num_partitions):
        self.assertEqual(len(pt.partition_list[x]), 0)
    # add a feeding cell
    pt.setCell(0, sn1, CellStates.FEEDING)
    self.assertTrue(pt.count_dict.has_key(sn1))
    self.assertEqual(pt.count_dict[sn1], 0)
    for x in xrange(num_partitions):
        if x == 0:
            self.assertEqual(len(pt.partition_list[x]), 1)
        else:
            self.assertEqual(len(pt.partition_list[x]), 0)
    # remove it
    pt.removeCell(0, sn1)
    self.assertEqual(pt.count_dict[sn1], 0)
    for x in xrange(num_partitions):
        self.assertEqual(len(pt.partition_list[x]), 0)
def test_09_hasOffset(self):
    num_partitions = 5
    num_replicas = 2
    pt = PartitionTable(num_partitions, num_replicas)
    # add a node
    uuid1 = self.getStorageUUID()
    server1 = ("127.0.0.1", 19001)
    sn1 = StorageNode(Mock(), server1, uuid1)
    pt.setCell(0, sn1, CellStates.UP_TO_DATE)
    # now test
    self.assertTrue(pt.hasOffset(0))
    self.assertFalse(pt.hasOffset(1))
    # unknown partition
    self.assertFalse(pt.hasOffset(50))
def test_08_filled(self):
    num_partitions = 5
    num_replicas = 2
    pt = PartitionTable(num_partitions, num_replicas)
    self.assertEqual(pt.np, num_partitions)
    self.assertEqual(pt.num_filled_rows, 0)
    self.assertFalse(pt.filled())
    # adding a node in all partitions
    uuid1 = self.getStorageUUID()
    server1 = ("127.0.0.1", 19001)
    sn1 = self.createStorage(server1, uuid1)
    for x in xrange(num_partitions):
        pt._setCell(x, sn1, CellStates.UP_TO_DATE)
    self.assertEqual(pt.num_filled_rows, num_partitions)
    self.assertTrue(pt.filled())
def test_01_loadPartitionTable(self):
    self.app.dm = Mock({
        'getPartitionTable': [],
    })
    self.assertEqual(self.app.pt, None)
    num_partitions = 3
    num_replicas = 2
    self.app.pt = PartitionTable(num_partitions, num_replicas)
    self.assertFalse(self.app.pt.getNodeSet())
    self.assertFalse(self.app.pt.filled())
    for x in xrange(num_partitions):
        self.assertFalse(self.app.pt.hasOffset(x))
    # load an empty table
    self.app.loadPartitionTable()
    self.assertFalse(self.app.pt.getNodeSet())
    self.assertFalse(self.app.pt.filled())
    for x in xrange(num_partitions):
        self.assertFalse(self.app.pt.hasOffset(x))
    # add some nodes, they will be removed when loading the table
    master_uuid = self.getMasterUUID()
    master = self.app.nm.createMaster(uuid=master_uuid)
    storage_uuid = self.getStorageUUID()
    storage = self.app.nm.createStorage(uuid=storage_uuid)
    client_uuid = self.getClientUUID()
    self.app.pt._setCell(0, master, CellStates.UP_TO_DATE)
    self.app.pt._setCell(0, storage, CellStates.UP_TO_DATE)
    self.assertEqual(len(self.app.pt.getNodeSet()), 2)
    self.assertFalse(self.app.pt.filled())
    for x in xrange(num_partitions):
        if x == 0:
            self.assertTrue(self.app.pt.hasOffset(x))
        else:
            self.assertFalse(self.app.pt.hasOffset(x))
    # load an empty table, everything removed
    self.app.loadPartitionTable()
    self.assertFalse(self.app.pt.getNodeSet())
    self.assertFalse(self.app.pt.filled())
    for x in xrange(num_partitions):
        self.assertFalse(self.app.pt.hasOffset(x))
    # add some nodes
    self.app.pt._setCell(0, master, CellStates.UP_TO_DATE)
    self.app.pt._setCell(0, storage, CellStates.UP_TO_DATE)
    self.assertEqual(len(self.app.pt.getNodeSet()), 2)
    self.assertFalse(self.app.pt.filled())
    for x in xrange(num_partitions):
        if x == 0:
            self.assertTrue(self.app.pt.hasOffset(x))
        else:
            self.assertFalse(self.app.pt.hasOffset(x))
    # fill the partition table
    self.app.dm = Mock({
        'getPartitionTable': [
            (0, client_uuid, CellStates.UP_TO_DATE),
            (1, client_uuid, CellStates.UP_TO_DATE),
            (1, storage_uuid, CellStates.UP_TO_DATE),
            (2, storage_uuid, CellStates.UP_TO_DATE),
            (2, master_uuid, CellStates.UP_TO_DATE),
        ],
        'getPTID': 1,
    })
    self.app.pt.clear()
    self.app.loadPartitionTable()
    self.assertTrue(self.app.pt.filled())
    for x in xrange(num_partitions):
        self.assertTrue(self.app.pt.hasOffset(x))
    # check each row
    cell_list = self.app.pt.getCellList(0)
    self.assertEqual(len(cell_list), 1)
    self.assertEqual(cell_list[0].getUUID(), client_uuid)
    cell_list = self.app.pt.getCellList(1)
    self.assertEqual(len(cell_list), 2)
    self.assertTrue(cell_list[0].getUUID() in (client_uuid, storage_uuid))
    self.assertTrue(cell_list[1].getUUID() in (client_uuid, storage_uuid))
    cell_list = self.app.pt.getCellList(2)
    self.assertEqual(len(cell_list), 2)
    self.assertTrue(cell_list[0].getUUID() in (master_uuid, storage_uuid))
    self.assertTrue(cell_list[1].getUUID() in (master_uuid, storage_uuid))
class Application(BaseApplication):
    """The storage node application."""

    checker = replicator = tm = None

    @classmethod
    def _buildOptionParser(cls):
        parser = cls.option_parser
        parser.description = "NEO Storage node"
        cls.addCommonServerOptions('storage', '127.0.0.1')

        _ = parser.group('storage')
        _('a', 'adapter', choices=sorted(DATABASE_MANAGER_DICT),
            help="database adapter to use")
        _('d', 'database', required=True,
            help="database connections string")
        _.float('w', 'wait',
            help="seconds to wait for backend to be available,"
                 " before erroring-out (-1 = infinite)")
        _.bool('disable-drop-partitions',
            help="do not delete data of discarded cells, which is useful for"
                 " big databases because the current implementation is"
                 " inefficient (this option should disappear in the future)")
        _.bool('new-nid',
            help="request a new NID from a cluster that is already"
                 " operational, update the database with the new NID and exit,"
                 " which makes it easier to quickly set up a replica by copying"
                 " the database of another node while it was stopped")

        _ = parser.group('database creation')
        _.int('i', 'nid',
            help="specify an NID to use for this process. Previously"
                 " assigned NID takes precedence (i.e. you should"
                 " always use reset with this switch)")
        _('e', 'engine', help="database engine (MySQL only)")
        _.bool('dedup',
            help="enable deduplication of data"
                 " when setting up a new storage node")
        # TODO: Forbid using "reset" along with any unneeded argument.
        #       "reset" is too dangerous to give the user a chance of
        #       accidentally letting it slip through in a long option list.
        #       It should even be forbidden in configuration files.
        _.bool('reset',
            help="remove an existing database if any, and exit")

        parser.set_defaults(**option_defaults)

    def __init__(self, config):
        super(Application, self).__init__(
            config.get('ssl'), config.get('dynamic_master_list'))
        # set the cluster name
        self.name = config['cluster']

        self.dm = buildDatabaseManager(config['adapter'],
            (config['database'], config.get('engine'), config['wait']),
        )
        self.disable_drop_partitions = config.get('disable_drop_partitions',
                                                  False)

        # load master nodes
        for master_address in config['masters']:
            self.nm.createMaster(address=master_address)

        # set the bind address
        self.server = config['bind']
        logging.debug('IP address is %s, port is %d', *self.server)

        # The partition table is initialized after getting the number of
        # partitions.
        self.pt = None

        self.listening_conn = None
        self.master_conn = None
        self.master_node = None

        # operation related data
        self.operational = False

        self.dm.setup(reset=config.get('reset', False),
                      dedup=config.get('dedup', False))
        self.loadConfiguration()
        self.devpath = self.dm.getTopologyPath()

        if config.get('new_nid'):
            self.new_nid = [x[0] for x in self.dm.iterAssignedCells()]
            if not self.new_nid:
                sys.exit('database is empty')
            self.uuid = None
        else:
            self.new_nid = ()
            if 'nid' in config:  # for testing purpose only
                self.uuid = config['nid']
                logging.node(self.name, self.uuid)

        registerLiveDebugger(on_log=self.log)

    def close(self):
        self.listening_conn = None
        self.dm.close()
        super(Application, self).close()

    def _poll(self):
        self.em.poll(1)

    def log(self):
        self.em.log()
        self.nm.log()
        if self.tm:
            self.tm.log()
        if self.pt is not None:
            self.pt.log()

    def loadConfiguration(self):
        """Load persistent configuration data from the database.
        If data is not present, generate it."""
        dm = self.dm

        # check cluster name
        name = dm.getName()
        if name is None:
            dm.setName(self.name)
        elif name != self.name:
            raise RuntimeError('name %r does not match with the database: %r'
                               % (self.name, name))

        # load configuration
        self.uuid = dm.getUUID()
        logging.node(self.name, self.uuid)

        logging.info('Configuration loaded:')
        logging.info('PTID : %s', dump(dm.getPTID()))
        logging.info('Name : %s', self.name)

    def loadPartitionTable(self):
        """Load a partition table from the database."""
        ptid = self.dm.getPTID()
        if ptid is None:
            self.pt = PartitionTable(0, 0)
            return
        row_list = []
        for offset, uuid, state in self.dm.getPartitionTable():
            while len(row_list) <= offset:
                row_list.append([])
            # register unknown nodes
            if self.nm.getByUUID(uuid) is None:
                self.nm.createStorage(uuid=uuid)
            row_list[offset].append((uuid, CellStates[state]))
        self.pt = object.__new__(PartitionTable)
        self.pt.load(ptid, self.dm.getNumReplicas(), row_list, self.nm)

    def run(self):
        try:
            self._run()
        except Exception:
            logging.exception('Pre-mortem data:')
            self.log()
            logging.flush()
            raise

    def _run(self):
        """Make sure that the status is sane and start a loop."""
        if len(self.name) == 0:
            raise RuntimeError, 'cluster name must be non-empty'

        # Make a listening port
        handler = identification.IdentificationHandler(self)
        self.listening_conn = ListeningConnection(self, handler, self.server)
        self.server = self.listening_conn.getAddress()

        # Connect to a primary master node, verify data, and
        # start the operation. This cycle will be executed permanently,
        # until the user explicitly requests a shutdown.
        self.operational = False
        while True:
            self.cluster_state = None
            if self.master_node is None:
                # look for the primary master
                self.connectToPrimary()
            self.checker = Checker(self)
            self.replicator = Replicator(self)
            self.tm = TransactionManager(self)
            try:
                self.initialize()
                self.doOperation()
                raise RuntimeError, 'should not reach here'
            except StoppedOperation, msg:
                logging.error('operation stopped: %s', msg)
            except PrimaryFailure, msg:
                logging.error('primary master is down: %s', msg)
            finally:
def test_10_operational(self):
    num_partitions = 5
    num_replicas = 2
    pt = PartitionTable(num_partitions, num_replicas)
    self.assertFalse(pt.filled())
    self.assertFalse(pt.operational())
    # adding a node in all partitions
    uuid1 = self.getStorageUUID()
    server1 = ("127.0.0.1", 19001)
    sn1 = StorageNode(Mock(), server1, uuid1)
    for x in xrange(num_partitions):
        pt.setCell(x, sn1, CellStates.UP_TO_DATE)
    self.assertTrue(pt.filled())
    # it's up to date and running, so operational
    sn1.setState(NodeStates.RUNNING)
    self.assertTrue(pt.operational())
    # same with feeding state
    pt.clear()
    self.assertFalse(pt.filled())
    self.assertFalse(pt.operational())
    # adding a node in all partitions
    uuid1 = self.getStorageUUID()
    server1 = ("127.0.0.1", 19001)
    sn1 = StorageNode(Mock(), server1, uuid1)
    for x in xrange(num_partitions):
        pt.setCell(x, sn1, CellStates.FEEDING)
    self.assertTrue(pt.filled())
    # it's feeding and running, so operational
    sn1.setState(NodeStates.RUNNING)
    self.assertTrue(pt.operational())
    # same with feeding state but non-running node
    pt.clear()
    self.assertFalse(pt.filled())
    self.assertFalse(pt.operational())
    # adding a node in all partitions
    uuid1 = self.getStorageUUID()
    server1 = ("127.0.0.1", 19001)
    sn1 = StorageNode(Mock(), server1, uuid1)
    sn1.setState(NodeStates.TEMPORARILY_DOWN)
    for x in xrange(num_partitions):
        pt.setCell(x, sn1, CellStates.FEEDING)
    self.assertTrue(pt.filled())
    # it's feeding but not running, so not operational
    self.assertFalse(pt.operational())
    # same with out-of-date state and running
    pt.clear()
    self.assertFalse(pt.filled())
    self.assertFalse(pt.operational())
    # adding a node in all partitions
    uuid1 = self.getStorageUUID()
    server1 = ("127.0.0.1", 19001)
    sn1 = StorageNode(Mock(), server1, uuid1)
    for x in xrange(num_partitions):
        pt.setCell(x, sn1, CellStates.OUT_OF_DATE)
    self.assertTrue(pt.filled())
    # it's not up to date, so not operational even if running
    self.assertFalse(pt.operational())
def test_03_setCell(self):
    num_partitions = 5
    num_replicas = 2
    pt = PartitionTable(num_partitions, num_replicas)
    uuid1 = self.getStorageUUID()
    server1 = ("127.0.0.1", 19001)
    sn1 = self.createStorage(server1, uuid1)
    for x in xrange(num_partitions):
        self.assertEqual(len(pt.partition_list[x]), 0)
    # add a cell to an empty row
    self.assertFalse(pt.count_dict.has_key(sn1))
    pt._setCell(0, sn1, CellStates.UP_TO_DATE)
    self.assertTrue(pt.count_dict.has_key(sn1))
    self.assertEqual(pt.count_dict[sn1], 1)
    for x in xrange(num_partitions):
        if x == 0:
            self.assertEqual(len(pt.partition_list[x]), 1)
            cell = pt.partition_list[x][0]
            self.assertEqual(cell.getState(), CellStates.UP_TO_DATE)
        else:
            self.assertEqual(len(pt.partition_list[x]), 0)
    # try to add to a nonexistent partition
    self.assertRaises(IndexError, pt._setCell,
                      10, sn1, CellStates.UP_TO_DATE)
    # if we add in discarded state, the cell must be removed
    pt._setCell(0, sn1, CellStates.DISCARDED)
    for x in xrange(num_partitions):
        self.assertEqual(len(pt.partition_list[x]), 0)
    self.assertEqual(pt.count_dict[sn1], 0)
    # add a feeding node into empty row
    pt._setCell(0, sn1, CellStates.FEEDING)
    self.assertTrue(pt.count_dict.has_key(sn1))
    self.assertEqual(pt.count_dict[sn1], 0)
    for x in xrange(num_partitions):
        if x == 0:
            self.assertEqual(len(pt.partition_list[x]), 1)
            cell = pt.partition_list[x][0]
            self.assertEqual(cell.getState(), CellStates.FEEDING)
        else:
            self.assertEqual(len(pt.partition_list[x]), 0)
    # re-add it as feeding, nothing changes
    pt._setCell(0, sn1, CellStates.FEEDING)
    self.assertTrue(pt.count_dict.has_key(sn1))
    self.assertEqual(pt.count_dict[sn1], 0)
    for x in xrange(num_partitions):
        if x == 0:
            self.assertEqual(len(pt.partition_list[x]), 1)
            cell = pt.partition_list[x][0]
            self.assertEqual(cell.getState(), CellStates.FEEDING)
        else:
            self.assertEqual(len(pt.partition_list[x]), 0)
    # now add it as up to date
    pt._setCell(0, sn1, CellStates.UP_TO_DATE)
    self.assertTrue(pt.count_dict.has_key(sn1))
    self.assertEqual(pt.count_dict[sn1], 1)
    for x in xrange(num_partitions):
        if x == 0:
            self.assertEqual(len(pt.partition_list[x]), 1)
            cell = pt.partition_list[x][0]
            self.assertEqual(cell.getState(), CellStates.UP_TO_DATE)
        else:
            self.assertEqual(len(pt.partition_list[x]), 0)
    # discard it again, must not be taken into account
    pt._setCell(0, sn1, CellStates.DISCARDED)
    for x in xrange(num_partitions):
        self.assertEqual(len(pt.partition_list[x]), 0)
    self.assertEqual(pt.count_dict[sn1], 0)
    sn1.setState(NodeStates.UNKNOWN)
    self.assertRaises(PartitionTableException, pt._setCell,
                      0, sn1, CellStates.UP_TO_DATE)
    for x in xrange(num_partitions):
        self.assertEqual(len(pt.partition_list[x]), 0)
    self.assertEqual(pt.count_dict[sn1], 0)
def test_12_getRow(self):
    num_partitions = 5
    num_replicas = 2
    pt = PartitionTable(num_partitions, num_replicas)
    # add nodes
    uuid1 = self.getStorageUUID()
    server1 = ("127.0.0.1", 19001)
    sn1 = self.createStorage(server1, uuid1)
    pt._setCell(0, sn1, CellStates.UP_TO_DATE)
    pt._setCell(1, sn1, CellStates.UP_TO_DATE)
    pt._setCell(2, sn1, CellStates.UP_TO_DATE)
    uuid2 = self.getStorageUUID()
    server2 = ("127.0.0.2", 19001)
    sn2 = self.createStorage(server2, uuid2)
    pt._setCell(0, sn2, CellStates.UP_TO_DATE)
    pt._setCell(1, sn2, CellStates.UP_TO_DATE)
    uuid3 = self.getStorageUUID()
    server3 = ("127.0.0.3", 19001)
    sn3 = self.createStorage(server3, uuid3)
    pt._setCell(0, sn3, CellStates.UP_TO_DATE)
    # test
    row_0 = pt.getRow(0)
    self.assertEqual(len(row_0), 3)
    for uuid, state in row_0:
        self.assertTrue(uuid in (sn1.getUUID(), sn2.getUUID(), sn3.getUUID()))
        self.assertEqual(state, CellStates.UP_TO_DATE)
    row_1 = pt.getRow(1)
    self.assertEqual(len(row_1), 2)
    for uuid, state in row_1:
        self.assertTrue(uuid in (sn1.getUUID(), sn2.getUUID()))
        self.assertEqual(state, CellStates.UP_TO_DATE)
    row_2 = pt.getRow(2)
    self.assertEqual(len(row_2), 1)
    for uuid, state in row_2:
        self.assertEqual(uuid, sn1.getUUID())
        self.assertEqual(state, CellStates.UP_TO_DATE)
    row_3 = pt.getRow(3)
    self.assertEqual(len(row_3), 0)
    row_4 = pt.getRow(4)
    self.assertEqual(len(row_4), 0)
    # unknown row
    self.assertRaises(IndexError, pt.getRow, 5)
def test_10_operational(self):
    def createStorage():
        uuid = self.getStorageUUID()
        return self.createStorage(("127.0.0.1", uuid), uuid)
    num_partitions = 5
    num_replicas = 2
    pt = PartitionTable(num_partitions, num_replicas)
    self.assertFalse(pt.filled())
    self.assertFalse(pt.operational())
    # adding a node in all partitions
    sn1 = createStorage()
    for x in xrange(num_partitions):
        pt._setCell(x, sn1, CellStates.UP_TO_DATE)
    self.assertTrue(pt.filled())
    # it's up to date and running, so operational
    sn1.setState(NodeStates.RUNNING)
    self.assertTrue(pt.operational())
    # same with feeding state
    pt.clear()
    self.assertFalse(pt.filled())
    self.assertFalse(pt.operational())
    # adding a node in all partitions
    sn1 = createStorage()
    for x in xrange(num_partitions):
        pt._setCell(x, sn1, CellStates.FEEDING)
    self.assertTrue(pt.filled())
    # it's feeding and running, so operational
    sn1.setState(NodeStates.RUNNING)
    self.assertTrue(pt.operational())
    # same with feeding state but non-running node
    pt.clear()
    self.assertFalse(pt.filled())
    self.assertFalse(pt.operational())
    # adding a node in all partitions
    sn1 = createStorage()
    sn1.setState(NodeStates.DOWN)
    for x in xrange(num_partitions):
        pt._setCell(x, sn1, CellStates.FEEDING)
    self.assertTrue(pt.filled())
    # it's feeding but not running, so not operational
    self.assertFalse(pt.operational())
    # same with out-of-date state and running
    pt.clear()
    self.assertFalse(pt.filled())
    self.assertFalse(pt.operational())
    # adding a node in all partitions
    sn1 = createStorage()
    for x in xrange(num_partitions):
        pt._setCell(x, sn1, CellStates.OUT_OF_DATE)
    self.assertTrue(pt.filled())
    # it's not up to date, so not operational even if running
    self.assertFalse(pt.operational())
def test_12_getRow(self):
    num_partitions = 5
    num_replicas = 2
    pt = PartitionTable(num_partitions, num_replicas)
    # add nodes
    uuid1 = self.getStorageUUID()
    server1 = ("127.0.0.1", 19001)
    sn1 = StorageNode(Mock(), server1, uuid1)
    pt.setCell(0, sn1, CellStates.UP_TO_DATE)
    pt.setCell(1, sn1, CellStates.UP_TO_DATE)
    pt.setCell(2, sn1, CellStates.UP_TO_DATE)
    uuid2 = self.getStorageUUID()
    server2 = ("127.0.0.2", 19001)
    sn2 = StorageNode(Mock(), server2, uuid2)
    pt.setCell(0, sn2, CellStates.UP_TO_DATE)
    pt.setCell(1, sn2, CellStates.UP_TO_DATE)
    uuid3 = self.getStorageUUID()
    server3 = ("127.0.0.3", 19001)
    sn3 = StorageNode(Mock(), server3, uuid3)
    pt.setCell(0, sn3, CellStates.UP_TO_DATE)
    # test
    row_0 = pt.getRow(0)
    self.assertEqual(len(row_0), 3)
    for uuid, state in row_0:
        self.assertTrue(uuid in (sn1.getUUID(), sn2.getUUID(), sn3.getUUID()))
        self.assertEqual(state, CellStates.UP_TO_DATE)
    row_1 = pt.getRow(1)
    self.assertEqual(len(row_1), 2)
    for uuid, state in row_1:
        self.assertTrue(uuid in (sn1.getUUID(), sn2.getUUID()))
        self.assertEqual(state, CellStates.UP_TO_DATE)
    row_2 = pt.getRow(2)
    self.assertEqual(len(row_2), 1)
    for uuid, state in row_2:
        self.assertEqual(uuid, sn1.getUUID())
        self.assertEqual(state, CellStates.UP_TO_DATE)
    row_3 = pt.getRow(3)
    self.assertEqual(len(row_3), 0)
    row_4 = pt.getRow(4)
    self.assertEqual(len(row_4), 0)
    # unknown row
    self.assertRaises(IndexError, pt.getRow, 5)
def test_05_getCellList(self):
    num_partitions = 5
    num_replicas = 2
    pt = PartitionTable(num_partitions, num_replicas)
    # add two kinds of nodes, usable and unusable
    uuid1 = self.getStorageUUID()
    server1 = ("127.0.0.1", 19001)
    sn1 = StorageNode(Mock(), server1, uuid1)
    pt.setCell(0, sn1, CellStates.UP_TO_DATE)
    uuid2 = self.getStorageUUID()
    server2 = ("127.0.0.2", 19001)
    sn2 = StorageNode(Mock(), server2, uuid2)
    pt.setCell(0, sn2, CellStates.OUT_OF_DATE)
    uuid3 = self.getStorageUUID()
    server3 = ("127.0.0.3", 19001)
    sn3 = StorageNode(Mock(), server3, uuid3)
    pt.setCell(0, sn3, CellStates.FEEDING)
    uuid4 = self.getStorageUUID()
    server4 = ("127.0.0.4", 19001)
    sn4 = StorageNode(Mock(), server4, uuid4)
    pt.setCell(0, sn4, CellStates.DISCARDED)  # won't be added
    # now check the result
    self.assertEqual(len(pt.partition_list[0]), 3)
    for x in xrange(num_partitions):
        if x == 0:
            # all nodes
            all_cell = pt.getCellList(0)
            all_nodes = [x.getNode() for x in all_cell]
            self.assertEqual(len(all_cell), 3)
            self.assertTrue(sn1 in all_nodes)
            self.assertTrue(sn2 in all_nodes)
            self.assertTrue(sn3 in all_nodes)
            self.assertTrue(sn4 not in all_nodes)
            # readable nodes
            all_cell = pt.getCellList(0, readable=True)
            all_nodes = [x.getNode() for x in all_cell]
            self.assertEqual(len(all_cell), 2)
            self.assertTrue(sn1 in all_nodes)
            self.assertTrue(sn2 not in all_nodes)
            self.assertTrue(sn3 in all_nodes)
            self.assertTrue(sn4 not in all_nodes)
        else:
            self.assertEqual(len(pt.getCellList(1, False)), 0)
            self.assertEqual(len(pt.getCellList(1, True)), 0)
class Application(BaseApplication):
    """The admin node application."""

    def __init__(self, config):
        super(Application, self).__init__(
            config.getSSL(), config.getDynamicMasterList())
        for address in config.getMasters():
            self.nm.createMaster(address=address)
        self.name = config.getCluster()
        self.server = config.getBind()
        logging.debug('IP address is %s, port is %d', *self.server)

        # The partition table is initialized after getting the number of
        # partitions.
        self.pt = None
        self.uuid = config.getUUID()
        self.request_handler = MasterRequestEventHandler(self)
        self.master_event_handler = MasterEventHandler(self)
        self.cluster_state = None
        self.reset()
        registerLiveDebugger(on_log=self.log)

    def close(self):
        self.listening_conn = None
        super(Application, self).close()

    def reset(self):
        self.bootstrapped = False
        self.master_conn = None
        self.master_node = None

    def log(self):
        self.em.log()
        self.nm.log()
        if self.pt is not None:
            self.pt.log()

    def run(self):
        try:
            self._run()
        except Exception:
            logging.exception('Pre-mortem data:')
            self.log()
            logging.flush()
            raise

    def _run(self):
        """Make sure that the status is sane and start a loop."""
        if len(self.name) == 0:
            raise RuntimeError, 'cluster name must be non-empty'

        # Make a listening port.
        handler = AdminEventHandler(self)
        self.listening_conn = ListeningConnection(self, handler, self.server)

        while self.cluster_state != ClusterStates.STOPPING:
            self.connectToPrimary()
            try:
                while True:
                    self.em.poll(1)
            except PrimaryFailure:
                logging.error('primary master is down')
        self.listening_conn.close()
        while not self.em.isIdle():
            self.em.poll(1)

    def connectToPrimary(self):
        """Find a primary master node, and connect to it.

        If a primary master node is not elected or ready, repeat
        the attempt of a connection periodically.

        Note that I do not accept any connection from non-master nodes
        at this stage.
        """
        self.cluster_state = None
        # search, find, connect and identify to the primary master
        bootstrap = BootstrapManager(self, NodeTypes.ADMIN, self.server)
        self.master_node, self.master_conn, num_partitions, num_replicas = \
            bootstrap.getPrimaryConnection()

        if self.pt is None:
            self.pt = PartitionTable(num_partitions, num_replicas)
        elif self.pt.getPartitions() != num_partitions:
            # XXX: shouldn't we recover instead of raising ?
            raise RuntimeError('the number of partitions is inconsistent')
        elif self.pt.getReplicas() != num_replicas:
            # XXX: shouldn't we recover instead of raising ?
            raise RuntimeError('the number of replicas is inconsistent')

        # passive handler
        self.master_conn.setHandler(self.master_event_handler)
        self.master_conn.ask(Packets.AskClusterState())
        self.master_conn.ask(Packets.AskPartitionTable())

    def sendPartitionTable(self, conn, min_offset, max_offset, uuid):
        # we have a pt
        self.pt.log()
        row_list = []
        if max_offset == 0:
            max_offset = self.pt.getPartitions()
        try:
            for offset in xrange(min_offset, max_offset):
                row = []
                try:
                    for cell in self.pt.getCellList(offset):
                        if uuid is None or cell.getUUID() == uuid:
                            row.append((cell.getUUID(), cell.getState()))
                except TypeError:
                    pass
                row_list.append((offset, row))
        except IndexError:
            conn.send(Errors.ProtocolError('invalid partition table offset'))
        else:
            conn.answer(Packets.AnswerPartitionList(self.pt.getID(), row_list))
def test_03_setCell(self):
    num_partitions = 5
    num_replicas = 2
    pt = PartitionTable(num_partitions, num_replicas)
    uuid1 = self.getStorageUUID()
    server1 = ("127.0.0.1", 19001)
    sn1 = StorageNode(Mock(), server1, uuid1)
    for x in xrange(num_partitions):
        self.assertEqual(len(pt.partition_list[x]), 0)
    # add a cell to an empty row
    self.assertFalse(pt.count_dict.has_key(sn1))
    pt.setCell(0, sn1, CellStates.UP_TO_DATE)
    self.assertTrue(pt.count_dict.has_key(sn1))
    self.assertEqual(pt.count_dict[sn1], 1)
    for x in xrange(num_partitions):
        if x == 0:
            self.assertEqual(len(pt.partition_list[x]), 1)
            cell = pt.partition_list[x][0]
            self.assertEqual(cell.getState(), CellStates.UP_TO_DATE)
        else:
            self.assertEqual(len(pt.partition_list[x]), 0)
    # try to add to a nonexistent partition
    self.assertRaises(IndexError, pt.setCell,
                      10, sn1, CellStates.UP_TO_DATE)
    # if we add in discarded state, the cell must be removed
    pt.setCell(0, sn1, CellStates.DISCARDED)
    for x in xrange(num_partitions):
        self.assertEqual(len(pt.partition_list[x]), 0)
    self.assertEqual(pt.count_dict[sn1], 0)
    # add a feeding node into empty row
    pt.setCell(0, sn1, CellStates.FEEDING)
    self.assertTrue(pt.count_dict.has_key(sn1))
    self.assertEqual(pt.count_dict[sn1], 0)
    for x in xrange(num_partitions):
        if x == 0:
            self.assertEqual(len(pt.partition_list[x]), 1)
            cell = pt.partition_list[x][0]
            self.assertEqual(cell.getState(), CellStates.FEEDING)
        else:
            self.assertEqual(len(pt.partition_list[x]), 0)
    # re-add it as feeding, nothing changes
    pt.setCell(0, sn1, CellStates.FEEDING)
    self.assertTrue(pt.count_dict.has_key(sn1))
    self.assertEqual(pt.count_dict[sn1], 0)
    for x in xrange(num_partitions):
        if x == 0:
            self.assertEqual(len(pt.partition_list[x]), 1)
            cell = pt.partition_list[x][0]
            self.assertEqual(cell.getState(), CellStates.FEEDING)
        else:
            self.assertEqual(len(pt.partition_list[x]), 0)
    # now add it as up to date
    pt.setCell(0, sn1, CellStates.UP_TO_DATE)
    self.assertTrue(pt.count_dict.has_key(sn1))
    self.assertEqual(pt.count_dict[sn1], 1)
    for x in xrange(num_partitions):
        if x == 0:
            self.assertEqual(len(pt.partition_list[x]), 1)
            cell = pt.partition_list[x][0]
            self.assertEqual(cell.getState(), CellStates.UP_TO_DATE)
        else:
            self.assertEqual(len(pt.partition_list[x]), 0)
    # now add broken and down states, must not be taken into account
    pt.setCell(0, sn1, CellStates.DISCARDED)
    for x in xrange(num_partitions):
        self.assertEqual(len(pt.partition_list[x]), 0)
    self.assertEqual(pt.count_dict[sn1], 0)
    sn1.setState(NodeStates.BROKEN)
    self.assertRaises(PartitionTableException, pt.setCell,
                      0, sn1, CellStates.UP_TO_DATE)
    for x in xrange(num_partitions):
        self.assertEqual(len(pt.partition_list[x]), 0)
    self.assertEqual(pt.count_dict[sn1], 0)
    sn1.setState(NodeStates.DOWN)
    self.assertRaises(PartitionTableException, pt.setCell,
                      0, sn1, CellStates.UP_TO_DATE)
    for x in xrange(num_partitions):
        self.assertEqual(len(pt.partition_list[x]), 0)
    self.assertEqual(pt.count_dict[sn1], 0)
def _acceptIdentification(self, node, num_partitions, num_replicas):
    self.app.pt = PartitionTable(num_partitions, num_replicas)
def test_07_getNodeSet(self):
    num_partitions = 5
    num_replicas = 2
    pt = PartitionTable(num_partitions, num_replicas)
    # add two kinds of nodes, usable and unusable
    uuid1 = self.getStorageUUID()
    server1 = ("127.0.0.1", 19001)
    sn1 = self.createStorage(server1, uuid1)
    pt._setCell(0, sn1, CellStates.UP_TO_DATE)
    uuid2 = self.getStorageUUID()
    server2 = ("127.0.0.2", 19001)
    sn2 = self.createStorage(server2, uuid2)
    pt._setCell(0, sn2, CellStates.OUT_OF_DATE)
    uuid3 = self.getStorageUUID()
    server3 = ("127.0.0.3", 19001)
    sn3 = self.createStorage(server3, uuid3)
    pt._setCell(0, sn3, CellStates.FEEDING)
    uuid4 = self.getStorageUUID()
    server4 = ("127.0.0.4", 19001)
    sn4 = self.createStorage(server4, uuid4)
    pt._setCell(0, sn4, CellStates.DISCARDED)  # won't be added
    # the readable set must contain only the up-to-date and feeding nodes;
    # out-of-date and discarded ones are not taken into account
    self.assertEqual(pt.getNodeSet(True), {sn1, sn3})
    self.assertEqual(len(pt.getNodeSet()), 3)
def test_07_getNodeSet(self):
    num_partitions = 5
    num_replicas = 2
    pt = PartitionTable(num_partitions, num_replicas)
    # add two kinds of nodes, usable and unusable
    uuid1 = self.getStorageUUID()
    server1 = ("127.0.0.1", 19001)
    sn1 = StorageNode(Mock(), server1, uuid1)
    pt.setCell(0, sn1, CellStates.UP_TO_DATE)
    uuid2 = self.getStorageUUID()
    server2 = ("127.0.0.2", 19001)
    sn2 = StorageNode(Mock(), server2, uuid2)
    pt.setCell(0, sn2, CellStates.OUT_OF_DATE)
    uuid3 = self.getStorageUUID()
    server3 = ("127.0.0.3", 19001)
    sn3 = StorageNode(Mock(), server3, uuid3)
    pt.setCell(0, sn3, CellStates.FEEDING)
    uuid4 = self.getStorageUUID()
    server4 = ("127.0.0.4", 19001)
    sn4 = StorageNode(Mock(), server4, uuid4)
    pt.setCell(0, sn4, CellStates.DISCARDED)  # won't be added
    # the readable set must contain only the up-to-date and feeding nodes;
    # out-of-date and discarded ones are not taken into account
    self.assertEqual(pt.getNodeSet(True), {sn1, sn3})
    self.assertEqual(len(pt.getNodeSet()), 3)
def test_05_getCellList(self):
    num_partitions = 5
    num_replicas = 2
    pt = PartitionTable(num_partitions, num_replicas)
    # add two kinds of nodes, usable and unusable
    uuid1 = self.getStorageUUID()
    server1 = ("127.0.0.1", 19001)
    sn1 = self.createStorage(server1, uuid1)
    pt._setCell(0, sn1, CellStates.UP_TO_DATE)
    uuid2 = self.getStorageUUID()
    server2 = ("127.0.0.2", 19001)
    sn2 = self.createStorage(server2, uuid2)
    pt._setCell(0, sn2, CellStates.OUT_OF_DATE)
    uuid3 = self.getStorageUUID()
    server3 = ("127.0.0.3", 19001)
    sn3 = self.createStorage(server3, uuid3)
    pt._setCell(0, sn3, CellStates.FEEDING)
    uuid4 = self.getStorageUUID()
    server4 = ("127.0.0.4", 19001)
    sn4 = self.createStorage(server4, uuid4)
    pt._setCell(0, sn4, CellStates.DISCARDED)  # won't be added
    # now check the result
    self.assertEqual(len(pt.partition_list[0]), 3)
    for x in xrange(num_partitions):
        if x == 0:
            # all nodes
            all_cell = pt.getCellList(0)
            all_nodes = [x.getNode() for x in all_cell]
            self.assertEqual(len(all_cell), 3)
            self.assertTrue(sn1 in all_nodes)
            self.assertTrue(sn2 in all_nodes)
            self.assertTrue(sn3 in all_nodes)
            self.assertTrue(sn4 not in all_nodes)
            # readable nodes
            all_cell = pt.getCellList(0, readable=True)
            all_nodes = [x.getNode() for x in all_cell]
            self.assertEqual(len(all_cell), 2)
            self.assertTrue(sn1 in all_nodes)
            self.assertTrue(sn2 not in all_nodes)
            self.assertTrue(sn3 in all_nodes)
            self.assertTrue(sn4 not in all_nodes)
        else:
            self.assertEqual(len(pt.getCellList(1, False)), 0)
            self.assertEqual(len(pt.getCellList(1, True)), 0)
class Application(BaseApplication):
    """The admin node application."""

    def __init__(self, config):
        super(Application, self).__init__(
            config.getSSL(), config.getDynamicMasterList())
        for address in config.getMasters():
            self.nm.createMaster(address=address)
        self.name = config.getCluster()
        self.server = config.getBind()
        logging.debug('IP address is %s, port is %d', *self.server)

        # The partition table is initialized after getting the number of
        # partitions.
        self.pt = None
        self.uuid = config.getUUID()
        self.request_handler = MasterRequestEventHandler(self)
        self.master_event_handler = MasterEventHandler(self)
        self.cluster_state = None
        self.reset()
        registerLiveDebugger(on_log=self.log)

    def close(self):
        self.listening_conn = None
        super(Application, self).close()

    def reset(self):
        self.bootstrapped = False
        self.master_conn = None
        self.master_node = None

    def log(self):
        self.em.log()
        self.nm.log()
        if self.pt is not None:
            self.pt.log()

    def run(self):
        try:
            self._run()
        except Exception:
            logging.exception('Pre-mortem data:')
            self.log()
            logging.flush()
            raise

    def _run(self):
        """Make sure that the status is sane and start a loop."""
        if len(self.name) == 0:
            raise RuntimeError, 'cluster name must be non-empty'

        # Make a listening port.
        handler = AdminEventHandler(self)
        self.listening_conn = ListeningConnection(self, handler, self.server)

        while self.cluster_state != ClusterStates.STOPPING:
            self.connectToPrimary()
            try:
                while True:
                    self.em.poll(1)
            except PrimaryFailure:
                logging.error('primary master is down')
        self.listening_conn.close()
        while not self.em.isIdle():
            self.em.poll(1)

    def connectToPrimary(self):
        """Find a primary master node, and connect to it.

        If a primary master node is not elected or ready, repeat
        the attempt of a connection periodically.

        Note that I do not accept any connection from non-master nodes
        at this stage.
        """
        self.cluster_state = None
        # search, find, connect and identify to the primary master
        bootstrap = BootstrapManager(self, self.name, NodeTypes.ADMIN,
                self.uuid, self.server)
        data = bootstrap.getPrimaryConnection()
        (node, conn, uuid, num_partitions, num_replicas) = data
        self.master_node = node
        self.master_conn = conn
        self.uuid = uuid

        if self.pt is None:
            self.pt = PartitionTable(num_partitions, num_replicas)
        elif self.pt.getPartitions() != num_partitions:
            # XXX: shouldn't we recover instead of raising ?
            raise RuntimeError('the number of partitions is inconsistent')
        elif self.pt.getReplicas() != num_replicas:
            # XXX: shouldn't we recover instead of raising ?
            raise RuntimeError('the number of replicas is inconsistent')

        # passive handler
        self.master_conn.setHandler(self.master_event_handler)
        self.master_conn.ask(Packets.AskClusterState())
        self.master_conn.ask(Packets.AskNodeInformation())
        self.master_conn.ask(Packets.AskPartitionTable())

    def sendPartitionTable(self, conn, min_offset, max_offset, uuid):
        # we have a pt
        self.pt.log()
        row_list = []
        if max_offset == 0:
            max_offset = self.pt.getPartitions()
        try:
            for offset in xrange(min_offset, max_offset):
                row = []
                try:
                    for cell in self.pt.getCellList(offset):
                        if uuid is None or cell.getUUID() == uuid:
                            row.append((cell.getUUID(), cell.getState()))
                except TypeError:
                    pass
                row_list.append((offset, row))
        except IndexError:
            conn.notify(Errors.ProtocolError('invalid partition table offset'))
        else:
            conn.answer(Packets.AnswerPartitionList(self.pt.getID(), row_list))