def _add_pool(self, pool: PoolHandle, out_list: List[PoolDesc]) -> None:
    """Append a PoolDesc for *pool* to *out_list* (no-op if the pool has no layout).

    The pool name is derived from the storage-set name and the pool type
    (``<storage_set>__<pool_type>``).  Disk references are built for every
    device of every server node that participates in the pool.
    """
    conf = self.provider
    layout = self._get_layout(pool)
    if not layout:
        # Pools without a layout are skipped silently.
        return
    (cid, pool_type, i) = pool.tuple()
    storage_set_name = conf.get(f'cluster>{cid}>storage_set[{i}]>name')
    pool_name = f'{storage_set_name}__{pool_type}'
    allowed_failure = self._calculate_allowed_failure(layout)
    out_list.append(
        PoolDesc(
            name=Text(pool_name),
            disk_refs=Maybe(
                DList([
                    # One DiskRef per (node, device) pair; the node is
                    # identified by its private data-network FQDN.
                    DiskRef(path=Text(device),
                            node=Maybe(
                                Text(
                                    conf.get(f'server_node>{node}>'
                                             'network>data>private_fqdn')),
                                'Text'))
                    for node in self._get_server_nodes(pool)
                    for device in self._get_devices(pool, node)
                ], 'List DiskRef'),
                'List DiskRef'),
            data_units=layout.data,
            parity_units=layout.parity,
            spare_units=Maybe(layout.spare, 'Natural'),
            type=PoolType[pool_type],
            allowed_failures=Maybe(allowed_failure, 'AllowedFailures')))
def test_disks_empty(self):
    """An empty disk list must render with an explicit Dhall type annotation."""
    empty_disks = DisksDesc(meta_data=Maybe(None, 'Text'),
                            data=DList([], 'List Text'))
    desc = M0ServerDesc(runs_confd=Maybe(True, 'Bool'),
                        io_disks=empty_disks)
    expected = ('{ runs_confd = Some True, io_disks = '
                '{ meta_data = None Text, data = [] : List Text } }')
    self.assertEqual(expected, str(desc))
def _create_node(self, machine_id: str) -> NodeDesc:
    """Build the NodeDesc for one server node from ConfStore data.

    Creates one M0ServerDesc (IO service) per m0d per cvg, plus a single
    confd-running entry with no disks, and wraps the result together with
    network and instance-count settings.
    """
    store = self.provider
    hostname = store.get(
        f'server_node>{machine_id}>network>data>private_fqdn')
    name = store.get(f'server_node>{machine_id}>name')
    iface = self._get_iface(machine_id)
    try:
        no_m0clients = int(
            store.get('cortx>software>motr>service>client_instances',
                      allow_null=True))
    except TypeError:
        # Key absent: get(..., allow_null=True) returned None and
        # int(None) raised TypeError -> default to 2 client instances.
        no_m0clients = 2
    # Currently, there is 1 m0d per cvg.
    # We will create 1 IO service entry in CDF per cvg.
    # An IO service entry will use data devices from corresponding cvg.
    # meta data device is taken from motr-hare shared store.
    servers = DList([
        M0ServerDesc(io_disks=DisksDesc(
            data=self._get_data_devices(machine_id, cvg),
            meta_data=Maybe(self._get_metadata_device(name, cvg, m0d),
                            'Text')),
                     runs_confd=Maybe(False, 'Bool'))
        for cvg in range(
            len(store.get(f'server_node>{machine_id}>storage>cvg')))
        for m0d in range(self._get_m0d_per_cvg(name, cvg))
    ], 'List M0ServerDesc')
    # Adding a Motr confd entry per server node in CDF.
    # The `runs_confd` value (true/false) determines if Motr confd process
    # will be started on the node or not.
    servers.value.append(
        M0ServerDesc(io_disks=DisksDesc(data=DList([], 'List Text'),
                                        meta_data=Maybe(None, 'Text')),
                     runs_confd=Maybe(True, 'Bool')))
    return NodeDesc(
        hostname=Text(hostname),
        data_iface=Text(iface),
        data_iface_type=Maybe(self._get_iface_type(machine_id), 'P'),
        m0_servers=Maybe(servers, 'List M0ServerDesc'),
        #
        # [KN] This is a hotfix for singlenode deployment
        # TODO in the future the value must be taken from a correct
        # ConfStore key (it doesn't exist now).
        s3_instances=int(store.get('cortx>software>s3>service>instances')),
        client_instances=no_m0clients)
def _get_data_devices(self, machine_id: str, cvg: int) -> DList[Text]:
    """Return the data device paths configured for the given cvg of a node."""
    key = (f'server_node>{machine_id}>'
           f'storage>cvg[{cvg}]>data_devices')
    devices = self.provider.get(key)
    return DList([Text(dev) for dev in devices], 'List Text')
def test_m0server_with_disks(self):
    """A non-empty disk list renders without a Dhall type annotation."""
    val = M0ServerDesc(
        runs_confd=Maybe(True, 'Bool'),
        io_disks=DisksDesc(
            meta_data=Maybe(None, 'Text'),
            # Fixed: the type tag was the placeholder 'test'; it must be
            # 'List Text' as in the sibling tests.  The slip went unnoticed
            # because the tag is only rendered for empty lists.
            data=DList([Text('/disk1'), Text('/disk2')], 'List Text')))
    self.assertEqual(
        '{ runs_confd = Some True, io_disks = { meta_data = None Text, '
        'data = ["/disk1", "/disk2"] } }', str(val))
def _create_profile_descriptions(
        self, pool_desc: List[PoolDesc]) -> List[ProfileDesc]:
    """Create the single default profile that spans every given pool."""
    pool_names = DList([pool.name for pool in pool_desc], 'List Text')
    return [ProfileDesc(name=Text('Profile_the_pool'), pools=pool_names)]
def test_pooldesc_empty(self):
    """A PoolDesc with an empty disk list renders 'Some ([] : List DiskRef)'."""
    val = PoolDesc(name=Text('storage_set_name'),
                   # Fixed: the Maybe type tag was a literal list `[]`
                   # rather than the type string 'List DiskRef' used by
                   # production code.  The slip went unnoticed because the
                   # tag is only rendered for None values.
                   disk_refs=Maybe(DList([], 'List DiskRef'),
                                   'List DiskRef'),
                   data_units=0,
                   parity_units=0,
                   spare_units=Maybe(0, 'Natural'),
                   type=PoolType.sns,
                   allowed_failures=Maybe(None, 'AllowedFailures'))
    self.assertEqual(
        '{ name = "storage_set_name", disk_refs = Some ([] : List DiskRef), '
        'data_units = 0, parity_units = 0, spare_units = Some (0), type = T.PoolType.sns, '
        'allowed_failures = None AllowedFailures }', str(val))
def _create_node(self, name: str) -> NodeDesc:
    """Assemble the NodeDesc for the named cluster node from ConfStore."""
    store = self.provider
    prefix = f'cluster>{name}'
    devices = store.get(f'{prefix}>storage>data_devices')
    return NodeDesc(
        hostname=Text(store.get(f'{prefix}>hostname')),
        data_iface=Text(self._get_iface(name)),
        data_iface_type=Maybe(self._get_iface_type(name), 'P'),
        io_disks=DList([Text(dev) for dev in devices], 'List Text'),
        #
        # [KN] This is a hotfix for singlenode deployment
        # TODO in the future the value must be taken from a correct
        # ConfStore key (it doesn't exist now).
        meta_data=Text('/dev/vg_metadata_srvnode-1/lv_raw_metadata'),
        s3_instances=int(store.get(f'{prefix}>s3_instances')))
def get_drives_info_for(self, cvg: int, machine_id: str) -> DList[Disk]:
    """Fetch Consul drive info for every data device of the given cvg."""
    paths = self.get_data_devices(machine_id, cvg).value
    drives = [self.get_drive_info_from_consul(path, machine_id)
              for path in paths]
    return DList(drives, 'List Disk')
def get_data_devices(self, machine_id: str, cvg: int) -> DList[Text]:
    """Return the data device paths for the given cvg from ConfStore."""
    key = (f'node>{machine_id}>'
           f'storage>cvg[{cvg}]>devices>data')
    return DList([Text(dev) for dev in self.provider.get(key)],
                 'List Text')