def test_disks_empty(self):
    val = M0ServerDesc(
        runs_confd=Maybe(True, 'Bool'),
        io_disks=DisksDesc(meta_data=Maybe(None, 'Text'),
                           data=DList([], 'List Text')))
    self.assertEqual(
        '{ runs_confd = Some True, io_disks = '
        '{ meta_data = None Text, data = [] : List Text } }', str(val))
def _add_pool(self, pool: PoolHandle, out_list: List[PoolDesc]) -> None:
    conf = self.provider
    layout = self._get_layout(pool)
    if not layout:
        return
    (cid, pool_type, i) = pool.tuple()
    storage_set_name = conf.get(f'cluster>{cid}>storage_set[{i}]>name')
    pool_name = f'{storage_set_name}__{pool_type}'
    allowed_failure = self._calculate_allowed_failure(layout)
    out_list.append(
        PoolDesc(
            name=Text(pool_name),
            disk_refs=Maybe(
                DList([
                    DiskRef(
                        path=Text(device),
                        node=Maybe(
                            Text(
                                conf.get(f'server_node>{node}>'
                                         'network>data>private_fqdn')),
                            'Text'))
                    for node in self._get_server_nodes(pool)
                    for device in self._get_devices(pool, node)
                ], 'List DiskRef'), 'List DiskRef'),
            data_units=layout.data,
            parity_units=layout.parity,
            spare_units=Maybe(layout.spare, 'Natural'),
            type=PoolType[pool_type],
            allowed_failures=Maybe(allowed_failure, 'AllowedFailures')))
def _get_drive_info_form_os(self, path: str) -> Disk:
    # Seeking to the end of the opened device returns the new absolute
    # offset, i.e. the device size in bytes.
    with open(path, 'rb') as f:
        drive_size = f.seek(0, io.SEEK_END)
    return Disk(path=Maybe(Text(path), 'Text'),
                size=Maybe(drive_size, 'Natural'),
                blksize=Maybe(os.stat(path).st_blksize, 'Natural'))
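# A standalone sketch of the same seek idiom (the device path below is
# hypothetical). For block device nodes os.stat() typically reports
# st_size as 0, which is why the size is obtained by seeking instead:
#
#   import io
#   with open('/dev/sda', 'rb') as dev:
#       size_bytes = dev.seek(0, io.SEEK_END)  # seek() returns the new offset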
def test_m0server_with_disks(self):
    val = M0ServerDesc(
        runs_confd=Maybe(True, 'Bool'),
        io_disks=DisksDesc(
            meta_data=Maybe(None, 'Text'),
            data=DList([Text('/disk1'), Text('/disk2')], 'test')))
    self.assertEqual(
        '{ runs_confd = Some True, io_disks = { meta_data = None Text, '
        'data = ["/disk1", "/disk2"] } }', str(val))
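# An added illustration (not part of the original suite) restating the
# DList rendering contract implied by the two tests above: only an empty
# list carries an explicit Dhall type annotation, while a non-empty list
# renders just its elements.
def test_dlist_rendering_sketch(self):
    self.assertEqual('[] : List Text', str(DList([], 'List Text')))
    self.assertEqual('["/disk1", "/disk2"]',
                     str(DList([Text('/disk1'), Text('/disk2')], 'test')))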
def test_pooldesc_empty(self):
    val = PoolDesc(name=Text('storage_set_name'),
                   disk_refs=Maybe(DList([], 'List DiskRef'), []),
                   data_units=0,
                   parity_units=0,
                   spare_units=Maybe(0, 'Natural'),
                   type=PoolType.sns,
                   allowed_failures=Maybe(None, 'AllowedFailures'))
    self.assertEqual(
        '{ name = "storage_set_name", '
        'disk_refs = Some ([] : List DiskRef), '
        'data_units = 0, parity_units = 0, spare_units = Some (0), '
        'type = T.PoolType.sns, '
        'allowed_failures = None AllowedFailures }', str(val))
def get_drive_info_from_consul(self, path: Text, machine_id: str) -> Disk:
    hostname = self.get_hostname(machine_id)
    disk_path = json.loads(str(path)).lstrip(os.sep)
    drive_data = None
    drive_info = None
    # Drive details are published to the KV store asynchronously, so
    # poll until the key appears. kv_get() returning None makes the
    # 'Value' lookup raise TypeError, which keeps the loop going.
    while not drive_data:
        try:
            drive_data = self.kv.kv_get(f'{hostname}/drives/{disk_path}')
            drive_info = json.loads(drive_data['Value'])
        except TypeError:
            logging.info('%s details are not available yet, retrying...',
                         disk_path)
            sleep(2)
    return Disk(path=Maybe(path, 'Text'),
                size=Maybe(drive_info['size'], 'Natural'),
                blksize=Maybe(drive_info['blksize'], 'Natural'))
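# Shape of the KV payload polled above, inferred from the two lookups in
# the return statement (the key layout and example values are
# hypothetical, not taken from a live store):
#
#   import json
#   drive_data = {'Value': '{"size": 1099511627776, "blksize": 4096}'}
#   drive_info = json.loads(drive_data['Value'])
#   assert drive_info['blksize'] == 4096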
def _create_node(self, name: str) -> NodeDesc:
    store = self.provider
    hostname = store.get(f'cluster>{name}>hostname')
    iface = self._get_iface(name)
    return NodeDesc(
        hostname=Text(hostname),
        data_iface=Text(iface),
        data_iface_type=Maybe(self._get_iface_type(name), 'P'),
        io_disks=DList([
            Text(device) for device in
            store.get(f'cluster>{name}>storage>data_devices')
        ], 'List Text'),
        #
        # [KN] This is a hotfix for singlenode deployment
        # TODO in the future the value must be taken from a correct
        # ConfStore key (it doesn't exist now).
        meta_data=Text('/dev/vg_metadata_srvnode-1/lv_raw_metadata'),
        s3_instances=int(store.get(f'cluster>{name}>s3_instances')))
def _get_cdf_dhall(self) -> str:
    dhall_path = self._get_dhall_path()
    conf = self.provider
    nodes = self._create_node_descriptions()
    pools = self._create_pool_descriptions()
    profiles = self._create_profile_descriptions(pools)
    fdmi_filters = self._create_fdmi_filter_descriptions(nodes)

    create_aux = conf.get('cluster>create_aux', allow_null=True)
    if create_aux is None:
        create_aux = False

    params_text = str(
        ClusterDesc(create_aux=Maybe(create_aux, 'Bool'),
                    node_info=nodes,
                    pool_info=pools,
                    profile_info=profiles,
                    fdmi_filter_info=fdmi_filters))
    gencdf = Template(self._gencdf()).substitute(path=dhall_path,
                                                 params=params_text)
    return gencdf
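# The gencdf text is used as a string.Template, so it must contain $path
# and $params placeholders for the substitution above. A minimal sketch
# of that mechanism (the template text below is hypothetical, not the
# repo's real gencdf):
#
#   from string import Template
#   t = Template('let gencdf = $path in gencdf $params')
#   print(t.substitute(path='./dhall', params='{ create_aux = None Bool }'))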
def _create_node(self, machine_id: str) -> NodeDesc:
    store = self.provider
    hostname = store.get(
        f'server_node>{machine_id}>network>data>private_fqdn')
    name = store.get(f'server_node>{machine_id}>name')
    iface = self._get_iface(machine_id)
    try:
        no_m0clients = int(
            store.get('cortx>software>motr>service>client_instances',
                      allow_null=True))
    except TypeError:
        no_m0clients = 2
    # Currently there is 1 m0d per cvg, so we create one IO service
    # entry in the CDF per cvg. Each IO service entry uses the data
    # devices of the corresponding cvg; the meta data device is taken
    # from the motr-hare shared store.
    servers = DList([
        M0ServerDesc(
            io_disks=DisksDesc(
                data=self._get_data_devices(machine_id, cvg),
                meta_data=Maybe(self._get_metadata_device(name, cvg, m0d),
                                'Text')),
            runs_confd=Maybe(False, 'Bool'))
        for cvg in range(
            len(store.get(f'server_node>{machine_id}>storage>cvg')))
        for m0d in range(self._get_m0d_per_cvg(name, cvg))
    ], 'List M0ServerDesc')
    # Add a Motr confd entry per server node in the CDF. The
    # `runs_confd` value (true/false) determines whether the Motr confd
    # process will be started on the node or not.
    servers.value.append(
        M0ServerDesc(io_disks=DisksDesc(data=DList([], 'List Text'),
                                        meta_data=Maybe(None, 'Text')),
                     runs_confd=Maybe(True, 'Bool')))
    return NodeDesc(
        hostname=Text(hostname),
        data_iface=Text(iface),
        data_iface_type=Maybe(self._get_iface_type(machine_id), 'P'),
        m0_servers=Maybe(servers, 'List M0ServerDesc'),
        #
        # [KN] This is a hotfix for singlenode deployment
        # TODO in the future the value must be taken from a correct
        # ConfStore key (it doesn't exist now).
        s3_instances=int(store.get('cortx>software>s3>service>instances')),
        client_instances=no_m0clients)
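# Entry-count sketch for the comprehension above (numbers illustrative):
# with 2 cvgs and 1 m0d per cvg it yields 2 IO-service entries, and the
# append adds one confd-only entry, for 3 M0ServerDesc in total.
#
#   entries = [(cvg, m0d) for cvg in range(2) for m0d in range(1)]
#   assert len(entries) == 2  # + 1 confd entry appended afterwards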
def test_maybe_none(self):
    val = Maybe(None, 'P')
    self.assertEqual('None P', str(val))
    self.assertEqual('Some P.tcp', str(Maybe(Protocol.tcp, 'P')))
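# An added illustration (not part of the original suite), grounded in the
# renderings asserted elsewhere in this file: the Dhall type name is
# printed only for the None case, while Some delegates to the wrapped
# value's own str().
def test_maybe_rendering_sketch(self):
    self.assertEqual('None Text', str(Maybe(None, 'Text')))
    self.assertEqual('Some True', str(Maybe(True, 'Bool')))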
def _create_fdmi_filter_descriptions(
        self, nodes: List[NodeDesc]) -> Maybe[List[FdmiFilterDesc]]:
    # No FDMI filters are generated yet; emit an explicit Dhall None.
    return Maybe(None, 'List T.FdmiFilterDesc')