def show(cls, args, lstmsg):
    """Render the resource definitions in ``lstmsg`` as a table.

    :param args: parsed argparse namespace (table/format options).
    :param lstmsg: controller response carrying ``resource_definitions``.
    """
    table = linstor_client.Table(utf8=not args.no_utf8, colors=not args.no_color, pastable=args.pastable)

    # Optionally widen the table with an "External" column right after the name.
    headers = list(cls._rsc_dfn_headers)
    if args.external_name:
        headers.insert(1, linstor_client.TableHeader("External"))
    for header in headers:
        table.add_header(header)

    # Default grouping is by the first column (the resource name).
    table.set_groupby(args.groupby if args.groupby else [table.header_name(0)])

    for dfn in lstmsg.resource_definitions:
        drbd = dfn.drbd_data
        if FLAG_DELETE in dfn.flags:
            state_cell = table.color_cell("DELETING", Color.RED)
        else:
            state_cell = table.color_cell("ok", Color.DARKGREEN)

        row = [dfn.name]
        # Only string external names are shown; other values are skipped.
        if args.external_name and isinstance(dfn.external_name, str):
            row.append(dfn.external_name)
        row.append(drbd.port if drbd else "")
        row.append(dfn.resource_group_name)
        row.append(state_cell)
        table.add_row(row)

    table.show()
def show(cls, args, lstmsg):
    """Render the volume definitions in ``lstmsg`` as a table.

    :param args: parsed argparse namespace (table/format options).
    :param lstmsg: controller response carrying ``resource_definitions``,
        each with its ``volume_definitions``.
    """
    tbl = linstor_client.Table(utf8=not args.no_utf8, colors=not args.no_color, pastable=args.pastable)
    vlm_dfn_hdrs = list(cls._vlm_dfn_headers)
    if args.external_name:
        vlm_dfn_hdrs.insert(1, linstor_client.TableHeader("External"))
    for hdr in vlm_dfn_hdrs:
        tbl.add_header(hdr)
    tbl.set_groupby(args.groupby if args.groupby else [tbl.header_name(0)])
    for rsc_dfn in lstmsg.resource_definitions:
        for vlmdfn in rsc_dfn.volume_definitions:
            state = tbl.color_cell("ok", Color.DARKGREEN)
            if FLAG_DELETE in vlmdfn.flags:
                state = tbl.color_cell("DELETING", Color.RED)
            elif FLAG_RESIZE in vlmdfn.flags:
                state = tbl.color_cell("resizing", Color.DARKPINK)
            drbd_data = vlmdfn.drbd_data
            row = [rsc_dfn.name]
            # Fix: actually fill the "External" column inserted above.
            # Previously the rows were emitted without this cell, so they
            # did not line up with the headers when --external-name was used.
            if args.external_name and isinstance(rsc_dfn.external_name, str):
                row.append(rsc_dfn.external_name)
            row += [
                vlmdfn.number,
                drbd_data.minor if drbd_data else "",
                SizeCalc.approximate_size_string(vlmdfn.size),
                "+" if FLAG_GROSS_SIZE in vlmdfn.flags else "",
                state
            ]
            tbl.add_row(row)
    tbl.show()
def show_error_report_list(self, args, lstmsg):
    """Print the given error reports as a numbered table.

    :param args: parsed argparse namespace (table/format options).
    :param lstmsg: iterable of error-report objects with ``id``,
        ``datetime`` and ``node_names`` attributes.
    """
    tbl = linstor_client.Table(utf8=not args.no_utf8, colors=not args.no_color, pastable=args.pastable)
    tbl.add_header(linstor_client.TableHeader("Nr.", alignment_text=">"))
    tbl.add_header(linstor_client.TableHeader("Id"))
    tbl.add_header(linstor_client.TableHeader("Datetime"))
    tbl.add_header(linstor_client.TableHeader("Node"))

    # enumerate() replaces the manual `i = 1 ... i += 1` counter.
    for nr, error in enumerate(lstmsg, start=1):
        tbl.add_row([
            str(nr),
            error.id,
            str(error.datetime)[:19],  # trim to "YYYY-MM-DD HH:MM:SS"
            error.node_names
        ])
    tbl.show()
def show_nodes(cls, args, lstmsg):
    """Render the node list in ``lstmsg`` as a table.

    :param args: parsed argparse namespace (table/format options).
    :param lstmsg: controller response carrying ``nodes``.
    """
    tbl = linstor_client.Table(utf8=not args.no_utf8, colors=not args.no_color, pastable=args.pastable)
    node_hdr = list(cls._node_headers)
    if args.show_aux_props:
        # Insert before the last column (the state column).
        node_hdr.insert(-1, linstor_client.TableHeader("AuxProps"))
    for hdr in node_hdr:
        tbl.add_header(hdr)

    # Maps a connection-status name to its (display text, color) cell.
    conn_stat_dict = {
        apiconsts.ConnectionStatus.OFFLINE.name: ("OFFLINE", Color.RED),
        apiconsts.ConnectionStatus.CONNECTED.name: ("Connected", Color.YELLOW),
        apiconsts.ConnectionStatus.ONLINE.name: ("Online", Color.GREEN),
        apiconsts.ConnectionStatus.VERSION_MISMATCH.name: ("OFFLINE(VERSION MISMATCH)", Color.RED),
        apiconsts.ConnectionStatus.FULL_SYNC_FAILED.name: ("OFFLINE(FULL SYNC FAILED)", Color.RED),
        apiconsts.ConnectionStatus.AUTHENTICATION_ERROR.name: ("OFFLINE(AUTHENTICATION ERROR)", Color.RED),
        apiconsts.ConnectionStatus.UNKNOWN.name: ("Unknown", Color.YELLOW),
        apiconsts.ConnectionStatus.HOSTNAME_MISMATCH.name: ("OFFLINE(HOSTNAME MISMATCH)", Color.RED),
        apiconsts.ConnectionStatus.OTHER_CONTROLLER.name: ("OFFLINE(OTHER_CONTROLLER)", Color.RED),
        apiconsts.ConnectionStatus.NO_STLT_CONN.name: ("OFFLINE(NO CONNECTION TO SATELLITE)", Color.RED)
    }
    tbl.set_groupby(args.groupby if args.groupby else [tbl.header_name(0)])
    for node in lstmsg.nodes:
        # concat a ip list with satellite connection indicator
        active_ip = ""
        for net_if in node.net_interfaces:
            if net_if.is_active and net_if.stlt_port:
                active_ip = net_if.address + ":" + str(net_if.stlt_port) + " (" + net_if.stlt_encryption_type + ")"

        aux_props = ["{k}={v}".format(k=k, v=v)
                     for k, v in node.properties.items()
                     if k.startswith(apiconsts.NAMESPC_AUXILIARY + '/')]

        if apiconsts.FLAG_EVICTED in node.flags:
            conn_stat = (apiconsts.FLAG_EVICTED, Color.RED)
        elif apiconsts.FLAG_DELETE in node.flags:
            conn_stat = (apiconsts.FLAG_DELETE, Color.RED)
        else:
            # Fix: fall back to a "Unknown" cell for statuses not in the map;
            # previously .get() could return None and conn_stat[0] below
            # would raise a TypeError.
            conn_stat = conn_stat_dict.get(node.connection_status, ("Unknown", Color.YELLOW))

        row = [node.name, node.type, active_ip]
        if args.show_aux_props:
            row.append("\n".join(aux_props))
        row += [tbl.color_cell(conn_stat[0], conn_stat[1])]
        tbl.add_row(row)
    tbl.show()
def show_backups(cls, args, lstmsg):
    """Render the backup list (or the plain "other files" list) as a table.

    :param args: parsed argparse namespace (table/format options).
    :param lstmsg: backup list response with ``linstor`` and ``other`` parts.
    """
    tbl = Table(utf8=not args.no_utf8, colors=not args.no_color, pastable=args.pastable)

    # Non-linstor files in the remote: just a one-column listing.
    if args.others:
        for hdr in cls._backup_other_headers:
            tbl.add_header(hdr)
        for file_entry in lstmsg.other.files:
            tbl.add_row([file_entry])
        tbl.show()
        return

    for hdr in list(cls._backup_headers):
        tbl.add_header(hdr)
    if args.show_id:
        tbl.add_header(linstor_client.TableHeader("Backup Name(ID)"))

    for backup in lstmsg.linstor:
        # resource, snapshot, finish time, base, status
        if backup.finished_timestamp:
            # timestamp is in milliseconds
            finished = datetime.fromtimestamp(int(backup.finished_timestamp / 1000))
        else:
            finished = ""

        if backup.shipping:
            status_text, status_color = "Shipping", Color.YELLOW
        elif not backup.restorable:
            status_text, status_color = "Not restorable", Color.RED
        else:
            status_text, status_color = "Success", Color.GREEN

        row = [
            backup.origin_rsc_name,
            backup.origin_snap_name,
            finished,
            # drop the trailing suffix of the base backup name —
            # presumably a fixed 5-char extension; TODO confirm
            backup.based_on[0:-5] if backup.based_on else "",
            tbl.color_cell(status_text, status_color),
        ]
        if args.show_id:
            row.append(backup.id)
        tbl.add_row(row)
    tbl.show()
class VolumeDefinitionCommands(Commands):
    """Argparse setup and handlers for the ``volume-definition`` subcommands."""

    _vlm_dfn_headers = [
        linstor_client.TableHeader("ResourceName"),
        linstor_client.TableHeader("VolumeNr"),
        linstor_client.TableHeader("VolumeMinor"),
        linstor_client.TableHeader("Size"),
        linstor_client.TableHeader("State", color=Color.DARKGREEN)
    ]

    VOLUME_SIZE_HELP = \
        'Size of the volume. ' \
        'Valid units: ' + SizeCalc.UNITS_LIST_STR + '. ' \
        'The default unit is GiB (2 ^ 30 bytes). ' \
        'The unit can be specified with a postfix. ' \
        'Linstor\'s internal granularity for the capacity of volumes is one ' \
        'kibibyte (2 ^ 10 bytes). The actual size used by linstor ' \
        'is the smallest natural number of kibibytes that is large enough to ' \
        'accommodate a volume of the requested size in the specified size unit.'

    def __init__(self):
        super(VolumeDefinitionCommands, self).__init__()

    def setup_commands(self, parser):
        """Register the volume-definition parser and all its subcommands."""
        # volume definition subcommands
        subcmds = [
            Commands.Subcommands.Create,
            Commands.Subcommands.List,
            Commands.Subcommands.Delete,
            Commands.Subcommands.SetSize,
            Commands.Subcommands.SetProperty,
            Commands.Subcommands.ListProperties,
            Commands.Subcommands.DrbdOptions
        ]

        vol_def_parser = parser.add_parser(
            Commands.VOLUME_DEF,
            aliases=["vd"],
            formatter_class=argparse.RawTextHelpFormatter,
            description="Volume definition subcommands")

        vol_def_subp = vol_def_parser.add_subparsers(
            title="Volume definition commands",
            metavar="",
            description=Commands.Subcommands.generate_desc(subcmds))

        # new volume definition
        p_new_vol = vol_def_subp.add_parser(
            Commands.Subcommands.Create.LONG,
            aliases=[Commands.Subcommands.Create.SHORT],
            description='Defines a volume with a capacity of size for use with '
            'linstor. If the resource resname exists already, a new volume is '
            'added to that resource, otherwise the resource is created automatically '
            'with default settings. Unless minornr is specified, a minor number for '
            "the volume's DRBD block device is assigned automatically by the "
            'linstor server.')
        p_new_vol.add_argument(
            '--storage-pool', '-s',
            type=namecheck(STORPOOL_NAME),
            help="Storage pool name to use.").completer = self.storage_pool_dfn_completer
        p_new_vol.add_argument('-n', '--vlmnr', type=int)
        p_new_vol.add_argument('-m', '--minor', type=int)
        p_new_vol.add_argument(
            '--encrypt',
            action="store_true",
            help="Encrypt created volumes using cryptsetup.")
        p_new_vol.add_argument(
            'resource_name',
            type=namecheck(RES_NAME),
            help='Name of an existing resource').completer = self.resource_dfn_completer
        p_new_vol.add_argument(
            'size',
            help=VolumeDefinitionCommands.VOLUME_SIZE_HELP
        ).completer = VolumeDefinitionCommands.size_completer
        p_new_vol.set_defaults(func=self.create)

        # remove-volume definition
        p_rm_vol = vol_def_subp.add_parser(
            Commands.Subcommands.Delete.LONG,
            aliases=[Commands.Subcommands.Delete.SHORT],
            description='Removes a volume definition from the linstor cluster, and removes '
            'the volume definition from the resource definition. The volume is '
            'undeployed from all nodes and the volume entry is marked for removal '
            "from the resource definition in linstor's data tables. After all "
            'nodes have undeployed the volume, the volume entry is removed from '
            'the resource definition.')
        p_rm_vol.add_argument(
            '-q', '--quiet',
            action="store_true",
            help='Unless this option is used, linstor will issue a safety question '
            'that must be answered with yes, otherwise the operation is canceled.')
        p_rm_vol.add_argument(
            'resource_name',
            help='Resource name of the volume definition').completer = self.resource_dfn_completer
        p_rm_vol.add_argument('volume_nr', type=int, help="Volume number to delete.")
        p_rm_vol.set_defaults(func=self.delete)

        # list volume definitions
        vlm_dfn_groupby = [x.name for x in self._vlm_dfn_headers]
        vlm_dfn_group_completer = Commands.show_group_completer(vlm_dfn_groupby, "groupby")

        p_lvols = vol_def_subp.add_parser(
            Commands.Subcommands.List.LONG,
            aliases=[Commands.Subcommands.List.SHORT],
            description=' Prints a list of all volume definitions known to linstor. '
            'By default, the list is printed as a human readable table.')
        p_lvols.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output')
        p_lvols.add_argument(
            '-g', '--groupby',
            nargs='+',
            choices=vlm_dfn_groupby).completer = vlm_dfn_group_completer
        p_lvols.add_argument(
            '-R', '--resources',
            nargs='+',
            type=namecheck(RES_NAME),
            help='Filter by list of resources').completer = self.resource_dfn_completer
        p_lvols.set_defaults(func=self.list)

        # show properties
        p_sp = vol_def_subp.add_parser(
            Commands.Subcommands.ListProperties.LONG,
            aliases=[Commands.Subcommands.ListProperties.SHORT],
            description="Prints all properties of the given volume definition.")
        p_sp.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output')
        p_sp.add_argument(
            'resource_name',
            help="Resource name").completer = self.resource_dfn_completer
        p_sp.add_argument('volume_nr', type=int, help="Volume number")
        p_sp.set_defaults(func=self.print_props)

        # set properties
        p_setprop = vol_def_subp.add_parser(
            Commands.Subcommands.SetProperty.LONG,
            aliases=[Commands.Subcommands.SetProperty.SHORT],
            description='Sets properties for the given volume definition.')
        p_setprop.add_argument(
            'resource_name',
            help="Resource name").completer = self.resource_dfn_completer
        p_setprop.add_argument('volume_nr', type=int, help="Volume number")
        Commands.add_parser_keyvalue(p_setprop, "volume-definition")
        p_setprop.set_defaults(func=self.set_props)

        # drbd options
        p_drbd_opts = vol_def_subp.add_parser(
            Commands.Subcommands.DrbdOptions.LONG,
            aliases=[Commands.Subcommands.DrbdOptions.SHORT],
            description="Set drbd volume options.")
        p_drbd_opts.add_argument(
            'resource_name',
            type=namecheck(RES_NAME),
            help="Resource name").completer = self.resource_dfn_completer
        p_drbd_opts.add_argument('volume_nr', type=int, help="Volume number")
        # Only offer the drbd options applicable at volume scope.
        DrbdOptions.add_arguments(p_drbd_opts, [
            x for x in DrbdOptions.drbd_options()['options']
            if x in DrbdOptions.drbd_options()['filters']['volume']
        ])
        p_drbd_opts.set_defaults(func=self.set_drbd_opts)

        # set size
        p_set_size = vol_def_subp.add_parser(
            Commands.Subcommands.SetSize.LONG,
            aliases=[Commands.Subcommands.SetSize.SHORT],
            description='Change the size of a volume. '
            'Decreasing the size is only supported when the resource definition does not have any '
            'resources. '
            'Increasing the size is supported even when the resource definition has resources. '
            'Filesystems present on the volumes will not be resized.')
        p_set_size.add_argument(
            'resource_name',
            type=namecheck(RES_NAME),
            help='Name of an existing resource').completer = self.resource_dfn_completer
        p_set_size.add_argument('volume_nr', type=int, help="Volume number")
        p_set_size.add_argument(
            'size',
            help=VolumeDefinitionCommands.VOLUME_SIZE_HELP
        ).completer = VolumeDefinitionCommands.size_completer
        p_set_size.set_defaults(func=self.set_volume_size)

        self.check_subcommands(vol_def_subp, subcmds)

    def create(self, args):
        """Create a volume definition from the parsed ``create`` arguments."""
        replies = self._linstor.volume_dfn_create(
            args.resource_name,
            self._get_volume_size(args.size),
            args.vlmnr,
            args.minor,
            args.encrypt,
            args.storage_pool)
        return self.handle_replies(args, replies)

    def delete(self, args):
        """Delete the given volume definition."""
        replies = self._linstor.volume_dfn_delete(args.resource_name, args.volume_nr)
        return self.handle_replies(args, replies)

    @classmethod
    def show(cls, args, lstmsg):
        """Render the volume definitions in ``lstmsg`` as a table."""
        tbl = linstor_client.Table(utf8=not args.no_utf8, colors=not args.no_color, pastable=args.pastable)
        for hdr in cls._vlm_dfn_headers:
            tbl.add_header(hdr)
        tbl.set_groupby(args.groupby if args.groupby else [tbl.header_name(0)])
        for rsc_dfn in cls.filter_rsc_dfn_list(lstmsg.rsc_dfns, args.resources):
            for vlmdfn in rsc_dfn.vlm_dfns:
                state = tbl.color_cell("ok", Color.DARKGREEN)
                # NOTE: the DELETING state reflects the resource definition's
                # flags, not the volume definition's.
                if FLAG_DELETE in rsc_dfn.rsc_dfn_flags:
                    state = tbl.color_cell("DELETING", Color.RED)
                elif FLAG_RESIZE in vlmdfn.vlm_flags:
                    state = tbl.color_cell("resizing", Color.DARKPINK)
                tbl.add_row([
                    rsc_dfn.rsc_name,
                    vlmdfn.vlm_nr,
                    vlmdfn.vlm_minor,
                    SizeCalc.approximate_size_string(vlmdfn.vlm_size),
                    state
                ])
        tbl.show()

    def list(self, args):
        """List all volume definitions."""
        lstmsg = self._linstor.resource_dfn_list()
        return self.output_list(args, lstmsg, self.show)

    @classmethod
    def _get_volume_size(cls, size_str):
        """Parse ``size_str`` (number + optional unit) into KiB.

        Exits with ``ExitCode.ARGPARSE_ERROR`` on an invalid number or unit.
        """
        m = re.match(r'(\d+)(\D*)', size_str)

        size = 0
        try:
            size = int(m.group(1))
        except AttributeError:
            sys.stderr.write('Size is not a valid number\n')
            sys.exit(ExitCode.ARGPARSE_ERROR)

        unit_str = m.group(2)
        if unit_str == "":
            unit_str = "GiB"  # default unit
        try:
            _, unit = SizeCalc.UNITS_MAP[unit_str.lower()]
        except KeyError:
            sys.stderr.write('"%s" is not a valid unit!\n' % (unit_str))
            sys.stderr.write('Valid units: %s\n' % SizeCalc.UNITS_LIST_STR)
            sys.exit(ExitCode.ARGPARSE_ERROR)
        # (fixed: removed a redundant second UNITS_MAP lookup here)

        if unit != SizeCalc.UNIT_KiB:
            size = SizeCalc.convert_round_up(size, unit, SizeCalc.UNIT_KiB)

        return size

    @staticmethod
    def size_completer(prefix, **kwargs):
        """Shell-completion helper: complete the unit part of a size argument."""
        choices = [unit_str for unit_str, _ in SizeCalc.UNITS_MAP.values()]
        m = re.match(r'(\d+)(\D*)', prefix)
        if m is None:
            # Fix: a prefix without leading digits previously crashed with
            # an AttributeError; offer no completions instead.
            return []
        digits = m.group(1)
        unit = m.group(2)
        if unit and unit != "":
            p_units = [x for x in choices if x.startswith(unit)]
        else:
            p_units = choices
        return [digits + u for u in p_units]

    @classmethod
    def _props_list(cls, args, lstmsg):
        """Collect the property maps of the selected volume definition."""
        result = []
        if lstmsg:
            for rsc_dfn in [x for x in lstmsg.rsc_dfns if x.rsc_name == args.resource_name]:
                for vlmdfn in rsc_dfn.vlm_dfns:
                    if vlmdfn.vlm_nr == args.volume_nr:
                        result.append(vlmdfn.vlm_props)
                        break
        return result

    def print_props(self, args):
        """Print all properties of the given volume definition."""
        lstmsg = self._linstor.resource_dfn_list()
        return self.output_props_list(args, lstmsg, self._props_list)

    def set_props(self, args):
        """Set or delete a single property on the given volume definition."""
        args = self._attach_aux_prop(args)
        mod_prop_dict = Commands.parse_key_value_pairs([args.key + '=' + args.value])
        replies = self._linstor.volume_dfn_modify(
            args.resource_name,
            args.volume_nr,
            set_properties=mod_prop_dict['pairs'],
            delete_properties=mod_prop_dict['delete'])
        return self.handle_replies(args, replies)

    def set_drbd_opts(self, args):
        """Apply drbd option changes to the given volume definition."""
        a = DrbdOptions.filter_new(args)
        del a['resource-name']  # remove resource name key
        del a['volume-nr']

        mod_props, del_props = DrbdOptions.parse_opts(a)

        replies = self._linstor.volume_dfn_modify(
            args.resource_name,
            args.volume_nr,
            set_properties=mod_props,
            delete_properties=del_props)
        return self.handle_replies(args, replies)

    def set_volume_size(self, args):
        """Resize the given volume definition."""
        replies = self._linstor.volume_dfn_modify(
            args.resource_name,
            args.volume_nr,
            size=self._get_volume_size(args.size))
        return self.handle_replies(args, replies)
class ResourceCommands(Commands): CONN_OBJECT_NAME = 'rsc-conn' _resource_headers = [ linstor_client.TableHeader("ResourceName"), linstor_client.TableHeader("Node"), linstor_client.TableHeader("Port"), linstor_client.TableHeader("Usage", Color.DARKGREEN), linstor_client.TableHeader("Conns", Color.DARKGREEN), linstor_client.TableHeader("State", Color.DARKGREEN, alignment_text=linstor_client.TableHeader.ALIGN_RIGHT), linstor_client.TableHeader("CreatedOn") ] def __init__(self, state_service): super(ResourceCommands, self).__init__() self._state_service = state_service def setup_commands(self, parser): subcmds = [ Commands.Subcommands.Create, Commands.Subcommands.MakeAvailable, Commands.Subcommands.List, Commands.Subcommands.ListVolumes, Commands.Subcommands.Delete, Commands.Subcommands.SetProperty, Commands.Subcommands.ListProperties, Commands.Subcommands.DrbdPeerDeviceOptions, Commands.Subcommands.ToggleDisk, Commands.Subcommands.CreateTransactional, Commands.Subcommands.Activate, Commands.Subcommands.Deactivate, Commands.Subcommands.Involved ] # Resource subcommands res_parser = parser.add_parser( Commands.RESOURCE, aliases=["r"], formatter_class=argparse.RawTextHelpFormatter, description="Resouce subcommands") res_subp = res_parser.add_subparsers( title="resource commands", metavar="", description=Commands.Subcommands.generate_desc(subcmds) ) # new-resource p_new_res = res_subp.add_parser( Commands.Subcommands.Create.LONG, aliases=[Commands.Subcommands.Create.SHORT], description='Deploys a resource definition to a node.') p_new_res.add_argument( '--diskless', '-d', action="store_true", help='DEPRECATED. 
Use --nvme-initiator or --drbd-diskless instead' ) p_new_res.add_argument( '--node-id', type=int, help='Override the automatic selection of DRBD node ID' ) p_new_res.add_argument( '--async', action='store_true', help='Deprecated, kept for compatibility' ) p_new_res.add_argument( '--nvme-initiator', action="store_true", help='Mark this resource as initiator' ) p_new_res.add_argument( '--drbd-diskless', action="store_true", help='Mark this resource as drbd diskless' ) p_new_res.add_argument( '--inactive', action="store_true", help="Marks the resource created as inactive" ) self.add_auto_select_argparse_arguments(p_new_res) p_new_res.add_argument( 'node_name', type=str, nargs='*', help='Name of the node to deploy the resource').completer = self.node_completer p_new_res.add_argument( 'resource_definition_name', type=str, help='Name of the resource definition').completer = self.resource_dfn_completer p_new_res.set_defaults(func=self.create, allowed_states=[DefaultState, ResourceCreateTransactionState]) # make available p_mkavial = res_subp.add_parser( Commands.Subcommands.MakeAvailable.LONG, aliases=[Commands.Subcommands.MakeAvailable.SHORT], description='Make a resource available on a node, noop if already exists.') p_mkavial.add_argument( '--diskful', action="store_true", help='Make the resource diskful on the node.' ) p_mkavial.add_argument( '-l', '--layer-list', type=self.layer_data_check, help="Comma separated layer list, order is from left to right top-down " "This means the top most layer is on the left. 
" "Possible layers are: " + ",".join(linstor.Linstor.layer_list())) p_mkavial.add_argument( 'node_name', type=str, help='Name of the node to deploy the resource').completer = self.node_completer p_mkavial.add_argument( 'resource_name', type=str, help='Name of the resource definition').completer = self.resource_dfn_completer p_mkavial.set_defaults(func=self.make_available) # remove-resource p_rm_res = res_subp.add_parser( Commands.Subcommands.Delete.LONG, aliases=[Commands.Subcommands.Delete.SHORT], description='Removes a resource. ' 'The resource is undeployed from the node ' "and the resource entry is marked for removal from linstor's data " 'tables. After the node has undeployed the resource, the resource ' "entry is removed from linstor's data tables.") p_rm_res.add_argument( '--async', action='store_true', help='Deprecated, kept for compatibility' ) p_rm_res.add_argument('node_name', nargs="+", help='Name of the node').completer = self.node_completer p_rm_res.add_argument('name', help='Name of the resource to delete').completer = self.resource_completer p_rm_res.set_defaults(func=self.delete) resgroupby = [x.name.lower() for x in ResourceCommands._resource_headers] res_group_completer = Commands.show_group_completer(resgroupby, "groupby") p_lreses = res_subp.add_parser( Commands.Subcommands.List.LONG, aliases=[Commands.Subcommands.List.SHORT], description='Prints a list of all resource definitions known to ' 'linstor. 
By default, the list is printed as a human readable table.') p_lreses.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output') p_lreses.add_argument( '-g', '--groupby', nargs='+', choices=resgroupby, type=str.lower).completer = res_group_completer p_lreses.add_argument( '-r', '--resources', nargs='+', type=str, help='Filter by list of resources').completer = self.resource_completer p_lreses.add_argument( '-n', '--nodes', nargs='+', type=str, help='Filter by list of nodes').completer = self.node_completer p_lreses.add_argument( '-a', '--all', action="store_true", help='Show all resources.') p_lreses.add_argument( '--faulty', action="store_true", help='Only show faulty resource.') p_lreses.add_argument('--props', nargs='+', type=str, help='Filter list by object properties') p_lreses.set_defaults(func=self.list) p_involved = res_subp.add_parser( Commands.Subcommands.Involved.LONG, aliases=[Commands.Subcommands.Involved.SHORT], description='Prints a list of resourced involved on a given node') p_involved.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output') p_involved.add_argument('-i', '--inuse', action="store_true", help='Only show resource bundles that are used.') p_involved.add_argument( '-d', '--diskless-inuse', action="store_true", help='Only show resource bundles that have a diskless in-use.') p_involved.add_argument( '-m', '--min-diskful', type=int, default=None, help='Only show resource bundles that have less diskful replicas.') p_involved.add_argument( 'node', type=str, help='Node name where a resource is used').completer = self.node_completer p_involved.set_defaults(func=self.involved) # list volumes p_lvlms = res_subp.add_parser( Commands.Subcommands.ListVolumes.LONG, aliases=[Commands.Subcommands.ListVolumes.SHORT], description='Prints a list of all volumes.' 
) p_lvlms.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output') p_lvlms.add_argument( '-n', '--nodes', nargs='+', type=str, help='Filter by list of nodes').completer = self.node_completer p_lvlms.add_argument('-s', '--storage-pools', nargs='+', type=str, help='Filter by list of storage pools').completer = self.storage_pool_completer p_lvlms.add_argument( '-r', '--resources', nargs='+', type=str, help='Filter by list of resources').completer = self.resource_completer p_lvlms.add_argument( '-a', '--all', action="store_true", help='Show all resources.' ) p_lvlms.set_defaults(func=self.list_volumes) # show properties p_sp = res_subp.add_parser( Commands.Subcommands.ListProperties.LONG, aliases=[Commands.Subcommands.ListProperties.SHORT], description="Prints all properties of the given resource.") p_sp.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output') p_sp.add_argument( 'node_name', help="Node name where the resource is deployed.").completer = self.node_completer p_sp.add_argument( 'resource_name', help="Resource name").completer = self.resource_completer p_sp.set_defaults(func=self.print_props) # set properties p_setprop = res_subp.add_parser( Commands.Subcommands.SetProperty.LONG, aliases=[Commands.Subcommands.SetProperty.SHORT], formatter_class=argparse.RawTextHelpFormatter, description='Sets properties for the given resource on the given node.') p_setprop.add_argument( 'node_name', type=str, help='Node name where resource is deployed.').completer = self.node_completer p_setprop.add_argument( 'name', type=str, help='Name of the resource' ).completer = self.resource_completer Commands.add_parser_keyvalue(p_setprop, "resource") p_setprop.set_defaults(func=self.set_props) # drbd peer device options p_drbd_peer_opts = res_subp.add_parser( Commands.Subcommands.DrbdPeerDeviceOptions.LONG, aliases=[Commands.Subcommands.DrbdPeerDeviceOptions.SHORT], description=DrbdOptions.description("peer-device") ) 
p_drbd_peer_opts.add_argument( 'node_a', type=str, help="1. Node in the node connection" ).completer = self.node_completer p_drbd_peer_opts.add_argument( 'node_b', type=str, help="1. Node in the node connection" ).completer = self.node_completer p_drbd_peer_opts.add_argument( 'resource_name', type=str, help="Resource name" ).completer = self.resource_completer DrbdOptions.add_arguments(p_drbd_peer_opts, self.CONN_OBJECT_NAME) p_drbd_peer_opts.set_defaults(func=self.drbd_peer_opts) # toggle-disk p_toggle_disk = res_subp.add_parser( Commands.Subcommands.ToggleDisk.LONG, aliases=[Commands.Subcommands.ToggleDisk.SHORT], description='Toggles a resource between diskless and having disks.') p_toggle_disk_group_storage = p_toggle_disk.add_mutually_exclusive_group(required=False) p_toggle_disk_group_storage.add_argument( '--storage-pool', '-s', type=str, help="Add disks to a diskless resource using this storage pool name" ).completer = self.storage_pool_dfn_completer p_toggle_disk_group_storage.add_argument( '--default-storage-pool', '--dflt', action='store_true', help="Add disks to a diskless resource using the storage pools determined from the properties of the " "objects to which the volumes belong" ) p_toggle_disk_group_storage.add_argument( '--diskless', '-d', action='store_true', help="Remove the disks from a resource (toggles --drbd-diskless)" ) p_toggle_disk.add_argument( '--async', action='store_true', help='Deprecated, kept for compatibility' ) p_toggle_disk.add_argument( '--migrate-from', type=str, metavar="MIGRATION_SOURCE", help='Name of the node on which the resource should be deleted once the sync is complete. ' 'Only applicable when adding a disk to a diskless resource. ' 'The command will complete once the new disk has been added; ' 'the deletion will occur later in the background.' 
).completer = self.node_completer p_toggle_disk.add_argument( 'node_name', type=str, help='Node name where resource is deployed' ).completer = self.node_completer p_toggle_disk.add_argument( 'name', type=str, help='Name of the resource' ).completer = self.resource_dfn_completer p_toggle_disk.set_defaults(func=self.toggle_disk, parser=p_toggle_disk) # activate/deactivate resource commands p_activate = res_subp.add_parser( Commands.Subcommands.Activate.LONG, aliases=[Commands.Subcommands.Activate.SHORT], description='Activate a resource.') p_activate.add_argument( 'node_name', type=str, help='Node name of the resource').completer = self.node_completer p_activate.add_argument( 'resource_name', type=str, help='Name of the resource').completer = self.resource_dfn_completer p_activate.set_defaults(func=self.activate) p_deactivate = res_subp.add_parser( Commands.Subcommands.Deactivate.LONG, aliases=[Commands.Subcommands.Deactivate.SHORT], description='Deactivate a resource.') p_deactivate.add_argument( 'node_name', type=str, help='Node name of the resource').completer = self.node_completer p_deactivate.add_argument( 'resource_name', type=str, help='Name of the resource').completer = self.resource_dfn_completer p_deactivate.set_defaults(func=self.deactivate) # resource creation transaction commands transactional_create_subcmds = [ Commands.Subcommands.Begin, Commands.Subcommands.Abort, Commands.Subcommands.Commit ] transactional_create_parser = res_subp.add_parser( Commands.Subcommands.CreateTransactional.LONG, formatter_class=argparse.RawTextHelpFormatter, aliases=[Commands.Subcommands.CreateTransactional.SHORT], description="%s subcommands" % Commands.Subcommands.CreateTransactional.LONG) transactional_create_subp = transactional_create_parser.add_subparsers( title="%s subcommands" % Commands.Subcommands.CreateTransactional.LONG, description=Commands.Subcommands.generate_desc(transactional_create_subcmds)) # begin resource creation transaction 
p_transactional_create_begin = transactional_create_subp.add_parser( Commands.Subcommands.Begin.LONG, aliases=[Commands.Subcommands.Begin.SHORT], description='Start group of resources to create in a single transaction.') p_transactional_create_begin.add_argument( '--terminate-on-error', action='store_true', help='Abort the transaction when any command fails' ) p_transactional_create_begin.set_defaults(func=self.transactional_create_begin) # abort resource creation transaction p_transactional_create_abort = transactional_create_subp.add_parser( Commands.Subcommands.Abort.LONG, aliases=[Commands.Subcommands.Abort.SHORT], description='Abort resource creation transaction.') p_transactional_create_abort.set_defaults( func=self.transactional_create_abort, allowed_states=[ResourceCreateTransactionState]) # commit resource creation transaction p_transactional_create_commit = transactional_create_subp.add_parser( Commands.Subcommands.Commit.LONG, aliases=[Commands.Subcommands.Commit.SHORT], description='Create resources defined in the current resource creation transaction.') p_transactional_create_commit.add_argument( '--async', action='store_true', help='Deprecated, kept for compatibility' ) p_transactional_create_commit.set_defaults( func=self.transactional_create_commit, allowed_states=[ResourceCreateTransactionState]) self.check_subcommands(transactional_create_subp, transactional_create_subcmds) self.check_subcommands(res_subp, subcmds) def create(self, args): async_flag = vars(args)["async"] current_state = self._state_service.get_state() if args.auto_place: if current_state.__class__ == ResourceCreateTransactionState: print("Error: --auto-place not allowed in state '{state.name}'".format(state=current_state)) return ExitCode.ILLEGAL_STATE place_count, additional_place_count, diskless_type = self.parse_place_count_args(args) # auto-place resource replies = self._linstor.resource_auto_place( args.resource_definition_name, place_count, args.storage_pool, 
args.do_not_place_with, args.do_not_place_with_regex, replicas_on_same=self.prepare_argparse_list(args.replicas_on_same, linstor.consts.NAMESPC_AUXILIARY + '/'), replicas_on_different=self.prepare_argparse_list( args.replicas_on_different, linstor.consts.NAMESPC_AUXILIARY + '/'), diskless_on_remaining=self.parse_diskless_on_remaining(args), async_msg=async_flag, layer_list=args.layer_list, provider_list=args.providers, additional_place_count=additional_place_count, diskless_type=diskless_type, diskless_storage_pool=args.diskless_storage_pool ) return self.handle_replies(args, replies) else: # normal create resource # check that node is given if not args.node_name: raise ArgumentError("resource create: too few arguments: Node name missing.") rscs = [ linstor.ResourceData( node_name, args.resource_definition_name, args.diskless, args.storage_pool[0] if args.storage_pool else None, args.node_id, args.layer_list, args.drbd_diskless, args.nvme_initiator, not args.inactive ) for node_name in args.node_name ] if current_state.__class__ == ResourceCreateTransactionState: print("{} resource(s) added to transaction".format(len(rscs))) current_state.rscs.extend(rscs) return ExitCode.OK else: replies = self._linstor.resource_create(rscs, async_flag) return self.handle_replies(args, replies) def make_available(self, args): replies = self.get_linstorapi().resource_make_available( args.node_name, args.resource_name, args.diskful, args.layer_list) return self.handle_replies(args, replies) def delete(self, args): async_flag = vars(args)["async"] # execute delete resource and flatten result list replies = [x for subx in args.node_name for x in self._linstor.resource_delete(subx, args.name, async_flag)] return self.handle_replies(args, replies) @classmethod def _filter_involved_resources( cls, res_response, on_nodes=None, only_inuse=False, only_diskless_inuse=False, min_replicas=None): """ Filter a RscRespWrapper object according to involved rules. 
        :param RscRespWrapper res_response: response resource wrapper where deleting resource is possible
        :param list[str] on_nodes: if resource is on any of these nodes
        :param bool only_inuse: only show resource bundles that are inuse
        :param bool only_diskless_inuse: only show resource bundles where the inuse node is diskless
        :param Optional[int] min_replicas: only show resource bundles that have less diskful replicas
        :return: The res_response argument with filtered resources
        :rtype: RscRespWrapper
        """
        res_del = set()  # indexes (into res_response.resources) of entries to drop
        # fast lookup of resource states, keyed by node-name + resource-name
        rsc_state_lkup = {x.node_name + x.name: x for x in res_response.resource_states}
        if on_nodes:
            # First pass: aggregate per resource name — deployed nodes, any in-use,
            # any diskless in-use, and the diskful replica count.
            look_up_nodes = {}
            for rsc in res_response.resources:
                in_use = rsc_state_lkup.get(rsc.node_name + rsc.name).in_use
                if rsc.name not in look_up_nodes:
                    look_up_nodes[rsc.name] = {
                        'nodes': [rsc.node_name],
                        'inuse': rsc_state_lkup.get(rsc.node_name + rsc.name).in_use,
                        'diskless-inuse': in_use and apiconsts.FLAG_DISKLESS in rsc.flags,
                        'replicas': 1 if apiconsts.FLAG_DISKLESS not in rsc.flags else 0
                    }
                else:
                    look_up_nodes[rsc.name]['nodes'].append(rsc.node_name)
                    look_up_nodes[rsc.name]['inuse'] |= in_use
                    look_up_nodes[rsc.name]['diskless-inuse'] |= in_use and apiconsts.FLAG_DISKLESS in rsc.flags
                    look_up_nodes[rsc.name]['replicas'] += 1 if apiconsts.FLAG_DISKLESS not in rsc.flags else 0
            # Second pass: mark every resource whose bundle fails one of the filters.
            for i, rsc in enumerate(res_response.resources):
                look_rsc = look_up_nodes[rsc.name]
                for n in on_nodes:
                    if n not in look_rsc['nodes']:
                        res_del.add(i)
                        break
                if only_inuse and not look_rsc['inuse']:
                    res_del.add(i)
                if only_diskless_inuse and not look_rsc['diskless-inuse']:
                    res_del.add(i)
                if min_replicas is not None and min_replicas <= look_rsc['replicas']:
                    res_del.add(i)
        # delete from the highest index downwards so earlier indexes stay valid
        res_del_list = list(res_del)
        res_del_list.sort()
        for ri in reversed(res_del_list):
            del res_response.resources[ri]
        return res_response

    def show(self, args, lstmsg):
        """
        Renders the resource list as a table.

        :param args: argparse options (groupby, faulty, color/pastable flags)
        :param RscRespWrapper lstmsg: resource list response to display
        :return: None
        """
        # ports live on the resource definition, so fetch definitions separately
        rsc_dfns = self._linstor.resource_dfn_list_raise(query_volume_definitions=False)
        rsc_dfn_map = {x.name: x for x in rsc_dfns.resource_definitions}
        rsc_state_lkup = {x.node_name + x.name: x for x in lstmsg.resource_states}
        tbl = linstor_client.Table(utf8=not args.no_utf8, colors=not args.no_color, pastable=args.pastable)
        for hdr in ResourceCommands._resource_headers:
            tbl.add_header(hdr)
        tbl.set_groupby(args.groupby if args.groupby else [ResourceCommands._resource_headers[0].name])
        for rsc in lstmsg.resources:
            rsc_dfn_port = ''
            if rsc.name in rsc_dfn_map:
                drbd_data = rsc_dfn_map[rsc.name].drbd_data
                rsc_dfn_port = drbd_data.port if drbd_data else ""
            marked_delete = apiconsts.FLAG_DELETE in rsc.flags
            rsc_state_obj = rsc_state_lkup.get(rsc.node_name + rsc.name)
            # defaults if no state info is available
            rsc_state_color = Color.YELLOW
            rsc_state = "Unknown"
            rsc_usage = ""
            rsc_usage_color = None
            if marked_delete:
                rsc_state_color = Color.RED
                rsc_state = "DELETING"
            elif apiconsts.FLAG_RSC_INACTIVE in rsc.flags:
                rsc_state = apiconsts.FLAG_RSC_INACTIVE
            elif rsc_state_obj:
                if rsc_state_obj.in_use is not None:
                    if rsc_state_obj.in_use:
                        rsc_usage_color = Color.GREEN
                        rsc_usage = "InUse"
                    else:
                        rsc_usage = "Unused"
                # derive the resource state from its volume states; stop at the
                # first volume that yields an explicit (colored) state
                for vlm in rsc.volumes:
                    vlm_state = VolumeCommands.get_volume_state(rsc_state_obj.volume_states, vlm.number) \
                        if rsc_state_obj else None
                    rsc_state, rsc_state_color = VolumeCommands.volume_state_cell(vlm_state, rsc.flags, vlm.flags)
                    if apiconsts.FLAG_EVACUATE in rsc.flags:
                        rsc_state += ", Evacuating"
                    if rsc_state_color is not None:
                        break
            # check if connections failed
            conns_col = ""
            conns_col_entries = None
            # connection status is only reported by API >= 1.0.15
            if rsc_state != "Unknown" and not self.get_linstorapi().api_version_smaller("1.0.15"):
                failed_conns = {}
                if rsc.layer_data.drbd_resource is not None:
                    connections = rsc.layer_data.drbd_resource.connections
                    for k, v in connections.items():
                        if not v.connected:
                            # group failing peers by their failure message
                            if v.message not in failed_conns:
                                failed_conns[v.message] = []
                            failed_conns[v.message].append(k)
                    conns_col_entries = ["{s}({n})".format(s=k, n=",".join(v)) for k, v in failed_conns.items()]
                    conns_col = tbl.color_cell(",".join(conns_col_entries), Color.RED) if conns_col_entries else "Ok"
            show_row = True
            if args.faulty:
                # with --faulty only show rows with a problem state or a failed connection
                show_row = rsc_state_color is not None or not (conns_col == 'Ok' or conns_col == "")
            if show_row:
                tbl.add_row([
                    rsc.name,
                    rsc.node_name,
                    rsc_dfn_port,
                    tbl.color_cell(rsc_usage, rsc_usage_color) if rsc_usage_color else rsc_usage,
                    conns_col,
                    tbl.color_cell(rsc_state, Color.RED if conns_col_entries else rsc_state_color),
                    str(rsc.create_datetime)[:19] if rsc.create_datetime else ""
                ])
        tbl.show()

    def list(self, args):
        # Lists resources, optionally filtered by nodes/resources/properties.
        lstmsg = self._linstor.resource_list(
            filter_by_nodes=args.nodes,
            filter_by_resources=args.resources,
            filter_by_props=args.props)
        return self.output_list(args, lstmsg, self.show)

    def list_volumes(self, args):
        # Lists volumes; rendering is delegated to VolumeCommands.
        lstmsg = self._linstor.volume_list(args.nodes, args.storage_pools, args.resources)
        return self.output_list(args, lstmsg, VolumeCommands.show_volumes)

    def show_involved(self, args, lstmsg):
        """
        Filters involved resources and shows them with the normal resource list function.

        :param args: argparse options
        :param linstor.responses.ResourceResponse lstmsg: resource list REST answer from controller.
        :return: None
        """
        # neutralize display options that would interfere with the filtered view
        args.groupby = None
        args.all = True
        args.faulty = False
        rsc_resp_wrp = RscRespWrapper(lstmsg)
        self._filter_involved_resources(rsc_resp_wrp, [args.node], args.inuse, args.diskless_inuse, args.min_diskful)
        self.show(args, rsc_resp_wrp)

    def involved(self, args):
        # Full resource list, filtered down to "involved" resources on output.
        lstmsg = self._linstor.resource_list()
        return self.output_list(args, lstmsg, self.show_involved)

    @classmethod
    def _props_show(cls, args, lstmsg):
        # Collects the property maps of all resources in the response.
        result = []
        if lstmsg:
            for rsc in lstmsg.resources:
                result.append(rsc.properties)
        return result

    def print_props(self, args):
        lstmsg = self._linstor.resource_list([args.node_name], [args.resource_name])
        return self.output_props_list(args, lstmsg, self._props_show)

    def set_props(self, args):
        args = self._attach_aux_prop(args)
        mod_prop_dict = Commands.parse_key_value_pairs([(args.key, args.value)])
        replies = self._linstor.resource_modify(
            args.node_name,
            args.name,
            mod_prop_dict['pairs'],
            mod_prop_dict['delete']
        )
        return self.handle_replies(args, replies)

    def drbd_peer_opts(self, args):
        a = DrbdOptions.filter_new(args)
        # strip positional arguments so only DRBD options remain
        del a['resource-name']
        del a['node-a']
        del a['node-b']
        mod_props, del_props = DrbdOptions.parse_opts(a, self.CONN_OBJECT_NAME)
        replies = self._linstor.resource_conn_modify(
            args.resource_name,
            args.node_a,
            args.node_b,
            mod_props,
            del_props
        )
        return self.handle_replies(args, replies)

    def toggle_disk(self, args):
        # 'async' is a Python keyword, so it can only be read via vars()
        async_flag = vars(args)["async"]
        if args.diskless and args.migrate_from:
            args.parser.error("--migrate-from cannot be used with --diskless")
        replies = self._linstor.resource_toggle_disk(
            args.node_name,
            args.name,
            storage_pool=args.storage_pool,
            migrate_from=args.migrate_from,
            diskless=args.diskless,
            async_msg=async_flag
        )
        return self.handle_replies(args, replies)

    def transactional_create_begin(self, args):
        # Opens a client-side transaction state that collects resource creations.
        return self._state_service.enter_state(
            ResourceCreateTransactionState(args.terminate_on_error),
            verbose=args.verbose
        )

    def transactional_create_abort(self, _):
        # Discards the collected creations without sending anything.
        self._state_service.pop_state()
        return ExitCode.OK

    def transactional_create_commit(self, args):
        # 'async' is a Python keyword, so it can only be read via vars()
        async_flag = vars(args)["async"]
        # send all resources collected during the transaction in one call
        replies = self._linstor.resource_create(self._state_service.get_state().rscs, async_flag)
        self._state_service.pop_state()
        return self.handle_replies(args, replies)

    def activate(self, args):
        replies = self.get_linstorapi().resource_activate(
            node_name=args.node_name,
            rsc_name=args.resource_name
        )
        return self.handle_replies(args, replies)

    def deactivate(self, args):
        replies = self.get_linstorapi().resource_deactivate(
            node_name=args.node_name,
            rsc_name=args.resource_name
        )
        return self.handle_replies(args, replies)
class ExosCommands(Commands):
    """Client commands for managing EXOS enclosures ('exos' command tree)."""

    _exos_enclosure_headers = [
        linstor_client.TableHeader("Enclosure"),
        linstor_client.TableHeader("Ctrl A IP"),
        linstor_client.TableHeader("Ctrl B IP"),
        linstor_client.TableHeader("Health"),
        linstor_client.TableHeader("Health Reason")
    ]

    _exos_map_headers = [
        linstor_client.TableHeader("Node"),
        linstor_client.TableHeader("Enclosure"),
        linstor_client.TableHeader("Connected Ports")
    ]

    _exos_defaults_headers = [
        linstor_client.TableHeader("Property"),
        linstor_client.TableHeader("Value")
    ]

    def __init__(self):
        super(ExosCommands, self).__init__()

    # Subcommand name holders for commands that have no generic equivalent
    class GetDefaults:
        LONG = 'get-defaults'

    class SetDefaults:
        LONG = 'set-defaults'

    class Events:
        LONG = 'events'
        SHORT = 'e'

    class Exec:
        LONG = 'exec'

    class Map:
        LONG = 'map'

    def setup_commands(self, parser):
        """Registers the 'exos' command tree and all its subcommands."""
        # Exos subcommands
        subcmds = [
            Commands.Subcommands.Create,
            Commands.Subcommands.List,
            Commands.Subcommands.Delete,
            Commands.Subcommands.Modify,
            ExosCommands.SetDefaults,
            ExosCommands.GetDefaults,
            ExosCommands.Events,
            ExosCommands.Map,
            ExosCommands.Exec,
        ]

        exos_parser = parser.add_parser(
            Commands.EXOS,
            formatter_class=argparse.RawTextHelpFormatter,
            description='EXOS subcommands')
        exos_subp = exos_parser.add_subparsers(
            title='EXOS commands',
            metavar='',
            description=Commands.Subcommands.generate_desc(subcmds))

        # get defaults
        p_get_dflts = exos_subp.add_parser(
            ExosCommands.GetDefaults.LONG,
            description='Lists the default configuration')
        p_get_dflts.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output')
        p_get_dflts.set_defaults(func=self.get_dflts)

        # set defaults
        p_set_dftls = exos_subp.add_parser(
            ExosCommands.SetDefaults.LONG,
            description='Sets the default configuration for all enclosures')
        p_set_dftls.add_argument('--username', type=str, help='Default username')
        p_set_dftls.add_argument(
            '--username-env',
            type=str,
            help='Default environment variable containing the username')
        # nargs='?' + const='' lets '--password' without a value trigger an
        # interactive prompt in _get_password
        p_set_dftls.add_argument('--password', type=str, nargs='?', help='Default password',
                                 action='store', const='')
        p_set_dftls.add_argument(
            '--password-env',
            type=str,
            help='Default environment variable containing the password')
        p_set_dftls.add_argument('--unset-username', action='store_true', help='Unsets the default username')
        p_set_dftls.add_argument('--unset-username-env', action='store_true', help='Unsets the default username-env')
        p_set_dftls.add_argument('--unset-password-env', action='store_true', help='Unsets the default password-env')
        p_set_dftls.add_argument('--unset-password', action='store_true', help='Unsets the default password')
        p_set_dftls.set_defaults(func=self.set_dflts)

        # create enclosure
        p_new_encl = exos_subp.add_parser(
            Commands.Subcommands.Create.LONG,
            aliases=[Commands.Subcommands.Create.SHORT],
            description='Create a new EXOS enclosure')
        self._add_create_mod_args(p_new_encl, True)
        p_new_encl.set_defaults(func=self.create_encl)

        # modify enclosure
        p_mod_encl = exos_subp.add_parser(
            Commands.Subcommands.Modify.LONG,
            aliases=[Commands.Subcommands.Modify.SHORT],
            description='Modifies the given EXOS enclosure')
        self._add_create_mod_args(p_mod_encl, False)
        p_mod_encl.set_defaults(func=self.modify_encl)

        # delete enclosure
        p_del_encl = exos_subp.add_parser(
            Commands.Subcommands.Delete.LONG,
            aliases=[Commands.Subcommands.Delete.SHORT],
            description='Deletes the given EXOS enclosure')
        p_del_encl.add_argument('name', help='Name of the enclosure', type=str)
        p_del_encl.set_defaults(func=self.delete_encl)

        # list enclosures
        p_list_encl = exos_subp.add_parser(
            Commands.Subcommands.List.LONG,
            aliases=[Commands.Subcommands.List.SHORT],
            # FIX: description was copy-pasted from the delete subcommand
            description='Lists all EXOS enclosures')
        p_list_encl.add_argument('--nocache', help='Force recaching', action='store_true')
        p_list_encl.add_argument('-p', '--pastable', action='store_true', help='Generate pastable output')
        p_list_encl.set_defaults(func=self.list_encl)

        # list enclosure events
        p_events_encl = exos_subp.add_parser(
            ExosCommands.Events.LONG,
            aliases=[ExosCommands.Events.SHORT],
            description='Lists events from both given EXOS controllers')
        p_events_encl.add_argument('name', help='Name of the enclosure', type=str)
        p_events_encl.add_argument('--count', help="Fetch the last X events (default 20)", type=int)
        p_events_encl.set_defaults(func=self.list_encl_events)

        # exec
        p_exec = exos_subp.add_parser(
            ExosCommands.Exec.LONG,
            description="Passthrough to the EXOS API")
        p_exec.add_argument('name', help='Name of the enclosure', type=str)
        p_exec.add_argument('exos_cmd', nargs='+', type=str)
        p_exec.set_defaults(func=self.exos_exec)

        # map
        p_map = exos_subp.add_parser(
            ExosCommands.Map.LONG,
            description='Lists to which Exos controller.ports each Linstor \
node is connected')
        p_map.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output')
        p_map.set_defaults(func=self.exos_map)

        self.check_subcommands(exos_subp, subcmds)

    def _add_create_mod_args(self, sub_parser, create=True):
        """Adds the shared create/modify arguments.

        For 'create' the controller IPs are positional; for 'modify' they
        become optional '--ctrl-*-ip' flags.
        """
        sub_parser.add_argument('name', help='Name of the enclosure', type=str)
        sub_parser.add_argument(
            'ctrl_a_ip' if create else '--ctrl-a-ip',
            help='IP address of the first EXOS controller',
            type=str)
        sub_parser.add_argument(
            'ctrl_b_ip' if create else '--ctrl-b-ip',
            help='IP address of the second EXOS controller',
            nargs='?' if create else 1,
            type=str)
        sub_parser.add_argument('--username', help='Username for this EXOS enclosure', type=str)
        sub_parser.add_argument(
            '--username-env',
            help='Environment variable containing the username for this EXOS \
enclosure',
            type=str)
        sub_parser.add_argument('--password', help='Password for this EXOS enclosure', nargs='?', type=str)
        sub_parser.add_argument(
            '--password-env',
            # FIX: help text said "username"; this option carries the password
            help='Environment variable containing the password for this EXOS \
enclosure',
            type=str)

    def get_dflts(self, args):
        exos_dflt = self.get_linstorapi().exos_get_defaults()
        return self.output_list(args, exos_dflt, self.show_exos_dflts)

    @classmethod
    def show_exos_dflts(cls, args, exos_dflts):
        """Renders the EXOS default configuration as a property/value table."""
        tbl = linstor_client.Table(utf8=not args.no_utf8, colors=not args.no_color, pastable=args.pastable)
        header = list(cls._exos_defaults_headers)
        for hdr in header:
            tbl.add_header(hdr)
        tbl.add_row(cls._get_row("Username", exos_dflts.username))
        tbl.add_row(cls._get_row("UsernameEnv", exos_dflts.username_env))
        tbl.add_row(cls._get_row("Password", exos_dflts.password))
        tbl.add_row(cls._get_row("PasswordEnv", exos_dflts.password_env))
        tbl.show()

    @classmethod
    def _get_row(cls, key, value):
        # unset values are rendered as a placeholder
        return [key, value if value else '-- Not set --']

    def set_dflts(self, args):
        unset = []
        if args.unset_username:
            unset += ["username"]
        if args.unset_username_env:
            unset += ["usernameEnv"]
        if args.unset_password:
            unset += ["password"]
        if args.unset_password_env:
            unset += ["passwordEnv"]
        replies = self.get_linstorapi().exos_set_defaults(
            args.username, args.username_env,
            self._get_password(args), args.password_env,
            unset)
        return self.handle_replies(args, replies)

    def create_encl(self, args):
        replies = self.get_linstorapi().exos_enclosure_create(
            args.name,
            args.ctrl_a_ip,
            args.ctrl_b_ip if args.ctrl_b_ip else None,
            args.username,
            args.username_env,
            self._get_password(args),
            args.password_env)
        return self.handle_replies(args, replies)

    def modify_encl(self, args):
        replies = self.get_linstorapi().exos_enclosure_modify(
            args.name,
            args.ctrl_a_ip,
            args.ctrl_b_ip if args.ctrl_b_ip else None,
            args.username,
            args.username_env,
            self._get_password(args),
            args.password_env)
        return self.handle_replies(args, replies)

    def delete_encl(self, args):
        replies = self.get_linstorapi().exos_enclosure_delete(args.name)
        return self.handle_replies(args, replies)

    def list_encl(self, args):
        list_msg = self.get_linstorapi().exos_list_enclosures(args.nocache)
        return self.output_list(args, list_msg, self.show_enclosures)

    @classmethod
    def show_enclosures(cls, args, list_msg):
        """Renders the enclosure list; anything not reported 'OK' shows red."""
        tbl = linstor_client.Table(utf8=not args.no_utf8, colors=not args.no_color, pastable=args.pastable)
        header = list(cls._exos_enclosure_headers)
        for hdr in header:
            tbl.add_header(hdr)
        tbl.set_groupby([tbl.header_name(0)])
        health_colors_dict = {"OK": Color.GREEN}
        for encl in list_msg.exos_enclosures:
            if encl.health in health_colors_dict:
                health_color = health_colors_dict[encl.health]
            else:
                health_color = Color.RED
            row = [
                encl.name,
                encl.ctrl_a_ip if encl.ctrl_a_ip else "-",
                encl.ctrl_b_ip if encl.ctrl_b_ip else "-",
                tbl.color_cell(encl.health, health_color),
                encl.health_reason if encl.health_reason else ""
            ]
            tbl.add_row(row)
        tbl.show()

    def list_encl_events(self, args):
        list_msg = self.get_linstorapi().exos_enclosure_events(args.name, args.count)
        return self.output_list(args, list_msg, self.show_events)

    @classmethod
    def show_events(cls, args, list_msg):
        """Prints enclosure events, suppressing the EXOS boilerplate fields."""
        for i, event in enumerate(list_msg.exos_events):
            if i > 0:
                print("------")
            print("{}, {}, {}".format(event.severity, event.event_id, event.time_stamp))
            print("Message: {}".format(event.message))
            if event.additional_information != "None.":
                print("Additional information: {}".format(event.additional_information))
            if event.recommended_action != "- No action is required.":
                print("Recommended Action: {}".format(event.recommended_action))

    def exos_exec(self, args):
        replies = self.get_linstorapi().exos_exec(args.name, args.exos_cmd)
        if replies:
            print(json.dumps(replies[0].data_v1))
        return ExitCode.OK

    def exos_map(self, args):
        list_msg = self.get_linstorapi().exos_map()
        return self.output_list(args, list_msg, self.show_map)

    @classmethod
    def show_map(cls, args, list_msg):
        """Renders the node ↔ enclosure connection map."""
        tbl = linstor_client.Table(utf8=not args.no_utf8, colors=not args.no_color, pastable=args.pastable)
        header = list(cls._exos_map_headers)
        for hdr in header:
            tbl.add_header(hdr)
        tbl.set_groupby([tbl.header_name(0)])
        for con_map in list_msg.exos_connections:
            row = [
                con_map.node_name,
                con_map.enclosure_name,
                ", ".join(con_map.connections)
            ]
            tbl.add_row(row)
        tbl.show()

    def _get_password(self, args):
        """Resolves the password argument.

        None means the option was absent; an empty string (bare '--password')
        prompts interactively; any other value is used as-is.
        """
        if args.password is None:
            return None
        elif args.password:
            return args.password
        else:
            return getpass.getpass("Password: ")
class ResourceGroupCommands(Commands):
    """Client commands for the 'resource-group' ('rg') command tree."""

    OBJECT_NAME = 'resource-definition'  # resource-definition is used here for properties

    _rsc_grp_headers = [
        linstor_client.TableHeader("ResourceGroup"),
        linstor_client.TableHeader("SelectFilter"),
        linstor_client.TableHeader("VlmNrs"),
        linstor_client.TableHeader("Description")
    ]

    def __init__(self):
        super(ResourceGroupCommands, self).__init__()

    def setup_commands(self, parser):
        """Registers the 'resource-group' command tree and its subcommands."""
        subcmds = [
            Commands.Subcommands.Create,
            Commands.Subcommands.Modify,
            Commands.Subcommands.List,
            Commands.Subcommands.Delete,
            Commands.Subcommands.SetProperty,
            Commands.Subcommands.ListProperties,
            Commands.Subcommands.DrbdOptions,
            Commands.Subcommands.Spawn,
            Commands.Subcommands.QueryMaxVlmSize,
            Commands.Subcommands.Adjust
        ]

        # Resource group subcommands
        res_grp_parser = parser.add_parser(
            Commands.RESOURCE_GRP,
            aliases=["rg"],
            formatter_class=argparse.RawTextHelpFormatter,
            description="Resource group subcommands")

        res_grp_subp = res_grp_parser.add_subparsers(
            title="resource group subcommands",
            metavar="",
            description=Commands.Subcommands.generate_desc(subcmds))

        # ------------ CREATE START
        p_new_res_grp = res_grp_subp.add_parser(
            Commands.Subcommands.Create.LONG,
            aliases=[Commands.Subcommands.Create.SHORT],
            description='Defines a Linstor resource group for use with linstor.')
        p_new_res_grp.add_argument('-d', '--description', help="Description for the resource group.")
        self.add_auto_select_argparse_arguments(p_new_res_grp, use_place_count=True)
        p_new_res_grp.add_argument('name', type=str, help='Name of the resource group.')
        p_new_res_grp.set_defaults(func=self.create)
        # ------------ CREATE END

        # ------------ MODIFY START
        p_mod_res_grp = res_grp_subp.add_parser(
            Commands.Subcommands.Modify.LONG,
            aliases=[Commands.Subcommands.Modify.SHORT],
            description='Modifies a Linstor resource group')
        p_mod_res_grp.add_argument('-d', '--description', help="Description for the resource group.")
        self.add_auto_select_argparse_arguments(p_mod_res_grp, use_place_count=True)
        p_mod_res_grp.add_argument('name',
                                   help='Name of the resource group').completer = self.resource_grp_completer
        p_mod_res_grp.set_defaults(func=self.modify)
        # ------------ MODIFY END

        # ------------ DELETE START
        p_rm_res_grp = res_grp_subp.add_parser(
            Commands.Subcommands.Delete.LONG,
            aliases=[Commands.Subcommands.Delete.SHORT],
            description=" Removes a resource group from the linstor cluster.")
        p_rm_res_grp.add_argument('name',
                                  help='Name of the resource group to delete'
                                  ).completer = self.resource_grp_completer
        p_rm_res_grp.set_defaults(func=self.delete)
        # ------------ DELETE END

        # ------------ LIST START
        rsc_grp_groupby = [x.name.lower() for x in self._rsc_grp_headers]
        rsc_grp_group_completer = Commands.show_group_completer(rsc_grp_groupby, "groupby")

        p_lrscgrps = res_grp_subp.add_parser(
            Commands.Subcommands.List.LONG,
            aliases=[Commands.Subcommands.List.SHORT],
            description='Prints a list of all resource groups known to '
                        'linstor. By default, the list is printed as a human readable table.')
        p_lrscgrps.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output')
        p_lrscgrps.add_argument(
            '-g', '--groupby',
            nargs='+',
            choices=rsc_grp_groupby,
            type=str.lower).completer = rsc_grp_group_completer
        p_lrscgrps.add_argument('-r', '--resource-groups', nargs='+', type=str,
                                help='Filter by list of resource groups'
                                ).completer = self.resource_grp_completer
        p_lrscgrps.add_argument('--props', nargs='+', type=str, help='Filter list by object properties')
        p_lrscgrps.set_defaults(func=self.list)
        # ------------ LIST END

        # ------------ LISTPROPS START
        p_sp = res_grp_subp.add_parser(
            Commands.Subcommands.ListProperties.LONG,
            aliases=[Commands.Subcommands.ListProperties.SHORT],
            description="Prints all properties of the given resource group.")
        p_sp.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output')
        p_sp.add_argument(
            'name',
            help="Resource group for which to print the properties"
        ).completer = self.resource_grp_completer
        p_sp.set_defaults(func=self.print_props)
        # ------------ LISTPROPS END

        # ------------ SETPROPS START
        p_setprop = res_grp_subp.add_parser(
            Commands.Subcommands.SetProperty.LONG,
            aliases=[Commands.Subcommands.SetProperty.SHORT],
            formatter_class=argparse.RawTextHelpFormatter,
            description='Sets properties for the given resource group.')
        p_setprop.add_argument('name', type=str,
                               help='Name of the resource group').completer = self.resource_grp_completer
        Commands.add_parser_keyvalue(p_setprop, self.OBJECT_NAME)
        p_setprop.set_defaults(func=self.set_props)
        # ------------ SETPROPS END

        # ------------ SETDRBDOPTS START
        p_drbd_opts = res_grp_subp.add_parser(
            Commands.Subcommands.DrbdOptions.LONG,
            aliases=[Commands.Subcommands.DrbdOptions.SHORT],
            description=DrbdOptions.description("resource"))
        p_drbd_opts.add_argument(
            'name',
            type=str,
            help="Resource group name").completer = self.resource_grp_completer
        DrbdOptions.add_arguments(p_drbd_opts, self.OBJECT_NAME)
        p_drbd_opts.set_defaults(func=self.set_drbd_opts)
        # ------------ SETDRBDOPTS END

        # ------------ SPAWN START
        p_spawn = res_grp_subp.add_parser(
            Commands.Subcommands.Spawn.LONG,
            aliases=[Commands.Subcommands.Spawn.SHORT],
            description="Spawns new resource with the settings of the resource group.")
        p_spawn.add_argument('-p', '--partial', action='store_true', help="Allow mismatching volume sizes.")
        p_spawn.add_argument(
            '-d', '--definition-only', action='store_true',
            help="Do not auto-place resource, only create definitions")
        p_spawn.add_argument('resource_group_name',
                             help="Resource group name to spawn from."
                             ).completer = self.resource_grp_completer
        p_spawn.add_argument(
            'resource_definition_name',
            help="New Resource definition name to create. Will be ignored if EXTERNAL_NAME is set.")
        p_spawn.add_argument('--external-name', type=str, help="User specified name")
        p_spawn.add_argument('volume_sizes', nargs='*')
        p_spawn.set_defaults(func=self.spawn)
        # ------------ SPAWN END

        # ------------ QMVS START
        p_qmvs = res_grp_subp.add_parser(
            Commands.Subcommands.QueryMaxVlmSize.LONG,
            aliases=[Commands.Subcommands.QueryMaxVlmSize.SHORT],
            description="Queries maximum volume size for a given resource-group")
        p_qmvs.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output')
        p_qmvs.add_argument(
            'resource_group_name',
            help="Resource group name to read auto-config settings from"
        ).completer = self.resource_grp_completer
        p_qmvs.set_defaults(func=self.qmvs)
        # ------------ QMVS END

        # ------------ ADJUST START
        p_adjust = res_grp_subp.add_parser(
            Commands.Subcommands.Adjust.LONG,
            aliases=[Commands.Subcommands.Adjust.SHORT],
            description="Adjusts all resource-definition of the given resource-group.\n"
                        "CAUTION: This operation might take a long time and even exceed the default 5 min timeout!")
        p_adjust.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output')
        p_adjust.add_argument(
            'resource_group_name',
            nargs="?",
            default=None,
            help="Resource group to adjust. If omitted, all resource groups will be adjusted"
        ).completer = self.resource_grp_completer
        # p_adjust.add_argument('-o', '--overprovision',
        #                       help='Multiplier of thin storage pool\'s free space. Default: 2.0')
        p_adjust.set_defaults(func=self.adjust)
        # ------------ ADJUST END

        self.check_subcommands(res_grp_subp, subcmds)

    def create(self, args):
        """Creates a resource group from the auto-select argparse options."""
        replies = self._linstor.resource_group_create(
            args.name,
            description=args.description,
            place_count=args.place_count,
            storage_pool=args.storage_pool,
            do_not_place_with=self.prepare_argparse_list(args.do_not_place_with),
            do_not_place_with_regex=args.do_not_place_with_regex,
            # replicas-on-* properties live in the auxiliary namespace
            replicas_on_same=self.prepare_argparse_list(
                args.replicas_on_same, linstor.consts.NAMESPC_AUXILIARY + '/'),
            replicas_on_different=self.prepare_argparse_list(
                args.replicas_on_different, linstor.consts.NAMESPC_AUXILIARY + '/'),
            diskless_on_remaining=self.parse_diskless_on_remaining(args),
            layer_list=self.prepare_argparse_list(args.layer_list),
            provider_list=self.prepare_argparse_list(args.providers),
            diskless_storage_pool=self.prepare_argparse_list(args.diskless_storage_pool))
        return self.handle_replies(args, replies)

    def modify(self, args):
        """Modifies an existing resource group; property changes go via set_props."""
        replies = self._linstor.resource_group_modify(
            args.name,
            description=args.description,
            place_count=args.place_count,
            storage_pool=args.storage_pool,
            do_not_place_with=self.prepare_argparse_list(args.do_not_place_with),
            do_not_place_with_regex=args.do_not_place_with_regex,
            replicas_on_same=self.prepare_argparse_list(
                args.replicas_on_same, linstor.consts.NAMESPC_AUXILIARY + '/'),
            replicas_on_different=self.prepare_argparse_list(
                args.replicas_on_different, linstor.consts.NAMESPC_AUXILIARY + '/'),
            diskless_on_remaining=self.parse_diskless_on_remaining(args),
            layer_list=args.layer_list,
            provider_list=args.providers,
            property_dict={},
            delete_props=[],
            diskless_storage_pool=self.prepare_argparse_list(args.diskless_storage_pool))
        return self.handle_replies(args, replies)

    def delete(self, args):
        replies = self._linstor.resource_group_delete(args.name)
        return self.handle_replies(args, replies)

    def show(self, args, lstmsg):
        """Renders the resource group list as a table."""
        tbl = linstor_client.Table(utf8=not args.no_utf8, colors=not args.no_color, pastable=args.pastable)
        for hdr in self._rsc_grp_headers:
            tbl.add_header(hdr)

        rsc_grps = lstmsg  # type: ResourceGroupResponse

        tbl.set_groupby(args.groupby if args.groupby else [tbl.header_name(0)])

        for rsc_grp in rsc_grps.resource_groups:
            # one extra API call per group to resolve its volume numbers
            vlm_grps = self.get_linstorapi().volume_group_list_raise(rsc_grp.name).volume_groups
            row = [
                rsc_grp.name,
                str(rsc_grp.select_filter),
                ",".join([str(x.number) for x in vlm_grps]),
                rsc_grp.description
            ]
            tbl.add_row(row)
        tbl.show()

    def list(self, args):
        lstmsg = [self._linstor.resource_group_list_raise(args.resource_groups, filter_by_props=args.props)]
        return self.output_list(args, lstmsg, self.show)

    @classmethod
    def _props_show(cls, args, lstmsg):
        # Collects the property maps of all resource groups in the response.
        result = []
        if lstmsg:
            for rsc_grp in lstmsg.resource_groups:
                result.append(rsc_grp.properties)
        return result

    def print_props(self, args):
        lstmsg = [self._linstor.resource_group_list_raise(filter_by_resource_groups=[args.name])]
        return self.output_props_list(args, lstmsg, self._props_show)

    def set_props(self, args):
        args = self._attach_aux_prop(args)
        mod_prop_dict = Commands.parse_key_value_pairs([(args.key, args.value)])
        replies = self._linstor.resource_group_modify(
            args.name,
            property_dict=mod_prop_dict['pairs'],
            delete_props=mod_prop_dict['delete'])
        return self.handle_replies(args, replies)

    def set_drbd_opts(self, args):
        a = DrbdOptions.filter_new(args)
        del a['name']  # remove resource group key
        mod_props, del_props = DrbdOptions.parse_opts(a, self.OBJECT_NAME)
        replies = self._linstor.resource_group_modify(args.name,
                                                      property_dict=mod_props,
                                                      delete_props=del_props)
        return self.handle_replies(args, replies)

    def spawn(self, args):
        replies = self.get_linstorapi().resource_group_spawn(
            args.resource_group_name,
            args.resource_definition_name,
            vlm_sizes=args.volume_sizes,
            partial=args.partial,
            definitions_only=args.definition_only,
            external_name=args.external_name)
        return self.handle_replies(args, replies)

    def qmvs(self, args):
        replies = self.get_linstorapi().resource_group_qmvs(args.resource_group_name)
        # error replies are shown as replies, successful ones as a table
        api_responses = self.get_linstorapi().filter_api_call_response(replies)
        if api_responses:
            return self.handle_replies(args, api_responses)
        return self.output_list(args, replies, self._show_query_max_volume)

    def adjust(self, args):
        replies = self.get_linstorapi().resource_group_adjust(
            args.resource_group_name
            # args.overprovision
        )
        return self.handle_replies(args, replies)
class VolumeGroupCommands(Commands):
    """Client commands for the 'volume-group' ('vg') command tree."""

    OBJECT_NAME = 'volume-definition'

    _vlm_grp_headers = [linstor_client.TableHeader("VolumeNr")]

    def __init__(self):
        super(VolumeGroupCommands, self).__init__()

    def setup_commands(self, parser):
        """Registers the 'volume-group' command tree and its subcommands."""
        subcmds = [
            Commands.Subcommands.Create,
            Commands.Subcommands.List,
            Commands.Subcommands.Delete,
            Commands.Subcommands.SetProperty,
            Commands.Subcommands.ListProperties,
            Commands.Subcommands.DrbdOptions
        ]

        # volume group subcommands
        vlm_grp_parser = parser.add_parser(
            Commands.VOLUME_GRP,
            aliases=["vg"],
            formatter_class=argparse.RawTextHelpFormatter,
            # FIX: said "Resource definition subcommands" (copy-paste error)
            description="Volume group subcommands")

        vlm_grp_subp = vlm_grp_parser.add_subparsers(
            # FIX: said "resource definition subcommands" (copy-paste error)
            title="volume group subcommands",
            metavar="",
            description=Commands.Subcommands.generate_desc(subcmds))

        # ------------ CREATE START
        p_new_vlm_grp = vlm_grp_subp.add_parser(
            Commands.Subcommands.Create.LONG,
            aliases=[Commands.Subcommands.Create.SHORT],
            description='Defines a Linstor volume group for use with linstor.')
        p_new_vlm_grp.add_argument('name', type=str, help='Name of the resource group.')
        p_new_vlm_grp.add_argument('-n', '--vlmnr', type=int)
        p_new_vlm_grp.set_defaults(func=self.create)
        # ------------ CREATE END

        # ------------ DELETE START
        p_rm_vlm_grp = vlm_grp_subp.add_parser(
            Commands.Subcommands.Delete.LONG,
            aliases=[Commands.Subcommands.Delete.SHORT],
            description=" Removes a volume group from the linstor cluster.")
        p_rm_vlm_grp.add_argument('name',
                                  help='Name of the resource group').completer = self.resource_grp_completer
        p_rm_vlm_grp.add_argument('volume_nr', type=int, help="Volume number to delete.")
        p_rm_vlm_grp.set_defaults(func=self.delete)
        # ------------ DELETE END

        # ------------ LIST START
        vlm_grp_groupby = [x.name for x in self._vlm_grp_headers]
        vlm_grp_group_completer = Commands.show_group_completer(vlm_grp_groupby, "groupby")

        p_lvlmgrps = vlm_grp_subp.add_parser(
            Commands.Subcommands.List.LONG,
            aliases=[Commands.Subcommands.List.SHORT],
            description='Prints a list of all volume groups for a resource group known to '
                        'linstor. By default, the list is printed as a human readable table.')
        p_lvlmgrps.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output')
        p_lvlmgrps.add_argument('-g', '--groupby', nargs='+',
                                choices=vlm_grp_groupby).completer = vlm_grp_group_completer
        p_lvlmgrps.add_argument('-R', '--resources', nargs='+', type=str,
                                help='Filter by list of resource groups'
                                ).completer = self.resource_grp_completer
        p_lvlmgrps.add_argument('name', help="Resource group name.")
        p_lvlmgrps.set_defaults(func=self.list)
        # ------------ LIST END

        # ------------ LISTPROPS START
        p_sp = vlm_grp_subp.add_parser(
            Commands.Subcommands.ListProperties.LONG,
            aliases=[Commands.Subcommands.ListProperties.SHORT],
            description="Prints all properties of the given volume group.")
        p_sp.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output')
        p_sp.add_argument(
            'name',
            help="Resource group for which to print the properties"
        ).completer = self.resource_grp_completer
        p_sp.add_argument('volume_nr', type=int, help="Volume number")
        p_sp.set_defaults(func=self.print_props)
        # ------------ LISTPROPS END

        # ------------ SETPROPS START
        p_setprop = vlm_grp_subp.add_parser(
            Commands.Subcommands.SetProperty.LONG,
            aliases=[Commands.Subcommands.SetProperty.SHORT],
            formatter_class=argparse.RawTextHelpFormatter,
            description='Sets properties for the given volume group.')
        p_setprop.add_argument('name', type=str, help='Name of the resource group')
        p_setprop.add_argument('volume_nr', type=int, help="Volume number")
        Commands.add_parser_keyvalue(p_setprop, self.OBJECT_NAME)
        p_setprop.set_defaults(func=self.set_props)
        # ------------ SETPROPS END

        # ------------ SETDRBDOPTS START
        p_drbd_opts = vlm_grp_subp.add_parser(
            Commands.Subcommands.DrbdOptions.LONG,
            aliases=[Commands.Subcommands.DrbdOptions.SHORT],
            description=DrbdOptions.description("resource"))
        p_drbd_opts.add_argument(
            'name',
            type=str,
            help="Resource group name").completer = self.resource_grp_completer
        p_drbd_opts.add_argument('volume_nr', type=int, help="Volume number")
        DrbdOptions.add_arguments(p_drbd_opts, self.OBJECT_NAME)
        p_drbd_opts.set_defaults(func=self.set_drbd_opts)
        # ------------ SETDRBDOPTS END

        self.check_subcommands(vlm_grp_subp, subcmds)

    def create(self, args):
        replies = self._linstor.volume_group_create(args.name, volume_nr=args.vlmnr)
        return self.handle_replies(args, replies)

    def delete(self, args):
        replies = self._linstor.volume_group_delete(args.name, args.volume_nr)
        return self.handle_replies(args, replies)

    @classmethod
    def show(cls, args, lstmsg):
        """Renders the volume group list (volume numbers) as a table."""
        vlm_grps = lstmsg  # type: linstor.responses.VolumeGroupResponse
        tbl = linstor_client.Table(utf8=not args.no_utf8, colors=not args.no_color, pastable=args.pastable)
        for hdr in cls._vlm_grp_headers:
            tbl.add_header(hdr)

        tbl.set_groupby(args.groupby if args.groupby else [tbl.header_name(0)])

        for vlm_grp in vlm_grps.volume_groups:
            tbl.add_row([str(vlm_grp.number)])
        tbl.show()

    def list(self, args):
        lstmsg = [self._linstor.volume_group_list_raise(args.name)]
        return self.output_list(args, lstmsg, self.show)

    @classmethod
    def _props_list(cls, args, lstmsg):
        """
        Returns the property map of the volume group matching args.volume_nr.

        :param args: argparse options (volume_nr)
        :param linstor.responses.VolumeGroupResponse lstmsg:
        :return: list with at most one property map
        """
        result = []
        if lstmsg:
            for vlm_grp in lstmsg.volume_groups:
                if vlm_grp.number == args.volume_nr:
                    result.append(vlm_grp.properties)
                    break
        return result

    def print_props(self, args):
        lstmsg = [self._linstor.volume_group_list_raise(args.name)]
        return self.output_props_list(args, lstmsg, self._props_list)

    def set_props(self, args):
        args = self._attach_aux_prop(args)
        mod_prop_dict = Commands.parse_key_value_pairs([args.key + '=' + args.value])
        replies = self._linstor.volume_group_modify(
            args.name,
            args.volume_nr,
            mod_prop_dict['pairs'],
            mod_prop_dict['delete'])
        return self.handle_replies(args, replies)

    def set_drbd_opts(self, args):
        a = DrbdOptions.filter_new(args)
        # strip the positional arguments so only DRBD options remain
        del a['name']  # remove resource group key
        del a['volume-nr']
        mod_props, del_props = DrbdOptions.parse_opts(a, self.OBJECT_NAME)
        replies = self._linstor.volume_group_modify(
            args.name,
            args.volume_nr,
            mod_props,
            del_props)
        return self.handle_replies(args, replies)
class NodeCommands(Commands):
    """Client subcommands for LINSTOR cluster nodes.

    Provides node create/delete/lost/describe/list, node properties and the
    per-node network interface subcommands, plus the table/tree renderers
    used to display the controller's responses.
    """

    # storage pool name the controller uses for diskless resources
    DISKLESS_STORAGE_POOL = 'DfltDisklessStorPool'
    # synthetic label used by describe() to group diskless resources
    DISKLESS_RESOURCE_NAME = 'diskless resource'

    # column headers of the node list table; also defines valid --groupby keys
    _node_headers = [
        linstor_client.TableHeader("Node"),
        linstor_client.TableHeader("NodeType"),
        linstor_client.TableHeader("IPs"),
        linstor_client.TableHeader("State", color=Color.DARKGREEN)
    ]

    def __init__(self):
        super(NodeCommands, self).__init__()

    def setup_commands(self, parser):
        """Register the 'node' command tree on the given argparse parser.

        :param parser: argparse sub-parsers object to attach the commands to.
        """
        # Node subcommands
        subcmds = [
            Commands.Subcommands.Create,
            Commands.Subcommands.List,
            Commands.Subcommands.Delete,
            Commands.Subcommands.Lost,
            Commands.Subcommands.Describe,
            Commands.Subcommands.Interface,
            Commands.Subcommands.SetProperty,
            Commands.Subcommands.ListProperties
        ]

        node_parser = parser.add_parser(
            Commands.NODE,
            aliases=["n"],
            formatter_class=argparse.RawTextHelpFormatter,
            description="Node subcommands")

        node_subp = node_parser.add_subparsers(
            title="Node commands",
            metavar="",
            description=Commands.Subcommands.generate_desc(subcmds))

        # create node
        p_new_node = node_subp.add_parser(
            Commands.Subcommands.Create.LONG,
            aliases=[Commands.Subcommands.Create.SHORT],
            description='Creates a node entry for a node that participates in the '
                        'linstor cluster.')
        p_new_node.add_argument(
            '-p', '--port',
            type=rangecheck(1, 65535),
            help='default: Satellite %s for %s; Controller %s for %s; %s for %s'
            % (DFLT_STLT_PORT_PLAIN,
               VAL_NETCOM_TYPE_PLAIN,
               DFLT_CTRL_PORT_PLAIN,
               VAL_NETCOM_TYPE_PLAIN,
               DFLT_CTRL_PORT_SSL,
               VAL_NETCOM_TYPE_SSL))
        ntype_def = VAL_NODE_TYPE_STLT
        p_new_node.add_argument('--node-type',
                                choices=(VAL_NODE_TYPE_CTRL,
                                         VAL_NODE_TYPE_AUX,
                                         VAL_NODE_TYPE_CMBD,
                                         VAL_NODE_TYPE_STLT),
                                default=VAL_NODE_TYPE_STLT,
                                help='Node type (default: %s)' % ntype_def)
        ctype_def = VAL_NETCOM_TYPE_PLAIN
        p_new_node.add_argument(
            '--communication-type',
            choices=(VAL_NETCOM_TYPE_PLAIN, VAL_NETCOM_TYPE_SSL),
            default=ctype_def,
            help='Communication type (default: %s)' % ctype_def)
        itype_def = VAL_NETIF_TYPE_IP
        p_new_node.add_argument('--interface-type',
                                choices=(VAL_NETIF_TYPE_IP, ),
                                default=itype_def,
                                help='Interface type (default: %s)' % itype_def)
        iname_def = 'default'
        p_new_node.add_argument('--interface-name',
                                default=iname_def,
                                help='Interface name (default: %s)' % iname_def)
        p_new_node.add_argument(
            'name',
            help='Name of the new node, must match the nodes hostname',
            type=namecheck(NODE_NAME))
        p_new_node.add_argument(
            'ip',
            help='IP address of the new node').completer = ip_completer("name")
        p_new_node.set_defaults(func=self.create)

        # describe-node
        p_desc_node = node_subp.add_parser(
            Commands.Subcommands.Describe.LONG,
            aliases=[Commands.Subcommands.Describe.SHORT],
            description='describe a node (or all nodes), list storage pools, '
                        'resources and volumes under this node, '
                        'in this order')
        p_desc_node.add_argument(
            'name',
            nargs='?',
            help='Name of the node to be described. With no name, all nodes are described'
        ).completer = self.node_completer
        p_desc_node.set_defaults(func=self.describe)

        # remove-node
        p_rm_node = node_subp.add_parser(
            Commands.Subcommands.Delete.LONG,
            aliases=[Commands.Subcommands.Delete.SHORT],
            description='Removes a node from the linstor cluster. '
                        'All linstor resources that are still deployed on the specified '
                        'node are marked for undeployment, and the node entry is marked for '
                        "removal from linstor's data tables. The specified node is "
                        'expected to undeploy all resources. As soon as all resources have been '
                        'undeployed from the node, the node entry is removed from '
                        "linstor's data tables.")
        p_rm_node.add_argument(
            '-q', '--quiet',
            action="store_true",
            help='Unless this option is used, linstor will issue a safety question '
                 'that must be answered with yes, otherwise the operation is canceled.'
        )
        p_rm_node.add_argument(
            'name',
            help='Name of the node to remove').completer = self.node_completer
        p_rm_node.set_defaults(func=self.delete)

        # lost-node
        p_lost_node = node_subp.add_parser(
            Commands.Subcommands.Lost.LONG,
            aliases=[Commands.Subcommands.Lost.SHORT],
            description='Removes an unrecoverable node from the linstor cluster. '
                        'All linstor resources attached to this node will be deleted from the database.'
        )
        p_lost_node.add_argument(
            'name',
            help='Name of the node to delete.').completer = self.node_completer
        p_lost_node.set_defaults(func=self.lost)

        # Interface commands
        netif_subcmds = [
            Commands.Subcommands.Create,
            Commands.Subcommands.List,
            Commands.Subcommands.Modify,
            Commands.Subcommands.Delete
        ]

        interface_parser = node_subp.add_parser(
            Commands.Subcommands.Interface.LONG,
            formatter_class=argparse.RawTextHelpFormatter,
            aliases=[Commands.Subcommands.Interface.SHORT],
            description="%s subcommands" % Commands.Subcommands.Interface.LONG)

        interface_subp = interface_parser.add_subparsers(
            title="%s subcommands" % Commands.Subcommands.Interface.LONG,
            metavar="",
            description=Commands.Subcommands.generate_desc(netif_subcmds))

        # create net interface
        p_create_netinterface = interface_subp.add_parser(
            Commands.Subcommands.Create.LONG,
            aliases=[Commands.Subcommands.Create.SHORT],
            description='Creates and adds a new netinterface to a given node. If port is specified this netinterface '
                        'is used as satellite port')
        p_create_netinterface.add_argument(
            '-p', '--port',
            type=rangecheck(1, 65535),
            help='Port to use for satellite connections')
        p_create_netinterface.add_argument(
            '--communication-type',
            choices=(VAL_NETCOM_TYPE_PLAIN, VAL_NETCOM_TYPE_SSL),
            default=ctype_def,
            help='Communication type (default: %s)' % ctype_def)
        p_create_netinterface.add_argument(
            "node_name",
            help="Name of the node to add the net interface"
        ).completer = self.node_completer
        p_create_netinterface.add_argument("interface_name", help="Interface name")
        p_create_netinterface.add_argument(
            'ip',
            help='New IP address for the network interface')
        p_create_netinterface.set_defaults(func=self.create_netif)

        # modify net interface
        p_mod_netif = interface_subp.add_parser(
            Commands.Subcommands.Modify.LONG,
            aliases=[Commands.Subcommands.Modify.SHORT],
            description='Change the ip listen address of a netinterface on the given node.'
        )
        p_mod_netif.add_argument('-p', '--port',
                                 type=rangecheck(1, 65535),
                                 help='Port to use for satellite connections')
        p_mod_netif.add_argument(
            '--communication-type',
            choices=(VAL_NETCOM_TYPE_PLAIN, VAL_NETCOM_TYPE_SSL),
            default=ctype_def,
            help='Communication type (default: %s)' % ctype_def)
        p_mod_netif.add_argument(
            "node_name",
            help="Name of the node").completer = self.node_completer
        p_mod_netif.add_argument(
            "interface_name",
            help="Interface name to change").completer = self.netif_completer
        p_mod_netif.add_argument(
            'ip',
            help='New IP address for the network interface')
        p_mod_netif.set_defaults(func=self.modify_netif)

        # delete net interface
        p_delete_netinterface = interface_subp.add_parser(
            Commands.Subcommands.Delete.LONG,
            aliases=[Commands.Subcommands.Delete.SHORT],
            description='Delete a netinterface from a node.')
        p_delete_netinterface.add_argument(
            "node_name",
            help="Name of the node to remove the net interface"
        ).completer = self.node_completer
        p_delete_netinterface.add_argument(
            "interface_name",
            nargs='+',
            help="Interface name").completer = self.netif_completer
        p_delete_netinterface.set_defaults(func=self.delete_netif)

        # list nodes
        node_groupby = [x.name for x in self._node_headers]
        node_group_completer = Commands.show_group_completer(
            node_groupby, "groupby")

        p_lnodes = node_subp.add_parser(
            Commands.Subcommands.List.LONG,
            aliases=[Commands.Subcommands.List.SHORT],
            description='Prints a list of all cluster nodes known to linstor. '
                        'By default, the list is printed as a human readable table.')
        p_lnodes.add_argument('-p',
                              '--pastable',
                              action="store_true",
                              help='Generate pastable output')
        p_lnodes.add_argument(
            '-g',
            '--groupby',
            nargs='+',
            choices=node_groupby).completer = node_group_completer
        p_lnodes.add_argument(
            '-N',
            '--nodes',
            nargs='+',
            type=namecheck(NODE_NAME),
            help='Filter by list of nodes').completer = self.node_completer
        p_lnodes.set_defaults(func=self.list)

        # list netinterface
        p_lnetif = interface_subp.add_parser(
            Commands.Subcommands.List.LONG,
            aliases=[Commands.Subcommands.List.SHORT],
            description='Prints a list of netinterfaces from a node.')
        p_lnetif.add_argument('-p',
                              '--pastable',
                              action="store_true",
                              help='Generate pastable output')
        p_lnetif.add_argument(
            'node_name',
            help='Node name for which to print the net interfaces'
        ).completer = self.node_completer
        p_lnetif.set_defaults(func=self.list_netinterfaces)

        # show properties
        p_sp = node_subp.add_parser(
            Commands.Subcommands.ListProperties.LONG,
            aliases=[Commands.Subcommands.ListProperties.SHORT],
            description="Prints all properties of the given node.")
        p_sp.add_argument('-p',
                          '--pastable',
                          action="store_true",
                          help='Generate pastable output')
        p_sp.add_argument('node_name',
                          help="Node for which to print the properties"
                          ).completer = self.node_completer
        p_sp.set_defaults(func=self.print_props)

        # set properties
        p_setp = node_subp.add_parser(
            Commands.Subcommands.SetProperty.LONG,
            aliases=[Commands.Subcommands.SetProperty.SHORT],
            description="Set a property on the given node.")
        p_setp.add_argument('node_name',
                            help="Node for which to set the property"
                            ).completer = self.node_completer
        Commands.add_parser_keyvalue(p_setp, "node")
        p_setp.set_defaults(func=self.set_props)

        self.check_subcommands(interface_subp, netif_subcmds)
        self.check_subcommands(node_subp, subcmds)

    def create(self, args):
        """Create a node entry on the controller."""
        replies = self._linstor.node_create(args.name, args.node_type,
                                            args.ip, args.communication_type,
                                            args.port, args.interface_name)
        return self.handle_replies(args, replies)

    def delete(self, args):
        """Mark a node for orderly removal (resources are undeployed first)."""
        replies = self._linstor.node_delete(args.name)
        return self.handle_replies(args, replies)

    def lost(self, args):
        """Forcibly remove an unrecoverable node and its resources."""
        replies = self._linstor.node_lost(args.name)
        return self.handle_replies(args, replies)

    @classmethod
    def show_nodes(cls, args, lstmsg):
        """Render the node list response as a table.

        :param args: parsed CLI arguments (table/grouping/filter options).
        :param lstmsg: node list response from the controller.
        """
        tbl = linstor_client.Table(utf8=not args.no_utf8,
                                   colors=not args.no_color,
                                   pastable=args.pastable)
        for hdr in cls._node_headers:
            tbl.add_header(hdr)

        # maps a connection status constant to (display text, cell color);
        # "Connected" (handshake done but not fully online) is shown yellow
        conn_stat_dict = {
            apiconsts.CONN_STATUS_OFFLINE: ("OFFLINE", Color.RED),
            apiconsts.CONN_STATUS_CONNECTED: ("Connected", Color.YELLOW),
            apiconsts.CONN_STATUS_ONLINE: ("Online", Color.GREEN),
            apiconsts.CONN_STATUS_VERSION_MISMATCH:
            ("OFFLINE(VERSION MISMATCH)", Color.RED),
            apiconsts.CONN_STATUS_FULL_SYNC_FAILED:
            ("OFFLINE(FULL SYNC FAILED)", Color.RED),
            apiconsts.CONN_STATUS_AUTHENTICATION_ERROR:
            ("OFFLINE(AUTHENTICATION ERROR)", Color.RED),
            apiconsts.CONN_STATUS_UNKNOWN: ("Unknown", Color.YELLOW)
        }

        tbl.set_groupby(args.groupby if args.groupby else [tbl.header_name(0)])

        # apply the optional --nodes name filter client-side
        node_list = [x for x in lstmsg.nodes
                     if x.name in args.nodes] if args.nodes else lstmsg.nodes
        for n in node_list:
            ips = [if_.address for if_ in n.net_interfaces]
            conn_stat = conn_stat_dict[n.connection_status]
            tbl.add_row([
                n.name, n.type, ",".join(ips),
                tbl.color_cell(conn_stat[0], conn_stat[1])
            ])
        tbl.show()

    def list(self, args):
        """List all nodes known to the controller."""
        lstmsg = self._linstor.node_list()

        return self.output_list(args, lstmsg, self.show_nodes)

    def describe(self, args=None):
        """Describe one node (or all nodes) as a tree.

        Builds a 4 level tree and prints it. The four levels are:
        node, storage pool, resource, and volume.

        :param args: parsed CLI arguments; ``args.name`` selects a single
            node, no name describes all nodes.
        :return: an ExitCode value.
        """
        try:
            node_list_replies = self._linstor.node_list()
            self.check_list_sanity(args, node_list_replies)
            node_map = self.construct_node(node_list_replies[0].proto_msg)

            storage_pool_list_replies = self._linstor.storage_pool_list()
            self.check_list_sanity(args, storage_pool_list_replies)
            self.construct_storpool(node_map,
                                    storage_pool_list_replies[0].proto_msg)

            rsc_dfn_list_replies = self._linstor.resource_dfn_list()
            self.check_list_sanity(args, rsc_dfn_list_replies)
            volume_def_map = self.get_volume_size(
                rsc_dfn_list_replies[0].proto_msg)

            rsc_list_replies = self._linstor.resource_list()
            self.check_list_sanity(args, rsc_list_replies)
            self.construct_rsc(node_map, rsc_list_replies[0].proto_msg,
                               volume_def_map)

            outputted = False
            machine_data = []
            for node_name_key in sorted(node_map.keys()):
                if outputted:
                    print("")
                if args.name == node_name_key or not args.name:
                    node = node_map[node_name_key]
                    machine_data.append(node.to_data())
                    if not args.machine_readable:
                        node.print_node(args.no_utf8, args.no_color)
                        outputted = True

            if args.machine_readable:
                print(self._to_json(machine_data))
            elif not outputted and args.name:
                # a name was given but never matched any known node
                sys.stderr.write('%s: no such node\n' % args.name)
                return ExitCode.OBJECT_NOT_FOUND
        except LinstorClientError as lce:
            return lce.exit_code

        return ExitCode.OK

    def check_list_sanity(self, args, replies):
        """Raise LinstorClientError if a list reply carries API errors."""
        if replies:
            if self.check_for_api_replies(replies):
                rc = self.handle_replies(args, replies)
                raise LinstorClientError("List reply error", rc)
        return True

    @classmethod
    def get_volume_size(cls, rsc_dfn_list):
        """Constructs a map of minor numbers to volume sizes.

        :param rsc_dfn_list: Protobuf definition list
        :return: the created minor number to volume size map.
        :rtype: dict[int, int]
        """
        volume_def_map = {}  # type dict[int, int]
        for rsc_dfn in rsc_dfn_list.rsc_dfns:
            for vlmdfn in rsc_dfn.vlm_dfns:
                volume_def_map[vlmdfn.vlm_minor] = vlmdfn.vlm_size
        return volume_def_map

    def make_volume_node(self, vlm, volume_def_map):
        """Build a TreeNode describing a single volume (minor number, size)."""
        volume_node = TreeNode('volume' + str(vlm.vlm_nr), '', Color.DARKGREEN)

        # volume definition data
        volume_node.set_description('minor number: ' + str(vlm.vlm_minor_nr))
        volume_node.add_description(', size: ' + str(
            SizeCalc.approximate_size_string(volume_def_map[vlm.vlm_minor_nr]))
        )

        return volume_node

    def construct_rsc(self, node_map, rsc_list, volume_map):
        """Attach resource/volume TreeNodes underneath their storage pools.

        Diskless volumes are grouped under a synthetic
        DISKLESS_RESOURCE_NAME child instead of a real storage pool.
        """
        for rsc in rsc_list.resources:
            vlm_by_storpool = collections.defaultdict(list)
            for vlm in rsc.vlms:
                vlm_by_storpool[vlm.stor_pool_name].append(vlm)

            for (storpool_name, vlms) in vlm_by_storpool.items():
                rsc_node = TreeNode(rsc.name, '', Color.BLUE)
                rsc_node.set_description('resource')

                if storpool_name == self.DISKLESS_STORAGE_POOL:
                    # created lazily: one shared bucket per node
                    storpool_node = node_map[rsc.node_name].find_child(
                        self.DISKLESS_RESOURCE_NAME)
                    if not storpool_node:
                        storpool_node = TreeNode(self.DISKLESS_RESOURCE_NAME,
                                                 '', Color.PINK)
                        storpool_node.set_description(
                            'resources may reside on other nodes')
                        node_map[rsc.node_name].add_child(storpool_node)
                else:
                    storpool_node = node_map[rsc.node_name].find_child(
                        storpool_name)

                for vlm in vlms:
                    rsc_node.add_child(self.make_volume_node(vlm, volume_map))

                storpool_node.add_child(rsc_node)

    def construct_storpool(self, node_map, storage_pool_list):
        """Attach one storage-pool TreeNode per pool under its node."""
        for storpool in storage_pool_list.stor_pools:
            storpool_node = TreeNode(storpool.stor_pool_name, '', Color.PINK)
            storpool_node.set_description('storage pool')
            node_map[storpool.node_name].add_child(storpool_node)

    @classmethod
    def construct_node(cls, node_list):
        """
        Constructs a dict of node names to TreeNodes

        :param node_list:
        :return:
        :rtype: dict[str, TreeNode]
        """
        node_map = {}
        for n in node_list.nodes:
            root_node = TreeNode(n.name, '', Color.RED)
            root_node.set_description('node')
            node_map[n.name] = root_node
        return node_map

    @classmethod
    def show_netinterfaces(cls, args, lstnodes):
        """Render the net interfaces of one node; raise if the node is unknown."""
        node = NodeCommands.find_node(lstnodes, args.node_name)
        if node:
            tbl = linstor_client.Table(utf8=not args.no_utf8,
                                       colors=not args.no_color,
                                       pastable=args.pastable)
            tbl.add_column(node.name, color=Color.GREEN)
            tbl.add_column("NetInterface")
            tbl.add_column("IP")
            for netif in node.net_interfaces:
                tbl.add_row(["+", netif.name, netif.address])
            tbl.show()
        else:
            raise LinstorClientError(
                "Node '{n}' not found on controller.".format(n=args.node_name),
                ExitCode.OBJECT_NOT_FOUND)

    def list_netinterfaces(self, args):
        """List the net interfaces of the given node."""
        lstnodes = self._linstor.node_list()

        return self.output_list(args, lstnodes, self.show_netinterfaces)

    @classmethod
    def _props_list(cls, args, lstmsg):
        """Return the property container of the selected node (as a 1-item list)."""
        result = []
        node = NodeCommands.find_node(lstmsg, args.node_name)
        if node:
            result.append(node.props)
        else:
            raise LinstorClientError(
                "Node '{n}' not found on controller.".format(n=args.node_name),
                ExitCode.OBJECT_NOT_FOUND)
        return result

    def print_props(self, args):
        """Print all properties of the given node."""
        lstmsg = self._linstor.node_list()

        return self.output_props_list(args, lstmsg, self._props_list)

    def set_props(self, args):
        """Set (or delete) a single property on a node."""
        args = self._attach_aux_prop(args)
        mod_prop_dict = Commands.parse_key_value_pairs(
            [args.key + '=' + args.value])
        replies = self._linstor.node_modify(args.node_name,
                                            mod_prop_dict['pairs'],
                                            mod_prop_dict['delete'])
        return self.handle_replies(args, replies)

    def create_netif(self, args):
        """Create a net interface on a node."""
        replies = self._linstor.netinterface_create(args.node_name,
                                                    args.interface_name,
                                                    args.ip, args.port,
                                                    args.communication_type)
        return self.handle_replies(args, replies)

    def modify_netif(self, args):
        """Modify an existing net interface on a node."""
        replies = self._linstor.netinterface_modify(args.node_name,
                                                    args.interface_name,
                                                    args.ip, args.port,
                                                    args.communication_type)
        return self.handle_replies(args, replies)

    def delete_netif(self, args):
        # execute delete netinterfaces and flatten result list
        replies = [
            x for subx in args.interface_name
            for x in self._linstor.netinterface_delete(args.node_name, subx)
        ]
        return self.handle_replies(args, replies)
class ResourceDefinitionCommands(Commands):
    """Client subcommands for LINSTOR resource definitions.

    Covers create/modify/delete/list, properties and DRBD options of
    resource definitions.
    """

    # object name used when resolving DRBD option metadata
    OBJECT_NAME = 'resource-definition'

    # column headers of the resource definition table; also the --groupby keys
    _rsc_dfn_headers = [
        linstor_client.TableHeader("ResourceName"),
        linstor_client.TableHeader("Port"),
        linstor_client.TableHeader("ResourceGroup"),
        linstor_client.TableHeader("State", color=Color.DARKGREEN)
    ]

    def __init__(self):
        super(ResourceDefinitionCommands, self).__init__()

    def setup_commands(self, parser):
        """Register the 'resource-definition' command tree on the parser."""
        subcmds = [
            Commands.Subcommands.Create,
            Commands.Subcommands.Modify,
            Commands.Subcommands.List,
            Commands.Subcommands.Delete,
            Commands.Subcommands.SetProperty,
            Commands.Subcommands.ListProperties,
            Commands.Subcommands.DrbdOptions
        ]

        # Resource definition subcommands
        res_def_parser = parser.add_parser(
            Commands.RESOURCE_DEF,
            aliases=["rd"],
            formatter_class=argparse.RawTextHelpFormatter,
            description="Resource definition subcommands")

        res_def_subp = res_def_parser.add_subparsers(
            title="resource definition subcommands",
            metavar="",
            description=Commands.Subcommands.generate_desc(subcmds))

        p_new_res_dfn = res_def_subp.add_parser(
            Commands.Subcommands.Create.LONG,
            aliases=[Commands.Subcommands.Create.SHORT],
            description='Defines a Linstor resource definition for use with linstor.')
        p_new_res_dfn.add_argument('-p', '--port', type=rangecheck(1, 65535))
        p_new_res_dfn.add_argument('-e',
                                   '--external-name',
                                   type=str,
                                   help='User specified name.')
        # p_new_res_dfn.add_argument('-s', '--secret', type=str)
        p_new_res_dfn.add_argument(
            '-l',
            '--layer-list',
            type=self.layer_data_check,
            help="Comma separated layer list, order is from right to left. "
                 "This means the top most layer is on the left. "
                 "Possible layers are: " + ",".join(linstor.Linstor.layer_list()))
        p_new_res_dfn.add_argument('--peer-slots',
                                   type=rangecheck(1, 31),
                                   help='(DRBD) peer slots for new resources')
        p_new_res_dfn.add_argument(
            '--resource-group',
            help="Attach the resource definition to this resource group"
        ).completer = self.resource_grp_completer
        p_new_res_dfn.add_argument(
            'name',
            nargs="?",
            type=str,
            help='Name of the new resource definition. Will be ignored if EXTERNAL_NAME is set.'
        )
        p_new_res_dfn.set_defaults(func=self.create)

        # modify-resource definition
        p_mod_res_dfn = res_def_subp.add_parser(
            Commands.Subcommands.Modify.LONG,
            aliases=[Commands.Subcommands.Modify.SHORT],
            description='Modifies a Linstor resource definition')
        p_mod_res_dfn.add_argument('--peer-slots',
                                   type=rangecheck(1, 31),
                                   help='(DRBD) peer slots for new resources')
        p_mod_res_dfn.add_argument('name',
                                   help='Name of the resource definition'
                                   ).completer = self.resource_dfn_completer
        p_mod_res_dfn.set_defaults(func=self.modify)

        # remove-resource definition
        # TODO description
        p_rm_res_dfn = res_def_subp.add_parser(
            Commands.Subcommands.Delete.LONG,
            aliases=[Commands.Subcommands.Delete.SHORT],
            description=" Removes a resource definition "
                        "from the linstor cluster. The resource is undeployed from all nodes "
                        "and the resource entry is marked for removal from linstor's data "
                        "tables. After all nodes have undeployed the resource, the resource "
                        "entry is removed from linstor's data tables.")
        p_rm_res_dfn.add_argument(
            '--async',
            action='store_true',
            help='Do not wait for actual deletion on satellites before returning')
        p_rm_res_dfn.add_argument('name',
                                  nargs="+",
                                  help='Name of the resource to delete'
                                  ).completer = self.resource_dfn_completer
        p_rm_res_dfn.set_defaults(func=self.delete)

        rsc_dfn_groupby = [x.name for x in self._rsc_dfn_headers]
        rsc_dfn_group_completer = Commands.show_group_completer(
            rsc_dfn_groupby, "groupby")

        p_lrscdfs = res_def_subp.add_parser(
            Commands.Subcommands.List.LONG,
            aliases=[Commands.Subcommands.List.SHORT],
            description='Prints a list of all resource definitions known to '
                        'linstor. By default, the list is printed as a human readable table.'
        )
        p_lrscdfs.add_argument('-p',
                               '--pastable',
                               action="store_true",
                               help='Generate pastable output')
        p_lrscdfs.add_argument(
            '-g',
            '--groupby',
            nargs='+',
            choices=rsc_dfn_groupby).completer = rsc_dfn_group_completer
        p_lrscdfs.add_argument('-r',
                               '--resource-definitions',
                               nargs='+',
                               type=str,
                               help='Filter by list of resource definitions'
                               ).completer = self.resource_dfn_completer
        p_lrscdfs.add_argument('-e',
                               '--external-name',
                               action="store_true",
                               help='Show user specified name.')
        p_lrscdfs.set_defaults(func=self.list)

        # show properties
        p_sp = res_def_subp.add_parser(
            Commands.Subcommands.ListProperties.LONG,
            aliases=[Commands.Subcommands.ListProperties.SHORT],
            description="Prints all properties of the given resource definitions.")
        p_sp.add_argument('-p',
                          '--pastable',
                          action="store_true",
                          help='Generate pastable output')
        p_sp.add_argument(
            'resource_name',
            help="Resource definition for which to print the properties"
        ).completer = self.resource_dfn_completer
        p_sp.set_defaults(func=self.print_props)

        # set properties
        p_setprop = res_def_subp.add_parser(
            Commands.Subcommands.SetProperty.LONG,
            aliases=[Commands.Subcommands.SetProperty.SHORT],
            formatter_class=argparse.RawTextHelpFormatter,
            description='Sets properties for the given resource definition.')
        p_setprop.add_argument('name',
                               type=str,
                               help='Name of the resource definition')
        Commands.add_parser_keyvalue(p_setprop, 'resource-definition')
        p_setprop.set_defaults(func=self.set_props)

        # drbd options
        p_drbd_opts = res_def_subp.add_parser(
            Commands.Subcommands.DrbdOptions.LONG,
            aliases=[Commands.Subcommands.DrbdOptions.SHORT],
            description=DrbdOptions.description("resource"))
        p_drbd_opts.add_argument(
            'resource_name',
            type=str,
            help="Resource name").completer = self.resource_dfn_completer
        DrbdOptions.add_arguments(p_drbd_opts, self.OBJECT_NAME)
        p_drbd_opts.set_defaults(func=self.set_drbd_opts)

        self.check_subcommands(res_def_subp, subcmds)

    def create(self, args):
        """Create a resource definition.

        Requires at least one of ``name`` / ``external_name``; raises
        ArgumentError otherwise.
        """
        if not args.name and not args.external_name:
            raise ArgumentError(
                "ArgumentError: At least resource name or external name has to be specified."
            )
        replies = self._linstor.resource_dfn_create(
            args.name,
            args.port,
            # py2 may deliver the external name as bytes; decode for the API
            external_name=args.external_name
            if not isinstance(args.external_name, bytes) else
            args.external_name.decode('utf-8'),  # py2-3
            layer_list=args.layer_list,
            resource_group=args.resource_group)
        return self.handle_replies(args, replies)

    def modify(self, args):
        """Modify a resource definition (currently only --peer-slots)."""
        replies = self._linstor.resource_dfn_modify(args.name, {}, [],
                                                    args.peer_slots)
        return self.handle_replies(args, replies)

    def delete(self, args):
        """Delete one or more resource definitions."""
        # 'async' is a Python keyword, so read it via vars()
        async_flag = vars(args)["async"]

        # execute delete rscdfns and flatten result list
        replies = [
            x for subx in args.name
            for x in self._linstor.resource_dfn_delete(subx, async_flag)
        ]
        return self.handle_replies(args, replies)

    @classmethod
    def show(cls, args, lstmsg):
        """Render the resource definition list response as a table.

        With --external-name an extra "External" column is inserted.
        """
        tbl = linstor_client.Table(utf8=not args.no_utf8,
                                   colors=not args.no_color,
                                   pastable=args.pastable)

        rsc_dfn_hdr = list(cls._rsc_dfn_headers)

        if args.external_name:
            rsc_dfn_hdr.insert(1, linstor_client.TableHeader("External"))

        for hdr in rsc_dfn_hdr:
            tbl.add_header(hdr)

        tbl.set_groupby(args.groupby if args.groupby else [tbl.header_name(0)])

        for rsc_dfn in lstmsg.resource_definitions:
            drbd_data = rsc_dfn.drbd_data
            row = [rsc_dfn.name]
            if args.external_name and isinstance(rsc_dfn.external_name, str):
                row.append(rsc_dfn.external_name)
            # port only exists when the definition has a DRBD layer
            row.append(drbd_data.port if drbd_data else "")
            row.append(rsc_dfn.resource_group_name)
            row.append(
                tbl.color_cell("DELETING", Color.RED) if FLAG_DELETE in
                rsc_dfn.flags else tbl.color_cell("ok", Color.DARKGREEN))
            tbl.add_row(row)
        tbl.show()

    def list(self, args):
        """List resource definitions, optionally filtered by name."""
        lstmsg = self._linstor.resource_dfn_list(
            query_volume_definitions=False,
            filter_by_resource_definitions=args.resource_definitions)
        return self.output_list(args, lstmsg, self.show)

    @classmethod
    def _props_show(cls, args, lstmsg):
        """Collect the property containers of all listed resource definitions."""
        result = []
        if lstmsg:
            for rsc_dfn in lstmsg.resource_definitions:
                result.append(rsc_dfn.properties)
        return result

    def print_props(self, args):
        """Print all properties of one resource definition."""
        lstmsg = self._linstor.resource_dfn_list(
            query_volume_definitions=False,
            filter_by_resource_definitions=[args.resource_name])

        return self.output_props_list(args, lstmsg, self._props_show)

    def set_props(self, args):
        """Set (or delete) a single property on a resource definition."""
        args = self._attach_aux_prop(args)
        mod_prop_dict = Commands.parse_key_value_pairs(
            [args.key + '=' + args.value])
        replies = self._linstor.resource_dfn_modify(args.name,
                                                    mod_prop_dict['pairs'],
                                                    mod_prop_dict['delete'])
        return self.handle_replies(args, replies)

    def set_drbd_opts(self, args):
        """Apply DRBD options given on the command line to a resource definition."""
        a = DrbdOptions.filter_new(args)
        del a['resource-name']  # remove resource name key

        mod_props, del_props = DrbdOptions.parse_opts(a, self.OBJECT_NAME)

        replies = self._linstor.resource_dfn_modify(args.resource_name,
                                                    mod_props, del_props)
        return self.handle_replies(args, replies)
class StoragePoolCommands(Commands): class Lvm(object): LONG = "lvm" SHORT = "lvm" class LvmThin(object): LONG = "lvmthin" SHORT = "lvmthin" class Zfs(object): LONG = "zfs" SHORT = "zfs" class ZfsThin(object): LONG = "zfsthin" SHORT = "zfsthin" class Diskless(object): LONG = "diskless" SHORT = "diskless" class File(object): LONG = "file" SHORT = "file" class FileThin(object): LONG = "filethin" SHORT = "filethin" class SPDK(object): LONG = "spdk" SHORT = "spdk" class RemoteSPDK(object): LONG = "remotespdk" SHORT = "remotespdk" class OpenFlex(object): LONG = "openflex" SHORT = "openflex" class Exos(object): LONG = "exos" SHORT = "exos" _stor_pool_headers = [ linstor_client.TableHeader("StoragePool"), linstor_client.TableHeader("Node"), linstor_client.TableHeader("Driver"), linstor_client.TableHeader("PoolName"), linstor_client.TableHeader( "FreeCapacity", alignment_text=linstor_client.TableHeader.ALIGN_RIGHT), linstor_client.TableHeader( "TotalCapacity", alignment_text=linstor_client.TableHeader.ALIGN_RIGHT), linstor_client.TableHeader("CanSnapshots"), linstor_client.TableHeader("State"), linstor_client.TableHeader("SharedName") ] def __init__(self): super(StoragePoolCommands, self).__init__() @classmethod def _create_pool_args(cls, parser, shared_space=True, external_locking=True): parser.add_argument('node_name', type=str, help='Name of the node for the new storage pool' ).completer = cls.node_completer parser.add_argument('name', type=str, help='Name of the new storage pool') if shared_space: parser.add_argument( '--shared-space', type=str, help= 'Unique identifier of backing storage shared by multiple nodes. If omitted Linstor will assume ' 'the pool is unique for each node. 
When using shared volume groups with LVM2 the volume group ' 'UUID could be used as the SHARED_SPACE identifier.') if external_locking: parser.add_argument( '--external-locking', action="store_true", help='Skip Linstors internal locking for shared storage pools') def setup_commands(self, parser): # Storage pool subcommands subcmds = [ Commands.Subcommands.Create, Commands.Subcommands.List, Commands.Subcommands.Delete, Commands.Subcommands.SetProperty, Commands.Subcommands.ListProperties ] sp_parser = parser.add_parser( Commands.STORAGE_POOL, aliases=["sp"], formatter_class=argparse.RawTextHelpFormatter, description="Storage pool subcommands") sp_subp = sp_parser.add_subparsers( title="Storage pool commands", metavar="", description=Commands.Subcommands.generate_desc(subcmds)) subcmd_create = [ StoragePoolCommands.Lvm, StoragePoolCommands.LvmThin, StoragePoolCommands.Zfs, StoragePoolCommands.ZfsThin, StoragePoolCommands.Diskless, StoragePoolCommands.File, StoragePoolCommands.FileThin, StoragePoolCommands.SPDK, StoragePoolCommands.RemoteSPDK, StoragePoolCommands.OpenFlex, StoragePoolCommands.Exos ] sp_c_parser = sp_subp.add_parser( Commands.Subcommands.Create.LONG, aliases=[Commands.Subcommands.Create.SHORT], formatter_class=argparse.RawTextHelpFormatter, description='Defines a Linstor storage pool for use with Linstor.') create_subp = sp_c_parser.add_subparsers( title="Storage pool create commands", metavar="", description=Commands.Subcommands.generate_desc(subcmd_create)) p_new_lvm_pool = create_subp.add_parser( StoragePoolCommands.Lvm.LONG, aliases=[StoragePoolCommands.Lvm.SHORT], description='Create a lvm storage pool') self._create_pool_args(p_new_lvm_pool) p_new_lvm_pool.add_argument('driver_pool_name', type=str, help='The Lvm volume group to use.') p_new_lvm_pool.set_defaults(func=self.create, driver=linstor.StoragePoolDriver.LVM) p_new_spdk_pool = create_subp.add_parser( StoragePoolCommands.SPDK.LONG, aliases=[StoragePoolCommands.SPDK.SHORT], 
description='Create a spdk storage pool') self._create_pool_args(p_new_spdk_pool) p_new_spdk_pool.add_argument( 'driver_pool_name', type=str, help='The Spdk logical volume store to use.') p_new_spdk_pool.set_defaults(func=self.create, driver=linstor.StoragePoolDriver.SPDK) p_new_remote_spdk_pool = create_subp.add_parser( StoragePoolCommands.RemoteSPDK.LONG, aliases=[StoragePoolCommands.RemoteSPDK.SHORT], description='Create a remote-spdk storage pool') self._create_pool_args(p_new_remote_spdk_pool) p_new_remote_spdk_pool.add_argument( 'driver_pool_name', type=str, help='The remote Spdk logical volume store to use.') p_new_remote_spdk_pool.set_defaults( func=self.create, driver=linstor.StoragePoolDriver.REMOTE_SPDK) p_new_lvm_thin_pool = create_subp.add_parser( StoragePoolCommands.LvmThin.LONG, aliases=[StoragePoolCommands.LvmThin.SHORT], description='Create a lvm thin storage pool') self._create_pool_args(p_new_lvm_thin_pool) p_new_lvm_thin_pool.add_argument( 'driver_pool_name', type=str, help= 'The LvmThin volume group to use. 
The full name of the thin pool, namely VG/LV' ) p_new_lvm_thin_pool.set_defaults( func=self.create, driver=linstor.StoragePoolDriver.LVMThin) p_new_zfs_pool = create_subp.add_parser( StoragePoolCommands.Zfs.LONG, aliases=[StoragePoolCommands.Zfs.SHORT], description='Create a zfs storage pool') self._create_pool_args(p_new_zfs_pool) p_new_zfs_pool.add_argument('driver_pool_name', type=str, help='The name of the zpool to use.') p_new_zfs_pool.set_defaults(func=self.create, driver=linstor.StoragePoolDriver.ZFS) p_new_zfsthin_pool = create_subp.add_parser( StoragePoolCommands.ZfsThin.LONG, aliases=[StoragePoolCommands.ZfsThin.SHORT], description='Create a zfs storage pool') self._create_pool_args(p_new_zfsthin_pool) p_new_zfsthin_pool.add_argument('driver_pool_name', type=str, help='The name of the zpool to use.') p_new_zfsthin_pool.set_defaults( func=self.create, driver=linstor.StoragePoolDriver.ZFSThin) p_new_diskless_pool = create_subp.add_parser( StoragePoolCommands.Diskless.LONG, aliases=[StoragePoolCommands.Diskless.SHORT], description='Create a diskless pool') self._create_pool_args(p_new_diskless_pool, shared_space=False, external_locking=False) p_new_diskless_pool.set_defaults( func=self.create, driver=linstor.StoragePoolDriver.Diskless, driver_pool_name=None) p_new_file_pool = create_subp.add_parser( StoragePoolCommands.File.LONG, aliases=[StoragePoolCommands.File.SHORT], description='Create a file storage pool') self._create_pool_args(p_new_file_pool) p_new_file_pool.add_argument('driver_pool_name', type=str, help='The directory to use.') p_new_file_pool.set_defaults(func=self.create, driver=linstor.StoragePoolDriver.FILE) p_new_file_thin_pool = create_subp.add_parser( StoragePoolCommands.FileThin.LONG, aliases=[StoragePoolCommands.FileThin.SHORT], description='Create a file thin storage pool') self._create_pool_args(p_new_file_thin_pool) p_new_file_thin_pool.add_argument('driver_pool_name', type=str, help='The directory to use.') 
p_new_file_thin_pool.set_defaults( func=self.create, driver=linstor.StoragePoolDriver.FILEThin) p_new_openflex_pool = create_subp.add_parser( StoragePoolCommands.OpenFlex.LONG, aliases=[StoragePoolCommands.OpenFlex.SHORT], description='Create an openflex storage pool') self._create_pool_args(p_new_openflex_pool, shared_space=False) p_new_openflex_pool.add_argument('driver_pool_name', type=str, help='OpenFlex pool index') p_new_openflex_pool.set_defaults( func=self.create, driver=linstor.StoragePoolDriver.OPENFLEX_TARGET) p_new_exos_pool = create_subp.add_parser( StoragePoolCommands.Exos.LONG, aliases=[StoragePoolCommands.Exos.SHORT], description='Create an EXOS storage pool') self._create_pool_args(p_new_exos_pool) p_new_exos_pool.add_argument('enclosure_name', type=str, help='Enclosure name') p_new_exos_pool.add_argument('pool_sn', type=str, help='Exos Pool Serial Number') p_new_exos_pool.set_defaults(func=self.create_exos, driver=linstor.StoragePoolDriver.EXOS) # END CREATE SUBCMDS # remove-storpool p_rm_storpool = sp_subp.add_parser( Commands.Subcommands.Delete.LONG, aliases=[Commands.Subcommands.Delete.SHORT], description=' Removes a storage pool ') p_rm_storpool.add_argument( '-q', '--quiet', action="store_true", help= 'Unless this option is used, linstor will issue a safety question ' 'that must be answered with yes, otherwise the operation is canceled.' ) p_rm_storpool.add_argument( 'node_name', nargs="+", help='Name of the Node where the storage pool exists.' 
).completer = self.node_completer p_rm_storpool.add_argument('name', help='Name of the storage pool to delete' ).completer = self.storage_pool_completer p_rm_storpool.set_defaults(func=self.delete) # list storpool storpoolgroupby = [x.name.lower() for x in self._stor_pool_headers] storpool_group_completer = Commands.show_group_completer( storpoolgroupby, "groupby") p_lstorpool = sp_subp.add_parser( Commands.Subcommands.List.LONG, aliases=[Commands.Subcommands.List.SHORT], description='Prints a list of all storage pool known to ' 'linstor. By default, the list is printed as a human readable table.' ) p_lstorpool.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output') p_lstorpool.add_argument( '-g', '--groupby', nargs='+', choices=storpoolgroupby, type=str.lower).completer = storpool_group_completer p_lstorpool.add_argument('-s', '--storage-pools', nargs='+', type=str, help='Filter by list of storage pools' ).completer = self.storage_pool_completer p_lstorpool.add_argument( '-n', '--nodes', nargs='+', type=str, help='Filter by list of nodes').completer = self.node_completer p_lstorpool.add_argument('--props', nargs='+', type=str, help='Filter list by object properties') p_lstorpool.set_defaults(func=self.list) # show properties p_sp = sp_subp.add_parser( Commands.Subcommands.ListProperties.LONG, aliases=[Commands.Subcommands.ListProperties.SHORT], description="Prints all properties of the given storage pool.") p_sp.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output') p_sp.add_argument('node_name', type=str, help='Name of the node for the storage pool' ).completer = self.node_completer p_sp.add_argument('storage_pool_name', help="Storage pool for which to print the properties" ).completer = self.storage_pool_completer p_sp.set_defaults(func=self.print_props) # set properties p_setprop = sp_subp.add_parser( Commands.Subcommands.SetProperty.LONG, aliases=[Commands.Subcommands.SetProperty.SHORT], 
formatter_class=argparse.RawTextHelpFormatter, description= 'Sets properties for the given storage pool on the given node.') p_setprop.add_argument('node_name', type=str, help='Name of the node for the storage pool' ).completer = self.node_completer p_setprop.add_argument('name', type=str, help='Name of the storage pool' ).completer = self.storage_pool_completer Commands.add_parser_keyvalue(p_setprop, 'storagepool') p_setprop.set_defaults(func=self.set_props) self.check_subcommands(create_subp, subcmd_create) self.check_subcommands(sp_subp, subcmds) def create(self, args): try: shrd_space = None if "shared_space" not in args else args.shared_space ext_locking = None if "external_locking" not in args else args.external_locking replies = self.get_linstorapi().storage_pool_create( args.node_name, args.name, args.driver, args.driver_pool_name, shared_space=shrd_space, external_locking=ext_locking) except linstor.LinstorError as e: raise ArgumentError(e.message) return self.handle_replies(args, replies) def create_exos(self, args): try: # no shared-space and no external locking. 
shared-space calculated by server, external locking not allowed props = { apiconsts.NAMESPC_EXOS + '/' + apiconsts.KEY_STOR_POOL_EXOS_ENCLOSURE: args.enclosure_name, apiconsts.NAMESPC_EXOS + '/' + apiconsts.KEY_STOR_POOL_EXOS_POOL_SN: args.pool_sn } replies = self.get_linstorapi().storage_pool_create( args.node_name, args.name, args.driver, args.enclosure_name + '_' + args.pool_sn, property_dict=props) except linstor.LinstorError as e: raise ArgumentError(e.message) return self.handle_replies(args, replies) def delete(self, args): # execute delete storpooldfns and flatten result list replies = [ x for subx in args.node_name for x in self._linstor.storage_pool_delete(subx, args.name) ] return self.handle_replies(args, replies) def show(self, args, lstmsg): tbl = linstor_client.Table(utf8=not args.no_utf8, colors=not args.no_color, pastable=args.pastable) for hdr in self._stor_pool_headers: tbl.add_header(hdr) storage_pool_resp = lstmsg # type: StoragePoolListResponse tbl.set_groupby(args.groupby if args. 
groupby else [self._stor_pool_headers[0].name]) errors = [] for storpool in storage_pool_resp.storage_pools: driver_device = linstor.StoragePoolDriver.storage_props_to_driver_pool( storpool.provider_kind, storpool.properties) free_capacity = "" total_capacity = "" if not storpool.is_diskless() and storpool.free_space is not None: free_capacity = SizeCalc.approximate_size_string( storpool.free_space.free_capacity) total_capacity = SizeCalc.approximate_size_string( storpool.free_space.total_capacity) for error in storpool.reports: if error not in errors: errors.append(error) state_str, state_color = self.get_replies_state(storpool.reports) tbl.add_row([ storpool.name, storpool.node_name, storpool.provider_kind, driver_device, free_capacity, total_capacity, storpool.supports_snapshots(), tbl.color_cell(state_str, state_color), storpool.free_space_mgr_name if ':' not in storpool.free_space_mgr_name else '' ]) tbl.show() for err in errors: Output.handle_ret(err, warn_as_error=args.warn_as_error, no_color=args.no_color) def list(self, args): lstmsg = self._linstor.storage_pool_list(args.nodes, args.storage_pools, args.props) return self.output_list(args, lstmsg, self.show) @classmethod def _props_show(cls, args, lstmsg): result = [] if lstmsg: response = lstmsg # type: StoragePoolListResponse for stor_pool in response.storage_pools: result.append(stor_pool.properties) return result def print_props(self, args): lstmsg = self._linstor.storage_pool_list([args.node_name], [args.storage_pool_name]) return self.output_props_list(args, lstmsg, self._props_show) def set_props(self, args): args = self._attach_aux_prop(args) mod_prop_dict = Commands.parse_key_value_pairs([(args.key, args.value) ]) replies = self._linstor.storage_pool_modify(args.node_name, args.name, mod_prop_dict['pairs'], mod_prop_dict['delete']) return self.handle_replies(args, replies)
class VolumeDefinitionCommands(Commands):
    """CLI subcommands operating on LINSTOR volume definitions.

    Registers the ``volume-definition`` (alias ``vd``) argparse subtree and
    implements the handlers it dispatches to (create/list/delete/set-size/
    set-property/list-properties/drbd-options).
    """

    # Property-namespace object name used by DrbdOptions for this entity.
    OBJECT_NAME = 'volume-definition'

    # Column headers for the `volume-definition list` table, in display order.
    _vlm_dfn_headers = [
        linstor_client.TableHeader("ResourceName"),
        linstor_client.TableHeader("VolumeNr"),
        linstor_client.TableHeader("VolumeMinor"),
        linstor_client.TableHeader("Size"),
        linstor_client.TableHeader("Gross"),
        linstor_client.TableHeader("State", color=Color.DARKGREEN)
    ]

    VOLUME_SIZE_HELP = \
        'Size of the volume. ' \
        'Valid units: ' + SizeCalc.UNITS_LIST_STR + '. ' \
        'The default unit is GiB (2 ^ 30 bytes). ' \
        'The unit can be specified with a postfix. ' \
        'Linstor\'s internal granularity for the capacity of volumes is one ' \
        'kibibyte (2 ^ 10 bytes). The actual size used by linstor ' \
        'is the smallest natural number of kibibytes that is large enough to ' \
        'accommodate a volume of the requested size in the specified size unit.'

    def __init__(self):
        super(VolumeDefinitionCommands, self).__init__()

    def setup_commands(self, parser):
        """Register the volume-definition subparser tree on *parser*."""
        # volume definition subcommands
        subcmds = [
            Commands.Subcommands.Create,
            Commands.Subcommands.List,
            Commands.Subcommands.Delete,
            Commands.Subcommands.SetSize,
            Commands.Subcommands.SetProperty,
            Commands.Subcommands.ListProperties,
            Commands.Subcommands.DrbdOptions
        ]

        vol_def_parser = parser.add_parser(
            Commands.VOLUME_DEF,
            aliases=["vd"],
            formatter_class=argparse.RawTextHelpFormatter,
            description="Volume definition subcommands")

        vol_def_subp = vol_def_parser.add_subparsers(
            title="Volume definition commands",
            metavar="",
            description=Commands.Subcommands.generate_desc(subcmds)
        )

        # create volume definition
        p_new_vol = vol_def_subp.add_parser(
            Commands.Subcommands.Create.LONG,
            aliases=[Commands.Subcommands.Create.SHORT],
            description='Defines a volume with a capacity of size for use with '
            'linstor. If the resource resname exists already, a new volume is '
            'added to that resource, otherwise the resource is created automatically '
            'with default settings. Unless minornr is specified, a minor number for '
            "the volume's DRBD block device is assigned automatically by the "
            'linstor server.')
        p_new_vol.add_argument(
            '--storage-pool', '-s',
            type=str,
            help="Storage pool name to use.").completer = self.storage_pool_dfn_completer
        p_new_vol.add_argument('-n', '--vlmnr', type=int)
        p_new_vol.add_argument('-m', '--minor', type=int)
        p_new_vol.add_argument(
            '--encrypt',
            action="store_true",
            help="DEPRECATED - use --layer-list ...,LUKS,... instead (when creating resource /-definition)")
        p_new_vol.add_argument('--gross', action="store_true")
        p_new_vol.add_argument(
            'resource_name',
            type=str,
            help='Name of an existing resource').completer = self.resource_dfn_completer
        p_new_vol.add_argument(
            'size',
            help=VolumeDefinitionCommands.VOLUME_SIZE_HELP
        ).completer = VolumeDefinitionCommands.size_completer
        p_new_vol.set_defaults(func=self.create)

        # remove-volume definition
        p_rm_vol = vol_def_subp.add_parser(
            Commands.Subcommands.Delete.LONG,
            aliases=[Commands.Subcommands.Delete.SHORT],
            description='Removes a volume definition from the linstor cluster, and removes '
            'the volume definition from the resource definition. The volume is '
            'undeployed from all nodes and the volume entry is marked for removal '
            "from the resource definition in linstor's data tables. After all "
            'nodes have undeployed the volume, the volume entry is removed from '
            'the resource definition.')
        p_rm_vol.add_argument(
            '--async',
            action='store_true',
            help='Deprecated, kept for compatibility'
        )
        p_rm_vol.add_argument(
            'resource_name',
            help='Resource name of the volume definition'
        ).completer = self.resource_dfn_completer
        p_rm_vol.add_argument(
            'volume_nr',
            type=int,
            help="Volume number to delete.")
        p_rm_vol.set_defaults(func=self.delete)

        # list volume definitions
        vlm_dfn_groupby = [x.name.lower() for x in self._vlm_dfn_headers]
        vlm_dfn_group_completer = Commands.show_group_completer(vlm_dfn_groupby, "groupby")

        p_lvols = vol_def_subp.add_parser(
            Commands.Subcommands.List.LONG,
            aliases=[Commands.Subcommands.List.SHORT],
            description=' Prints a list of all volume definitions known to linstor. '
            'By default, the list is printed as a human readable table.')
        p_lvols.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output')
        p_lvols.add_argument(
            '-g', '--groupby',
            nargs='+',
            choices=vlm_dfn_groupby,
            type=str.lower).completer = vlm_dfn_group_completer
        p_lvols.add_argument(
            '-r', '--resource-definitions',
            nargs='+',
            type=str,
            help='Filter by list of resource definitions').completer = self.resource_dfn_completer
        p_lvols.add_argument('-e', '--external-name', action="store_true", help='Show user specified name.')
        p_lvols.set_defaults(func=self.list)

        # show properties
        p_sp = vol_def_subp.add_parser(
            Commands.Subcommands.ListProperties.LONG,
            aliases=[Commands.Subcommands.ListProperties.SHORT],
            description="Prints all properties of the given volume definition.")
        p_sp.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output')
        p_sp.add_argument(
            'resource_definition',
            help="Resource definition").completer = self.resource_dfn_completer
        p_sp.add_argument(
            'volume_nr',
            type=int,
            help="Volume number")
        p_sp.set_defaults(func=self.print_props)

        # set properties
        p_setprop = vol_def_subp.add_parser(
            Commands.Subcommands.SetProperty.LONG,
            aliases=[Commands.Subcommands.SetProperty.SHORT],
            formatter_class=argparse.RawTextHelpFormatter,
            description='Sets properties for the given volume definition.')
        p_setprop.add_argument(
            'resource_name',
            help="Resource name").completer = self.resource_dfn_completer
        p_setprop.add_argument(
            'volume_nr',
            type=int,
            help="Volume number")
        Commands.add_parser_keyvalue(p_setprop, "volume-definition")
        p_setprop.set_defaults(func=self.set_props)

        # drbd options
        p_drbd_opts = vol_def_subp.add_parser(
            Commands.Subcommands.DrbdOptions.LONG,
            aliases=[Commands.Subcommands.DrbdOptions.SHORT],
            description=DrbdOptions.description("volume")
        )
        p_drbd_opts.add_argument(
            'resource_name',
            type=str,
            help="Resource name"
        ).completer = self.resource_dfn_completer
        p_drbd_opts.add_argument(
            'volume_nr',
            type=int,
            help="Volume number"
        )
        DrbdOptions.add_arguments(p_drbd_opts, self.OBJECT_NAME)
        p_drbd_opts.set_defaults(func=self.set_drbd_opts)

        # set size
        p_set_size = vol_def_subp.add_parser(
            Commands.Subcommands.SetSize.LONG,
            aliases=[Commands.Subcommands.SetSize.SHORT],
            description='Change the size of a volume. '
            'Decreasing the size is only supported when the resource definition does not have any '
            'resources. '
            'Increasing the size is supported even when the resource definition has resources. '
            'Filesystems present on the volumes will not be resized.')
        p_set_size.add_argument(
            'resource_name',
            type=str,
            help='Name of an existing resource').completer = self.resource_dfn_completer
        p_set_size.add_argument(
            'volume_nr',
            type=int,
            help="Volume number"
        )
        p_set_size.add_argument(
            'size',
            help=VolumeDefinitionCommands.VOLUME_SIZE_HELP
        ).completer = VolumeDefinitionCommands.size_completer
        p_set_size.add_argument('--gross', action="store_true")
        p_set_size.set_defaults(func=self.set_volume_size)

        self.check_subcommands(vol_def_subp, subcmds)

    def create(self, args):
        """Create a volume definition; returns the handled API replies."""
        replies = self._linstor.volume_dfn_create(
            args.resource_name,
            Commands.parse_size_str(args.size),
            args.vlmnr,
            args.minor,
            args.encrypt,
            args.storage_pool,
            args.gross
        )
        return self.handle_replies(args, replies)

    def delete(self, args):
        """Delete one volume definition (``--async`` kept for compatibility)."""
        # 'async' is a keyword since py3.7, so it can't be an attribute name.
        async_flag = vars(args)["async"]
        replies = self._linstor.volume_dfn_delete(args.resource_name, args.volume_nr, async_flag)
        return self.handle_replies(args, replies)

    @classmethod
    def show(cls, args, lstmsg):
        """Render the volume-definition list response as a table."""
        tbl = linstor_client.Table(utf8=not args.no_utf8, colors=not args.no_color, pastable=args.pastable)
        vlm_dfn_hdrs = list(cls._vlm_dfn_headers)

        if args.external_name:
            vlm_dfn_hdrs.insert(1, linstor_client.TableHeader("External"))

        for hdr in vlm_dfn_hdrs:
            tbl.add_header(hdr)

        tbl.set_groupby(args.groupby if args.groupby else [tbl.header_name(0)])

        for rsc_dfn in lstmsg.resource_definitions:
            for vlmdfn in rsc_dfn.volume_definitions:
                # DELETING wins over resizing; default is a green "ok".
                state = tbl.color_cell("ok", Color.DARKGREEN)
                if FLAG_DELETE in vlmdfn.flags:
                    state = tbl.color_cell("DELETING", Color.RED)
                elif FLAG_RESIZE in vlmdfn.flags:
                    state = tbl.color_cell("resizing", Color.DARKPINK)

                drbd_data = vlmdfn.drbd_data
                tbl.add_row([
                    rsc_dfn.name,
                    vlmdfn.number,
                    drbd_data.minor if drbd_data else "",
                    SizeCalc.approximate_size_string(vlmdfn.size),
                    "+" if FLAG_GROSS_SIZE in vlmdfn.flags else "",
                    state
                ])
        tbl.show()

    def list(self, args):
        """List volume definitions, optionally filtered by resource definition."""
        lstmsg = self._linstor.resource_dfn_list(
            query_volume_definitions=True,
            filter_by_resource_definitions=args.resource_definitions
        )
        return self.output_list(args, lstmsg, self.show)

    @staticmethod
    def size_completer(prefix, **kwargs):
        """Shell completer for size arguments: completes the unit postfix.

        E.g. ``10G`` -> ``10GiB``, ``10GB``, ... Keeps the digits the user
        typed and appends every known unit matching the partial postfix.
        """
        choices = [unit_str for unit_str, _ in SizeCalc.UNITS_MAP.values()]
        m = re.match(r'(\d+)(\D*)', prefix)
        if m is None:
            # Prefix has no leading digits yet (e.g. empty token) -- the
            # original code crashed here with AttributeError on m.group().
            return []
        digits = m.group(1)
        unit = m.group(2)

        if unit:
            p_units = [x for x in choices if x.startswith(unit)]
        else:
            p_units = choices

        return [digits + u for u in p_units]

    @classmethod
    def _props_show(cls, args, lstmsg):
        """Extract the property map of the requested volume number.

        Only the first resource definition in the response is inspected;
        print_props() filters the query down to exactly one.
        """
        result = []
        if lstmsg and lstmsg.resource_definitions:
            for vlmdfn in lstmsg.resource_definitions[0].volume_definitions:
                if vlmdfn.number == args.volume_nr:
                    result.append(vlmdfn.properties)
                    break
        return result

    def print_props(self, args):
        """Print all properties of one volume definition."""
        lstmsg = self._linstor.resource_dfn_list(
            query_volume_definitions=True,
            filter_by_resource_definitions=[args.resource_definition]
        )
        return self.output_props_list(args, lstmsg, self._props_show)

    def set_props(self, args):
        """Set or delete a single property on a volume definition."""
        args = self._attach_aux_prop(args)
        mod_prop_dict = Commands.parse_key_value_pairs([(args.key, args.value)])
        replies = self._linstor.volume_dfn_modify(
            args.resource_name,
            args.volume_nr,
            set_properties=mod_prop_dict['pairs'],
            delete_properties=mod_prop_dict['delete']
        )
        return self.handle_replies(args, replies)

    def set_drbd_opts(self, args):
        """Apply DRBD option changes to a volume definition."""
        a = DrbdOptions.filter_new(args)
        del a['resource-name']  # remove resource name key
        del a['volume-nr']
        mod_props, del_props = DrbdOptions.parse_opts(a, self.OBJECT_NAME)

        replies = self._linstor.volume_dfn_modify(
            args.resource_name,
            args.volume_nr,
            set_properties=mod_props,
            delete_properties=del_props
        )
        return self.handle_replies(args, replies)

    def set_volume_size(self, args):
        """Resize a volume definition (grow always; shrink only if undeployed)."""
        replies = self._linstor.volume_dfn_modify(
            args.resource_name,
            args.volume_nr,
            size=self.parse_size_str(args.size),
            gross=args.gross
        )
        return self.handle_replies(args, replies)
class ResourceDefinitionCommands(Commands):
    """CLI subcommands operating on LINSTOR resource definitions.

    Registers the ``resource-definition`` (alias ``rd``) argparse subtree and
    implements the handlers it dispatches to (create/auto-place/modify/list/
    delete/clone/wait-sync/properties/drbd-options).
    """

    # Property-namespace object name used by DrbdOptions for this entity.
    OBJECT_NAME = 'resource-definition'

    # Column headers for the `resource-definition list` table, in display order.
    _rsc_dfn_headers = [
        linstor_client.TableHeader("ResourceName"),
        linstor_client.TableHeader("Port"),
        linstor_client.TableHeader("ResourceGroup"),
        linstor_client.TableHeader("State", color=Color.DARKGREEN)
    ]

    def __init__(self):
        super(ResourceDefinitionCommands, self).__init__()

    def setup_commands(self, parser):
        """Register the resource-definition subparser tree on *parser*."""
        subcmds = [
            Commands.Subcommands.Create,
            Commands.Subcommands.AutoPlace,
            Commands.Subcommands.Modify,
            Commands.Subcommands.List,
            Commands.Subcommands.Delete,
            Commands.Subcommands.SetProperty,
            Commands.Subcommands.ListProperties,
            Commands.Subcommands.DrbdOptions,
            Commands.Subcommands.Clone,
            Commands.Subcommands.WaitSync,
        ]

        # Resource definition subcommands
        res_def_parser = parser.add_parser(
            Commands.RESOURCE_DEF,
            aliases=["rd"],
            formatter_class=argparse.RawTextHelpFormatter,
            description="Resource definition subcommands")

        res_def_subp = res_def_parser.add_subparsers(
            title="resource definition subcommands",
            metavar="",
            description=Commands.Subcommands.generate_desc(subcmds))

        # create resource definition
        p_new_res_dfn = res_def_subp.add_parser(
            Commands.Subcommands.Create.LONG,
            aliases=[Commands.Subcommands.Create.SHORT],
            description='Defines a Linstor resource definition for use with linstor.')
        p_new_res_dfn.add_argument('-p', '--port', type=rangecheck(1, 65535))
        p_new_res_dfn.add_argument('-e', '--external-name', type=str, help='User specified name.')
        # p_new_res_dfn.add_argument('-s', '--secret', type=str)
        p_new_res_dfn.add_argument(
            '-l', '--layer-list',
            type=self.layer_data_check,
            help="Comma separated layer list, order is from right to left. "
            "This means the top most layer is on the left. "
            "Possible layers are: " + ",".join(linstor.Linstor.layer_list()))
        p_new_res_dfn.add_argument('--peer-slots', type=rangecheck(1, 31),
                                   help='(DRBD) peer slots for new resources')
        p_new_res_dfn.add_argument(
            '--resource-group',
            help="Attach the resource definition to this resource group"
        ).completer = self.resource_grp_completer
        p_new_res_dfn.add_argument(
            'name',
            nargs="?",
            type=str,
            help='Name of the new resource definition. Will be ignored if EXTERNAL_NAME is set.'
        )
        p_new_res_dfn.set_defaults(func=self.create)

        # auto place
        p_auto_place = res_def_subp.add_parser(
            Commands.Subcommands.AutoPlace.LONG,
            aliases=[Commands.Subcommands.AutoPlace.SHORT],
            description='Auto place a resource definition')
        self.add_auto_select_argparse_arguments(p_auto_place, use_place_count=True)
        p_auto_place.add_argument(
            'resource_definition_name',
            help='Name of the resource definition to auto place')
        p_auto_place.add_argument('--nvme-initiator', action="store_true",
                                  help='Mark this resource as initiator')
        p_auto_place.add_argument('--drbd-diskless', action="store_true",
                                  help='Mark this resource as drbd diskless')
        p_auto_place.set_defaults(func=self.auto_place)

        # modify-resource definition
        p_mod_res_dfn = res_def_subp.add_parser(
            Commands.Subcommands.Modify.LONG,
            aliases=[Commands.Subcommands.Modify.SHORT],
            description='Modifies a Linstor resource definition')
        p_mod_res_dfn.add_argument('--peer-slots', type=rangecheck(1, 31),
                                   help='(DRBD) peer slots for new resources')
        p_mod_res_dfn.add_argument(
            '--resource-group',
            help='Change resource group to the given one.'
        ).completer = self.resource_grp_completer
        p_mod_res_dfn.add_argument(
            'name',
            help='Name of the resource definition'
        ).completer = self.resource_dfn_completer
        p_mod_res_dfn.set_defaults(func=self.modify)

        # remove-resource definition
        p_rm_res_dfn = res_def_subp.add_parser(
            Commands.Subcommands.Delete.LONG,
            aliases=[Commands.Subcommands.Delete.SHORT],
            description=" Removes a resource definition "
            "from the linstor cluster. The resource is undeployed from all nodes "
            "and the resource entry is marked for removal from linstor's data "
            "tables. After all nodes have undeployed the resource, the resource "
            "entry is removed from linstor's data tables.")
        p_rm_res_dfn.add_argument('--async', action='store_true',
                                  help='Deprecated, kept for compatibility')
        p_rm_res_dfn.add_argument(
            'name',
            nargs="+",
            help='Name of the resource to delete'
        ).completer = self.resource_dfn_completer
        p_rm_res_dfn.set_defaults(func=self.delete)

        rsc_dfn_groupby = [x.name.lower() for x in self._rsc_dfn_headers]
        rsc_dfn_group_completer = Commands.show_group_completer(rsc_dfn_groupby, "groupby")

        # clone
        p_clone_rscdfn = res_def_subp.add_parser(
            Commands.Subcommands.Clone.LONG,
            aliases=[Commands.Subcommands.Clone.SHORT],
            description="Clones a resource definition with all resources and volumes(including data).")
        p_clone_rscdfn.add_argument('-e', '--external-name', type=str, help='User specified name.')
        # NOTE: help text fixed -- the flag suppresses waiting, it does not enable it.
        p_clone_rscdfn.add_argument('--no-wait', action="store_true",
                                    help="Do not wait till cloning is done.")
        p_clone_rscdfn.add_argument(
            '--wait-timeout',
            type=int,
            help="Wait this seconds for the clone to finish.")
        p_clone_rscdfn.add_argument(
            '--use-zfs-clone',
            action="store_true",
            default=None,
            help="Use ZFS clone instead send/recv, but have a dependent snapshot")
        p_clone_rscdfn.add_argument(
            'source_resource',
            help="Source resource definition name"
        ).completer = self.resource_dfn_completer
        p_clone_rscdfn.add_argument(
            'clone_name',
            nargs="?",
            type=str,
            help='Name of the new resource definition. '
                 'Will be ignored if EXTERNAL_NAME is set.')
        p_clone_rscdfn.set_defaults(func=self.clone)

        # wait-sync
        p_wait_sync = res_def_subp.add_parser(
            Commands.Subcommands.WaitSync.LONG,
            aliases=[Commands.Subcommands.WaitSync.SHORT],
            description="Wait till the given resource is synced or e.g. ready to be resized.")
        # NOTE: help text fixed -- this timeout applies to the sync, not a clone.
        p_wait_sync.add_argument(
            '--wait-timeout',
            type=int,
            help="Wait this seconds for the sync to finish.")
        p_wait_sync.add_argument(
            "resource_name",
            help="Resource name to be checked."
        ).completer = self.resource_dfn_completer
        p_wait_sync.set_defaults(func=self.wait_sync)

        # list resource definitions
        p_lrscdfs = res_def_subp.add_parser(
            Commands.Subcommands.List.LONG,
            aliases=[Commands.Subcommands.List.SHORT],
            description='Prints a list of all resource definitions known to '
            'linstor. By default, the list is printed as a human readable table.')
        p_lrscdfs.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output')
        p_lrscdfs.add_argument(
            '-g', '--groupby',
            nargs='+',
            choices=rsc_dfn_groupby,
            type=str.lower).completer = rsc_dfn_group_completer
        p_lrscdfs.add_argument(
            '-r', '--resource-definitions',
            nargs='+',
            type=str,
            help='Filter by list of resource definitions'
        ).completer = self.resource_dfn_completer
        p_lrscdfs.add_argument('-e', '--external-name', action="store_true", help='Show user specified name.')
        p_lrscdfs.add_argument('--props', nargs='+', type=str, help='Filter list by object properties')
        p_lrscdfs.set_defaults(func=self.list)

        # show properties
        p_sp = res_def_subp.add_parser(
            Commands.Subcommands.ListProperties.LONG,
            aliases=[Commands.Subcommands.ListProperties.SHORT],
            description="Prints all properties of the given resource definitions.")
        p_sp.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output')
        p_sp.add_argument(
            'resource_name',
            help="Resource definition for which to print the properties"
        ).completer = self.resource_dfn_completer
        p_sp.set_defaults(func=self.print_props)

        # set properties
        p_setprop = res_def_subp.add_parser(
            Commands.Subcommands.SetProperty.LONG,
            aliases=[Commands.Subcommands.SetProperty.SHORT],
            formatter_class=argparse.RawTextHelpFormatter,
            description='Sets properties for the given resource definition.')
        p_setprop.add_argument('name', type=str, help='Name of the resource definition')
        Commands.add_parser_keyvalue(p_setprop, 'resource-definition')
        p_setprop.set_defaults(func=self.set_props)

        # drbd options
        p_drbd_opts = res_def_subp.add_parser(
            Commands.Subcommands.DrbdOptions.LONG,
            aliases=[Commands.Subcommands.DrbdOptions.SHORT],
            description=DrbdOptions.description("resource"))
        p_drbd_opts.add_argument(
            'resource_name',
            type=str,
            help="Resource name").completer = self.resource_dfn_completer
        DrbdOptions.add_arguments(p_drbd_opts, self.OBJECT_NAME)
        p_drbd_opts.set_defaults(func=self.set_drbd_opts)

        self.check_subcommands(res_def_subp, subcmds)

    def create(self, args):
        """Create a resource definition; requires a name or an external name."""
        if not args.name and not args.external_name:
            raise ArgumentError("ArgumentError: At least resource name or external name has to be specified.")
        replies = self._linstor.resource_dfn_create(
            args.name,
            args.port,
            external_name=args.external_name
            if not isinstance(args.external_name, bytes) else args.external_name.decode('utf-8'),  # py2-3
            layer_list=args.layer_list,
            resource_group=args.resource_group)
        return self.handle_replies(args, replies)

    def auto_place(self, args):
        """Auto-place an existing resource definition on suitable nodes."""
        place_count, additional_place_count, diskless_type = self.parse_place_count_args(
            args, use_place_count=True)

        replies = self.get_linstorapi().resource_auto_place(
            rsc_name=args.resource_definition_name,
            place_count=place_count,
            storage_pool=args.storage_pool,
            do_not_place_with=args.do_not_place_with,
            do_not_place_with_regex=args.do_not_place_with_regex,
            replicas_on_same=self.prepare_argparse_list(
                args.replicas_on_same, linstor.consts.NAMESPC_AUXILIARY + '/'),
            replicas_on_different=self.prepare_argparse_list(
                args.replicas_on_different, linstor.consts.NAMESPC_AUXILIARY + '/'),
            diskless_on_remaining=self.parse_diskless_on_remaining(args),
            layer_list=args.layer_list,
            provider_list=args.providers,
            additional_place_count=additional_place_count,
            diskless_type=diskless_type)
        return self.handle_replies(args, replies)

    def clone(self, args):
        """Clone a resource definition; by default waits for completion."""
        clone_resp = self.get_linstorapi().resource_dfn_clone(
            args.source_resource,
            args.clone_name,
            args.external_name,
            use_zfs_clone=args.use_zfs_clone)
        rc = self.handle_replies(args, clone_resp.messages)
        if rc == ExitCode.OK and not args.no_wait:
            if not args.machine_readable:
                print("Waiting for cloning to complete...")
            try:
                res = self.get_linstorapi().resource_dfn_clone_wait_complete(
                    clone_resp.source_name, clone_resp.clone_name, timeout=args.wait_timeout)
                if not res:
                    rc = ExitCode.API_ERROR
                if not args.machine_readable:
                    if res:
                        print("{msg} cloning {c}.".format(
                            c=clone_resp.clone_name,
                            msg=Output.color_str("Completed", Color.GREEN, args.no_color)))
                    else:
                        print("{msg} cloning {c}, please check resource status or satellite errors."
                              .format(c=clone_resp.clone_name,
                                      msg=Output.color_str("Failed", Color.RED, args.no_color)))
            except linstor.LinstorApiCallError as e:
                rc = ExitCode.API_ERROR
                # NOTE(review): positional args here differ from the keyword style
                # used elsewhere (warn_as_error=..., no_color=...) -- verify against
                # Output.handle_ret's signature.
                Output.handle_ret(e.main_error, args.no_color, False, sys.stderr)
        return rc

    def wait_sync(self, args):
        """Block until the resource is synced; raises on timeout/error."""
        # this method either returns True or raises
        self.get_linstorapi().resource_dfn_wait_synced(args.resource_name, timeout=args.wait_timeout)
        return ExitCode.OK

    def modify(self, args):
        """Modify peer slots and/or resource group of a resource definition."""
        replies = self._linstor.resource_dfn_modify(
            args.name,
            {},
            [],
            args.peer_slots,
            resource_group=args.resource_group)
        return self.handle_replies(args, replies)

    def delete(self, args):
        """Delete one or more resource definitions."""
        # 'async' is a keyword since py3.7, so it can't be an attribute name.
        async_flag = vars(args)["async"]

        # execute delete rscdfns and flatten result list
        replies = [x for subx in args.name for x in self._linstor.resource_dfn_delete(subx, async_flag)]
        return self.handle_replies(args, replies)

    @classmethod
    def show(cls, args, lstmsg):
        """Render the resource-definition list response as a table."""
        tbl = linstor_client.Table(utf8=not args.no_utf8, colors=not args.no_color, pastable=args.pastable)
        rsc_dfn_hdr = list(cls._rsc_dfn_headers)

        if args.external_name:
            rsc_dfn_hdr.insert(1, linstor_client.TableHeader("External"))

        for hdr in rsc_dfn_hdr:
            tbl.add_header(hdr)

        tbl.set_groupby(args.groupby if args.groupby else [tbl.header_name(0)])
        for rsc_dfn in lstmsg.resource_definitions:
            drbd_data = rsc_dfn.drbd_data
            row = [rsc_dfn.name]
            if args.external_name:
                # Always append a cell when the "External" column exists, so
                # rows stay aligned with the header even without a usable name
                # (previously such rows came out one column short).
                row.append(rsc_dfn.external_name if isinstance(rsc_dfn.external_name, str) else "")
            row.append(drbd_data.port if drbd_data else "")
            row.append(rsc_dfn.resource_group_name)
            row.append(tbl.color_cell("DELETING", Color.RED)
                       if FLAG_DELETE in rsc_dfn.flags else tbl.color_cell("ok", Color.DARKGREEN))
            tbl.add_row(row)
        tbl.show()

    def list(self, args):
        """List resource definitions with optional name/property filters."""
        lstmsg = self._linstor.resource_dfn_list(
            query_volume_definitions=False,
            filter_by_resource_definitions=args.resource_definitions,
            filter_by_props=args.props)
        return self.output_list(args, lstmsg, self.show)

    @classmethod
    def _props_show(cls, args, lstmsg):
        """Collect the property maps of all resource definitions in *lstmsg*."""
        result = []
        if lstmsg:
            for rsc_dfn in lstmsg.resource_definitions:
                result.append(rsc_dfn.properties)
        return result

    def print_props(self, args):
        """Print all properties of one resource definition."""
        lstmsg = self._linstor.resource_dfn_list(
            query_volume_definitions=False,
            filter_by_resource_definitions=[args.resource_name])
        return self.output_props_list(args, lstmsg, self._props_show)

    def set_props(self, args):
        """Set or delete a single property on a resource definition."""
        args = self._attach_aux_prop(args)
        mod_prop_dict = Commands.parse_key_value_pairs([(args.key, args.value)])
        replies = self._linstor.resource_dfn_modify(
            args.name,
            mod_prop_dict['pairs'],
            mod_prop_dict['delete'])
        return self.handle_replies(args, replies)

    def set_drbd_opts(self, args):
        """Apply DRBD option changes to a resource definition."""
        a = DrbdOptions.filter_new(args)
        del a['resource-name']  # remove resource name key

        mod_props, del_props = DrbdOptions.parse_opts(a, self.OBJECT_NAME)

        replies = self._linstor.resource_dfn_modify(
            args.resource_name,
            mod_props,
            del_props)
        return self.handle_replies(args, replies)
class AdviceCommands(Commands):
    """CLI commands that analyze cluster state and suggest fixes ('linstor advise ...')."""

    _issue_headers = [
        linstor_client.TableHeader("Resource"),
        linstor_client.TableHeader("Issue"),
        linstor_client.TableHeader("Possible fix"),
    ]

    class Maintenance(object):
        # subcommand descriptor for 'advise maintenance'
        LONG = "maintenance"
        SHORT = "m"

    def __init__(self):
        super(AdviceCommands, self).__init__()

    def setup_commands(self, parser):
        """Register the 'advise' command group and its subcommands on the given parser."""
        # Node subcommands
        subcmds = [
            Commands.Subcommands.Resource,
            AdviceCommands.Maintenance,
        ]

        advise_parser = parser.add_parser(
            Commands.ADVISE,
            aliases=["adv"],
            formatter_class=argparse.RawTextHelpFormatter,
            description="Advise subcommands")
        advise_subp = advise_parser.add_subparsers(
            title="Advise commands",
            metavar="",
            description=Commands.Subcommands.generate_desc(subcmds))

        p_resource = advise_subp.add_parser(
            Commands.Subcommands.Resource.LONG,
            aliases=[Commands.Subcommands.Resource.SHORT],
            description='Points out potential issues with the currently deployed resources.')
        p_resource.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output')
        p_resource.add_argument(
            '-r', '--resources',
            nargs='+',
            type=str,
            help='Filter by list of resources').completer = self.resource_completer
        p_resource.set_defaults(func=self.resource)

        p_maintenace = advise_subp.add_parser(
            AdviceCommands.Maintenance.LONG,
            aliases=[AdviceCommands.Maintenance.SHORT],
            description='Points out potential issues should a node go down for maintenance.')
        p_maintenace.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output')
        p_maintenace.add_argument(
            '-r', '--resources',
            nargs='+',
            type=str,
            help='Filter by list of resources').completer = self.resource_completer
        p_maintenace.add_argument(
            'node',
            type=str,
            help='The node to check').completer = self.node_completer
        p_maintenace.set_defaults(func=self.maintenance)

        self.check_subcommands(advise_subp, subcmds)

    def resource(self, args):
        """Check deployed resources for issues (needless diskless, missing replicas).

        Prints a table, or JSON when --machine-readable is given.
        NOTE(review): an unknown name in --resources raises KeyError here — verify
        whether upstream argument completion/validation guarantees existence.
        """
        state = self._state_of_the_world()

        found_issues = []
        rsc_to_check = args.resources or state.resource_definitions.keys()
        for rsc_name in rsc_to_check:
            r_def = state.resource_definitions[rsc_name]
            # fall back to the default resource group when none is set
            rg = state.resource_groups[r_def.resource_group_name or "DfltRscGrp"]
            # restrict the (node, rsc) keyed maps to this resource, keyed by node
            r_deployed = {node: r for (node, name), r in state.resources.items() if name == rsc_name}
            r_states = {node: r for (node, name), r in state.resource_states.items() if name == rsc_name}
            found_issues.extend(
                _check_needless_diskless(r_def, r_deployed, r_states, rg, state.storage_pools))
            found_issues.extend(
                _check_expected_replicas(r_def, r_deployed, r_states, rg, state.storage_pools))

        if args.machine_readable:
            json.dump([issue.__dict__ for issue in found_issues], sys.stdout)
        else:
            tbl = linstor_client.Table(utf8=not args.no_utf8, colors=not args.no_color, pastable=args.pastable)
            tbl.add_headers(AdviceCommands._issue_headers)
            for issue in found_issues:
                tbl.add_row([issue.resource, issue.what, issue.fix])
            tbl.show()

    def maintenance(self, args):
        """Check which resources would be degraded if the given node went down.

        Flags resources whose only diskful replica is on that node and resources
        with fewer than 3 deployments (potential split brain without a tiebreaker).
        """
        state = self._state_of_the_world()

        found_issues = []
        rsc_to_check = args.resources or state.resource_definitions.keys()
        for rsc_name in rsc_to_check:
            if (args.node, rsc_name) not in state.resources:
                # Resource not deployed on node
                continue

            diskfull_nodes = [
                node for (node, name), rsc in state.resources.items()
                if name == rsc_name and apiconsts.FLAG_DISKLESS not in rsc.flags
            ]
            if diskfull_nodes == [args.node]:
                # the node under maintenance holds the only diskful replica
                found_issues.append(
                    _Issue(resource=rsc_name,
                           what=_Issue.WHAT_SINGLE_REPLICA,
                           fix=_Issue.FIX_AUTOPLACE.format(rsc=rsc_name, count=2)))
                continue

            all_deployed = [
                node for (node, name), rsc in state.resources.items()
                if name == rsc_name
            ]
            if len(all_deployed) < 3:
                # TODO: include quorum information: if explicit quorum is set, this advise is likely wrong
                # Also, in case the node to check is diskless, we may need to advise to add another diskfull replica.
                found_issues.append(
                    _Issue(
                        resource=rsc_name,
                        what=_Issue.WHAT_POTENTIAL_SPLIT_BRAIN,
                        fix=_Issue.FIX_AUTOPLACE_TIEBREAKER.format(rsc=rsc_name),
                    ))

        if args.machine_readable:
            json.dump([issue.__dict__ for issue in found_issues], sys.stdout)
        else:
            tbl = linstor_client.Table(utf8=not args.no_utf8, colors=not args.no_color, pastable=args.pastable)
            tbl.add_headers(AdviceCommands._issue_headers)
            for issue in found_issues:
                tbl.add_row([issue.resource, issue.what, issue.fix])
            tbl.show()

    def _state_of_the_world(self):
        """
        :return: Current state of all relevant resources we can give advise on.
        :rtype: StateOfTheWorld
        """
        node_resp = self.get_linstorapi().node_list_raise()
        rg_resp = self.get_linstorapi().resource_group_list_raise()
        rd_resp = self.get_linstorapi().resource_dfn_list_raise()
        r_resp = self.get_linstorapi().resource_list_raise()
        sp_resp = self.get_linstorapi().storage_pool_list_raise()

        return StateOfTheWorld(
            {n.name: n for n in node_resp.nodes},
            {(sp.node_name, sp.name): sp for sp in sp_resp.storage_pools},
            {rg.name: rg for rg in rg_resp.resource_groups},
            {rd.name: rd for rd in rd_resp.resource_definitions},
            {(r.node_name, r.name): r for r in r_resp.resources},
            {(r.node_name, r.name): r for r in r_resp.resource_states},
        )
class ResourceDefinitionCommands(Commands):
    """CLI commands for managing resource definitions ('linstor resource-definition ...')."""

    _rsc_dfn_headers = [
        linstor_client.TableHeader("ResourceName"),
        linstor_client.TableHeader("Port"),
        linstor_client.TableHeader("State", color=Color.DARKGREEN)
    ]

    def __init__(self):
        super(ResourceDefinitionCommands, self).__init__()

    def setup_commands(self, parser):
        """Register the 'resource-definition' command group and its subcommands."""
        subcmds = [
            Commands.Subcommands.Create,
            Commands.Subcommands.List,
            Commands.Subcommands.Delete,
            Commands.Subcommands.SetProperty,
            Commands.Subcommands.ListProperties,
            Commands.Subcommands.DrbdOptions
        ]

        # Resource subcommands
        res_def_parser = parser.add_parser(
            Commands.RESOURCE_DEF,
            aliases=["rd"],
            formatter_class=argparse.RawTextHelpFormatter,
            description="Resource definition subcommands")

        res_def_subp = res_def_parser.add_subparsers(
            title="resource definition subcommands",
            metavar="",
            description=Commands.Subcommands.generate_desc(subcmds)
        )

        p_new_res_dfn = res_def_subp.add_parser(
            Commands.Subcommands.Create.LONG,
            aliases=[Commands.Subcommands.Create.SHORT],
            description='Defines a Linstor resource definition for use with linstor.')
        p_new_res_dfn.add_argument('-p', '--port', type=rangecheck(1, 65535))
        # p_new_res_dfn.add_argument('-s', '--secret', type=str)
        p_new_res_dfn.add_argument('name', type=namecheck(RES_NAME), help='Name of the new resource definition')
        p_new_res_dfn.set_defaults(func=self.create)

        # remove-resource definition
        # TODO description
        p_rm_res_dfn = res_def_subp.add_parser(
            Commands.Subcommands.Delete.LONG,
            aliases=[Commands.Subcommands.Delete.SHORT],
            description=" Removes a resource definition "
            "from the linstor cluster. The resource is undeployed from all nodes "
            "and the resource entry is marked for removal from linstor's data "
            "tables. After all nodes have undeployed the resource, the resource "
            "entry is removed from linstor's data tables.")
        p_rm_res_dfn.add_argument('-q', '--quiet', action="store_true",
                                  help='Unless this option is used, linstor will issue a safety question '
                                  'that must be answered with yes, otherwise the operation is canceled.')
        p_rm_res_dfn.add_argument(
            'name',
            nargs="+",
            help='Name of the resource to delete').completer = self.resource_dfn_completer
        p_rm_res_dfn.set_defaults(func=self.delete)

        rsc_dfn_groupby = [x.name for x in self._rsc_dfn_headers]
        rsc_dfn_group_completer = Commands.show_group_completer(rsc_dfn_groupby, "groupby")

        p_lrscdfs = res_def_subp.add_parser(
            Commands.Subcommands.List.LONG,
            aliases=[Commands.Subcommands.List.SHORT],
            description='Prints a list of all resource definitions known to '
            'linstor. By default, the list is printed as a human readable table.')
        p_lrscdfs.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output')
        p_lrscdfs.add_argument('-g', '--groupby', nargs='+',
                               choices=rsc_dfn_groupby).completer = rsc_dfn_group_completer
        p_lrscdfs.add_argument('-R', '--resources', nargs='+', type=namecheck(RES_NAME),
                               help='Filter by list of resources').completer = self.resource_dfn_completer
        p_lrscdfs.set_defaults(func=self.list)

        # show properties
        p_sp = res_def_subp.add_parser(
            Commands.Subcommands.ListProperties.LONG,
            aliases=[Commands.Subcommands.ListProperties.SHORT],
            description="Prints all properties of the given resource definitions.")
        p_sp.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output')
        p_sp.add_argument(
            'resource_name',
            help="Resource definition for which to print the properties"
        ).completer = self.resource_dfn_completer
        p_sp.set_defaults(func=self.print_props)

        # set properties
        p_setprop = res_def_subp.add_parser(
            Commands.Subcommands.SetProperty.LONG,
            aliases=[Commands.Subcommands.SetProperty.SHORT],
            description='Sets properties for the given resource definition.')
        p_setprop.add_argument('name', type=namecheck(RES_NAME), help='Name of the resource definition')
        Commands.add_parser_keyvalue(p_setprop, 'resource-definition')
        p_setprop.set_defaults(func=self.set_props)

        # drbd options
        p_drbd_opts = res_def_subp.add_parser(
            Commands.Subcommands.DrbdOptions.LONG,
            aliases=[Commands.Subcommands.DrbdOptions.SHORT],
            description="Set drbd resource options."
        )
        p_drbd_opts.add_argument(
            'resource_name',
            type=namecheck(RES_NAME),
            help="Resource name"
        ).completer = self.resource_dfn_completer
        DrbdOptions.add_arguments(
            p_drbd_opts,
            [x for x in DrbdOptions.drbd_options()['options']
             if x in DrbdOptions.drbd_options()['filters']['resource']]
        )
        p_drbd_opts.set_defaults(func=self.set_drbd_opts)

        self.check_subcommands(res_def_subp, subcmds)

    def create(self, args):
        """Create a new resource definition."""
        replies = self._linstor.resource_dfn_create(args.name, args.port)
        return self.handle_replies(args, replies)

    def delete(self, args):
        """Delete one or more resource definitions."""
        # execute delete rscdfns and flatten result list
        replies = [x for subx in args.name for x in self._linstor.resource_dfn_delete(subx)]
        return self.handle_replies(args, replies)

    @classmethod
    def show(cls, args, lstmsg):
        """Render a resource definition list message as a table."""
        tbl = linstor_client.Table(utf8=not args.no_utf8, colors=not args.no_color, pastable=args.pastable)
        for hdr in cls._rsc_dfn_headers:
            tbl.add_header(hdr)

        tbl.set_groupby(args.groupby if args.groupby else [tbl.header_name(0)])

        for rsc_dfn in cls.filter_rsc_dfn_list(lstmsg.rsc_dfns, args.resources):
            tbl.add_row([
                rsc_dfn.rsc_name,
                rsc_dfn.rsc_dfn_port,
                tbl.color_cell("DELETING", Color.RED)
                if FLAG_DELETE in rsc_dfn.rsc_dfn_flags else tbl.color_cell("ok", Color.DARKGREEN)
            ])
        tbl.show()

    def list(self, args):
        """List all resource definitions."""
        lstmsg = self._linstor.resource_dfn_list()
        return self.output_list(args, lstmsg, self.show)

    @classmethod
    def _props_list(cls, args, lstmsg):
        """Return the property container of the resource definition named in args."""
        result = []
        if lstmsg:
            for rsc_dfn in lstmsg.rsc_dfns:
                if rsc_dfn.rsc_name == args.resource_name:
                    result.append(rsc_dfn.rsc_dfn_props)
                    break
        return result

    def print_props(self, args):
        """Print the properties of a single resource definition."""
        lstmsg = self._linstor.resource_dfn_list()
        return self.output_props_list(args, lstmsg, self._props_list)

    def set_props(self, args):
        """Set or delete a single property on a resource definition."""
        args = self._attach_aux_prop(args)
        mod_prop_dict = Commands.parse_key_value_pairs([args.key + '=' + args.value])
        replies = self._linstor.resource_dfn_modify(args.name, mod_prop_dict['pairs'], mod_prop_dict['delete'])
        return self.handle_replies(args, replies)

    def set_drbd_opts(self, args):
        """Apply DRBD resource options to a resource definition."""
        a = DrbdOptions.filter_new(args)
        del a['resource-name']  # remove resource name key
        mod_props, del_props = DrbdOptions.parse_opts(a)

        replies = self._linstor.resource_dfn_modify(
            args.resource_name,
            mod_props,
            del_props
        )
        return self.handle_replies(args, replies)
class FileCommands(Commands):
    """CLI commands for managing cluster files ('linstor file ...')."""

    _file_headers = [
        linstor_client.TableHeader("Path"),
    ]

    def __init__(self):
        super(FileCommands, self).__init__()

    def setup_commands(self, parser):
        """Register the 'file' command group and its subcommands on the given parser."""
        subcmds = [
            Commands.Subcommands.List,
            Commands.Subcommands.Show,
            Commands.Subcommands.Modify,
            Commands.Subcommands.Delete,
            Commands.Subcommands.Deploy,
            Commands.Subcommands.Undeploy,
        ]

        # Resource subcommands
        file_parser = parser.add_parser(
            Commands.FILE,
            aliases=["f"],
            formatter_class=argparse.RawTextHelpFormatter,
            description="File subcommands")
        file_subp = file_parser.add_subparsers(
            title="file commands",
            metavar="",
            description=Commands.Subcommands.generate_desc(subcmds))

        p_file_list = file_subp.add_parser(
            Commands.Subcommands.List.LONG,
            aliases=[Commands.Subcommands.List.SHORT],
            description='Lists all files in the cluster')
        p_file_list.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output')
        p_file_list.set_defaults(func=self.list)

        p_file_show = file_subp.add_parser(
            Commands.Subcommands.Show.LONG,
            aliases=[Commands.Subcommands.Show.SHORT],
            description='Show a single file, including its content')
        p_file_show.add_argument('file_name', type=str, help='Name of the file to show')
        p_file_show.set_defaults(func=self.show)

        p_file_modify = file_subp.add_parser(
            Commands.Subcommands.Modify.LONG,
            aliases=[Commands.Subcommands.Modify.SHORT],
            description='Modify the contents of a file')
        p_file_modify.add_argument('file_name', type=str, help='Name of the file to modify')
        p_file_modify.set_defaults(func=self.modify)

        p_file_delete = file_subp.add_parser(
            Commands.Subcommands.Delete.LONG,
            aliases=[Commands.Subcommands.Delete.SHORT],
            description='Delete a file')
        p_file_delete.add_argument('file_name', type=str, help='Name of the file to delete')
        p_file_delete.set_defaults(func=self.delete)

        p_file_deploy = file_subp.add_parser(
            Commands.Subcommands.Deploy.LONG,
            aliases=[Commands.Subcommands.Deploy.SHORT],
            description='Deploy a file with a resource definition')
        p_file_deploy.add_argument('file_name', type=str, help='Name of the file to deploy')
        p_file_deploy.add_argument(
            'resource_name',
            type=str,
            help='Name of the resource definition to deploy the file with')
        p_file_deploy.set_defaults(func=self.deploy)

        p_file_undeploy = file_subp.add_parser(
            Commands.Subcommands.Undeploy.LONG,
            aliases=[Commands.Subcommands.Undeploy.SHORT],
            description='Undeploy a file with a resource definition')
        p_file_undeploy.add_argument('file_name', type=str, help='Name of the file to undeploy')
        p_file_undeploy.add_argument(
            'resource_name',
            type=str,
            help='Name of the resource definition to undeploy the file with')
        p_file_undeploy.set_defaults(func=self.undeploy)

        self.check_subcommands(file_subp, subcmds)

    def list(self, args):
        """List all files known to the cluster."""
        lstmsg = self._linstor.file_list()
        return self.output_list(args, lstmsg, self.show_table)

    def show(self, args):
        """Print the decoded content of a single file to stdout (content is stored base64 encoded)."""
        showmsg = self._linstor.file_show(args.file_name)
        print(base64.b64decode(showmsg.files.content).decode(), end="")

    def modify(self, args):
        """Modify the content of a file.

        Interactive terminals open $EDITOR (default vim) on the current content;
        otherwise the new content is read from stdin.
        """
        if sys.stdin.isatty():
            editor = os.environ.get('EDITOR', 'vim')
            try:
                showmsg = self._linstor.file_show(args.file_name)
                initial_content = base64.b64decode(showmsg.files.content).decode()
            except linstor.LinstorApiCallError:
                # file does not exist yet
                initial_content = ""
            with tempfile.NamedTemporaryFile(suffix=".tmp") as tf:
                tf.write(initial_content.encode())
                tf.flush()
                call([editor, tf.name])
                # NOTE(review): this assumes the editor saves in place; an editor that
                # writes a new file and renames it over tf.name would leave this handle
                # pointing at the old content — verify with the supported editors.
                tf.seek(0)
                input_str = tf.read()
        else:
            input_str = sys.stdin.read().encode()
        replies = self._linstor.file_modify(args.file_name, input_str)
        # BUGFIX: propagate the exit code (was dropped, so failures exited with 0)
        return self.handle_replies(args, replies)

    def delete(self, args):
        """Delete a file from the cluster."""
        replies = self._linstor.file_delete(args.file_name)
        # BUGFIX: propagate the exit code (was dropped, so failures exited with 0)
        return self.handle_replies(args, replies)

    def deploy(self, args):
        """Deploy a file with a resource definition."""
        replies = self._linstor.file_deploy(args.file_name, args.resource_name)
        # BUGFIX: propagate the exit code (was dropped, so failures exited with 0)
        return self.handle_replies(args, replies)

    def undeploy(self, args):
        """Undeploy a file from a resource definition."""
        replies = self._linstor.file_undeploy(args.file_name, args.resource_name)
        # BUGFIX: propagate the exit code (was dropped, so failures exited with 0)
        return self.handle_replies(args, replies)

    def show_table(self, args, lstmsg):
        """Render the file list message as a single-column table of paths."""
        tbl = linstor_client.Table(utf8=not args.no_utf8, colors=not args.no_color, pastable=args.pastable)
        for hdr in FileCommands._file_headers:
            tbl.add_header(hdr)
        for file_entry in lstmsg.files:  # renamed from 'file' to avoid shadowing the builtin
            tbl.add_row([file_entry.path])
        tbl.show()
    def show_nodes(cls, args, lstmsg):
        """Render the node list message as a table, including auto-eviction hints.

        :param args: parsed CLI arguments (output/format flags, show_aux_props, groupby).
        :param lstmsg: controller response carrying nodes.
        """
        tbl = linstor_client.Table(utf8=not args.no_utf8, colors=not args.no_color, pastable=args.pastable)
        node_hdr = list(cls._node_headers)

        # optional column, inserted before the trailing "State" column
        if args.show_aux_props:
            node_hdr.insert(-1, linstor_client.TableHeader("AuxProps"))

        for hdr in node_hdr:
            tbl.add_header(hdr)

        # connection status -> (display text, color)
        conn_stat_dict = {
            apiconsts.ConnectionStatus.OFFLINE.name: ("OFFLINE", Color.RED),
            apiconsts.ConnectionStatus.CONNECTED.name: ("Connected", Color.YELLOW),
            apiconsts.ConnectionStatus.ONLINE.name: ("Online", Color.GREEN),
            apiconsts.ConnectionStatus.VERSION_MISMATCH.name: ("OFFLINE(VERSION MISMATCH)", Color.RED),
            apiconsts.ConnectionStatus.FULL_SYNC_FAILED.name: ("OFFLINE(FULL SYNC FAILED)", Color.RED),
            apiconsts.ConnectionStatus.AUTHENTICATION_ERROR.name: ("OFFLINE(AUTHENTICATION ERROR)", Color.RED),
            apiconsts.ConnectionStatus.UNKNOWN.name: ("Unknown", Color.YELLOW),
            apiconsts.ConnectionStatus.HOSTNAME_MISMATCH.name: ("OFFLINE(HOSTNAME MISMATCH)", Color.RED),
            apiconsts.ConnectionStatus.OTHER_CONTROLLER.name: ("OFFLINE(OTHER_CONTROLLER)", Color.RED),
            apiconsts.ConnectionStatus.NO_STLT_CONN.name: ("OFFLINE(NO CONNECTION TO SATELLITE)", Color.RED)
        }

        tbl.set_groupby(args.groupby if args.groupby else [tbl.header_name(0)])

        # first pass: decide whether any node is offline but not yet evicted,
        # i.e. whether eviction timestamps (and the trailing hint) should be shown
        show_eviction_info = False
        for node in lstmsg.nodes:
            node_is_offline = conn_stat_dict.get(node.connection_status)[0] == apiconsts.ConnectionStatus.OFFLINE.name
            node_is_evicted = apiconsts.FLAG_EVICTED in node.flags
            if node.eviction_timestamp and node_is_offline and not node_is_evicted:
                show_eviction_info = True
                break

        for node in lstmsg.nodes:
            # concat a ip list with satellite connection indicator
            active_ip = ""
            for net_if in node.net_interfaces:
                if net_if.is_active and net_if.stlt_port:
                    active_ip = net_if.address + ":" + str(net_if.stlt_port) + " (" + net_if.stlt_encryption_type + ")"

            aux_props = ["{k}={v}".format(k=k, v=v) for k, v in node.properties.items()
                         if k.startswith(apiconsts.NAMESPC_AUXILIARY + '/')]

            # node flags take precedence over the raw connection status
            if apiconsts.FLAG_EVICTED in node.flags:
                conn_stat = (apiconsts.FLAG_EVICTED, Color.RED)
            elif apiconsts.FLAG_DELETE in node.flags:
                conn_stat = (apiconsts.FLAG_DELETE, Color.RED)
            elif apiconsts.FLAG_EVACUATE in node.flags:
                conn_stat = (apiconsts.FLAG_EVACUATE, Color.YELLOW)
            else:
                conn_stat = conn_stat_dict.get(node.connection_status)

            row = [node.name, node.type, active_ip]
            if args.show_aux_props:
                row.append("\n".join(aux_props))

            state_text = conn_stat[0]
            node_is_offline = conn_stat_dict.get(node.connection_status)[0] == apiconsts.ConnectionStatus.OFFLINE.name
            node_is_evicted = apiconsts.FLAG_EVICTED in node.flags
            if show_eviction_info and node_is_offline and not node_is_evicted:
                if node.eviction_timestamp:
                    # eviction_timestamp is in milliseconds since epoch
                    eviction = datetime.fromtimestamp(int(node.eviction_timestamp / 1000))
                else:
                    eviction = "Disabled"
                state_text += " (Auto-eviction: {eviction})".format(eviction=eviction)
            row += [tbl.color_cell(state_text, conn_stat[1])]
            tbl.add_row(row)
        tbl.show()

        if show_eviction_info:
            print("To cancel automatic eviction please consider the corresponding "
                  "DrbdOptions/AutoEvict* properties on controller and / or node level")
            print("See 'linstor controller set-property --help' or 'linstor node set-property --help' for more details")
class NodeCommands(Commands): DISKLESS_STORAGE_POOL = 'DfltDisklessStorPool' DISKLESS_RESOURCE_NAME = 'diskless resource' _node_headers = [ linstor_client.TableHeader("Node"), linstor_client.TableHeader("NodeType"), linstor_client.TableHeader("Addresses"), linstor_client.TableHeader("State", color=Color.DARKGREEN) ] _info_headers_provs = [ linstor_client.TableHeader("Node"), [ linstor_client.TableHeader("Diskless"), linstor_client.TableHeader("LVM"), linstor_client.TableHeader("LVMThin"), linstor_client.TableHeader("ZFS/Thin"), linstor_client.TableHeader("File/Thin") ] ] _info_headers_lrs = [ linstor_client.TableHeader("Node"), [ linstor_client.TableHeader("DRBD"), linstor_client.TableHeader("LUKS"), linstor_client.TableHeader("NVMe"), linstor_client.TableHeader("Storage") ] ] class CreateOpenFlexTarget: LONG = "create-openflex-target" SHORT = "coft" class Reconnect: LONG = "reconnect" SHORT = "rc" def __init__(self): super(NodeCommands, self).__init__() def setup_commands(self, parser): # Node subcommands subcmds = [ Commands.Subcommands.Create, NodeCommands.CreateOpenFlexTarget, Commands.Subcommands.List, Commands.Subcommands.Delete, Commands.Subcommands.Lost, Commands.Subcommands.Describe, Commands.Subcommands.Interface, Commands.Subcommands.Info, Commands.Subcommands.SetProperty, Commands.Subcommands.ListProperties, Commands.Subcommands.Modify, NodeCommands.Reconnect, Commands.Subcommands.Restore ] node_parser = parser.add_parser( Commands.NODE, aliases=["n"], formatter_class=argparse.RawTextHelpFormatter, description="Node subcommands" ) node_subp = node_parser.add_subparsers( title="Node commands", metavar="", description=Commands.Subcommands.generate_desc(subcmds) ) node_types = [ apiconsts.VAL_NODE_TYPE_CTRL, apiconsts.VAL_NODE_TYPE_AUX, apiconsts.VAL_NODE_TYPE_CMBD, apiconsts.VAL_NODE_TYPE_STLT ] # create node p_new_node = node_subp.add_parser( Commands.Subcommands.Create.LONG, aliases=[Commands.Subcommands.Create.SHORT], description='Creates a node entry 
for a node that participates in the linstor cluster.' ) p_new_node.add_argument('-p', '--port', type=rangecheck(1, 65535), help='default: Satellite %s for %s; Controller %s for %s; %s for %s' % ( apiconsts.DFLT_STLT_PORT_PLAIN, apiconsts.VAL_NETCOM_TYPE_PLAIN, apiconsts.DFLT_CTRL_PORT_PLAIN, apiconsts.VAL_NETCOM_TYPE_PLAIN, apiconsts.DFLT_CTRL_PORT_SSL, apiconsts.VAL_NETCOM_TYPE_SSL)) ntype_def = apiconsts.VAL_NODE_TYPE_STLT p_new_node.add_argument('--node-type', choices=[x.lower() for x in node_types], default=apiconsts.VAL_NODE_TYPE_STLT, type=str.lower, help='Node type (default: %s)' % ntype_def.lower()) ctype_def = apiconsts.VAL_NETCOM_TYPE_PLAIN p_new_node.add_argument('--communication-type', choices=(apiconsts.VAL_NETCOM_TYPE_PLAIN.lower(), apiconsts.VAL_NETCOM_TYPE_SSL.lower()), default=ctype_def, type=str.lower, help='Communication type (default: %s)' % ctype_def.lower()) itype_def = apiconsts.VAL_NETIF_TYPE_IP p_new_node.add_argument('--interface-type', choices=(apiconsts.VAL_NETIF_TYPE_IP.lower(),), type=str.lower, default=itype_def, help='Interface type (default: %s)' % itype_def.lower()) iname_def = 'default' p_new_node.add_argument('--interface-name', default=iname_def, help='Interface name (default: %s)' % iname_def) p_new_node.add_argument( 'name', help='Name of the new node, must match the nodes hostname', type=str) p_new_node.add_argument( 'ip', nargs='?', help='IP address of the new node, if not specified it will be resolved by the name.' ).completer = ip_completer("name") p_new_node.set_defaults(func=self.create) # openflex create p_create_of_target = node_subp.add_parser( NodeCommands.CreateOpenFlexTarget.LONG, aliases=[NodeCommands.CreateOpenFlexTarget.SHORT], description='Creates a virtual on controller openflex target node.' 
) p_create_of_target.add_argument( 'node_name', help='Name of the new openflex target node', type=str ) p_create_of_target.add_argument('stor_dev_host', help='OpenFlex storage device host') p_create_of_target.add_argument('stor_dev', help='OpenFlex storage device id') p_create_of_target.set_defaults(func=self.create_of_target) # modify node p_modify_node = node_subp.add_parser( Commands.Subcommands.Modify.LONG, aliases=[Commands.Subcommands.Modify.SHORT], description='Modify a node' ) p_modify_node.add_argument( '--node-type', '-t', choices=[x.lower() for x in node_types], type=str.lower, default=apiconsts.VAL_NODE_TYPE_STLT, help='Node type (default: %s)' % ntype_def.lower() ) p_modify_node.add_argument( 'node_name', help='Name of the node to modify.' ).completer = self.node_completer p_modify_node.set_defaults(func=self.modify_node) # describe-node p_desc_node = node_subp.add_parser( Commands.Subcommands.Describe.LONG, aliases=[Commands.Subcommands.Describe.SHORT], description='describe a node (or all nodes), list storage pools, resources and volumes under this node, ' 'in this order') p_desc_node.add_argument( 'name', nargs='?', help='Name of the node to be described. With no name, all nodes are described' ).completer = self.node_completer p_desc_node.set_defaults(func=self.describe) # remove-node p_rm_node = node_subp.add_parser( Commands.Subcommands.Delete.LONG, aliases=[Commands.Subcommands.Delete.SHORT], description='Removes a node from the linstor cluster. ' 'All linstor resources that are still deployed on the specified ' 'node are marked for undeployment, and the node entry is marked for ' "removal from linstor's data tables. The specified node is " 'expected to undeploy all resources. 
As soon as all resources have been ' 'undeployed from the node, the node entry is removed from ' "linstor's data tables.") p_rm_node.add_argument( '--async', action='store_true', help='Deprecated, kept for compatibility' ) p_rm_node.add_argument('name', help='Name of the node to remove').completer = self.node_completer p_rm_node.set_defaults(func=self.delete) # lost-node p_lost_node = node_subp.add_parser( Commands.Subcommands.Lost.LONG, aliases=[Commands.Subcommands.Lost.SHORT], description='Removes an unrecoverable node from the linstor cluster. ' 'All linstor resources attached to this node will be deleted from the database.' ) p_lost_node.add_argument( '--async', action='store_true', help='Deprecated, kept for compatibility' ) p_lost_node.add_argument( 'name', help='Name of the node to delete.').completer = self.node_completer p_lost_node.set_defaults(func=self.lost) # reconnect node(s) p_recon_node = node_subp.add_parser( NodeCommands.Reconnect.LONG, aliases=[NodeCommands.Reconnect.SHORT], description='Reconnect a node reinitializing the nodes state.' ) p_recon_node.add_argument( 'nodes', nargs="+", help='List of nodes to reconnect.' 
).completer = self.node_completer p_recon_node.set_defaults(func=self.reconnect) # Interface commands netif_subcmds = [ Commands.Subcommands.Create, Commands.Subcommands.List, Commands.Subcommands.Modify, Commands.Subcommands.Delete ] interface_parser = node_subp.add_parser( Commands.Subcommands.Interface.LONG, formatter_class=argparse.RawTextHelpFormatter, aliases=[Commands.Subcommands.Interface.SHORT], description="%s subcommands" % Commands.Subcommands.Interface.LONG) interface_subp = interface_parser.add_subparsers( title="%s subcommands" % Commands.Subcommands.Interface.LONG, metavar="", description=Commands.Subcommands.generate_desc(netif_subcmds)) # create net interface p_create_netinterface = interface_subp.add_parser( Commands.Subcommands.Create.LONG, aliases=[Commands.Subcommands.Create.SHORT], description='Creates and adds a new netinterface to a given node.' ' If port is specified this net interface is used as satellite port' ) p_create_netinterface.add_argument( '-p', '--port', type=rangecheck(1, 65535), help='Port to use for satellite connections' ) p_create_netinterface.add_argument( '--communication-type', choices=(apiconsts.VAL_NETCOM_TYPE_PLAIN.lower(), apiconsts.VAL_NETCOM_TYPE_SSL.lower()), type=str.lower, default=ctype_def, help='Communication type (default: %s)' % ctype_def.lower() ) p_create_netinterface.add_argument( '--active', action='store_true', help='Create this net interface as the active satellite connection' ) p_create_netinterface.add_argument( "node_name", help="Name of the node to add the net interface" ).completer = self.node_completer p_create_netinterface.add_argument("interface_name", help="Interface name") p_create_netinterface.add_argument('ip', help='New IP address for the network interface') p_create_netinterface.set_defaults(func=self.create_netif) # modify net interface p_mod_netif = interface_subp.add_parser( Commands.Subcommands.Modify.LONG, aliases=[Commands.Subcommands.Modify.SHORT], description='Change the ip listen 
address of a netinterface on the given node.' ) p_mod_netif.add_argument( '-p', '--port', type=rangecheck(1, 65535), help='Port to use for satellite connections' ) p_mod_netif.add_argument( '--communication-type', choices=(apiconsts.VAL_NETCOM_TYPE_PLAIN.lower(), apiconsts.VAL_NETCOM_TYPE_SSL.lower()), type=str.lower, default=ctype_def, help='Communication type (default: %s)' % ctype_def.lower() ) p_mod_netif.add_argument( '--active', action='store_true', help='Set this net interface as the active satellite connection' ) p_mod_netif.add_argument('--ip', help='New IP address for the network interface') p_mod_netif.add_argument("node_name", help="Name of the node").completer = self.node_completer p_mod_netif.add_argument("interface_name", help="Interface to change").completer = self.netif_completer p_mod_netif.set_defaults(func=self.modify_netif) # delete net interface p_delete_netinterface = interface_subp.add_parser( Commands.Subcommands.Delete.LONG, aliases=[Commands.Subcommands.Delete.SHORT], description='Delete a netinterface from a node.' ) p_delete_netinterface.add_argument( "node_name", help="Name of the node to remove the net interface" ).completer = self.node_completer p_delete_netinterface.add_argument( "interface_name", nargs='+', help="Interface name" ).completer = self.netif_completer p_delete_netinterface.set_defaults(func=self.delete_netif) # list nodes node_groupby = [x.name.lower() for x in self._node_headers] node_group_completer = Commands.show_group_completer(node_groupby, "groupby") p_lnodes = node_subp.add_parser( Commands.Subcommands.List.LONG, aliases=[Commands.Subcommands.List.SHORT], description='Prints a list of all cluster nodes known to linstor. 
' 'By default, the list is printed as a human readable table.') p_lnodes.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output') p_lnodes.add_argument('-g', '--groupby', nargs='+', type=str.lower, choices=node_groupby).completer = node_group_completer p_lnodes.add_argument('-n', '--nodes', nargs='+', type=str, help='Filter by list of nodes').completer = self.node_completer p_lnodes.add_argument('--show-aux-props', action="store_true", help='Show aux properties for nodes') p_lnodes.add_argument('--props', nargs='+', type=str, help='Filter list by object properties') p_lnodes.set_defaults(func=self.list) # list info p_info_node = node_subp.add_parser( Commands.Subcommands.Info.LONG, aliases=[Commands.Subcommands.Info.SHORT], description='Prints detailed info for all cluster nodes known to linstor. ' 'By default, the list is printed as a human readable table.' ) p_info_node.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output') p_info_node.add_argument( '-n', '--nodes', nargs='+', type=str, help='Filter by list of nodes' ).completer = self.node_completer p_info_node.set_defaults(func=self.info) # list netinterface p_lnetif = interface_subp.add_parser( Commands.Subcommands.List.LONG, aliases=[Commands.Subcommands.List.SHORT], description='Prints a list of netinterfaces of a node.' 
) p_lnetif.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output') p_lnetif.add_argument( 'node_name', help='Node name for which to print the net interfaces' ).completer = self.node_completer p_lnetif.set_defaults(func=self.list_netinterfaces) # show properties p_sp = node_subp.add_parser( Commands.Subcommands.ListProperties.LONG, aliases=[Commands.Subcommands.ListProperties.SHORT], description="Prints all properties of the given node.") p_sp.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output') p_sp.add_argument( 'node_name', help="Node for which to print the properties").completer = self.node_completer p_sp.set_defaults(func=self.print_props) # set properties p_setp = node_subp.add_parser( Commands.Subcommands.SetProperty.LONG, aliases=[Commands.Subcommands.SetProperty.SHORT], formatter_class=argparse.RawTextHelpFormatter, description="Set a property on the given node." ) p_setp.add_argument( 'node_name', help="Node for which to set the property" ).completer = self.node_completer Commands.add_parser_keyvalue(p_setp, "node") p_setp.set_defaults(func=self.set_props) # restore evicted node p_restore_node = node_subp.add_parser( Commands.Subcommands.Restore.LONG, aliases=[Commands.Subcommands.Restore.SHORT], formatter_class=argparse.RawTextHelpFormatter, description="Restore an evicted node." 
) p_restore_node.add_argument( 'node_name', help="Evicted node to restore" ).completer = self.node_completer p_restore_node.set_defaults(func=self.restore_node) self.check_subcommands(interface_subp, netif_subcmds) self.check_subcommands(node_subp, subcmds) @classmethod def _resolve_remote_ip(cls, hostname): """ Tries to resolve a non local ip address of the given hostname :param str hostname: hostname to resolve :return: ip address as string or None if it couldn't be resolved :rtype: str :raise: LinstorClientError if unable to determine an address """ try: addrinfo = socket.getaddrinfo(hostname, None) non_local = [y for y in addrinfo if y[0] == 2 and not y[4][0].startswith('127.')] if non_local: return non_local[0][4][0] raise LinstorClientError( "Unable determine a valid ip address '" + hostname + "'", ExitCode.ARGPARSE_ERROR) except socket.gaierror as err: raise LinstorClientError( "Unable to resolve ip address for '" + hostname + "': " + str(err), ExitCode.ARGPARSE_ERROR) def create(self, args): ip_addr = args.ip if args.ip is None: ip_addr = self._resolve_remote_ip(args.name) replies = self._linstor.node_create( args.name, args.node_type, ip_addr, args.communication_type, args.port, args.interface_name ) return self.handle_replies(args, replies) def create_of_target(self, args): props = { apiconsts.NAMESPC_STORAGE_DRIVER + '/' + apiconsts.KEY_STOR_POOL_OPENFLEX_STOR_DEV_HOST: args.stor_dev_host, apiconsts.NAMESPC_STORAGE_DRIVER + '/' + apiconsts.KEY_STOR_POOL_OPENFLEX_STOR_DEV: args.stor_dev } replies = self.get_linstorapi().node_create( args.node_name, apiconsts.VAL_NODE_TYPE_OPENFLEX_TARGET, "127.0.0.1", property_dict=props ) return self.handle_replies(args, replies) def modify_node(self, args): replies = self.get_linstorapi().node_modify(args.node_name, args.node_type) return self.handle_replies(args, replies) def delete(self, args): async_flag = vars(args)["async"] replies = self._linstor.node_delete(args.name, async_flag) return self.handle_replies(args, 
replies) def lost(self, args): async_flag = vars(args)["async"] replies = self._linstor.node_lost(args.name, async_flag) return self.handle_replies(args, replies) def reconnect(self, args): replies = self.get_linstorapi().node_reconnect(args.nodes) return self.handle_replies(args, replies) @classmethod def show_nodes(cls, args, lstmsg): tbl = linstor_client.Table(utf8=not args.no_utf8, colors=not args.no_color, pastable=args.pastable) node_hdr = list(cls._node_headers) if args.show_aux_props: node_hdr.insert(-1, linstor_client.TableHeader("AuxProps")) for hdr in node_hdr: tbl.add_header(hdr) conn_stat_dict = { apiconsts.ConnectionStatus.OFFLINE.name: ("OFFLINE", Color.RED), apiconsts.ConnectionStatus.CONNECTED.name: ("Connected", Color.YELLOW), apiconsts.ConnectionStatus.ONLINE.name: ("Online", Color.GREEN), apiconsts.ConnectionStatus.VERSION_MISMATCH.name: ("OFFLINE(VERSION MISMATCH)", Color.RED), apiconsts.ConnectionStatus.FULL_SYNC_FAILED.name: ("OFFLINE(FULL SYNC FAILED)", Color.RED), apiconsts.ConnectionStatus.AUTHENTICATION_ERROR.name: ("OFFLINE(AUTHENTICATION ERROR)", Color.RED), apiconsts.ConnectionStatus.UNKNOWN.name: ("Unknown", Color.YELLOW), apiconsts.ConnectionStatus.HOSTNAME_MISMATCH.name: ("OFFLINE(HOSTNAME MISMATCH)", Color.RED), apiconsts.ConnectionStatus.OTHER_CONTROLLER.name: ("OFFLINE(OTHER_CONTROLLER)", Color.RED), apiconsts.ConnectionStatus.NO_STLT_CONN.name: ("OFFLINE(NO CONNECTION TO SATELLITE)", Color.RED) } tbl.set_groupby(args.groupby if args.groupby else [tbl.header_name(0)]) for node in lstmsg.nodes: # concat a ip list with satellite connection indicator active_ip = "" for net_if in node.net_interfaces: if net_if.is_active and net_if.stlt_port: active_ip = net_if.address + ":" + str(net_if.stlt_port) + " (" + net_if.stlt_encryption_type + ")" aux_props = ["{k}={v}".format(k=k, v=v) for k, v in node.properties.items() if k.startswith(apiconsts.NAMESPC_AUXILIARY + '/')] if apiconsts.FLAG_EVICTED in node.flags: conn_stat = 
(apiconsts.FLAG_EVICTED, Color.RED) elif apiconsts.FLAG_DELETE in node.flags: conn_stat = (apiconsts.FLAG_DELETE, Color.RED) else: conn_stat = conn_stat_dict.get(node.connection_status) row = [node.name, node.type, active_ip] if args.show_aux_props: row.append("\n".join(aux_props)) row += [tbl.color_cell(conn_stat[0], conn_stat[1])] tbl.add_row(row) tbl.show() def list(self, args): lstmsg = self._linstor.node_list(args.nodes, args.props) return self.output_list(args, lstmsg, self.show_nodes) def describe(self, args=None): """ describe the details of a node It will construct a 4 level tree and print it. The four levels are: node, storage pool, resource, and volume :param args: :return: """ try: node_list_replies = self._linstor.node_list() self.check_list_sanity(args, node_list_replies) node_map = self.construct_node(node_list_replies[0]) storage_pool_list_replies = self._linstor.storage_pool_list() self.check_list_sanity(args, storage_pool_list_replies) self.construct_storpool(node_map, storage_pool_list_replies[0]) rsc_dfn_list_replies = self._linstor.resource_dfn_list(query_volume_definitions=False) self.check_list_sanity(args, rsc_dfn_list_replies) rsc_list_replies = self._linstor.resource_list() self.check_list_sanity(args, rsc_list_replies) self.construct_rsc(node_map, rsc_list_replies[0]) outputted = False machine_data = [] for node_name_key in sorted(node_map.keys()): if outputted: print("") if args.name == node_name_key or not args.name: node = node_map[node_name_key] machine_data.append(node.to_data()) if not args.machine_readable: node.print_node(args.no_utf8, args.no_color) outputted = True if args.machine_readable: print(self._to_json(machine_data)) elif not outputted and args.name: sys.stderr.write('%s: no such node\n' % args.name) return ExitCode.OBJECT_NOT_FOUND except LinstorClientError as lce: return lce.exit_code return ExitCode.OK def check_list_sanity(self, args, replies): if replies: if self.check_for_api_replies(replies): rc = 
self.handle_replies(args, replies) raise LinstorClientError("List reply error", rc) return True @classmethod def get_volume_size(cls, rsc_dfn_list): """ Constructs a map of minor numbers to volume sizes. :param rsc_dfn_list: Protobuf definition list :return: the created minor number to volume size map. :rtype: dict[int, int] """ volume_def_map = {} # type dict[int, int] for rsc_dfn in rsc_dfn_list.resource_definitions: for vlmdfn in rsc_dfn.volume_definitions: if vlmdfn.drbd_data: minor = vlmdfn.drbd_data.minor volume_def_map[minor] = vlmdfn.size return volume_def_map @classmethod def make_volume_node(cls, vlm): """ :param responses.Volume vlm: :return: """ volume_node = TreeNode('volume' + str(vlm.number), '', Color.DARKGREEN) volume_node.set_description('minor number: ' + str(vlm.drbd_data.drbd_volume_definition.minor) if vlm.drbd_data else '') volume_node.add_description( ', size: ' + str(SizeCalc.approximate_size_string(vlm.allocated_size)) ) return volume_node def construct_rsc(self, node_map, rsc_list): for rsc in rsc_list.resources: vlm_by_storpool = collections.defaultdict(list) for vlm in rsc.volumes: vlm_by_storpool[vlm.storage_pool_name].append(vlm) for (storpool_name, vlms) in vlm_by_storpool.items(): rsc_node = TreeNode(rsc.name, '', Color.BLUE) rsc_node.set_description('resource') if storpool_name == self.DISKLESS_STORAGE_POOL: storpool_node = node_map[rsc.node_name].find_child(self.DISKLESS_RESOURCE_NAME) if not storpool_node: storpool_node = TreeNode(self.DISKLESS_RESOURCE_NAME, '', Color.PINK) storpool_node.set_description('resources may reside on other nodes') node_map[rsc.node_name].add_child(storpool_node) else: storpool_node = node_map[rsc.node_name].find_child(storpool_name) for vlm in vlms: rsc_node.add_child(self.make_volume_node(vlm)) storpool_node.add_child(rsc_node) def construct_storpool(self, node_map, storage_pool_list): for storpool in storage_pool_list.storage_pools: storpool_node = TreeNode(storpool.name, '', Color.PINK) 
storpool_node.set_description('storage pool') node_map[storpool.node_name].add_child(storpool_node) @classmethod def construct_node(cls, node_list): """ Constructs a dict of node names to TreeNodes :param node_list: :return: :rtype: dict[str, TreeNode] """ node_map = {} for n in node_list.nodes: root_node = TreeNode(n.name, '', Color.RED) root_node.set_description('node') node_map[n.name] = root_node return node_map @classmethod def show_netinterfaces(cls, args, lstnodes): node = lstnodes.node(args.node_name) if node: tbl = linstor_client.Table(utf8=not args.no_utf8, colors=not args.no_color, pastable=args.pastable) tbl.add_column(node.name, color=Color.GREEN) tbl.add_column("NetInterface") tbl.add_column("IP") tbl.add_column("Port") tbl.add_column("EncryptionType") # warning: system test depends on alphabetical ordering for net_if in node.net_interfaces: tbl.add_row([ "+ StltCon" if net_if.is_active else "+", net_if.name, net_if.address, net_if.stlt_port if net_if.stlt_port else "", net_if.stlt_encryption_type if net_if.stlt_encryption_type else "" ]) tbl.show() else: raise LinstorClientError("Node '{n}' not found on controller.".format(n=args.node_name), ExitCode.OBJECT_NOT_FOUND) def list_netinterfaces(self, args): return self.output_list(args, self._linstor.node_list([args.node_name]), self.show_netinterfaces) @classmethod def show_info(cls, args, lstmsg): tbl_provs = linstor_client.Table(utf8=not args.no_utf8, colors=not args.no_color, pastable=args.pastable) tbl_lrs = linstor_client.Table(utf8=not args.no_utf8, colors=not args.no_color, pastable=args.pastable) tbl_provs.add_header(cls._info_headers_provs[0]) tbl_lrs.add_header(cls._info_headers_lrs[0]) stor_prov_hdrs = cls._info_headers_provs[1] rsc_layer_hdrs = cls._info_headers_lrs[1] for stor_prov_hdr in stor_prov_hdrs: tbl_provs.add_header(stor_prov_hdr) for rsc_layer_hdr in rsc_layer_hdrs: tbl_lrs.add_header(rsc_layer_hdr) unsp_provs_msgs = {} unsp_lrs_msgs = {} # only satellite nodes got useful info for 
node in [x for x in lstmsg.nodes if x.type.lower() == apiconsts.VAL_NODE_TYPE_STLT.lower()]: if node.connection_status != "ONLINE": node_offline_msg = "NODE IS OFFLINE!" unsp_provs_msgs.update({node.name: node_offline_msg}) unsp_lrs_msgs.update({node.name: node_offline_msg}) else: if node.unsupported_providers: unsp_provs_msgs.update({node.name: node.unsupported_providers}) if node.unsupported_layers: unsp_lrs_msgs.update({node.name: node.unsupported_layers}) row_provs = [node.name] row_lrs = [node.name] # fill table for supported storage providers stor_provs = [x.replace("_", "") for x in node.storage_providers] for stor_prov_hdr in stor_prov_hdrs: stor_prov_hdr_name = stor_prov_hdr.name.upper() if "/" in stor_prov_hdr_name: stor_prov_hdr_name = stor_prov_hdr_name.replace("/", "").upper() if stor_prov_hdr_name in stor_provs: row_provs.append(tbl_provs.color_cell("+", Color.GREEN)) else: row_provs.append(tbl_provs.color_cell("-", Color.RED)) tbl_provs.add_row(row_provs) # fill table for supported resource layers rsc_layers = [x.replace("_", "") for x in node.resource_layers] for rsc_layer_hdr in rsc_layer_hdrs: if rsc_layer_hdr.name.upper() in rsc_layers: row_lrs.append(tbl_provs.color_cell("+", Color.GREEN)) else: row_lrs.append(tbl_provs.color_cell("-", Color.RED)) tbl_lrs.add_row(row_lrs) # print storage providers tbl_provs.show() if unsp_provs_msgs: print("Unsupported storage providers:") for node_name, unsp_provs_msg in unsp_provs_msgs.items(): is_node_online = isinstance(unsp_provs_msg, dict) print(" " + node_name + ": " + (unsp_provs_msg if not is_node_online else "")) if is_node_online: for prov_name, reasons in unsp_provs_msg.items(): for reason in reasons: print(" " + prov_name + ": " + reason) # print resource layers print("") tbl_lrs.show() if unsp_lrs_msgs: print("Unsupported resource layers:") for node_name, unsp_lrs_msg in unsp_lrs_msgs.items(): is_node_online = isinstance(unsp_lrs_msg, dict) print(" " + node_name + ": " + (unsp_lrs_msg if not 
is_node_online else "")) if is_node_online: for lr_name, reasons in unsp_lrs_msg.items(): for reason in reasons: print(" " + lr_name + ": " + reason) def info(self, args): return self.output_list(args, self._linstor.node_list(args.nodes), self.show_info) @classmethod def _props_list(cls, args, lstmsg): result = [] node = NodeCommands.find_node(lstmsg, args.node_name) if node: result.append(node.props) else: raise LinstorClientError("Node '{n}' not found on controller.".format(n=args.node_name), ExitCode.OBJECT_NOT_FOUND) return result def print_props(self, args): lstmsg = self._linstor.node_list([args.node_name]) return self.output_props_list(args, lstmsg, self._props_list) def set_props(self, args): args = self._attach_aux_prop(args) mod_prop_dict = Commands.parse_key_value_pairs([(args.key, args.value)]) replies = self.get_linstorapi().node_modify( args.node_name, property_dict=mod_prop_dict['pairs'], delete_props=mod_prop_dict['delete'] ) return self.handle_replies(args, replies) def restore_node(self, args): return self.handle_replies(args, self.get_linstorapi().node_restore(node_name=args.node_name)) def create_netif(self, args): replies = self._linstor.netinterface_create( args.node_name, args.interface_name, args.ip, args.port, args.communication_type, args.active ) return self.handle_replies(args, replies) def modify_netif(self, args): replies = self._linstor.netinterface_modify( args.node_name, args.interface_name, args.ip, args.port, args.communication_type, args.active ) return self.handle_replies(args, replies) def delete_netif(self, args): # execute delete netinterfaces and flatten result list replies = [x for subx in args.interface_name for x in self._linstor.netinterface_delete(args.node_name, subx)] return self.handle_replies(args, replies)
class StoragePoolCommands(Commands):
    """Subcommands for creating, listing, deleting and configuring storage pools."""

    _stor_pool_headers = [
        linstor_client.TableHeader("StoragePool"),
        linstor_client.TableHeader("Node"),
        linstor_client.TableHeader("Driver"),
        linstor_client.TableHeader("PoolName"),
        linstor_client.TableHeader("Free", alignment_text='>'),
        linstor_client.TableHeader("SupportsSnapshots")
    ]

    def __init__(self):
        super(StoragePoolCommands, self).__init__()

    def setup_commands(self, parser):
        """Registers the 'storage-pool' parser and all of its subcommands."""
        # Storage pool subcommands
        subcmds = [
            Commands.Subcommands.Create,
            Commands.Subcommands.List,
            Commands.Subcommands.Delete,
            Commands.Subcommands.SetProperty,
            Commands.Subcommands.ListProperties
        ]

        sp_parser = parser.add_parser(
            Commands.STORAGE_POOL,
            aliases=["sp"],
            formatter_class=argparse.RawTextHelpFormatter,
            description="Storage pool subcommands")

        sp_subp = sp_parser.add_subparsers(
            title="Storage pool commands",
            metavar="",
            description=Commands.Subcommands.generate_desc(subcmds)
        )

        # new-storpol
        p_new_storpool = sp_subp.add_parser(
            Commands.Subcommands.Create.LONG,
            aliases=[Commands.Subcommands.Create.SHORT],
            description='Defines a Linstor storage pool for use with Linstor.')
        p_new_storpool.add_argument(
            'node_name',
            type=namecheck(NODE_NAME),
            help='Name of the node for the new storage pool').completer = self.node_completer
        p_new_storpool.add_argument('name', type=namecheck(STORPOOL_NAME), help='Name of the new storage pool')
        p_new_storpool.add_argument(
            'driver',
            choices=StoragePoolCommands.driver_completer(""),
            help='Name of the driver used for the new storage pool'
        ).completer = StoragePoolCommands.driver_completer
        p_new_storpool.add_argument(
            'driver_pool_name',
            type=str,
            nargs='?',
            help='Volume group/pool to use, e.g. drbdpool. '
                 'For \'lvm\', the volume group; '
                 'for \'lvmthin\', the full name of the thin pool, namely VG/LV; '
                 'for \'zfs\', the zPool.')
        p_new_storpool.set_defaults(func=self.create)

        # remove-storpool
        # TODO description
        p_rm_storpool = sp_subp.add_parser(
            Commands.Subcommands.Delete.LONG,
            aliases=[Commands.Subcommands.Delete.SHORT],
            description=' Removes a storage pool ')
        p_rm_storpool.add_argument(
            '-q', '--quiet',
            action="store_true",
            help='Unless this option is used, linstor will issue a safety question '
                 'that must be answered with yes, otherwise the operation is canceled.')
        p_rm_storpool.add_argument(
            'node_name',
            nargs="+",
            help='Name of the Node where the storage pool exists.').completer = self.node_completer
        p_rm_storpool.add_argument(
            'name',
            help='Name of the storage pool to delete').completer = self.storage_pool_completer
        p_rm_storpool.set_defaults(func=self.delete)

        # list storpool
        storpoolgroupby = [x.name for x in self._stor_pool_headers]
        storpool_group_completer = Commands.show_group_completer(storpoolgroupby, "groupby")

        p_lstorpool = sp_subp.add_parser(
            Commands.Subcommands.List.LONG,
            aliases=[Commands.Subcommands.List.SHORT],
            description='Prints a list of all storage pools known to '
                        'linstor. By default, the list is printed as a human readable table.')
        p_lstorpool.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output')
        p_lstorpool.add_argument(
            '-g', '--groupby',
            nargs='+',
            choices=storpoolgroupby).completer = storpool_group_completer
        p_lstorpool.add_argument(
            '-s', '--storpools',
            nargs='+',
            type=namecheck(STORPOOL_NAME),
            help='Filter by list of storage pools').completer = self.storage_pool_completer
        p_lstorpool.add_argument(
            '-n', '--nodes',
            nargs='+',
            type=namecheck(NODE_NAME),
            help='Filter by list of nodes').completer = self.node_completer
        p_lstorpool.set_defaults(func=self.list)

        # show properties
        p_sp = sp_subp.add_parser(
            Commands.Subcommands.ListProperties.LONG,
            aliases=[Commands.Subcommands.ListProperties.SHORT],
            description="Prints all properties of the given storage pool.")
        p_sp.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output')
        p_sp.add_argument(
            'node_name',
            type=namecheck(NODE_NAME),
            help='Name of the node for the storage pool').completer = self.node_completer
        p_sp.add_argument(
            'storage_pool_name',
            help="Storage pool for which to print the properties").completer = self.storage_pool_completer
        p_sp.set_defaults(func=self.print_props)

        # set properties
        p_setprop = sp_subp.add_parser(
            Commands.Subcommands.SetProperty.LONG,
            aliases=[Commands.Subcommands.SetProperty.SHORT],
            description='Sets properties for the given storage pool on the given node.')
        p_setprop.add_argument(
            'node_name',
            type=namecheck(NODE_NAME),
            help='Name of the node for the storage pool').completer = self.node_completer
        p_setprop.add_argument(
            'name',
            type=namecheck(STORPOOL_NAME),
            help='Name of the storage pool'
        ).completer = self.storage_pool_completer
        Commands.add_parser_keyvalue(p_setprop, 'storagepool')
        p_setprop.set_defaults(func=self.set_props)

        self.check_subcommands(sp_subp, subcmds)

    def create(self, args):
        """Creates a storage pool, mapping the cli driver name to the api driver name."""
        # construct correct driver name
        driver = 'LvmThin' if args.driver == 'lvmthin' else args.driver.title()

        try:
            replies = self._linstor.storage_pool_create(args.node_name, args.name, driver, args.driver_pool_name)
        except linstor.LinstorError as e:
            raise ArgumentError(e.message)
        return self.handle_replies(args, replies)

    def delete(self, args):
        # execute delete storpooldfns and flatten result list
        replies = [x for subx in args.node_name
                   for x in self._linstor.storage_pool_delete(subx, args.name)]
        return self.handle_replies(args, replies)

    def show(self, args, lstmsg):
        """Renders the storage pool list reply as a table."""
        tbl = linstor_client.Table(utf8=not args.no_utf8, colors=not args.no_color, pastable=args.pastable)
        for hdr in self._stor_pool_headers:
            tbl.add_header(hdr)

        tbl.set_groupby(args.groupby if args.groupby else [self._stor_pool_headers[0].name])

        for storpool in lstmsg.stor_pools:
            driver_device = self._linstor.storage_props_to_driver_pool(
                storpool.driver[:-len('Driver')], storpool.props)

            supports_snapshots_prop = [x for x in storpool.static_traits
                                       if x.key == KEY_STOR_POOL_SUPPORTS_SNAPSHOTS]
            supports_snapshots = supports_snapshots_prop[0].value if supports_snapshots_prop else ''
            provisioning_prop = [x for x in storpool.static_traits if x.key == KEY_STOR_POOL_PROVISIONING]
            provisioning = provisioning_prop[0].value if provisioning_prop else ''

            # thin pools have no meaningful absolute free-space value
            freespace = ""
            if provisioning == VAL_STOR_POOL_PROVISIONING_THIN:
                freespace = "(thin)"
            elif storpool.driver != 'DisklessDriver' and storpool.HasField("free_space"):
                freespace = SizeCalc.approximate_size_string(storpool.free_space.free_space)

            tbl.add_row([
                storpool.stor_pool_name,
                storpool.node_name,
                storpool.driver,
                driver_device,
                freespace,
                supports_snapshots
            ])
        tbl.show()

    def list(self, args):
        lstmsg = self._linstor.storage_pool_list(args.nodes, args.storpools)
        return self.output_list(args, lstmsg, self.show)

    @classmethod
    def _props_list(cls, args, lstmsg):
        result = []
        if lstmsg:
            for stp in lstmsg.stor_pools:
                if stp.stor_pool_name == args.storage_pool_name and stp.node_name == args.node_name:
                    result.append(stp.props)
                    break
        return result

    def print_props(self, args):
        lstmsg = self._linstor.storage_pool_list()

        return self.output_props_list(args, lstmsg, self._props_list)

    def set_props(self, args):
        args = self._attach_aux_prop(args)
        mod_prop_dict = Commands.parse_key_value_pairs([args.key + '=' + args.value])
        replies = self._linstor.storage_pool_modify(
            args.node_name,
            args.name,
            mod_prop_dict['pairs'],
            mod_prop_dict['delete']
        )
        return self.handle_replies(args, replies)

    @staticmethod
    def driver_completer(prefix, **kwargs):
        """Returns the possible storage drivers, filtered by the given prefix."""
        possible = ["lvm", "lvmthin", "zfs", "diskless"]

        if prefix:
            return [e for e in possible if e.startswith(prefix)]

        return possible
class ResourceCommands(Commands):
    """Subcommands for deploying, listing, deleting and configuring resources."""

    _resource_headers = [
        linstor_client.TableHeader("ResourceName"),
        linstor_client.TableHeader("Node"),
        linstor_client.TableHeader("Port"),
        linstor_client.TableHeader("State", Color.DARKGREEN, alignment_text='>')
    ]

    def __init__(self):
        super(ResourceCommands, self).__init__()

    def setup_commands(self, parser):
        """
        :param argparse.ArgumentParser parser:
        :return:
        """
        subcmds = [
            Commands.Subcommands.Create,
            Commands.Subcommands.List,
            Commands.Subcommands.ListVolumes,
            Commands.Subcommands.Delete,
            Commands.Subcommands.SetProperty,
            Commands.Subcommands.ListProperties,
            Commands.Subcommands.DrbdPeerDeviceOptions
        ]

        # Resource subcommands
        res_parser = parser.add_parser(
            Commands.RESOURCE,
            aliases=["r"],
            formatter_class=argparse.RawTextHelpFormatter,
            description="Resource subcommands")

        res_subp = res_parser.add_subparsers(
            title="resource commands",
            metavar="",
            description=Commands.Subcommands.generate_desc(subcmds))

        # new-resource
        p_new_res = res_subp.add_parser(
            Commands.Subcommands.Create.LONG,
            aliases=[Commands.Subcommands.Create.SHORT],
            description='Deploys a resource definition to a node.')
        p_new_res.add_argument(
            '--storage-pool', '-s',
            type=namecheck(STORPOOL_NAME),
            help="Storage pool name to use.").completer = self.storage_pool_dfn_completer
        p_new_res.add_argument(
            '--diskless', '-d',
            action="store_true",
            help='Should the resource be diskless')
        p_new_res.add_argument(
            '--async',
            action='store_true',
            help='Do not wait for deployment on satellites before returning')
        p_new_res.add_argument(
            '--auto-place',
            type=int,
            metavar="REPLICA_COUNT",
            help='Auto place a resource to a specified number of nodes')
        p_new_res.add_argument(
            '--do-not-place-with',
            type=namecheck(RES_NAME),
            nargs='+',
            metavar="RESOURCE_NAME",
            help='Try to avoid nodes that already have a given resource deployed.'
        ).completer = self.resource_completer
        p_new_res.add_argument(
            '--do-not-place-with-regex',
            type=str,
            metavar="RESOURCE_REGEX",
            help='Try to avoid nodes that already have a resource ' +
                 'deployed whose name is matching the given regular expression.')
        p_new_res.add_argument(
            '--replicas-on-same',
            nargs='+',
            default=[],
            metavar="AUX_NODE_PROPERTY",
            help='Tries to place resources on nodes with the same given auxiliary node property values.'
        )
        p_new_res.add_argument(
            '--replicas-on-different',
            nargs='+',
            default=[],
            metavar="AUX_NODE_PROPERTY",
            help='Tries to place resources on nodes with a different value for the given auxiliary node property.'
        )
        p_new_res.add_argument(
            '--diskless-on-remaining',
            action="store_true",
            help='Will add a diskless resource on all non replica nodes.')
        p_new_res.add_argument(
            'node_name',
            type=namecheck(NODE_NAME),
            nargs='*',
            help='Name of the node to deploy the resource').completer = self.node_completer
        p_new_res.add_argument(
            'resource_definition_name',
            type=namecheck(RES_NAME),
            help='Name of the resource definition').completer = self.resource_dfn_completer
        p_new_res.set_defaults(func=self.create)

        # remove-resource
        p_rm_res = res_subp.add_parser(
            Commands.Subcommands.Delete.LONG,
            aliases=[Commands.Subcommands.Delete.SHORT],
            description='Removes a resource. '
                        'The resource is undeployed from the node '
                        "and the resource entry is marked for removal from linstor's data "
                        'tables. After the node has undeployed the resource, the resource '
                        "entry is removed from linstor's data tables.")
        p_rm_res.add_argument(
            '-q', '--quiet',
            action="store_true",
            help='Unless this option is used, linstor will issue a safety question '
                 'that must be answered with yes, otherwise the operation is canceled.')
        p_rm_res.add_argument(
            '--async',
            action='store_true',
            help='Do not wait for deployment on satellites before returning')
        p_rm_res.add_argument(
            'node_name',
            nargs="+",
            help='Name of the node').completer = self.node_completer
        p_rm_res.add_argument(
            'name',
            help='Name of the resource to delete').completer = self.resource_completer
        p_rm_res.set_defaults(func=self.delete)

        resgroupby = [x.name for x in ResourceCommands._resource_headers]
        res_group_completer = Commands.show_group_completer(resgroupby, "groupby")

        p_lreses = res_subp.add_parser(
            Commands.Subcommands.List.LONG,
            aliases=[Commands.Subcommands.List.SHORT],
            description='Prints a list of all resource definitions known to '
                        'linstor. By default, the list is printed as a human readable table.')
        p_lreses.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output')
        p_lreses.add_argument(
            '-g', '--groupby',
            nargs='+',
            choices=resgroupby).completer = res_group_completer
        p_lreses.add_argument(
            '-r', '--resources',
            nargs='+',
            type=namecheck(RES_NAME),
            help='Filter by list of resources').completer = self.resource_completer
        p_lreses.add_argument(
            '-n', '--nodes',
            nargs='+',
            type=namecheck(NODE_NAME),
            help='Filter by list of nodes').completer = self.node_completer
        p_lreses.set_defaults(func=self.list)

        # list volumes
        p_lvlms = res_subp.add_parser(
            Commands.Subcommands.ListVolumes.LONG,
            aliases=[Commands.Subcommands.ListVolumes.SHORT],
            description='Prints a list of all volumes.')
        p_lvlms.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output')
        p_lvlms.add_argument(
            '-n', '--nodes',
            nargs='+',
            type=namecheck(NODE_NAME),
            help='Filter by list of nodes').completer = self.node_completer
        p_lvlms.add_argument(
            '-s', '--storpools',
            nargs='+',
            type=namecheck(STORPOOL_NAME),
            help='Filter by list of storage pools').completer = self.storage_pool_completer
        p_lvlms.add_argument(
            '-r', '--resources',
            nargs='+',
            type=namecheck(RES_NAME),
            help='Filter by list of resources').completer = self.resource_completer
        p_lvlms.set_defaults(func=self.list_volumes)

        # show properties
        p_sp = res_subp.add_parser(
            Commands.Subcommands.ListProperties.LONG,
            aliases=[Commands.Subcommands.ListProperties.SHORT],
            description="Prints all properties of the given resource.")
        p_sp.add_argument('-p', '--pastable', action="store_true", help='Generate pastable output')
        p_sp.add_argument(
            'node_name',
            help="Node name where the resource is deployed.").completer = self.node_completer
        p_sp.add_argument(
            'resource_name',
            help="Resource name").completer = self.resource_completer
        p_sp.set_defaults(func=self.print_props)

        # set properties
        p_setprop = res_subp.add_parser(
            Commands.Subcommands.SetProperty.LONG,
            aliases=[Commands.Subcommands.SetProperty.SHORT],
            description='Sets properties for the given resource on the given node.')
        p_setprop.add_argument(
            'node_name',
            type=namecheck(NODE_NAME),
            help='Node name where resource is deployed.').completer = self.node_completer
        p_setprop.add_argument(
            'name',
            type=namecheck(RES_NAME),
            help='Name of the resource').completer = self.resource_completer
        Commands.add_parser_keyvalue(p_setprop, "resource")
        p_setprop.set_defaults(func=self.set_props)

        # drbd peer device options
        p_drbd_peer_opts = res_subp.add_parser(
            Commands.Subcommands.DrbdPeerDeviceOptions.LONG,
            aliases=[Commands.Subcommands.DrbdPeerDeviceOptions.SHORT],
            description="Set drbd peer-device options.")
        p_drbd_peer_opts.add_argument(
            'node_a',
            type=namecheck(NODE_NAME),
            help="1. Node in the node connection").completer = self.node_completer
        p_drbd_peer_opts.add_argument(
            'node_b',
            type=namecheck(NODE_NAME),
            help="2. Node in the node connection").completer = self.node_completer
        p_drbd_peer_opts.add_argument(
            'resource_name',
            type=namecheck(RES_NAME),
            help="Resource name").completer = self.resource_completer

        # hoisted: drbd_options() was previously evaluated once per comprehension item
        drbd_opts = DrbdOptions.drbd_options()['options']
        DrbdOptions.add_arguments(
            p_drbd_peer_opts,
            [x for x in drbd_opts if drbd_opts[x]['category'] == 'peer-device-options']
        )
        p_drbd_peer_opts.set_defaults(func=self.drbd_peer_opts)

        self.check_subcommands(res_subp, subcmds)

    @staticmethod
    def _satellite_not_connected(replies):
        """True if any reply carries the WARN_NOT_CONNECTED mask."""
        return any(
            reply.ret_code & apiconsts.WARN_NOT_CONNECTED == apiconsts.WARN_NOT_CONNECTED
            for reply in replies)

    def create(self, args):
        """Creates (or auto-places) a resource; unless --async, waits for deployment."""
        all_replies = []
        # 'async' is a reserved keyword since Python 3.7, so the attribute can
        # only be reached via vars() (same idiom as NodeCommands.delete/lost)
        async_flag = vars(args)["async"]
        if args.auto_place:
            # auto-place resource
            all_replies = self._linstor.resource_auto_place(
                args.resource_definition_name,
                args.auto_place,
                args.storage_pool,
                args.do_not_place_with,
                args.do_not_place_with_regex,
                [linstor.consts.NAMESPC_AUXILIARY + '/' + x for x in args.replicas_on_same],
                [linstor.consts.NAMESPC_AUXILIARY + '/' + x for x in args.replicas_on_different],
                diskless_on_remaining=args.diskless_on_remaining)

            if not self._linstor.all_api_responses_success(all_replies):
                return self.handle_replies(args, all_replies)

            if not async_flag:
                def event_handler(event_header, event_data):
                    if event_header.event_name == apiconsts.EVENT_RESOURCE_DEFINITION_READY:
                        if event_header.event_action == apiconsts.EVENT_STREAM_CLOSE_REMOVED:
                            print((Output.color_str('ERROR:', Color.RED, args.no_color)) + " Resource removed")
                            return ExitCode.API_ERROR

                        if event_data is not None:
                            if event_data.error_count > 0:
                                return ExitCode.API_ERROR

                            if event_data.ready_count == args.auto_place:
                                return ExitCode.OK

                    return None

                watch_result = self._linstor.watch_events(
                    self._linstor.return_if_failure,
                    event_handler,
                    linstor.ObjectIdentifier(resource_name=args.resource_definition_name))

                if isinstance(watch_result, list):
                    all_replies += watch_result
                    if not self._linstor.all_api_responses_success(watch_result):
                        return self.handle_replies(args, all_replies)
                elif watch_result != ExitCode.OK:
                    return watch_result
        else:
            # normal create resource
            # check that node is given
            if not args.node_name:
                raise ArgumentError("resource create: too few arguments: Node name missing.")

            for node_name in args.node_name:
                all_replies += self._linstor.resource_create(
                    node_name, args.resource_definition_name, args.diskless, args.storage_pool)

                if not self._linstor.all_api_responses_success(all_replies):
                    return self.handle_replies(args, all_replies)

            def event_handler(event_header, event_data):
                # node_name binds late: the watch loop below rebinds it per node
                if event_header.node_name == node_name:
                    if event_header.event_name in [
                            apiconsts.EVENT_RESOURCE_STATE, apiconsts.EVENT_RESOURCE_DEPLOYMENT_STATE]:
                        if event_header.event_action == apiconsts.EVENT_STREAM_CLOSE_NO_CONNECTION:
                            print(Output.color_str('WARNING:', Color.YELLOW, args.no_color) +
                                  " Satellite connection lost")
                            return ExitCode.NO_SATELLITE_CONNECTION
                        if event_header.event_action == apiconsts.EVENT_STREAM_CLOSE_REMOVED:
                            print((Output.color_str('ERROR:', Color.RED, args.no_color)) + " Resource removed")
                            return ExitCode.API_ERROR

                        if event_header.event_name == apiconsts.EVENT_RESOURCE_STATE and \
                                event_data is not None and event_data.ready:
                            return ExitCode.OK

                        return self.check_failure_events(event_header.event_name, event_data)

                return None

            if not ResourceCommands._satellite_not_connected(all_replies) and not async_flag:
                for node_name in args.node_name:
                    watch_result = self._linstor.watch_events(
                        self._linstor.return_if_failure,
                        event_handler,
                        linstor.ObjectIdentifier(
                            node_name=node_name, resource_name=args.resource_definition_name))

                    if isinstance(watch_result, list):
                        all_replies += watch_result
                        if not self._linstor.all_api_responses_success(watch_result):
                            return self.handle_replies(args, all_replies)
                    elif watch_result != ExitCode.OK:
                        return watch_result

        return self.handle_replies(args, all_replies)

    @classmethod
    def check_failure_events(cls, event_name, event_data):
        """Returns the failed ApiCallResponses of a deployment event, or None."""
        if event_name == apiconsts.EVENT_RESOURCE_DEPLOYMENT_STATE and event_data is not None:
            api_call_responses = [
                linstor.ApiCallResponse(response)
                for response in event_data.responses
            ]
            failure_responses = [
                api_call_response for api_call_response in api_call_responses
                if not api_call_response.is_success()
            ]

            return failure_responses if failure_responses else None
        return None

    def delete(self, args):
        """Deletes a resource on each given node; unless --async, waits for removal."""
        # 'async' is a reserved keyword since Python 3.7; fetch it via vars()
        if vars(args)["async"]:
            # execute delete resource and flatten result list
            replies = [x for subx in args.node_name
                       for x in self._linstor.resource_delete(subx, args.name)]
            return self.handle_replies(args, replies)
        else:
            def event_handler(event_header, event_data):
                if event_header.event_name in [apiconsts.EVENT_RESOURCE_DEPLOYMENT_STATE]:
                    if event_header.event_action == apiconsts.EVENT_STREAM_CLOSE_NO_CONNECTION:
                        print(Output.color_str('WARNING:', Color.YELLOW, args.no_color) +
                              " Satellite connection lost")
                        return ExitCode.NO_SATELLITE_CONNECTION
                    if event_header.event_action == apiconsts.EVENT_STREAM_CLOSE_REMOVED:
                        return [
                            linstor.ApiCallResponse(response)
                            for response in event_data.responses
                        ]

                    return self.check_failure_events(event_header.event_name, event_data)

                return None

            all_delete_replies = []
            for node in args.node_name:
                replies = self.get_linstorapi().resource_delete(node, args.name)
                all_delete_replies += replies

                if not self._linstor.all_api_responses_success(replies):
                    return self.handle_replies(args, all_delete_replies)

                watch_result = self.get_linstorapi().watch_events(
                    self._linstor.return_if_failure,
                    event_handler,
                    linstor.ObjectIdentifier(node_name=node, resource_name=args.name))

                if isinstance(watch_result, list):
                    all_delete_replies += watch_result
                    if not self._linstor.all_api_responses_success(watch_result):
                        return self.handle_replies(args, all_delete_replies)
                elif watch_result != ExitCode.OK:
                    return watch_result

            return self.handle_replies(args, all_delete_replies)

    @staticmethod
    def find_rsc_state(rsc_states, rsc_name, node_name):
        """Returns the matching resource state, or None."""
        for rscst in rsc_states:
            if rscst.rsc_name == rsc_name and rscst.node_name == node_name:
                return rscst
        return None

    def show(self, args, lstmsg):
        """Renders the resource list reply as a table."""
        rsc_dfns = self._linstor.resource_dfn_list()
        if isinstance(rsc_dfns[0], linstor.ApiCallResponse):
            return self.handle_replies(args, rsc_dfns)
        rsc_dfns = rsc_dfns[0].proto_msg.rsc_dfns

        rsc_dfn_map = {x.rsc_name: x for x in rsc_dfns}

        tbl = linstor_client.Table(utf8=not args.no_utf8, colors=not args.no_color, pastable=args.pastable)
        for hdr in ResourceCommands._resource_headers:
            tbl.add_header(hdr)

        tbl.set_groupby(args.groupby if args.groupby else [ResourceCommands._resource_headers[0].name])

        for rsc in lstmsg.resources:
            rsc_dfn = rsc_dfn_map[rsc.name]
            marked_delete = apiconsts.FLAG_DELETE in rsc.rsc_flags
            rsc_state_proto = ResourceCommands.find_rsc_state(lstmsg.resource_states, rsc.name, rsc.node_name)
            rsc_state = tbl.color_cell("Unknown", Color.YELLOW)
            if marked_delete:
                rsc_state = tbl.color_cell("DELETING", Color.RED)
            elif rsc_state_proto:
                if rsc_state_proto.HasField('in_use') and rsc_state_proto.in_use:
                    rsc_state = tbl.color_cell("InUse", Color.GREEN)
                else:
                    # first colored (problematic) volume state wins
                    for vlm in rsc.vlms:
                        vlm_state = ResourceCommands.get_volume_state(
                            rsc_state_proto.vlm_states, vlm.vlm_nr) if rsc_state_proto else None
                        state_txt, color = self.volume_state_cell(vlm_state, rsc.rsc_flags, vlm.vlm_flags)
                        rsc_state = tbl.color_cell(state_txt, color)
                        if color is not None:
                            break
            tbl.add_row([
                rsc.name,
                rsc.node_name,
                rsc_dfn.rsc_dfn_port,
                rsc_state
            ])
        tbl.show()

    def list(self, args):
        lstmsg = self._linstor.resource_list(
            filter_by_nodes=args.nodes, filter_by_resources=args.resources)
        return self.output_list(args, lstmsg, self.show)

    @staticmethod
    def get_resource_state(res_states, node_name, resource_name):
        for rsc_state in res_states:
            if rsc_state.node_name == node_name and rsc_state.rsc_name == resource_name:
                return rsc_state
        return None

    @staticmethod
    def get_volume_state(volume_states, volume_nr):
        for volume_state in volume_states:
            if volume_state.vlm_nr == volume_nr:
                return volume_state
        return None

    @staticmethod
    def volume_state_cell(vlm_state, rsc_flags, vlm_flags):
        """
        Determines the status of a drbd volume for table display.

        :param vlm_state: vlm_state proto
        :param rsc_flags: rsc flags
        :param vlm_flags: vlm flags
        :return: A tuple (state_text, color)
        """
        tbl_color = None
        state_prefix = 'Resizing, ' if apiconsts.FLAG_RESIZE in vlm_flags else ''
        state = state_prefix + "Unknown"
        if vlm_state and vlm_state.HasField("disk_state") and vlm_state.disk_state:
            disk_state = vlm_state.disk_state

            if disk_state == 'DUnknown':
                state = state_prefix + "Unknown"
                tbl_color = Color.YELLOW
            elif disk_state == 'Diskless':
                if apiconsts.FLAG_DISKLESS not in rsc_flags:  # unintentional diskless
                    state = state_prefix + disk_state
                    tbl_color = Color.RED
                else:
                    state = state_prefix + disk_state  # green text
            elif disk_state in ['Inconsistent', 'Failed']:
                state = state_prefix + disk_state
                tbl_color = Color.RED
            elif disk_state in ['UpToDate']:
                state = state_prefix + disk_state  # green text
            else:
                state = state_prefix + disk_state
                tbl_color = Color.YELLOW
        else:
            tbl_color = Color.YELLOW
        return state, tbl_color

    @classmethod
    def show_volumes(cls, args, lstmsg):
        """Renders the volume list reply as a table."""
        tbl = linstor_client.Table(utf8=not args.no_utf8, colors=not args.no_color, pastable=args.pastable)
        tbl.add_column("Node")
        tbl.add_column("Resource")
        tbl.add_column("StoragePool")
        tbl.add_column("VolumeNr")
        tbl.add_column("MinorNr")
        tbl.add_column("DeviceName")
        tbl.add_column("State", color=Output.color(Color.DARKGREEN, args.no_color), just_txt='>')

        for rsc in lstmsg.resources:
            rsc_state = ResourceCommands.get_resource_state(
                lstmsg.resource_states, rsc.node_name, rsc.name)
            for vlm in rsc.vlms:
                vlm_state = ResourceCommands.get_volume_state(
                    rsc_state.vlm_states, vlm.vlm_nr) if rsc_state else None
                state_txt, color = cls.volume_state_cell(vlm_state, rsc.rsc_flags, vlm.vlm_flags)
                state = tbl.color_cell(state_txt, color) if color else state_txt
                tbl.add_row([
                    rsc.node_name,
                    rsc.name,
                    vlm.stor_pool_name,
                    str(vlm.vlm_nr),
                    str(vlm.vlm_minor_nr),
                    vlm.device_path,
                    state
                ])

        tbl.show()

    def list_volumes(self, args):
        lstmsg = self._linstor.volume_list(args.nodes, args.storpools, args.resources)

        return self.output_list(args, lstmsg, self.show_volumes)

    @classmethod
    def _props_list(cls, args, lstmsg):
        result = []
        if lstmsg:
            for rsc in lstmsg.resources:
                if rsc.name == args.resource_name and rsc.node_name == args.node_name:
                    result.append(rsc.props)
                    break
        return result

    def print_props(self, args):
        lstmsg = self._linstor.resource_list()

        return self.output_props_list(args, lstmsg, self._props_list)

    def set_props(self, args):
        args = self._attach_aux_prop(args)
        mod_prop_dict = Commands.parse_key_value_pairs([args.key + '=' + args.value])
        replies = self._linstor.resource_modify(
            args.node_name,
            args.name,
            mod_prop_dict['pairs'],
            mod_prop_dict['delete'])
        return self.handle_replies(args, replies)

    def drbd_peer_opts(self, args):
        """Applies drbd peer-device options to a resource connection."""
        a = DrbdOptions.filter_new(args)
        del a['resource-name']
        del a['node-a']
        del a['node-b']

        mod_props, del_props = DrbdOptions.parse_opts(a)

        replies = self._linstor.resource_conn_modify(
            args.resource_name,
            args.node_a,
            args.node_b,
            mod_props,
            del_props)
        return self.handle_replies(args, replies)

    @staticmethod
    def completer_volume(prefix, **kwargs):
        possible = set()
        return possible
class BackupCommands(Commands):
    """CLI subcommands for managing remote (e.g. S3) backups."""

    class Info(object):
        LONG = "info"
        SHORT = "i"

    class Ship(object):
        LONG = "ship"
        SHORT = "s"

    class DeleteById(object):
        LONG = "id"
        SHORT = "id"

    class DeleteByFilter(object):
        LONG = "filter"
        SHORT = "f"

    class DeleteAll(object):
        LONG = "all"
        SHORT = "a"

    class DeleteS3Key(object):
        LONG = "s3key"
        SHORT = "s3"

    # columns for Linstor-known backups
    _backup_headers = [
        linstor_client.TableHeader("Resource"),
        linstor_client.TableHeader("Snapshot"),
        linstor_client.TableHeader("Finished at"),
        linstor_client.TableHeader("Based On"),
        linstor_client.TableHeader("Status")
    ]
    # columns for S3 objects unknown to Linstor (--others)
    _backup_other_headers = [
        linstor_client.TableHeader("S3 Key"),
    ]

    def __init__(self):
        super(BackupCommands, self).__init__()

    def setup_commands(self, parser):
        """Register the `backup` command tree on the given argparse parser."""
        subcmds = [
            Commands.Subcommands.List, Commands.Subcommands.Create,
            Commands.Subcommands.Delete, Commands.Subcommands.Abort,
            Commands.Subcommands.Restore, BackupCommands.Ship,
            BackupCommands.Info
        ]

        bkp_parser = parser.add_parser(
            Commands.BACKUP,
            aliases=['b'],
            formatter_class=argparse.RawTextHelpFormatter,
            description="Commands to manage Backups")
        bkp_sub = bkp_parser.add_subparsers(
            title="Backup subcommands",
            metavar="",
            description=Commands.Subcommands.generate_desc(subcmds))

        # list backup
        p_lbackups = bkp_sub.add_parser(
            Commands.Subcommands.List.LONG,
            aliases=[Commands.Subcommands.List.SHORT],
            description='Prints a list of backups.')
        p_lbackups.add_argument('-p',
                                '--pastable',
                                action="store_true",
                                help='Generate pastable output')
        p_lbackups.add_argument('-r',
                                '--resource',
                                help='Only show backups for given resource')
        p_lbackups.add_argument(
            '-s',
            '--snapshot',
            help='Only show backups with the given snapshot name')
        p_lbackups.add_argument('remote_name',
                                help='Remote name to show backups for')
        p_lbackups.add_argument(
            '-o',
            '--others',
            action="store_true",
            help='Only show s3 objects that are unknown to Linstor')
        p_lbackups.add_argument('-i',
                                '--show-id',
                                action="store_true",
                                help='Include the full ID in the output')
        p_lbackups.set_defaults(func=self.list_backups)

        # create backup
        p_crtbak = bkp_sub.add_parser(
            Commands.Subcommands.Create.LONG,
            aliases=[Commands.Subcommands.Create.SHORT],
            description="Create a new remote backup.")
        self._add_remote(p_crtbak)
        p_crtbak.add_argument("-f",
                              "--full",
                              action="store_true",
                              help="Create a full backup")
        p_crtbak.add_argument("-n",
                              "--node",
                              help="Node to prefer to upload backup")
        p_crtbak.add_argument("-s",
                              "--snapshot",
                              help="Name of the local snapshot to create")
        p_crtbak.add_argument("resource",
                              help="Resource used for the backup"
                              ).completer = self.resource_completer
        p_crtbak.set_defaults(func=self.create)

        # delete backup
        subcmd_delete = [
            BackupCommands.DeleteById, BackupCommands.DeleteByFilter,
            BackupCommands.DeleteAll, BackupCommands.DeleteS3Key
        ]

        p_delbak = bkp_sub.add_parser(
            Commands.Subcommands.Delete.LONG,
            aliases=[Commands.Subcommands.Delete.SHORT],
            formatter_class=argparse.RawTextHelpFormatter,
            description="Delete backup(s) from a remote")
        p_delbak_subp = p_delbak.add_subparsers(
            title="Delete backup commands",
            metavar="",
            description=Commands.Subcommands.generate_desc(subcmd_delete))

        # delete by id
        p_delbak_id = p_delbak_subp.add_parser(
            BackupCommands.DeleteById.LONG,
            aliases=[BackupCommands.DeleteById.SHORT],
            description="Delete a remote backup by ID")
        self._add_remote(p_delbak_id)
        p_delbak_id.add_argument("id",
                                 type=str,
                                 help="ID of the backup to delete")
        p_delbak_id.add_argument(
            "--prefix",
            action="store_true",
            help="Use the ID as a prefix instead of full match")
        p_delbak_id.set_defaults(func=self.del_by_id)

        # delete by filter
        # NOTE(review): "specifc" typo below is in a runtime help string and
        # is therefore preserved here; fix it in a behavior-changing commit.
        p_delbak_filter = p_delbak_subp.add_parser(
            BackupCommands.DeleteByFilter.LONG,
            aliases=[BackupCommands.DeleteByFilter.SHORT],
            description=
            "Delete a remote backup by resource name, uploader node name or older than specifc time"
        )
        self._add_remote(p_delbak_filter)
        p_delbak_filter.add_argument(
            "-t",
            "--time",
            type=str,
            help=
            "Delete backups older than specified time. Expected format is YYYYMMDD_HHMMSS"
        )
        p_delbak_filter.add_argument(
            "-r",
            "--resource",
            type=str,
            help="Delete backups matching the given resource name")
        p_delbak_filter.add_argument(
            "-n",
            "--node",
            type=str,
            help="Delete backups uploaded by the given node name")
        p_delbak_filter.set_defaults(func=self.del_by_filter)

        # delete all
        p_delbak_all = p_delbak_subp.add_parser(
            BackupCommands.DeleteAll.LONG,
            aliases=[BackupCommands.DeleteAll.SHORT],
            description=
            "Delete all Linstor backups of the given remote. Will NOT delete non-Linstor S3 objects"
        )
        self._add_remote(p_delbak_all)
        p_delbak_all.add_argument(
            "-c",
            "--cluster",
            action="store_true",
            help="Only delete Linstor backups created by the local cluster")
        p_delbak_all.set_defaults(func=self.del_all)

        # delete raw s3 key
        p_delbak_s3 = p_delbak_subp.add_parser(
            BackupCommands.DeleteS3Key.LONG,
            aliases=[BackupCommands.DeleteS3Key.SHORT],
            description=
            "Delete a given S3 object. Use this option to delete non-Linstor S3 objects"
        )
        self._add_remote(p_delbak_s3)
        p_delbak_s3.add_argument("s3key",
                                 type=str,
                                 help="S3 key to delete")
        # shared options across the delete variants
        self._add_cascading(p_delbak_id, p_delbak_filter)
        self._add_dry_run(p_delbak_id, p_delbak_filter, p_delbak_all,
                          p_delbak_s3)
        p_delbak_s3.set_defaults(func=self.del_s3)

        # restore backup
        p_rstbak = bkp_sub.add_parser(
            Commands.Subcommands.Restore.LONG,
            aliases=[Commands.Subcommands.Restore.SHORT],
            description=
            "Restore a backup. Either --id OR --resource must be used (not both)"
        )
        self._add_remote(p_rstbak)
        p_rstbak.add_argument(
            "target_node",
            help="Target node to restore").completer = self.node_completer
        p_rstbak.add_argument("target_resource",
                              help="Target resource name to restore into")
        p_rstbak.add_argument(
            "-r",
            "--resource",
            help="Restore the latest backup of the given resource")
        p_rstbak.add_argument(
            "-s",
            "--snapshot",
            help="Restore the latest backup with the given snapshot name")
        p_rstbak.add_argument("--id", help="Specific backup to restore")
        # nargs='?' + const='' lets a bare --passphrase trigger an
        # interactive prompt (see _get_passphrase)
        p_rstbak.add_argument(
            "--passphrase",
            type=str,
            nargs='?',
            action='store',
            help=
            "The passphrase of the uploader cluster. Required if the resource to restore has a LUKS layer",
            const='')
        p_rstbak.add_argument(
            "--storpool-rename",
            nargs='*',
            help="Rename storage pool names. Format: $oldname=$newname",
            action=BackupCommands._KeyValue)
        p_rstbak.add_argument("--download-only",
                              action='store_true',
                              help="Only download backups")
        p_rstbak.set_defaults(func=self.restore)

        # abort backup
        p_crtabort = bkp_sub.add_parser(
            Commands.Subcommands.Abort.LONG,
            aliases=[Commands.Subcommands.Abort.SHORT],
            description=
            "Aborts a backup. If neither --create nor --restore is given, both will be aborted (if any in "
            "progress)")
        self._add_remote(p_crtabort)
        p_crtabort.add_argument("resource", help="The resource to abort")
        p_crtabort.add_argument(
            "-r",
            "--restore",
            action="store_true",
            help="Only abort a restoration of the given resource")
        p_crtabort.add_argument(
            "-c",
            "--create",
            action="store_true",
            help="Only abort a creation of the given resource")
        p_crtabort.set_defaults(func=self.abort)

        # ship backup
        p_shipbak = bkp_sub.add_parser(
            BackupCommands.Ship.LONG,
            aliases=[BackupCommands.Ship.SHORT],
            description="Ships a backup to another Linstor cluster")
        self._add_remote(p_shipbak)
        p_shipbak.add_argument("source_resource",
                               help="The local resource name to ship")
        p_shipbak.add_argument(
            "target_resource",
            help="The resource name on the target Linstor cluster")
        p_shipbak.add_argument("--source-node",
                               help="Prefer the given node to send the backup")
        p_shipbak.add_argument(
            "--target-node",
            help=
            "Specify which node in the target Linstor cluster should receive the backup"
        )
        p_shipbak.add_argument(
            "--target-net-if",
            help=
            "Specify on which Linstor network interface the target node should listen"
        )
        p_shipbak.add_argument(
            "--target-storage-pool",
            help=
            "Specify in which target storage pool the backup should be received"
        )
        p_shipbak.add_argument("--download-only",
                               action='store_true',
                               help="Only download backups")
        # TODO: add stor_pool_renaming
        p_shipbak.set_defaults(func=self.ship)

        # backup info
        p_infobak = bkp_sub.add_parser(
            BackupCommands.Info.LONG,
            aliases=[BackupCommands.Info.SHORT],
            description=
            "Retrieve information about a given backup. Either --id OR --resource must be used (not both)."
            " Option --storpool-rename must be used in combination with --target-node"
        )
        self._add_remote(p_infobak)
        p_infobak.add_argument(
            "-r",
            "--resource",
            help="Get info about the latest backup of the given resource")
        p_infobak.add_argument(
            "-s",
            "--snapshot",
            help="Get info about the last backup with this snapshot name")
        p_infobak.add_argument(
            "--id", help="Get info about the given backup to restore")
        p_infobak.add_argument(
            "-n",
            "--target_node",
            help="Target node to calculate remaining free space"
        ).completer = self.node_completer
        p_infobak.add_argument(
            "--storpool-rename",
            nargs='*',
            help="Rename storage pool names. Format: $oldname=$newname",
            action=BackupCommands._KeyValue)
        p_infobak.add_argument('-p',
                               '--pastable',
                               action="store_true",
                               help='Generate pastable output')
        p_infobak.set_defaults(func=self.info)

        self.check_subcommands(p_delbak_subp, subcmd_delete)
        self.check_subcommands(bkp_sub, subcmds)

    def _add_remote(self, parser):
        """Attach the mandatory positional `remote` argument to a parser."""
        parser.add_argument("remote",
                            help="Remote used for deletion of backup(s)"
                            ).completer = self.remote_completer

    @classmethod
    def _add_cascading(cls, *parsers):
        """Attach the --cascade/--cascading flag to the given parsers."""
        for p in parsers:
            p.add_argument(
                "--cascade",
                "--cascading",
                action="store_true",
                help="Also delete backups depending on selected backups")

    @classmethod
    def _add_dry_run(cls, *parsers):
        """Attach the --dry-run/--dryrun flag to the given parsers."""
        for p in parsers:
            p.add_argument(
                "--dry-run",
                "--dryrun",
                action="store_true",
                help=
                "Does not delete anything, only shows what would be deleted")

    @classmethod
    def show_backups(cls, args, lstmsg):
        """Render a backup list; with --others show unknown S3 keys instead."""
        tbl = Table(utf8=not args.no_utf8,
                    colors=not args.no_color,
                    pastable=args.pastable)

        if args.others:
            for hdr in cls._backup_other_headers:
                tbl.add_header(hdr)
            for entry in lstmsg.other.files:
                tbl.add_row([entry])
        else:
            backup_hdr = list(cls._backup_headers)
            for hdr in backup_hdr:
                tbl.add_header(hdr)
            if args.show_id:
                tbl.add_header(linstor_client.TableHeader("Backup Name(ID)"))

            for backup in lstmsg.linstor:
                # resource, snapshot, finish time, base, status
                row = [backup.origin_rsc_name, backup.origin_snap_name]
                if backup.finished_timestamp:
                    # finished_timestamp is in milliseconds
                    row += [
                        datetime.fromtimestamp(
                            int(backup.finished_timestamp / 1000))
                    ]
                else:
                    row += [""]
                # NOTE(review): [0:-5] strips a fixed-length suffix from the
                # base backup name — confirm what those 5 characters are.
                row += [backup.based_on[0:-5] if backup.based_on else ""]
                status_text = "Success"
                status_color = Color.GREEN
                if backup.shipping:
                    status_text = "Shipping"
                    status_color = Color.YELLOW
                elif not backup.restorable:
                    status_text = "Not restorable"
                    status_color = Color.RED
                row += [tbl.color_cell(status_text, status_color)]
                if args.show_id:
                    row += [backup.id]
                tbl.add_row(row)
        tbl.show()

    def list_backups(self, args):
        """List backups on the given remote."""
        lstmsg = self.get_linstorapi().backup_list(
            remote_name=args.remote_name,
            resource_name=args.resource,
            snap_name=args.snapshot)
        return self.output_list(args,
                                lstmsg,
                                BackupCommands.show_backups,
                                machine_readable_raw=True)

    def create(self, args):
        """Create a backup on the remote; incremental unless --full is given."""
        replies = self.get_linstorapi().backup_create(
            remote_name=args.remote,
            resource_name=args.resource,
            incremental=not args.full,
            node_name=args.node,
            snap_name=args.snapshot)
        return self.handle_replies(args, replies)

    def del_by_id(self, args):
        """Delete a backup by exact ID, or by ID prefix with --prefix."""
        replies = self.get_linstorapi().backup_delete(
            args.remote,
            bak_id=args.id if not args.prefix else None,
            bak_id_prefix=args.id if args.prefix else None,
            cascade=args.cascade,
            dryrun=args.dry_run)
        return self.handle_replies(args, replies)

    def del_by_filter(self, args):
        """Delete backups matching any combination of time/resource/node."""
        if not args.time and not args.resource and not args.node:
            args.parser.error(
                "At least one of --time, --resource or --node has to be used")
            # raise LinstorArgumentError("At least one of --time, --resource or --node has to be used")
        replies = self.get_linstorapi().backup_delete(
            args.remote,
            timestamp=args.time,
            resource_name=args.resource,
            node_name=args.node,
            cascade=args.cascade,
            dryrun=args.dry_run)
        return self.handle_replies(args, replies)

    def del_all(self, args):
        """Delete all Linstor backups; --cluster limits to the local cluster."""
        replies = self.get_linstorapi().backup_delete(
            args.remote,
            all_linstor=True if not args.cluster else None,
            all_local_cluster=True if args.cluster else None,
            dryrun=args.dry_run)
        return self.handle_replies(args, replies)

    def del_s3(self, args):
        """Delete a raw S3 object by key."""
        replies = self.get_linstorapi().backup_delete(args.remote,
                                                      s3_key=args.s3key,
                                                      dryrun=args.dry_run)
        return self.handle_replies(args, replies)

    def restore(self, args):
        """Restore a backup onto a target node/resource."""
        replies = self.get_linstorapi().backup_restore(
            args.remote,
            args.target_node,
            args.target_resource,
            resource_name=args.resource,
            bak_id=args.id,
            passphrase=self._get_passphrase(args,
                                            "Origin clusters passphrase: "),
            stor_pool_map=args.storpool_rename,
            download_only=args.download_only,
            snap_name=args.snapshot,
        )
        return self.handle_replies(args, replies)

    def abort(self, args):
        """Abort backup creation and/or restoration for a resource."""
        replies = self.get_linstorapi().backup_abort(args.remote,
                                                     args.resource,
                                                     restore=args.restore,
                                                     create=args.create)
        return self.handle_replies(args, replies)

    def ship(self, args):
        """Ship a backup to another Linstor cluster."""
        replies = self.get_linstorapi().backup_ship(
            args.remote,
            args.source_resource,
            args.target_resource,
            src_node=args.source_node,
            dst_node=args.target_node,
            dst_net_if=args.target_net_if,
            dst_stor_pool=args.target_storage_pool,
            download_only=args.download_only)
        return self.handle_replies(args, replies)

    @classmethod
    def _get_passphrase(cls, args, message):
        """Resolve the passphrase: None if absent, given value, or prompt.

        A bare --passphrase (argparse const='') yields '' here, which
        triggers the interactive getpass prompt.
        """
        if args.passphrase is None:
            return None
        elif args.passphrase:
            return args.passphrase
        else:
            return getpass.getpass(message)

    def info(self, args):
        """Show detail info for a backup (sizes, storage pools, volumes)."""
        lstmsg = self.get_linstorapi().backup_info(
            args.remote,
            resource_name=args.resource,
            bak_id=args.id,
            target_node=args.target_node,
            stor_pool_map=args.storpool_rename,
            snap_name=args.snapshot,
        )
        return self.output_list(args,
                                lstmsg,
                                BackupCommands.show_backups_info,
                                machine_readable_raw=True)

    @classmethod
    def show_backups_info(cls, args, lstmsg):
        """Render backup info as two tables: resource summary and per-pool detail."""
        rsc_tbl = Table(utf8=not args.no_utf8,
                        colors=not args.no_color,
                        pastable=args.pastable)
        rsc_tbl.add_column("Resource")
        rsc_tbl.add_column("Snapshot")
        rsc_tbl.add_column("Full Backup")
        rsc_tbl.add_column("Latest Backup")
        rsc_tbl.add_column("Backup Count")
        rsc_tbl.add_column("Download Size")
        rsc_tbl.add_column("Allocated Size")
        # table will only have a single row
        row = [
            lstmsg.rsc, lstmsg.snap, lstmsg.full, lstmsg.latest, lstmsg.count
        ]
        row += [
            SizeCalc.approximate_size_string(lstmsg.dl_size),
            SizeCalc.approximate_size_string(lstmsg.alloc_size)
        ]
        rsc_tbl.add_row(row)
        rsc_tbl.show()

        stor_pool_tbl = Table(utf8=not args.no_utf8,
                              colors=not args.no_color,
                              pastable=args.pastable)
        stor_pool_tbl.add_column("Origin StorPool (Type)")
        if args.target_node:
            # target-side columns are only meaningful with --target_node
            stor_pool_tbl.add_column("Target Pool")
            stor_pool_tbl.add_column("Remaining Free Space")
        stor_pool_tbl.add_column("Volume to Download")
        stor_pool_tbl.add_column("Type")
        stor_pool_tbl.add_column("Download Size")
        stor_pool_tbl.add_column("Allocated Size")
        stor_pool_tbl.add_column("Usable Size")
        for stor_pool in lstmsg.storpools:
            row = [stor_pool.name + " (" + stor_pool.provider_kind + ")"]
            if args.target_node:
                row += [
                    stor_pool.target_name,
                ]
                # negative remaining space means the pool would overflow
                if stor_pool.remaining_space < 0:
                    row += [
                        stor_pool_tbl.color_cell(
                            "-" + SizeCalc.approximate_size_string(
                                -stor_pool.remaining_space), Color.RED)
                    ]
                else:
                    row += [
                        SizeCalc.approximate_size_string(
                            stor_pool.remaining_space)
                    ]
            # one newline-joined multi-line cell per column, one entry per volume
            vlm_to_dl_cell = []
            type_cell = []
            dl_size_cell = []
            alloc_size_cell = []
            usable_size_cell = []
            for volume in stor_pool.volumes:
                vlm_to_dl_cell += [volume.name if volume.name else "-"]
                type_cell += [volume.layer_type]
                dl_size_cell += [
                    SizeCalc.approximate_size_string(volume.dl_size)
                    if volume.dl_size else "-"
                ]
                alloc_size_cell += [
                    SizeCalc.approximate_size_string(volume.alloc_size)
                    if volume.alloc_size else "-"
                ]
                usable_size_cell += [
                    SizeCalc.approximate_size_string(volume.usable_size)
                    if volume.usable_size else "-"
                ]
            row += [
                "\n".join(vlm_to_dl_cell), "\n".join(type_cell),
                "\n".join(dl_size_cell), "\n".join(alloc_size_cell),
                "\n".join(usable_size_cell)
            ]
            stor_pool_tbl.add_row(row)
        stor_pool_tbl.show()

    # create a keyvalue class
    class _KeyValue(argparse.Action):
        """argparse action parsing repeated `key=value` tokens into a dict."""

        # Constructor calling
        def __call__(self, parser, namespace, values, option_string=None):
            setattr(namespace, self.dest, dict())
            for value in values:
                # split it into key and value
                # NOTE(review): splits on every '=' — a value containing '='
                # raises ValueError here; confirm whether split('=', 1) was
                # intended.
                key, value = value.split('=')
                # assign into dictionary
                getattr(namespace, self.dest)[key] = value
class SnapshotCommands(Commands):
    """CLI subcommands for creating, listing, shipping and restoring snapshots."""

    # columns for the snapshot-shipping overview table
    _shipping_headers = [
        linstor_client.TableHeader("ResName"),
        linstor_client.TableHeader("SnapName"),
        linstor_client.TableHeader("FromNode"),
        linstor_client.TableHeader("ToNode"),
        linstor_client.TableHeader(
            "Status",
            Color.DARKGREEN,
            alignment_text=linstor_client.TableHeader.ALIGN_RIGHT)
    ]

    def __init__(self):
        super(SnapshotCommands, self).__init__()

    def setup_commands(self, parser):
        """Register the `snapshot` command tree on the given argparse parser."""
        subcmds = [
            Commands.Subcommands.Create, Commands.Subcommands.List,
            Commands.Subcommands.Delete, Commands.Subcommands.Rollback,
            Commands.Subcommands.Resource,
            Commands.Subcommands.VolumeDefinition, Commands.Subcommands.Ship,
            Commands.Subcommands.ShipList
        ]

        # Snapshot subcommands
        snapshot_parser = parser.add_parser(
            Commands.SNAPSHOT,
            aliases=["s"],
            formatter_class=argparse.RawTextHelpFormatter,
            description="Snapshot subcommands")
        # NOTE(review): "shapshot" typo below is a runtime string (shown in
        # --help); preserved here, fix in a behavior-changing commit.
        snapshot_subp = snapshot_parser.add_subparsers(
            title="shapshot commands",
            metavar="",
            description=Commands.Subcommands.generate_desc(subcmds))

        # new snapshot
        p_new_snapshot = snapshot_subp.add_parser(
            Commands.Subcommands.Create.LONG,
            aliases=[Commands.Subcommands.Create.SHORT],
            description='Creates a snapshot of a resource.')
        p_new_snapshot.add_argument('--async',
                                    action='store_true',
                                    help='Deprecated, kept for compatibility')
        p_new_snapshot.add_argument(
            'node_name',
            type=str,
            nargs='*',
            help='Names of the nodes where the snapshot should be created. '
            'If none are given, the snapshot will be taken on all nodes '
            'where the given resources is present.'
        ).completer = self.node_completer
        p_new_snapshot.add_argument('resource_definition_name',
                                    type=str,
                                    help='Name of the resource definition'
                                    ).completer = self.resource_dfn_completer
        p_new_snapshot.add_argument(
            'snapshot_name',
            type=str,
            help='Name of the snapshot local to the resource definition')
        p_new_snapshot.set_defaults(func=self.create)

        # delete snapshot
        p_delete_snapshot = snapshot_subp.add_parser(
            Commands.Subcommands.Delete.LONG,
            aliases=[Commands.Subcommands.Delete.SHORT],
            description='Deletes a snapshot.')
        p_delete_snapshot.add_argument(
            'resource_definition_name',
            type=str,
            help='Name of the resource definition'
        ).completer = self.resource_dfn_completer
        p_delete_snapshot.add_argument(
            'snapshot_name',
            type=str,
            help='Name of the snapshot local to the resource definition')
        p_delete_snapshot.add_argument(
            '-n',
            '--nodes',
            type=str,
            nargs='+',
            help=
            'Only delete the snapshot from the given nodes. Default: Delete given snapshot from all nodes'
        )
        p_delete_snapshot.set_defaults(func=self.delete)

        # ship snapshot
        p_ship = snapshot_subp.add_parser(
            Commands.Subcommands.Ship.LONG,
            aliases=[Commands.Subcommands.Ship.SHORT],
            description='Ship a snapshot to another node.')
        p_ship.add_argument(
            '--from-node', required=True, type=str,
            help='Source node name').completer = self.node_completer
        p_ship.add_argument(
            '--to-node', required=True, type=str,
            help='Destination node name').completer = self.node_completer
        p_ship.add_argument('--resource',
                            required=True,
                            type=str,
                            help='Name of the resource to ship'
                            ).completer = self.resource_dfn_completer
        p_ship.set_defaults(func=self.ship)

        # list shippings
        p_ship_list = snapshot_subp.add_parser(
            Commands.Subcommands.ShipList.LONG,
            aliases=[Commands.Subcommands.ShipList.SHORT],
            description='List overview over snapshot shippings')
        p_ship_list.add_argument('-p',
                                 '--pastable',
                                 action="store_true",
                                 help='Generate pastable output')
        p_ship_list.add_argument('-r',
                                 '--resources',
                                 nargs='+',
                                 type=str,
                                 help='Filter by list of resources'
                                 ).completer = self.resource_completer
        p_ship_list.add_argument(
            '-n', '--nodes', nargs='+', type=str,
            help='Filter by list of nodes').completer = self.node_completer
        # NOTE(review): the two completers below look copy-pasted from
        # --nodes (snapshots/status complete node names) — confirm.
        p_ship_list.add_argument(
            '-s',
            '--snapshots',
            nargs='+',
            type=str,
            help='Filter by list of snapshots').completer = self.node_completer
        p_ship_list.add_argument(
            '--status',
            nargs='+',
            choices=[x.value.lower() for x in consts.SnapshotShipStatus],
            type=str.lower,
            help='Filter by list of statuses').completer = self.node_completer
        p_ship_list.set_defaults(func=self.shiplist)

        # roll back to snapshot
        p_rollback_snapshot = snapshot_subp.add_parser(
            Commands.Subcommands.Rollback.LONG,
            aliases=[Commands.Subcommands.Rollback.SHORT],
            description='Rolls resource data back to snapshot state. '
            'The resource must not be in use. '
            'The snapshot will not be removed and can be used for subsequent rollbacks. '
            'Only the most recent snapshot may be used; '
            'to roll back to an earlier snapshot, the intermediate snapshots must first be deleted.'
        )
        p_rollback_snapshot.add_argument(
            'resource_definition_name',
            type=str,
            help='Name of the resource definition'
        ).completer = self.resource_dfn_completer
        p_rollback_snapshot.add_argument(
            'snapshot_name',
            type=str,
            help='Name of the snapshot local to the resource definition')
        p_rollback_snapshot.set_defaults(func=self.rollback)

        # list snapshot definitions
        p_lsnapshots = snapshot_subp.add_parser(
            Commands.Subcommands.List.LONG,
            aliases=[Commands.Subcommands.List.SHORT],
            description=' Prints a list of all snapshots known to linstor. '
            'By default, the list is printed as a human readable table.')
        p_lsnapshots.add_argument('-p',
                                  '--pastable',
                                  action="store_true",
                                  help='Generate pastable output')
        p_lsnapshots.add_argument('-r',
                                  '--resources',
                                  nargs='+',
                                  type=str,
                                  help='Filter by list of resources'
                                  ).completer = self.resource_completer
        p_lsnapshots.add_argument(
            '-n', '--nodes', nargs='+', type=str,
            help='Filter by list of nodes').completer = self.node_completer
        p_lsnapshots.set_defaults(func=self.list)

        # volume definition commands
        volume_definition_subcmds = [Commands.Subcommands.Restore]

        volume_definition_parser = snapshot_subp.add_parser(
            Commands.Subcommands.VolumeDefinition.LONG,
            formatter_class=argparse.RawTextHelpFormatter,
            aliases=[Commands.Subcommands.VolumeDefinition.SHORT],
            description="%s subcommands" %
            Commands.Subcommands.VolumeDefinition.LONG)

        volume_definition_subp = volume_definition_parser.add_subparsers(
            title="%s subcommands" %
            Commands.Subcommands.VolumeDefinition.LONG,
            metavar="",
            description=Commands.Subcommands.generate_desc(
                volume_definition_subcmds))

        # restore volume definitions from snapshot
        p_restore_volume_definition = volume_definition_subp.add_parser(
            Commands.Subcommands.Restore.LONG,
            aliases=[Commands.Subcommands.Restore.SHORT],
            description='Creates volume definitions from a snapshot. '
            'Only the basic structure is restored, that is volume numbers and sizes. '
            'Additional configuration such as properties is not restored.')
        p_restore_volume_definition.add_argument(
            '--from-resource',
            '--fr',
            required=True,
            type=str,
            help='Name of the resource definition containing the snapshot'
        ).completer = self.resource_dfn_completer
        p_restore_volume_definition.add_argument(
            '--from-snapshot',
            '--fs',
            required=True,
            type=str,
            help='Name of the snapshot to restore from')
        p_restore_volume_definition.add_argument(
            '--to-resource',
            '--tr',
            required=True,
            type=str,
            help=
            'Name of the resource definition in which to create the volume definitions'
        ).completer = self.resource_dfn_completer
        p_restore_volume_definition.set_defaults(
            func=self.restore_volume_definition)

        # resource commands
        resource_subcmds = [Commands.Subcommands.Restore]

        resource_parser = snapshot_subp.add_parser(
            Commands.Subcommands.Resource.LONG,
            formatter_class=argparse.RawTextHelpFormatter,
            aliases=[Commands.Subcommands.Resource.SHORT],
            description="%s subcommands" % Commands.Subcommands.Resource.LONG)

        resource_subp = resource_parser.add_subparsers(
            title="%s subcommands" % Commands.Subcommands.Resource.LONG,
            metavar="",
            description=Commands.Subcommands.generate_desc(resource_subcmds))

        # restore resource from snapshot
        p_restore_snapshot = resource_subp.add_parser(
            Commands.Subcommands.Restore.LONG,
            aliases=[Commands.Subcommands.Restore.SHORT],
            description='Restores a snapshot on a node. '
            'Creates a new resource initialized with the data from a given snapshot. '
            'The volume definitions of the target resource must match those from the snapshot.'
        )
        p_restore_snapshot.add_argument(
            'node_name',
            type=str,
            nargs='*',
            help='Names of the nodes where the snapshot should be restored. '
            'If none are given, resources will be created on all nodes where the snapshot is present.'
        ).completer = self.node_completer
        p_restore_snapshot.add_argument(
            '--from-resource',
            '--fr',
            required=True,
            type=str,
            help='Name of the resource definition containing the snapshot'
        ).completer = self.resource_dfn_completer
        p_restore_snapshot.add_argument(
            '--from-snapshot',
            '--fs',
            required=True,
            type=str,
            help='Name of the snapshot to restore from')
        p_restore_snapshot.add_argument(
            '--to-resource',
            '--tr',
            required=True,
            type=str,
            help=
            'Name of the resource definition in which to create the resource from this snapshot'
        ).completer = self.resource_dfn_completer
        p_restore_snapshot.set_defaults(func=self.restore)

        self.check_subcommands(snapshot_subp, subcmds)

    def create(self, args):
        """Create a snapshot on the given (or all) nodes."""
        # vars() access because `async` is a reserved word in newer Pythons
        async_flag = vars(args)["async"]

        replies = self._linstor.snapshot_create(args.node_name,
                                                args.resource_definition_name,
                                                args.snapshot_name, async_flag)
        return self.handle_replies(args, replies)

    def restore_volume_definition(self, args):
        """Re-create volume definitions of a resource from a snapshot."""
        replies = self._linstor.snapshot_volume_definition_restore(
            args.from_resource, args.from_snapshot, args.to_resource)
        return self.handle_replies(args, replies)

    def restore(self, args):
        """Create a new resource from snapshot data."""
        replies = self._linstor.snapshot_resource_restore(
            args.node_name, args.from_resource, args.from_snapshot,
            args.to_resource)
        return self.handle_replies(args, replies)

    def delete(self, args):
        """Delete a snapshot, optionally restricted to certain nodes."""
        replies = self._linstor.snapshot_delete(args.resource_definition_name,
                                                args.snapshot_name, args.nodes)
        return self.handle_replies(args, replies)

    def rollback(self, args):
        """Roll a resource back to its most recent snapshot."""
        replies = self._linstor.snapshot_rollback(
            args.resource_definition_name, args.snapshot_name)
        return self.handle_replies(args, replies)

    @classmethod
    def show(cls, args, lstmsg):
        """Render the snapshot definition list as a table."""
        tbl = linstor_client.Table(utf8=not args.no_utf8,
                                   colors=not args.no_color,
                                   pastable=args.pastable)
        tbl.add_column("ResourceName")
        tbl.add_column("SnapshotName")
        tbl.add_column("NodeNames")
        tbl.add_column("Volumes")
        tbl.add_column("CreatedOn")
        tbl.add_column("State",
                       color=Output.color(Color.DARKGREEN, args.no_color))

        for snapshot_dfn in lstmsg.snapshots:
            # precedence: deleting > failed > disconnected > successful
            if FLAG_DELETE in snapshot_dfn.flags:
                state_cell = tbl.color_cell("DELETING", Color.RED)
            elif FLAG_FAILED_DEPLOYMENT in snapshot_dfn.flags:
                state_cell = tbl.color_cell("Failed", Color.RED)
            elif FLAG_FAILED_DISCONNECT in snapshot_dfn.flags:
                state_cell = tbl.color_cell("Satellite disconnected",
                                            Color.RED)
            elif FLAG_SUCCESSFUL in snapshot_dfn.flags:
                in_backup_restore = False
                in_backup_create = False
                if FLAG_BACKUP in snapshot_dfn.flags and FLAG_SHIPPING in snapshot_dfn.flags:
                    # distinguish upload (source) from download (target)
                    for snap in snapshot_dfn.snapshots:
                        in_backup_create |= FLAG_BACKUP_SOURCE in snap.flags
                        in_backup_restore |= FLAG_BACKUP_TARGET in snap.flags
                if in_backup_create:
                    state_cell = tbl.color_cell("Shipping", Color.YELLOW)
                elif in_backup_restore:
                    state_cell = tbl.color_cell("Restoring", Color.YELLOW)
                else:
                    state_cell = tbl.color_cell("Successful", Color.DARKGREEN)
            else:
                state_cell = tbl.color_cell("Incomplete", Color.DARKBLUE)

            snapshot_date = ""
            if snapshot_dfn.snapshots and snapshot_dfn.snapshots[
                    0].create_datetime:
                # truncate to "YYYY-MM-DD HH:MM:SS"
                snapshot_date = str(
                    snapshot_dfn.snapshots[0].create_datetime)[:19]

            tbl.add_row([
                snapshot_dfn.resource_name, snapshot_dfn.name,
                ", ".join([node_name for node_name in snapshot_dfn.nodes]),
                ", ".join([
                    str(snapshot_vlm_dfn.number) + ": " +
                    SizeCalc.approximate_size_string(snapshot_vlm_dfn.size)
                    for snapshot_vlm_dfn in
                    snapshot_dfn.snapshot_volume_definitions
                ]), snapshot_date, state_cell
            ])
        tbl.show()

    def list(self, args):
        """List snapshot definitions, optionally filtered."""
        lstmsg = self._linstor.snapshot_dfn_list(
            filter_by_nodes=args.nodes, filter_by_resources=args.resources)
        return self.output_list(args, lstmsg, self.show)

    def ship(self, args):
        """Ship a snapshot of a resource from one node to another."""
        replies = self.get_linstorapi().snapshot_ship(rsc_name=args.resource,
                                                      from_node=args.from_node,
                                                      to_node=args.to_node)
        return self.handle_replies(args, replies)

    def show_ship_list(self, args, shipping_resp):
        """
        Render the snapshot-shipping overview table.

        :param args: parsed CLI arguments
        :param shipping_resp: ShippingResponse
        :return: None
        """
        tbl = linstor_client.Table(utf8=not args.no_utf8,
                                   colors=not args.no_color,
                                   pastable=args.pastable)
        tbl.add_headers(self._shipping_headers)

        for shipping in shipping_resp.shippings:
            tbl.add_row([
                shipping.snapshot_dfn.resource_name,
                shipping.snapshot_dfn.snapshot_name,
                shipping.from_node_name,
                shipping.to_node_name,
                # completed shippings keep the default (green) column color
                tbl.color_cell(
                    shipping.status.value, None if shipping.status ==
                    consts.SnapshotShipStatus.COMPLETE else Color.YELLOW)
            ])
        tbl.show()

    def shiplist(self, args):
        """List snapshot shippings, optionally filtered."""
        lstmsg = self.get_linstorapi().snapshot_shipping_list(
            filter_by_nodes=args.nodes,
            filter_by_resources=args.resources,
            filter_by_snapshots=args.snapshots,
            filter_by_status=args.status)
        return self.output_list(args, lstmsg, self.show_ship_list)
class StoragePoolCommands(Commands):
    """CLI command group for LINSTOR storage pools.

    Registers the ``storage-pool`` (``sp``) subcommands (create/list/delete/
    set-property/list-properties) on the argument parser and implements the
    handlers that call through to the LINSTOR API client.
    """

    # Marker classes supplying the LONG/SHORT command names for each
    # storage-pool provider type accepted by "storage-pool create".
    class Lvm(object):
        LONG = "lvm"
        SHORT = "lvm"

    class LvmThin(object):
        LONG = "lvmthin"
        SHORT = "lvmthin"

    class Zfs(object):
        LONG = "zfs"
        SHORT = "zfs"

    class ZfsThin(object):
        LONG = "zfsthin"
        SHORT = "zfsthin"

    class SwordfishTarget(object):
        LONG = "swordfish_target"
        SHORT = "sft"

    class SwordfishInitiator(object):
        LONG = "swordfish_initiator"
        SHORT = "sfi"

    class Diskless(object):
        LONG = "diskless"
        SHORT = "diskless"

    class File(object):
        LONG = "file"
        SHORT = "file"

    class FileThin(object):
        LONG = "filethin"
        SHORT = "filethin"

    class SPDK(object):
        LONG = "spdk"
        SHORT = "spdk"

    # Column layout of the "storage-pool list" table; also defines the
    # legal --groupby choices (by header name).
    _stor_pool_headers = [
        linstor_client.TableHeader("StoragePool"),
        linstor_client.TableHeader("Node"),
        linstor_client.TableHeader("Driver"),
        linstor_client.TableHeader("PoolName"),
        linstor_client.TableHeader(
            "FreeCapacity",
            alignment_text=linstor_client.TableHeader.ALIGN_RIGHT),
        linstor_client.TableHeader(
            "TotalCapacity",
            alignment_text=linstor_client.TableHeader.ALIGN_RIGHT),
        linstor_client.TableHeader("SupportsSnapshots"),
        linstor_client.TableHeader("State")
    ]

    def __init__(self):
        super(StoragePoolCommands, self).__init__()

    @classmethod
    def _create_pool_args(cls, parser, shared_space=True):
        """Add the positional/optional arguments shared by all create parsers.

        :param parser: argparse sub-parser for one create variant.
        :param shared_space: when False (diskless pools) the
            --shared-space option is omitted entirely.
        """
        parser.add_argument(
            'node_name',
            type=str,
            help='Name of the node for the new storage pool'
        ).completer = cls.node_completer
        parser.add_argument('name', type=str, help='Name of the new storage pool')
        if shared_space:
            parser.add_argument('--shared-space', type=str, help='Name of used shared space')

    def setup_commands(self, parser):
        """Register the storage-pool command tree on *parser*."""
        # Storage pool subcommands
        subcmds = [
            Commands.Subcommands.Create,
            Commands.Subcommands.List,
            Commands.Subcommands.Delete,
            Commands.Subcommands.SetProperty,
            Commands.Subcommands.ListProperties
        ]

        sp_parser = parser.add_parser(
            Commands.STORAGE_POOL,
            aliases=["sp"],
            formatter_class=argparse.RawTextHelpFormatter,
            description="Storage pool subcommands")
        sp_subp = sp_parser.add_subparsers(
            title="Storage pool commands",
            metavar="",
            description=Commands.Subcommands.generate_desc(subcmds))

        subcmd_create = [
            StoragePoolCommands.Lvm,
            StoragePoolCommands.LvmThin,
            StoragePoolCommands.Zfs,
            StoragePoolCommands.ZfsThin,
            StoragePoolCommands.Diskless,
            StoragePoolCommands.File,
            StoragePoolCommands.FileThin,
            StoragePoolCommands.SwordfishTarget,
            StoragePoolCommands.SwordfishInitiator,
            StoragePoolCommands.SPDK
        ]

        sp_c_parser = sp_subp.add_parser(
            Commands.Subcommands.Create.LONG,
            aliases=[Commands.Subcommands.Create.SHORT],
            formatter_class=argparse.RawTextHelpFormatter,
            description='Defines a Linstor storage pool for use with Linstor.')
        create_subp = sp_c_parser.add_subparsers(
            title="Storage pool create commands",
            metavar="{" + ",".join([x.LONG for x in subcmd_create]) + "}",
            description=Commands.Subcommands.generate_desc(subcmd_create))

        # lvm
        p_new_lvm_pool = create_subp.add_parser(
            StoragePoolCommands.Lvm.LONG,
            aliases=[StoragePoolCommands.Lvm.SHORT],
            description='Create a lvm storage pool')
        self._create_pool_args(p_new_lvm_pool)
        p_new_lvm_pool.add_argument(
            'driver_pool_name',
            type=str,
            help='The Lvm volume group to use.')
        p_new_lvm_pool.set_defaults(func=self.create, driver=linstor.StoragePoolDriver.LVM)

        # spdk
        p_new_spdk_pool = create_subp.add_parser(
            StoragePoolCommands.SPDK.LONG,
            aliases=[StoragePoolCommands.SPDK.SHORT],
            description='Create a spdk storage pool')
        self._create_pool_args(p_new_spdk_pool)
        p_new_spdk_pool.add_argument(
            'driver_pool_name',
            type=str,
            help='The Spdk volume group to use.')
        p_new_spdk_pool.set_defaults(func=self.create, driver=linstor.StoragePoolDriver.SPDK)

        # lvmthin
        p_new_lvm_thin_pool = create_subp.add_parser(
            StoragePoolCommands.LvmThin.LONG,
            aliases=[StoragePoolCommands.LvmThin.SHORT],
            description='Create a lvm thin storage pool')
        self._create_pool_args(p_new_lvm_thin_pool)
        p_new_lvm_thin_pool.add_argument(
            'driver_pool_name',
            type=str,
            help='The LvmThin volume group to use. The full name of the thin pool, namely VG/LV')
        p_new_lvm_thin_pool.set_defaults(func=self.create, driver=linstor.StoragePoolDriver.LVMThin)

        # zfs
        p_new_zfs_pool = create_subp.add_parser(
            StoragePoolCommands.Zfs.LONG,
            aliases=[StoragePoolCommands.Zfs.SHORT],
            description='Create a zfs storage pool')
        self._create_pool_args(p_new_zfs_pool)
        p_new_zfs_pool.add_argument(
            'driver_pool_name',
            type=str,
            help='The name of the zpool to use.')
        p_new_zfs_pool.set_defaults(func=self.create, driver=linstor.StoragePoolDriver.ZFS)

        # zfsthin
        # NOTE(review): description reads "zfs storage pool" in the original
        # (same as plain zfs) — possibly intended "zfs thin"; kept as-is.
        p_new_zfsthin_pool = create_subp.add_parser(
            StoragePoolCommands.ZfsThin.LONG,
            aliases=[StoragePoolCommands.ZfsThin.SHORT],
            description='Create a zfs storage pool')
        self._create_pool_args(p_new_zfsthin_pool)
        p_new_zfsthin_pool.add_argument(
            'driver_pool_name',
            type=str,
            help='The name of the zpool to use.')
        p_new_zfsthin_pool.set_defaults(func=self.create, driver=linstor.StoragePoolDriver.ZFSThin)

        # diskless (no backing device, no shared space)
        p_new_diskless_pool = create_subp.add_parser(
            StoragePoolCommands.Diskless.LONG,
            aliases=[StoragePoolCommands.Diskless.SHORT],
            description='Create a diskless pool')
        self._create_pool_args(p_new_diskless_pool, shared_space=False)
        p_new_diskless_pool.set_defaults(
            func=self.create,
            driver=linstor.StoragePoolDriver.Diskless,
            driver_pool_name=None)

        # file
        p_new_file_pool = create_subp.add_parser(
            StoragePoolCommands.File.LONG,
            aliases=[StoragePoolCommands.File.SHORT],
            description='Create a file storage pool')
        self._create_pool_args(p_new_file_pool)
        p_new_file_pool.add_argument(
            'driver_pool_name',
            type=str,
            help='The directory to use.')
        p_new_file_pool.set_defaults(func=self.create, driver=linstor.StoragePoolDriver.FILE)

        # filethin
        p_new_file_thin_pool = create_subp.add_parser(
            StoragePoolCommands.FileThin.LONG,
            aliases=[StoragePoolCommands.FileThin.SHORT],
            description='Create a file thin storage pool')
        self._create_pool_args(p_new_file_thin_pool)
        p_new_file_thin_pool.add_argument(
            'driver_pool_name',
            type=str,
            help='The directory to use.')
        p_new_file_thin_pool.set_defaults(func=self.create, driver=linstor.StoragePoolDriver.FILEThin)

        # swordfish target — routed to create_swordfish, which forwards the
        # extra pool name as a driver property
        p_new_swordfish_target_pool = create_subp.add_parser(
            StoragePoolCommands.SwordfishTarget.LONG,
            aliases=[StoragePoolCommands.SwordfishTarget.SHORT],
            description='Create a swordfish target')
        self._create_pool_args(p_new_swordfish_target_pool)
        p_new_swordfish_target_pool.add_argument(
            'swordfish_storage_pool',
            type=str,
            help="Swordfish storage pool")
        p_new_swordfish_target_pool.set_defaults(
            func=self.create_swordfish,
            driver=linstor.StoragePoolDriver.SwordfishTarget)

        # swordfish initiator
        p_new_swordfish_initiator_pool = create_subp.add_parser(
            StoragePoolCommands.SwordfishInitiator.LONG,
            aliases=[StoragePoolCommands.SwordfishInitiator.SHORT],
            description='Create a swordfish initiator')
        self._create_pool_args(p_new_swordfish_initiator_pool)
        p_new_swordfish_initiator_pool.set_defaults(
            func=self.create,
            driver=linstor.StoragePoolDriver.SwordfishInitiator,
            driver_pool_name=None)
        # END CREATE SUBCMDS

        # remove-storpool
        p_rm_storpool = sp_subp.add_parser(
            Commands.Subcommands.Delete.LONG,
            aliases=[Commands.Subcommands.Delete.SHORT],
            description=' Removes a storage pool ')
        p_rm_storpool.add_argument(
            '-q', '--quiet',
            action="store_true",
            help='Unless this option is used, linstor will issue a safety question '
                 'that must be answered with yes, otherwise the operation is canceled.')
        p_rm_storpool.add_argument(
            'node_name',
            nargs="+",
            help='Name of the Node where the storage pool exists.'
        ).completer = self.node_completer
        p_rm_storpool.add_argument(
            'name',
            help='Name of the storage pool to delete'
        ).completer = self.storage_pool_completer
        p_rm_storpool.set_defaults(func=self.delete)

        # list storpool
        storpoolgroupby = [x.name for x in self._stor_pool_headers]
        storpool_group_completer = Commands.show_group_completer(storpoolgroupby, "groupby")

        p_lstorpool = sp_subp.add_parser(
            Commands.Subcommands.List.LONG,
            aliases=[Commands.Subcommands.List.SHORT],
            description='Prints a list of all storage pool known to '
                        'linstor. By default, the list is printed as a human readable table.')
        p_lstorpool.add_argument(
            '-p', '--pastable',
            action="store_true",
            help='Generate pastable output')
        p_lstorpool.add_argument(
            '-g', '--groupby',
            nargs='+',
            choices=storpoolgroupby
        ).completer = storpool_group_completer
        p_lstorpool.add_argument(
            '-s', '--storage-pools',
            nargs='+',
            type=str,
            help='Filter by list of storage pools'
        ).completer = self.storage_pool_completer
        p_lstorpool.add_argument(
            '-n', '--nodes',
            nargs='+',
            type=str,
            help='Filter by list of nodes'
        ).completer = self.node_completer
        p_lstorpool.set_defaults(func=self.list)

        # show properties
        p_sp = sp_subp.add_parser(
            Commands.Subcommands.ListProperties.LONG,
            aliases=[Commands.Subcommands.ListProperties.SHORT],
            description="Prints all properties of the given storage pool.")
        p_sp.add_argument(
            '-p', '--pastable',
            action="store_true",
            help='Generate pastable output')
        p_sp.add_argument(
            'node_name',
            type=str,
            help='Name of the node for the storage pool'
        ).completer = self.node_completer
        p_sp.add_argument(
            'storage_pool_name',
            help="Storage pool for which to print the properties"
        ).completer = self.storage_pool_completer
        p_sp.set_defaults(func=self.print_props)

        # set properties
        p_setprop = sp_subp.add_parser(
            Commands.Subcommands.SetProperty.LONG,
            aliases=[Commands.Subcommands.SetProperty.SHORT],
            formatter_class=argparse.RawTextHelpFormatter,
            description='Sets properties for the given storage pool on the given node.')
        p_setprop.add_argument(
            'node_name',
            type=str,
            help='Name of the node for the storage pool'
        ).completer = self.node_completer
        p_setprop.add_argument(
            'name',
            type=str,
            help='Name of the storage pool'
        ).completer = self.storage_pool_completer
        Commands.add_parser_keyvalue(p_setprop, 'storagepool')
        p_setprop.set_defaults(func=self.set_props)

        self.check_subcommands(create_subp, subcmd_create)
        self.check_subcommands(sp_subp, subcmds)

    def create(self, args):
        """Create a storage pool; diskless pools never carry a shared space.

        Translates API-level LinstorError into the CLI's ArgumentError.
        """
        try:
            # Diskless parsers do not define --shared-space, so it must not
            # be read from args in that case.
            shrd_space = None if args.driver == linstor.StoragePoolDriver.Diskless else args.shared_space
            replies = self.get_linstorapi().storage_pool_create(
                args.node_name,
                args.name,
                args.driver,
                args.driver_pool_name,
                shared_space=shrd_space)
        except linstor.LinstorError as e:
            raise ArgumentError(e.message)
        return self.handle_replies(args, replies)

    def create_swordfish(self, args):
        """Create a swordfish target pool.

        The swordfish backing pool is passed via a namespaced driver
        property rather than as the driver pool name.
        """
        prefix_key = linstor.consts.NAMESPC_STORAGE_DRIVER + '/'
        properties = {
            prefix_key + linstor.consts.KEY_STOR_POOL_SF_STOR_POOL: args.swordfish_storage_pool
        }
        try:
            replies = self.get_linstorapi().storage_pool_create(
                args.node_name,
                args.name,
                args.driver,
                None,
                shared_space=args.shared_space,
                property_dict=properties)
        except linstor.LinstorError as e:
            raise ArgumentError(e.message)
        return self.handle_replies(args, replies)

    def delete(self, args):
        """Delete the named storage pool on every listed node."""
        # execute delete storpooldfns and flatten result list
        replies = [
            x for subx in args.node_name
            for x in self._linstor.storage_pool_delete(subx, args.name)
        ]
        return self.handle_replies(args, replies)

    def show(self, args, lstmsg):
        """Render the storage-pool list response as a table.

        Accumulates per-pool reports and prints them after the table so
        warnings/errors are not interleaved with rows.
        """
        tbl = linstor_client.Table(
            utf8=not args.no_utf8,
            colors=not args.no_color,
            pastable=args.pastable)
        for hdr in self._stor_pool_headers:
            tbl.add_header(hdr)

        storage_pool_resp = lstmsg  # type: StoragePoolListResponse

        tbl.set_groupby(args.groupby if args.groupby else [self._stor_pool_headers[0].name])

        errors = []
        for storpool in storage_pool_resp.storage_pools:
            driver_device = linstor.StoragePoolDriver.storage_props_to_driver_pool(
                storpool.provider_kind, storpool.properties)

            # Capacity columns stay blank for diskless pools or when the
            # satellite has not reported free space yet.
            free_capacity = ""
            total_capacity = ""
            if not storpool.is_diskless() and storpool.free_space is not None:
                free_capacity = SizeCalc.approximate_size_string(storpool.free_space.free_capacity)
                total_capacity = SizeCalc.approximate_size_string(storpool.free_space.total_capacity)

            # collect distinct reports for printing below the table
            for error in storpool.reports:
                if error not in errors:
                    errors.append(error)

            state_str, state_color = self.get_replies_state(storpool.reports)
            tbl.add_row([
                storpool.name,
                storpool.node_name,
                storpool.provider_kind,
                driver_device,
                free_capacity,
                total_capacity,
                storpool.supports_snapshots(),
                tbl.color_cell(state_str, state_color)
            ])
        tbl.show()

        for err in errors:
            Output.handle_ret(err, warn_as_error=args.warn_as_error, no_color=args.no_color)

    def list(self, args):
        """List storage pools, optionally filtered by node / pool name."""
        lstmsg = self._linstor.storage_pool_list(args.nodes, args.storage_pools)
        return self.output_list(args, lstmsg, self.show)

    @classmethod
    def _props_show(cls, args, lstmsg):
        """Extract the property maps of all pools in the response."""
        result = []
        if lstmsg:
            response = lstmsg  # type: StoragePoolListResponse
            for stor_pool in response.storage_pools:
                result.append(stor_pool.properties)
        return result

    def print_props(self, args):
        """Print the properties of one storage pool on one node."""
        lstmsg = self._linstor.storage_pool_list([args.node_name], [args.storage_pool_name])
        return self.output_props_list(args, lstmsg, self._props_show)

    def set_props(self, args):
        """Set or delete a single property on the given storage pool."""
        args = self._attach_aux_prop(args)
        mod_prop_dict = Commands.parse_key_value_pairs([args.key + '=' + args.value])
        replies = self._linstor.storage_pool_modify(
            args.node_name,
            args.name,
            mod_prop_dict['pairs'],
            mod_prop_dict['delete'])
        return self.handle_replies(args, replies)