def ui_command_create(self, name, file_or_dev, size=None, generate_wwn=None,
                      buffered=None, sparse=None):
    '''
    Creates a FileIO storage object.

    If I{file_or_dev} is a path to a regular file to be used as backend,
    then the I{size} parameter is mandatory. Else, if I{file_or_dev} is a
    path to a block device, the I{size} parameter B{must} be omitted. If
    present, I{size} is the size of the backing file to be used or
    created.

    The optional I{generate_wwn} parameter is a boolean specifying whether
    or not we should generate a T10 wwn Serial for the unit (by default,
    yes).

    The I{buffered} parameter is a boolean stating whether or not to
    enable buffered mode. It is enabled by default (asynchronous mode).

    The I{sparse} parameter is only applicable when creating a new backing
    file. It is a boolean stating if the created file should be created as
    a sparse file (the default), or fully initialized.

    SIZE SYNTAX
    ===========
    - If size is an int, it represents a number of bytes.
    - If size is a string, the following units can be used:
        - B{B} or no unit present for bytes
        - B{k}, B{K}, B{kB}, B{KB} for kB (kilobytes)
        - B{m}, B{M}, B{mB}, B{MB} for MB (megabytes)
        - B{g}, B{G}, B{gB}, B{GB} for GB (gigabytes)
        - B{t}, B{T}, B{tB}, B{TB} for TB (terabytes)
    '''
    self.assert_root()
    self.assert_available_so_name(name)
    self.shell.log.debug("Using params size=%s generate_wwn=%s buffered=%s"
                         " sparse=%s"
                         % (size, generate_wwn, buffered, sparse))
    sparse = self.ui_eval_param(sparse, 'bool', True)

    backstore = FileIOBackstore(self.next_hba_index(), mode='create')

    is_dev = get_block_type(file_or_dev) is not None \
        or is_disk_partition(file_or_dev)

    if size is None and is_dev:
        try:
            so = FileIOStorageObject(
                backstore, name, file_or_dev,
                gen_wwn=self.prm_gen_wwn(generate_wwn),
                buffered_mode=self.prm_buffered(buffered))
        except Exception, exception:
            backstore.delete()
            raise exception
        self.shell.log.info("Created fileio %s with size %s."
                            % (name, size))
        self.shell.log.info("Note: block backstore preferred for"
                            " best results.")
        ui_so = UIStorageObject(so, self)
        return self.new_node(ui_so)
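
# Illustrative sketch only: the method above relies on rtslib's
# get_block_type() and is_disk_partition() helpers to decide whether
# file_or_dev points at a block device. A minimal stand-alone predicate
# with roughly the same intent (this is NOT the rtslib implementation,
# just an assumed equivalent for illustration) could look like this:
import os
import stat

def _looks_like_block_device(path):
    '''
    Returns True if path exists and is a block device node, False
    otherwise (regular file, missing path, permission error, ...).
    '''
    try:
        mode = os.stat(path).st_mode
    except OSError:
        return False
    return stat.S_ISBLK(mode)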
def ui_command_create(self, name, file_or_dev, size=None, buffered=None,
                      sparse=None):
    '''
    Creates a FileIO storage object.

    If I{file_or_dev} is a path to a regular file to be used as backend,
    then the I{size} parameter is mandatory. Else, if I{file_or_dev} is a
    path to a block device, the I{size} parameter B{must} be omitted. If
    present, I{size} is the size of the backing file to be used or
    created.

    The I{buffered} parameter is a boolean stating whether or not to
    enable buffered mode. It is enabled by default (asynchronous mode).

    The I{sparse} parameter is only applicable when creating a new backing
    file. It is a boolean stating if the created file should be created as
    a sparse file (the default), or fully initialized.

    SIZE SYNTAX
    ===========
    - If size is an int, it represents a number of bytes.
    - If size is a string, the following units can be used:
        - B{B} or no unit present for bytes
        - B{k}, B{K}, B{kB}, B{KB} for kB (kilobytes)
        - B{m}, B{M}, B{mB}, B{MB} for MB (megabytes)
        - B{g}, B{G}, B{gB}, B{GB} for GB (gigabytes)
        - B{t}, B{T}, B{tB}, B{TB} for TB (terabytes)
    '''
    self.assert_root()
    self.assert_available_so_name(name)
    self.shell.log.debug("Using params size=%s buffered=%s sparse=%s"
                         % (size, buffered, sparse))
    sparse = self.ui_eval_param(sparse, 'bool', True)

    backstore = FileIOBackstore(self.next_hba_index(), mode='create')

    is_dev = get_block_type(file_or_dev) is not None \
        or is_disk_partition(file_or_dev)

    if size is None and is_dev:
        try:
            so = FileIOStorageObject(
                backstore, name, file_or_dev,
                buffered_mode=self.prm_buffered(buffered))
        except Exception, exception:
            backstore.delete()
            raise exception
        self.shell.log.info("Created fileio %s with size %s."
                            % (name, size))
        self.shell.log.info("Note: block backstore preferred for"
                            " best results.")
        ui_so = UIStorageObject(so, self)
        return self.new_node(ui_so)
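
# Hypothetical usage sketch (not part of the original code). This method
# backs the 'create' command of a configshell-based UI node; a direct call
# equivalent to the documented behavior might look like the following. The
# variable name ui_fileio_node is an assumption made for illustration.
#
#   # Block device backend: size must be omitted, it is taken from the device.
#   ui_fileio_node.ui_command_create('disk0', '/dev/sdb')
#
#   # Regular file backend: size is mandatory (see SIZE SYNTAX above).
#   ui_fileio_node.ui_command_create('disk1', '/tmp/disk1.img', size='100M')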
def apply_create_obj(obj):
    '''
    Creates an object on the live system.
    '''
    # TODO Factorize this when stable, merging it with update and delete,
    # leveraging rtslib 'any' mode (create if not exist)
    # TODO storage
    root = get_root()
    log.debug("apply_create(%s)" % obj.data)
    if obj.key[0] == 'mapped_lun':
        acl = obj.parent
        if acl.parent.key[0] == 'tpgt':
            tpg = acl.parent
            target = tpg.parent
        else:
            tpg = None
            target = acl.parent
        fabric = target.parent
        lio_fabric = FabricModule(fabric.key[1])
        lio_target = Target(lio_fabric, wwn=target.key[1], mode='lookup')
        if tpg is None:
            tpgt = 1
        else:
            tpgt = int(tpg.key[1])
        lio_tpg = TPG(lio_target, tpgt, mode='lookup')
        node_wwn = acl.key[1]
        lio_acl = NodeACL(lio_tpg, node_wwn, mode='lookup')
        mlun = int(obj.key[1])
        write_protect = obj_attr(obj, "write_protect")
        tpg_lun = int(obj_attr(obj, "target_lun").rpartition(' ')[2])
        lio_mlun = MappedLUN(lio_acl, mlun, tpg_lun, write_protect)
        apply_group_attrs(obj, lio_mlun)
    elif obj.key[0] == 'acl':
        if obj.parent.key[0] == 'tpgt':
            tpg = obj.parent
            target = tpg.parent
        else:
            tpg = None
            target = obj.parent
        fabric = target.parent
        lio_fabric = FabricModule(fabric.key[1])
        lio_target = Target(lio_fabric, wwn=target.key[1], mode='lookup')
        if tpg is None:
            tpgt = 1
        else:
            tpgt = int(tpg.key[1])
        lio_tpg = TPG(lio_target, tpgt, mode='lookup')
        node_wwn = obj.key[1]
        lio_acl = NodeACL(lio_tpg, node_wwn)
        apply_group_attrs(obj, lio_acl)
    elif obj.key[0] == 'portal':
        if obj.parent.key[0] == 'tpgt':
            tpg = obj.parent
            target = tpg.parent
        else:
            tpg = None
            target = obj.parent
        fabric = target.parent
        lio_fabric = FabricModule(fabric.key[1])
        lio_target = Target(lio_fabric, wwn=target.key[1], mode='lookup')
        if tpg is None:
            tpgt = 1
        else:
            tpgt = int(tpg.key[1])
        lio_tpg = TPG(lio_target, tpgt, mode='lookup')
        (address, _, port) = obj.key[1].partition(':')
        port = int(port)
        lio_portal = NetworkPortal(lio_tpg, address, port)
        apply_group_attrs(obj, lio_portal)
    elif obj.key[0] == 'lun':
        if obj.parent.key[0] == 'tpgt':
            tpg = obj.parent
            target = tpg.parent
        else:
            tpg = None
            target = obj.parent
        fabric = target.parent
        lio_fabric = FabricModule(fabric.key[1])
        lio_target = Target(lio_fabric, wwn=target.key[1], mode='lookup')
        if tpg is None:
            tpgt = 1
        else:
            tpgt = int(tpg.key[1])
        lio_tpg = TPG(lio_target, tpgt, mode='lookup')
        lun = int(obj.key[1])
        (plugin, name) = obj_attr(obj, "backend")
        # TODO move that to a separate function, use for disk too
        matching_lio_so = [so for so in root.storage_objects
                           if so.backstore.plugin == plugin
                           and so.name == name]
        if len(matching_lio_so) > 1:
            raise ConfigError("Detected unsupported configfs storage objects "
                              "allocation schema for storage object '%s'"
                              % obj.path_str)
        elif len(matching_lio_so) == 0:
            raise ConfigError("Could not find storage object '%s %s' for '%s'"
                              % (plugin, name, obj.path_str))
        else:
            lio_so = matching_lio_so[0]
        lio_lun = LUN(lio_tpg, lun, lio_so)
        apply_group_attrs(obj, lio_lun)
    elif obj.key[0] == 'tpgt':
        target = obj.parent
        fabric = target.parent
        has_enable = len(obj.search([("enable", ".*")])) != 0
        if has_enable:
            enable = obj_attr(obj, "enable")
        lio_fabric = FabricModule(fabric.key[1])
        lio_target = Target(lio_fabric, wwn=target.key[1], mode='lookup')
        tpgt = int(obj.key[1])
        try:
            nexus_wwn = obj_attr(obj, "nexus_wwn")
            lio_tpg = TPG(lio_target, tpgt, nexus_wwn=nexus_wwn)
        except ConfigError:
            lio_tpg = TPG(lio_target, tpgt)
        if has_enable:
            lio_tpg.enable = enable
        apply_group_attrs(obj, lio_tpg)
    elif obj.key[0] == 'target':
        fabric = obj.parent
        wwn = obj.key[1]
        lio_fabric = FabricModule(fabric.key[1])
        lio_target = Target(lio_fabric, wwn=wwn)
        apply_group_attrs(obj, lio_target)
        if not lio_target.has_feature("tpgts"):
            try:
                nexus_wwn = obj_attr(obj, "nexus_wwn")
                lio_tpg = TPG(lio_target, 1, nexus_wwn=nexus_wwn)
            except ConfigError:
                lio_tpg = TPG(lio_target, 1)
            if len(obj.search([("enable", ".*")])) != 0:
                lio_tpg.enable = True
    elif obj.key[0] == 'fabric':
        lio_fabric = FabricModule(obj.key[1])
        apply_group_attrs(obj, lio_fabric)
    elif obj.key[0] == 'disk':
        plugin = obj.parent.key[1]
        name = obj.key[1]
        idx = max([0] + [b.index for b in root.backstores
                         if b.plugin == plugin]) + 1
        if plugin == 'fileio':
            dev = obj_attr(obj, "path")
            size = obj_attr(obj, "size")
            try:
                wwn = obj_attr(obj, "wwn")
            except ConfigError:
                wwn = None
            buffered = obj_attr(obj, "buffered")
            lio_bs = FileIOBackstore(idx)
            lio_so = lio_bs.storage_object(name, dev, size, wwn, buffered)
            apply_group_attrs(obj, lio_so)
        elif plugin == 'iblock':
            # TODO Add policy for iblock
            lio_bs = IBlockBackstore(idx)
            dev = obj_attr(obj, "path")
            wwn = obj_attr(obj, "wwn")
            lio_so = lio_bs.storage_object(name, dev, wwn)
            apply_group_attrs(obj, lio_so)
        elif plugin == 'pscsi':
            # TODO Add policy for pscsi
            lio_bs = PSCSIBackstore(idx)
            dev = obj_attr(obj, "path")
            lio_so = lio_bs.storage_object(name, dev)
            apply_group_attrs(obj, lio_so)
        elif plugin == 'rd_mcp':
            # TODO Add policy for rd_mcp
            lio_bs = RDMCPBackstore(idx)
            size = obj_attr(obj, "size")
            wwn = obj_attr(obj, "wwn")
            nullio = obj_attr(obj, "nullio")
            lio_so = lio_bs.storage_object(name, size, wwn, nullio)
            apply_group_attrs(obj, lio_so)
        else:
            raise ConfigError("Unknown backend '%s' for backstore '%s'"
                              % (plugin, obj))
        matching_lio_so = [so for so in root.storage_objects
                           if so.backstore.plugin == plugin
                           and so.name == name]
        if len(matching_lio_so) > 1:
            raise ConfigError("Detected unsupported configfs storage objects "
                              "allocation schema for '%s'" % obj.path_str)
        elif len(matching_lio_so) == 0:
            raise ConfigError("Could not find backstore '%s'" % obj.path_str)
        else:
            lio_so = matching_lio_so[0]
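
# The two "matching_lio_so" lookups above are candidates for the factoring
# mentioned in the TODO. A possible stand-alone helper (an illustrative
# sketch derived from that logic, not part of the original module) could be:

def find_storage_object(root, plugin, name, path_str):
    '''
    Returns the unique live storage object matching (plugin, name), or
    raises ConfigError if none or several are found. path_str is only
    used to build the error messages.
    '''
    matches = [so for so in root.storage_objects
               if so.backstore.plugin == plugin and so.name == name]
    if len(matches) > 1:
        raise ConfigError("Detected unsupported configfs storage objects "
                          "allocation schema for '%s'" % path_str)
    elif not matches:
        raise ConfigError("Could not find storage object '%s %s' for '%s'"
                          % (plugin, name, path_str))
    return matches[0]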
def ui_command_create(self, backstore_plugin):
    '''
    Creates a new backstore, using the chosen I{backstore_plugin}. More
    than one backstore using the same I{backstore_plugin} can co-exist.
    They will be identified by incremental index numbers, starting from 0.

    AVAILABLE BACKSTORE PLUGINS
    ===========================

    B{iblock}
    ---------
    This I{backstore_plugin} provides I{SPC-4}, along with I{ALUA} and
    I{Persistent Reservations} emulation on top of Linux BLOCK devices:
    B{any block device} that appears in /sys/block.

    B{pscsi}
    --------
    Provides pass-through for Linux physical SCSI devices. It can be used
    with any storage object that does B{direct pass-through} of SCSI
    commands without SCSI emulation. This assumes an underlying SCSI
    device that appears with lsscsi in /proc/scsi/scsi, such as a SAS
    hard drive or any other SCSI device. The Linux kernel code for device
    SCSI drivers resides in linux/drivers/scsi. SCSI-3 and higher is
    supported with this subsystem, but only for control CDBs supported by
    the device firmware.

    B{fileio}
    ---------
    This I{backstore_plugin} provides I{SPC-4}, along with I{ALUA} and
    I{Persistent Reservations} emulation on top of Linux VFS devices:
    B{any file on a mounted filesystem}. It may be backed by a file or an
    underlying real block device. FILEIO uses struct file to serve block
    I/O with various methods (synchronous or asynchronous) and (buffered
    or direct).

    B{rd_mcp}
    ---------
    This I{backstore_plugin} uses a ramdisk with a separate mapping using
    memory copy. Typically used for bandwidth testing.

    EXAMPLE
    =======

    B{create iblock}
    ----------------
    Creates a new backstore, using the B{iblock} I{backstore_plugin}.
    '''
    self.assert_root()
    self.shell.log.debug("%r" % [(backstore.plugin, backstore.index)
                                 for backstore in RTSRoot().backstores])
    indexes = [backstore.index for backstore in RTSRoot().backstores
               if backstore.plugin == backstore_plugin]
    self.shell.log.debug("Existing %s backstore indexes: %r"
                         % (backstore_plugin, indexes))
    backstore_index = None
    for index in range(1048576):
        if index not in indexes:
            backstore_index = index
            break

    if backstore_index is None:
        self.shell.log.error("Cannot find an available backstore index.")
        return
    else:
        self.shell.log.info("First available %s backstore index is %d."
                            % (backstore_plugin, backstore_index))

    if backstore_plugin == 'pscsi':
        backstore = PSCSIBackstore(backstore_index, mode='create')
        return self.new_node(UIPSCSIBackstoreLegacy(backstore, self))
    elif backstore_plugin == 'rd_mcp':
        backstore = RDMCPBackstore(backstore_index, mode='create')
        return self.new_node(UIRDMCPBackstoreLegacy(backstore, self))
    elif backstore_plugin == 'fileio':
        backstore = FileIOBackstore(backstore_index, mode='create')
        return self.new_node(UIFileIOBackstoreLegacy(backstore, self))
    elif backstore_plugin == 'iblock':
        backstore = IBlockBackstore(backstore_index, mode='create')
        return self.new_node(UIIBlockBackstoreLegacy(backstore, self))
    else:
        self.shell.log.error("Invalid backstore plugin %s"
                             % backstore_plugin)
        return

    self.shell.log.info("Created new backstore %s" % backstore.name)
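
# The linear scan above picks the first free index for the chosen plugin.
# The same selection, expressed as a small helper (illustrative sketch only,
# not part of the original class), could be written as:

def first_free_index(used_indexes, limit=1048576):
    '''
    Returns the smallest integer in [0, limit) that is not present in
    used_indexes, or None if the whole range is exhausted.
    '''
    used = set(used_indexes)
    for index in range(limit):
        if index not in used:
            return index
    return None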
def apply_create_obj(obj):
    '''
    Creates an object on the live system.
    '''
    # TODO Factorize this when stable, merging it with update and delete,
    # leveraging rtslib 'any' mode (create if not exist)
    # TODO storage
    root = get_root()
    log.debug("apply_create(%s)" % obj.data)
    if obj.key[0] == 'mapped_lun':
        acl = obj.parent
        if acl.parent.key[0] == 'tpgt':
            tpg = acl.parent
            target = tpg.parent
        else:
            tpg = None
            target = acl.parent
        fabric = target.parent
        lio_fabric = FabricModule(fabric.key[1])
        lio_target = Target(lio_fabric, wwn=target.key[1], mode='lookup')
        if tpg is None:
            tpgt = 1
        else:
            tpgt = int(tpg.key[1])
        lio_tpg = TPG(lio_target, tpgt, mode='lookup')
        node_wwn = acl.key[1]
        lio_acl = NodeACL(lio_tpg, node_wwn, mode='lookup')
        mlun = int(obj.key[1])
        write_protect = obj_attr(obj, "write_protect")
        tpg_lun = int(obj_attr(obj, "target_lun").rpartition(' ')[2])
        lio_mlun = MappedLUN(lio_acl, mlun, tpg_lun, write_protect)
        apply_group_attrs(obj, lio_mlun)
    elif obj.key[0] == 'acl':
        if obj.parent.key[0] == 'tpgt':
            tpg = obj.parent
            target = tpg.parent
        else:
            tpg = None
            target = obj.parent
        fabric = target.parent
        lio_fabric = FabricModule(fabric.key[1])
        lio_target = Target(lio_fabric, wwn=target.key[1], mode='lookup')
        if tpg is None:
            tpgt = 1
        else:
            tpgt = int(tpg.key[1])
        lio_tpg = TPG(lio_target, tpgt, mode='lookup')
        node_wwn = obj.key[1]
        lio_acl = NodeACL(lio_tpg, node_wwn)
        apply_group_attrs(obj, lio_acl)
    elif obj.key[0] == 'portal':
        if obj.parent.key[0] == 'tpgt':
            tpg = obj.parent
            target = tpg.parent
        else:
            tpg = None
            target = obj.parent
        fabric = target.parent
        lio_fabric = FabricModule(fabric.key[1])
        lio_target = Target(lio_fabric, wwn=target.key[1], mode='lookup')
        if tpg is None:
            tpgt = 1
        else:
            tpgt = int(tpg.key[1])
        lio_tpg = TPG(lio_target, tpgt, mode='lookup')
        (address, _, port) = obj.key[1].partition(':')
        port = int(port)
        lio_portal = NetworkPortal(lio_tpg, address, port)
        apply_group_attrs(obj, lio_portal)
    elif obj.key[0] == 'lun':
        if obj.parent.key[0] == 'tpgt':
            tpg = obj.parent
            target = tpg.parent
        else:
            tpg = None
            target = obj.parent
        fabric = target.parent
        lio_fabric = FabricModule(fabric.key[1])
        lio_target = Target(lio_fabric, wwn=target.key[1], mode='lookup')
        if tpg is None:
            tpgt = 1
        else:
            tpgt = int(tpg.key[1])
        lio_tpg = TPG(lio_target, tpgt, mode='lookup')
        lun = int(obj.key[1])
        (plugin, name) = obj_attr(obj, "backend")
        # TODO move that to a separate function, use for disk too
        matching_lio_so = [so for so in root.storage_objects
                           if so.backstore.plugin == plugin
                           and so.name == name]
        if len(matching_lio_so) > 1:
            raise ConfigError("Detected unsupported configfs storage objects "
                              "allocation schema for storage object '%s'"
                              % obj.path_str)
        elif len(matching_lio_so) == 0:
            raise ConfigError("Could not find storage object '%s %s' for '%s'"
                              % (plugin, name, obj.path_str))
        else:
            lio_so = matching_lio_so[0]
        lio_lun = LUN(lio_tpg, lun, lio_so)
        apply_group_attrs(obj, lio_lun)
    elif obj.key[0] == 'tpgt':
        target = obj.parent
        fabric = target.parent
        has_enable = len(obj.search([("enable", ".*")])) != 0
        if has_enable:
            enable = obj_attr(obj, "enable")
        lio_fabric = FabricModule(fabric.key[1])
        lio_target = Target(lio_fabric, wwn=target.key[1], mode='lookup')
        tpgt = int(obj.key[1])
        lio_tpg = TPG(lio_target, tpgt)
        if has_enable:
            lio_tpg.enable = enable
        apply_group_attrs(obj, lio_tpg)
    elif obj.key[0] == 'target':
        fabric = obj.parent
        wwn = obj.key[1]
        lio_fabric = FabricModule(fabric.key[1])
        lio_target = Target(lio_fabric, wwn=wwn)
        apply_group_attrs(obj, lio_target)
        if not lio_target.has_feature("tpgts"):
            lio_tpg = TPG(lio_target, 1)
            if len(obj.search([("enable", ".*")])) != 0:
                lio_tpg.enable = True
    elif obj.key[0] == 'fabric':
        lio_fabric = FabricModule(obj.key[1])
        apply_group_attrs(obj, lio_fabric)
    elif obj.key[0] == 'disk':
        plugin = obj.parent.key[1]
        name = obj.key[1]
        idx = max([0] + [b.index for b in root.backstores
                         if b.plugin == plugin]) + 1
        if plugin == 'fileio':
            dev = obj_attr(obj, "path")
            size = obj_attr(obj, "size")
            buffered = obj_attr(obj, "buffered")
            lio_bs = FileIOBackstore(idx)
            lio_so = lio_bs.storage_object(name, dev, size, True, buffered)
            apply_group_attrs(obj, lio_so)
        elif plugin == 'iblock':
            # TODO Add policy for iblock
            lio_bs = IBlockBackstore(idx)
            dev = obj_attr(obj, "path")
            lio_so = lio_bs.storage_object(name, dev, True)
            apply_group_attrs(obj, lio_so)
        elif plugin == 'pscsi':
            # TODO Add policy for pscsi
            lio_bs = PSCSIBackstore(idx)
            dev = obj_attr(obj, "path")
            lio_so = lio_bs.storage_object(name, dev)
            apply_group_attrs(obj, lio_so)
        elif plugin == 'rd_mcp':
            # TODO Add policy for rd_mcp
            lio_bs = RDMCPBackstore(idx)
            size = obj_attr(obj, "size")
            nullio = obj_attr(obj, "nullio")
            lio_so = lio_bs.storage_object(name, size, True, nullio)
            apply_group_attrs(obj, lio_so)
        else:
            raise ConfigError("Unknown backend '%s' for backstore '%s'"
                              % (plugin, obj))
        matching_lio_so = [so for so in root.storage_objects
                           if so.backstore.plugin == plugin
                           and so.name == name]
        if len(matching_lio_so) > 1:
            raise ConfigError("Detected unsupported configfs storage objects "
                              "allocation schema for '%s'" % obj.path_str)
        elif len(matching_lio_so) == 0:
            raise ConfigError("Could not find backstore '%s'" % obj.path_str)
        else:
            lio_so = matching_lio_so[0]
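
# The 'disk' branch above dispatches on the plugin name to pick a backstore
# class. A table-driven variant of that dispatch (an illustrative sketch
# derived from the branch above, not part of the original function) could
# look like:

BACKSTORE_CLASSES = {
    'fileio': FileIOBackstore,
    'iblock': IBlockBackstore,
    'pscsi': PSCSIBackstore,
    'rd_mcp': RDMCPBackstore,
}

def backstore_for_plugin(plugin, idx):
    '''
    Returns a new backstore instance of the class registered for plugin,
    or raises ConfigError for an unknown plugin name.
    '''
    try:
        return BACKSTORE_CLASSES[plugin](idx)
    except KeyError:
        raise ConfigError("Unknown backend '%s'" % plugin)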
def ui_command_create(self, name, file_or_dev, size=None, buffered=None,
                      sparse=None):
    '''
    Creates a FileIO storage object.

    If I{file_or_dev} is a path to a regular file to be used as backend,
    then the I{size} parameter is mandatory. Else, if I{file_or_dev} is a
    path to a block device, the I{size} parameter B{must} be omitted. If
    present, I{size} is the size of the backing file to be used or
    created.

    The I{buffered} parameter is a boolean stating whether or not to
    enable buffered mode. It is enabled by default (asynchronous mode).

    The I{sparse} parameter is only applicable when creating a new backing
    file. It is a boolean stating if the created file should be created as
    a sparse file (the default), or fully initialized.

    SIZE SYNTAX
    ===========
    - If size is an int, it represents a number of bytes.
    - If size is a string, the following units can be used:
        - B{B} or no unit present for bytes
        - B{k}, B{K}, B{kB}, B{KB} for kB (kilobytes)
        - B{m}, B{M}, B{mB}, B{MB} for MB (megabytes)
        - B{g}, B{G}, B{gB}, B{GB} for GB (gigabytes)
        - B{t}, B{T}, B{tB}, B{TB} for TB (terabytes)
    '''
    self.assert_root()
    self.assert_available_so_name(name)
    self.shell.log.debug("Using params size=%s buffered=%s sparse=%s"
                         % (size, buffered, sparse))
    sparse = self.ui_eval_param(sparse, 'bool', True)

    backstore = FileIOBackstore(self.next_hba_index(), mode='create')

    is_dev = get_block_type(file_or_dev) is not None \
        or is_disk_partition(file_or_dev)

    if is_dev:
        if size:
            self.shell.log.info("Block device, size parameter ignored")
            size = None
        self.shell.log.info("Note: block backstore preferred for best results")
    else:
        # use the given size only if the backing file does not exist yet
        if os.path.isfile(file_or_dev):
            new_size = str(os.path.getsize(file_or_dev))
            if size:
                self.shell.log.info("%s exists, using its size (%s bytes) instead"
                                    % (file_or_dev, new_size))
            size = new_size
        else:
            # create the backing file and extend it to the given size
            if not size:
                raise ExecutionError("Attempting to create file for new"
                                     " fileio backstore, need a size")
            self._create_file(file_or_dev, human_to_bytes(size), sparse)

    try:
        so = FileIOStorageObject(
            backstore, name, file_or_dev, size,
            buffered_mode=self.prm_buffered(buffered))
    except Exception, exception:
        backstore.delete()
        raise exception
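
# The SIZE SYNTAX accepted above is converted with human_to_bytes() before
# the backing file is created. A minimal stand-alone converter implementing
# the documented unit letters (a sketch only, written under the assumption
# of 1024-based units; the real helper used by this module may differ) could
# look like:

def parse_human_size(hsize, kilo=1024):
    '''
    Converts strings such as '512', '100M' or '2GB' into a byte count,
    following the unit letters documented in the SIZE SYNTAX section.
    '''
    size = str(hsize).strip()
    units = {'K': kilo, 'M': kilo ** 2, 'G': kilo ** 3, 'T': kilo ** 4}
    # Strip an optional trailing 'B', then an optional unit letter.
    if size and size[-1].upper() == 'B':
        size = size[:-1]
    multiplier = 1
    if size and size[-1].upper() in units:
        multiplier = units[size[-1].upper()]
        size = size[:-1]
    return int(size) * multiplier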