def create_tpg(self, target, tag):
    """Return the TPG for (target, tag), creating and enabling it if absent.

    Newly created TPGs get authentication disabled and are registered in
    the local tracking structure with empty acl/lun/portal children.
    """
    tpgs = self.target[target.wwn]['tpg']
    existing = tpgs.get(tag)
    if existing is not None:
        Log.info('tpg (%s, %s) already exists, not creating' % (target, tag))
        return existing['tpg']
    Log.info('creating tpg (%s, %s)' % (target, tag))
    # Create and configure the target portal group
    tpg = TPG(target, tag)
    tpg.set_attribute("authentication", 0)
    tpg.enable = 1
    # Track the new TPG along with its (initially empty) child structures
    tpgs[tag] = {
        'tpg': tpg,
        'acl': {'mapped_lun': {}},
        'lun': {},
        'portal': {},
    }
    return tpg
def clone(self, params):
    """Clone a ZFS snapshot.

    params -- {src: snapshot name, dst: destination name,
               properties: {prop: value, ...}, createParent: bool}

    Raises ValueError when required parameters are missing or when the
    source snapshot does not exist. No-op (with a log message) when the
    destination already exists.
    """
    if params is None:
        raise ValueError('zfs clone function received no parameters')
    command = ['clone']
    snapshot = params.get('src', None)
    properties = params.get('properties', None)
    dest = params.get('dst', None)
    createParent = params.get('createParent', None)
    if not snapshot:
        raise ValueError('zfs clone function needs a snapshot parameter')
    if not dest:
        raise ValueError('zfs clone function needs a dest parameter')
    if createParent:
        command.append('-p')
    if properties is not None:
        # items() is Py2/Py3 compatible (iteritems() is gone in Py3);
        # 'prop' avoids shadowing the property() builtin
        for prop, value in properties.items():
            command.append('-o')
            command.append('%s=%s' % (prop, value))
    command.append(snapshot)
    command.append(dest)
    # 'in' replaces dict.has_key(), which was removed in Python 3
    if snapshot in self.all:
        if dest in self.all:
            Log.info('%s already exists. Not creating clone' % dest)
        else:
            self.run(command)
    else:
        raise ValueError('Snapshot %s does not exist, clone %s cannot be created' % (snapshot, dest))
def process(self):
    """Run every pending LUN clone job, recording per-task failures.

    Returns an error payload dict when any task failed, otherwise None.
    """
    errors = []
    for task in LUNCloneJob.pending():
        try:
            Log.info('%s job %s started' % (task.job_type.code, task.id))
            task.set_status_start_job()
            if task.is_create():
                self.create_clones(task)
            else:
                self.delete_clones(task)
            task.set_status_complete()
            Log.info('%s job %s completed' % (task.job_type.code, task.id))
        except Exception as e:  # Py2.6+/Py3 compatible except syntax
            # Record the message plus stack trace so the failure is
            # diagnosable from the job record alone
            error = u'%s:\n%s' % (str(e), traceback.format_exc())
            Log.error('%s job %s failed' % (task.job_type.code, task.id))
            Log.error(error)
            task.set_status_failed(error)
            errors.append(task.error)
    # The original built an unused 'message' dict here; dropped as dead code
    if errors:
        return {'status': 'error', 'errors': errors}
def create_tpg(self, target, tag):
    """Fetch or create the target portal group identified by (target, tag).

    A freshly created TPG is enabled, has authentication turned off, and
    is recorded in the per-target tracking dict with empty child maps.
    """
    tpg_list = self.target[target.wwn]['tpg']
    if tpg_list.get(tag, None) is not None:
        Log.info('tpg (%s, %s) already exists, not creating' % (target, tag))
        tpg = tpg_list[tag]['tpg']
    else:
        Log.info('creating tpg (%s, %s)' % (target, tag))
        # Create and configure the target portal group
        tpg = TPG(target, tag)
        tpg.set_attribute("authentication", 0)
        tpg.enable = 1
        # Set up the list of TPGs for this target
        entry = {'tpg': tpg, 'lun': {}, 'portal': {}}
        entry['acl'] = {'mapped_lun': {}}
        tpg_list[tag] = entry
    return tpg
def __init__(self, params):
    """Validate required image parameters, derive device/chroot paths,
    and prepare the image.

    Raises ValueError naming every missing required parameter.
    """
    Log.info('params: %s' % params)
    required = ['target_ip', 'target_wwn', 'target_lun', 'target_part',
                'console_port', 'console_speed', 'console_params',
                'ctrl_iface', 'ctrl_mac', 'initiator_fqdn', 'initiator_wwn']
    # Comprehension replaces the manual append loop; the original's
    # second Log.info(params) was redundant (params logged above) and
    # has been removed
    missing = [arg for arg in required if params.get(arg, None) is None]
    if len(missing) == 1:
        raise ValueError('Missing parameter: %s' % missing[0])
    elif len(missing) > 1:
        raise ValueError('Missing parameters: %s' % ', '.join(missing))
    params['device'] = self.iscsi_device(params)
    params['chroot'] = self.chroot_dir(params)
    # Something outside this library lowercases the wwn, so
    # we lowercase the input to stay consistent
    params['target_wwn'] = params['target_wwn'].lower()
    self.prep(params)
def process(self):
    """Process all pending LUN clone jobs; failures are logged, stored
    on the task, and collected.

    Returns {'status': 'error', 'errors': [...]} when anything failed,
    otherwise None.
    """
    errors = []
    for task in LUNCloneJob.pending():
        try:
            Log.info('%s job %s started' % (task.job_type.code, task.id))
            task.set_status_start_job()
            if task.is_create():
                self.create_clones(task)
            else:
                self.delete_clones(task)
            task.set_status_complete()
            Log.info('%s job %s completed' % (task.job_type.code, task.id))
        except Exception as e:  # 'as' form is Py2.6+/Py3 compatible
            # Keep the stack trace with the message for diagnosis
            error = u'%s:\n%s' % (str(e), traceback.format_exc())
            Log.error('%s job %s failed' % (task.job_type.code, task.id))
            Log.error(error)
            task.set_status_failed(error)
            errors.append(task.error)
    # Dead 'message' local from the original removed
    if len(errors) > 0:
        return {'status': 'error', 'errors': errors}
def powerCycle(self):
    """Power cycle all slots (1-14) in the chassis."""
    if not self.ping():
        Log.info('%s not pingable, cannot power cycle all blades' % self.host)
        return
    for slot in range(1, 15):
        self.powerCycleSlot(slot)
def create(self, params):
    '''zfs create

    params -- {name: name,
               properties: {property: value, p2: v2, ...},
               volume: {size: size, sparse: true|false},
               createParent: true or false}

    Returns True when the dataset was created, None when it already
    existed (preserving the original truthy/None contract).
    '''
    # If no arguments provided, return immediately
    if params is None:
        raise ValueError('zfs create function received no parameters')
    command = ['create']
    name = params.get('name', None)
    properties = params.get('properties', None)
    volume = params.get('volume', None)
    createParent = params.get('createParent', None)
    if not name:
        raise ValueError('zfs create function needs a name parameter')
    if createParent:
        command.append('-p')
    if properties is not None:
        # items() is Py2/Py3 compatible; 'prop' avoids shadowing the
        # property() builtin
        for prop, value in properties.items():
            command.append('-o')
            command.append('%s=%s' % (prop, value))
    if volume is not None:
        size = volume.get('size', None)
        sparse = volume.get('sparse', None)
        if not size:
            raise ValueError('Volumes must have a size attribute')
        command.append('-V')
        command.append(size)
        # 'sparse and sparse != False' reduces to a plain truthiness test
        if sparse:
            command.append('-s')
    command.append(name)
    # Flag is set if the dataset is created
    created = None
    # 'in' replaces dict.has_key(), removed in Python 3
    if name in self.all:
        if volume:
            Log.info('%s already exists. Not creating volume' % name)
        else:
            Log.info('%s already exists. Not creating filesystem' % name)
    else:
        self.run(command)
        created = True
    return created
def create_mapped_lun(self, acl, num, lun):
    """Map `lun` as mapped-LUN `num` on `acl` unless the ACL already has
    any mapped LUNs; returns the new MappedLUN, or None when skipped."""
    existing = list(acl.mapped_luns)
    if existing:
        Log.info('mapped lun (%s, %s, %s) already exists' % (acl, num, lun))
        return None
    Log.info('creating mapped lun (%s, %s, %s)' % (acl, num, lun))
    return MappedLUN(acl, num, lun)
def run(self, args):
    '''Run a zfs command.

    args -- list of zfs subcommand arguments. The caller's list is no
    longer mutated (the original insert(0, ...) modified it in place).

    Returns the command output split into lines; raises ValueError
    carrying the captured output when the command fails.
    '''
    cmd = [self.zfs] + args  # new list; do not mutate the caller's args
    output = ''
    try:
        Log.info(' '.join(cmd))
        output = subprocess.check_output(
            cmd, stderr=subprocess.STDOUT).split('\n')
    except subprocess.CalledProcessError as e:
        raise ValueError(str(e.output))
    # Return the captured output (the original computed it and dropped it)
    return output
def powerCycle(self):
    """Power cycle each of the chassis' blade slots (1 through 14)."""
    if self.ping():
        for slot_num in range(1, 15):
            self.powerCycleSlot(slot_num)
    else:
        Log.info('%s not pingable, cannot power cycle all blades' % self.host)
def create_task(self, job_type_name):
    """Queue a new LUN clone job built from the parsed request arguments.

    Returns (payload, HTTP status): 201 on success; 501 with the error
    and stack trace on failure.
    """
    try:
        args = self.array_parser.parse_args()
        details = LUNCloneJob.with_args(args, job_type_name)
        Log.info('%s job %s added to queue' % (job_type_name, details.id))
        return {'status': 'ok', 'job_id': details.id}, 201
    except Exception as e:  # 'as' syntax works on Py2.6+ and Py3
        Log.error('%s:\n%s' % (str(e), traceback.format_exc()))
        return {'status': str(e), 'stacktrace': traceback.format_exc()}, 501
def create_mapped_lun(self, acl, num, lun):
    """Create mapped LUN `num` for `lun` under `acl` when the ACL has no
    mapped LUNs yet; otherwise log and return None."""
    mapped = None
    already_mapped = bool(list(acl.mapped_luns))
    if already_mapped:
        Log.info('mapped lun (%s, %s, %s) already exists' % (acl, num, lun))
    else:
        Log.info('creating mapped lun (%s, %s, %s)' % (acl, num, lun))
        mapped = MappedLUN(acl, num, lun)
    return mapped
def collectAndClearEventLog(self, couch):
    """Walk the chassis event log over SNMP, persist each completed
    entry to CouchDB, then clear the hardware event log."""
    log = {}
    # OIDs to retrieve
    oids = ['BLADE-MIB::readEnhancedEventLogAttribute',
            'BLADE-MIB::readEnhancedEventLogMessage']
    # Get the mapping of blade serial numbers to blade document ids
    serialNumberToDocId = Blade.serialNumberToDocId(couch)
    for oid in oids:
        # Start async snmpwalk. This call returns immediately
        # and spawns a background thread to perform the SNMPWalk
        snmpWalk = SNMPWalk.withConfigFile(self.host, oid)
        # While the snmpwalk is running
        while not snmpWalk.eof:
            # Get snmp oid/value pairs off the queue
            while not snmpWalk.queue.empty():
                (oid, value) = snmpWalk.queue.get()
                (mibName, oidBase, lastOctet) = snmpWalk.extractOidParts(oid)
                if oidBase != 'readEnhancedEventLogNumber':
                    # Fetch the existing entry for this log line, or
                    # start a new one; 'entry' avoids shadowing the
                    # dict builtin and .get replaces has_key (Py3)
                    entry = log.get(lastOctet, {})
                    # Update the entry with this line from the snmpwalk
                    if oidBase == 'readEnhancedEventLogAttribute':
                        entry.update(self.parseEventLogAttribute(
                            value, serialNumberToDocId))
                    else:
                        match = re.search('^Text:(.*)$', value)
                        if match:
                            value = match.group(1)
                        entry.update({'message': value})
                    # Update the log entry list
                    log[lastOctet] = entry
                    # On the final snmp walk command, create CouchDB objects
                    if entry and oidBase == 'readEnhancedEventLogMessage':
                        logEntry = LogEntry(entry)
                        logEntry.persist(couch)
        # Join snmpwalk background thread
        snmpWalk.join()
    snmp = SNMP.withConfigFile(self.host)
    snmp.set('BLADE-MIB::clearEventLog.0', 'i', '1')
    Log.info('%s system log entries collected from %s' % (len(log), self.name))
def run(self, args):
    '''Run a command; on failure raise an Exception carrying stderr and
    the process exit code.'''
    Log.info('%s' % ' '.join(args))
    proc = subprocess.Popen(args,
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    stdout, stderr = proc.communicate()
    if proc.returncode == 0:
        return
    # an error happened!
    raise Exception("%s. Code: %s" % (stderr.strip(), proc.returncode))
def powerOnOff(self, func):
    """Apply a power function to every slot (1-14) in the chassis.

    func: int 0 = off, 1 = on, 2 = soft off
    """
    if not self.ping():
        Log.info('%s is not pingable, cannot apply power func %d all slots'
                 % (self.host, func))
        return
    for slot in range(1, 15):
        self.powerOnOffSlot(slot, func)
def delete_block_store(self, name):
    """Delete the named block store and drop it from local tracking.

    The backing device, file, etc, still exists afterwards.
    """
    store = self.block_store.get(name)
    if store is None:
        # If blockstore doesn't exist, do not proceed
        Log.info('No block store %s. Not deleting' % name)
        return
    Log.info('deleting block store %s' % (name))
    store.delete()
    del self.block_store[name]
def delete_block_store(self, name):
    """Remove the block store called `name`, if present; the backing
    device or file continues to exist."""
    store = self.block_store.get(name)
    if store is not None:
        Log.info('deleting block store %s' % (name))
        # Delete the block store object; backing storage is preserved
        store.delete()
        self.block_store.pop(name)
    else:
        Log.info('No block store %s. Not deleting' % name)
def remove_clone(self, task, zfs, args):
    """Destroy the clone named by args['dst'] when deleteClones is set.

    Returns an error-message dict when deleteClones is set but dst is
    missing; otherwise returns None.
    """
    # If deleteClones = true, then the dst parameter is also required.
    deleteClones = args.get('deleteClones', None)
    if deleteClones:
        if args.get('dst', None) is None:
            return {'message': 'deleteClones parameter requires dst '
                               'parameter'}
        zfs.destroy(args)
    else:
        # args.get avoids the original's KeyError when dst is absent here
        Log.info('Delete task %s did not specify deleteClones param for %s'
                 % (task.id, args.get(u'dst')))
def snapshot(self, params):
    """Create a ZFS snapshot.

    params -- {name: full snapshot name}; no-op (with a log message)
    when the snapshot already exists.
    """
    if params is None:
        raise ValueError('zfs snapshot function received no parameters')
    name = params.get('name', None)
    if name is None:
        raise ValueError('zfs snapshot function needs a name parameter')
    # 'not in' replaces dict.has_key(), which Python 3 removed
    if name not in self.all:
        command = ['snapshot', name]
        self.run(command)
    else:
        Log.info('%s already exists. Not creating snapshot' % name)
def powerOnOff(self, func):
    """Power on / off all slots in the chassis.

    func: int 0 = off, 1 = on, 2 = soft off
    """
    if self.ping():
        for slot_num in range(1, 15):
            self.powerOnOffSlot(slot_num, func)
    else:
        Log.info('%s is not pingable, cannot apply power func %d all slots'
                 % (self.host, func))
def create_acl(self, tpg, initiator_name):
    """Return the ACL for initiator_name on this TPG, creating it and
    caching it in the local tracking dict when absent."""
    wwn = tpg.parent_target.wwn
    acl_list = self.target[wwn]['tpg'][tpg.tag]['acl']
    if initiator_name in acl_list:
        Log.info('acl (%s, %s) already exists, not creating'
                 % (tpg, initiator_name))
        return acl_list[initiator_name]
    Log.info('creating acl (%s, %s)' % (tpg, initiator_name))
    acl = NodeACL(tpg, initiator_name)
    acl_list[initiator_name] = acl
    return acl
def create_portal(self, tpg, ip, port):
    """Return the network portal for (ip, port) on this TPG, creating
    and caching it when it does not already exist."""
    portal_id = self.get_portal_id(ip, port)
    wwn = tpg.parent_target.wwn
    portal_list = self.target[wwn]['tpg'][tpg.tag]['portal']
    if portal_id in portal_list:
        Log.info('portal %s already exists, not creating' % (portal_id))
        return portal_list[portal_id]
    Log.info('creating portal (%s, %s, %s)' % (tpg, ip, port))
    portal = NetworkPortal(tpg, ip, port)
    portal_list[portal_id] = portal
    return portal
def create_portal(self, tpg, ip, port):
    """Create (or fetch the cached) NetworkPortal for (ip, port) on the
    given TPG."""
    portal = None
    pid = self.get_portal_id(ip, port)
    portals = self.target[tpg.parent_target.wwn]['tpg'][tpg.tag]['portal']
    if pid not in portals:
        Log.info('creating portal (%s, %s, %s)' % (tpg, ip, port))
        portal = NetworkPortal(tpg, ip, port)
        portals[pid] = portal
    else:
        Log.info('portal %s already exists, not creating' % (pid))
        portal = portals[pid]
    return portal
def create_acl(self, tpg, initiator_name):
    """Create (or fetch the cached) NodeACL for the given initiator on
    this TPG."""
    acl = None
    acls = self.target[tpg.parent_target.wwn]['tpg'][tpg.tag]['acl']
    if initiator_name not in acls:
        Log.info('creating acl (%s, %s)' % (tpg, initiator_name))
        acl = NodeACL(tpg, initiator_name)
        acls[initiator_name] = acl
    else:
        Log.info('acl (%s, %s) already exists, not creating'
                 % (tpg, initiator_name))
        acl = acls[initiator_name]
    return acl
def create_lun(self, tpg, blockstore):
    """Return the LUN backed by `blockstore` on this TPG, creating it as
    LUN 0 and caching it when absent."""
    lun_list = self.target[tpg.parent_target.wwn]['tpg'][tpg.tag]['lun']
    lun = lun_list.get(blockstore.name)
    if lun is not None:
        # LUN already exists
        Log.info('lun %s already exists, not creating' % (blockstore.name))
        return lun
    Log.info('creating lun %s, blockstore %s' % (tpg, blockstore))
    # Create the LUN and track it locally by its backing store name
    lun = LUN(tpg, 0, blockstore)
    lun_list[blockstore.name] = lun
    return lun
def destroy(self, params):
    """Destroy the ZFS dataset named by params['dst'].

    No-op (with a log message) when the dataset does not exist.
    Raises ValueError when params or the dst name is missing.
    """
    # Verify that parameters were received
    if params is None:
        raise ValueError('zfs destroy function received no parameters')
    # Get the name
    name = params.get('dst', None)
    if not name:
        raise ValueError('zfs destroy function needs a name parameter')
    # 'in' replaces dict.has_key(), removed in Python 3
    if name in self.all:
        command = ['destroy', name]
        self.run(command)
    else:
        Log.info('%s does not exist. Cannot destroy' % name)
def create_lun(self, tpg, blockstore):
    """Create (or fetch the cached) LUN for `blockstore` on this TPG.

    New LUNs are always created as LUN number 0.
    """
    wwn = tpg.parent_target.wwn
    cached = self.target[wwn]['tpg'][tpg.tag]['lun']
    existing = cached.get(blockstore.name, None)
    if existing is None:
        Log.info('creating lun %s, blockstore %s' % (tpg, blockstore))
        # Create the LUN, then record it for later lookups
        existing = LUN(tpg, 0, blockstore)
        cached[blockstore.name] = existing
    else:
        Log.info('lun %s already exists, not creating' % (blockstore.name))
    return existing
def remove_clone(self, task, zfs, args):
    """When args['deleteClones'] is truthy, destroy the clone named by
    args['dst']; otherwise just log that deletion was skipped.

    Returns an error-message dict when deleteClones is set without dst.
    """
    # If deleteClones = true, then the dst parameter is also required.
    deleteClones = args.get('deleteClones', None)
    if deleteClones:
        if args.get('dst', None) is None:
            message = {
                'message': 'deleteClones parameter requires dst '
                           'parameter'
            }
            return message
        zfs.destroy(args)
    else:
        # Use .get: the original args[u'dst'] raised KeyError when the
        # delete request carried no dst at all
        Log.info(
            'Delete task %s did not specify deleteClones param for %s'
            % (task.id, args.get(u'dst')))
def create_target(self, wwn):
    """Return the iSCSI target for `wwn`, creating it and registering it
    (with an empty TPG map) when it does not already exist."""
    existing = self.target.get(wwn)
    if existing is not None:
        Log.info('target %s already exists, not creating' % (wwn))
        return existing['target']
    Log.info('creating target with wwn %s' % (wwn))
    # The wwn will be lowercased automatically by something
    # outside this library. I'm not sure if its RTSLib or
    # the underlying Linux target system
    target = Target(self.iscsi, wwn)
    # Add target to data structure, initialize empty child nodes
    self.target[wwn] = {'target': target, 'tpg': {}}
    return target
def create_target(self, wwn):
    """Create (or fetch the already-tracked) Target for the given wwn."""
    entry = self.target.get(wwn, None)
    if entry is None:
        Log.info('creating target with wwn %s' % (wwn))
        # The wwn will be lowercased automatically by something
        # outside this library (possibly RTSLib, possibly the
        # underlying Linux target subsystem)
        new_target = Target(self.iscsi, wwn)
        # Register it along with an empty TPG child map
        self.target[wwn] = {'target': new_target, 'tpg': {}}
        return new_target
    Log.info('target %s already exists, not creating' % (wwn))
    return entry['target']
def mount(self, params):
    '''Mount an image

    Parameters
    ----------
    params : dict
        Image parameter dictionary (uses chroot, device, vnode_name)
    '''
    chroot = params['chroot']
    device = params['device']
    Log.info('device %s' % device)
    # Sanity check, destination shouldn't be a file
    if os.path.isfile(chroot):
        # BUG FIX: the original format string had two placeholders but
        # three arguments, which raised TypeError instead of logging
        log_error_node(params,
                       '%s is a file, can not mount %s at this location for %s'
                       % (chroot, device, params['vnode_name']))
        return
    # Make sure the destination exists
    if not os.path.exists(chroot):
        log_info_node(params, 'mkdir %s' % chroot)
        os.makedirs(chroot)
    # If the destination is already mounted, then no need to mount
    # again. It's likely the correctly mounted device from a previous
    # prep run that died
    if not os.path.ismount(chroot):
        log_info_node(params, 'mount %s %s' % (device, chroot))
        self.run([mount_cmd, device, chroot])
    else:
        log_info_node(params, '%s already mounted, hoping for the best' % chroot)
    # Mount /sys, /proc and /dev in image chroot
    bind_mounts = ['/sys', '/proc', '/dev']
    if not os.path.ismount(chroot):
        log_error_node(params, 'mount of %s failed, cannot mount %s in chroot'
                       % (chroot, bind_mounts))
    else:
        for bind_mount in bind_mounts:
            dest = chroot + bind_mount
            log_info_node(params, 'mount -o bind %s %s' % (bind_mount, dest))
            self.run([mount_cmd, '-o', 'bind', bind_mount, dest])
def create_block_store(self, wwn, device):
    """Create a blockstore with the given wwn.

    It is assumed that the blockstore does not already exist; calling
    this when it does can raise. Call get_block_store first to check
    for existence.

    Parameters
    ----------
    wwn : string
        World Wide Name for the block store
    device : string
        Path to a block device

    Returns
    -------
    blockstore object, if it was successfully created
    """
    Log.info('creating block backstore %s for device %s' % (wwn, device))
    store = BlockStorageObject(wwn, device, wwn)
    self.block_store[wwn] = store
    return store
def delete_target(self, wwn):
    """Delete the target identified by `wwn` and drop it from local
    tracking; a no-op when unknown or in an inconsistent state."""
    target_dict = self.target.get(wwn)
    # Doesn't exist, don't proceed
    if target_dict is None:
        Log.info('No target %s. Not deleting' % wwn)
        return
    target = target_dict.get('target')
    # Surprising, but possible, because processes can die
    # and the state can get strange
    if target is None:
        return
    Log.info('deleting target %s' % (wwn))
    target.delete()
    del self.target[wwn]
def delete_target(self, wwn):
    """Tear down the tracked target for `wwn`, if any.

    Silently returns when the tracking entry exists but holds no target
    object (possible when a previous process died mid-operation).
    """
    entry = self.target.get(wwn, None)
    if entry is None:
        # See if the target exists; it doesn't, so don't proceed
        Log.info('No target %s. Not deleting' % wwn)
        return
    tgt = entry.get('target', None)
    if tgt is not None:
        Log.info('deleting target %s' % (wwn))
        # Delete the target, then forget it locally
        tgt.delete()
        self.target.pop(wwn)
def create_block_store(self, wwn, device):
    """Create and track a block backstore named `wwn` over `device`.

    Assumes the blockstore does not already exist — creating a
    duplicate can raise, so call get_block_store first.

    Parameters
    ----------
    wwn : string
        World Wide Name for the block store
    device : string
        Path to a block device

    Returns
    -------
    The newly created blockstore object.
    """
    Log.info('creating block backstore %s for device %s' % (wwn, device))
    self.block_store[wwn] = BlockStorageObject(wwn, device, wwn)
    return self.block_store[wwn]
def log_info_node(params, message):
    """Log an info message prefixed with the node's initiator FQDN."""
    Log.info('%s: %s' % (params['initiator_fqdn'], message))
def collectInfoAndPersist(self, couch):
    """Collect blade info for this chassis via SNMP, and persist those
    blades and slots and this Chassis object to CouchDB when done"""
    if not self.ping():
        Log.info('%s not pingable, not collecting SNMP info' % self.host)
    else:
        Log.debug(5, '%s is pingable' % self.host)
        self.isPingable = 1
        snmp = SNMP.withConfigFile(self.host)
        self.collectChassisInfo(snmp)
        self.collectFanPackInfo()
        bladesCommunicating = bitarray(self.bladesCommunicating)
        bladesInstalled = bitarray(self.bladesInstalled)
        oids = {
            'mac0': 'BLADE-MIB::bladeMACAddress1Vpd',
            'mac1': 'BLADE-MIB::bladeMACAddress2Vpd',
            'biosVersion': 'BLADE-MIB::bladeBiosVpdRevision',
            'bmcVersion': 'BLADE-MIB::bladeSysMgmtProcVpdRevision',
            'diagVersion': 'BLADE-MIB::bladeDiagsVpdRevision',
            'serialNumber': 'BLADE-MIB::bladeBiosVpdName',
            'healthState': 'BLADE-MIB::bladeHealthState',
            'healthSummarySeverity': 'BLADE-MIB::bladeHealthSummarySeverity',
            'healthSummaryDescription':
                'BLADE-MIB::bladeHealthSummaryDescription',
        }
        # slot number -> {attr: value}; renamed from 'blade' so the
        # collection dict no longer shares a name with the Blade object
        slot_info = {}
        for (attr, oid) in oids.items():
            values = self.bladeInfo(snmp, oid)
            if values:
                for (slot, value) in values:
                    if slot not in slot_info:  # has_key removed in Py3
                        slot_info[slot] = {}
                    slot_info[slot][attr] = value
        if len(slot_info) > 0:
            self.collectedSNMP = 1
            for (slotNum, blade_params) in slot_info.items():
                Log.debug(10, 'chassis %d slot %s blade params: %s'
                          % (self.num, slotNum, blade_params))
                nodeNum = (int(slotNum) - 1) + self.firstNode
                nodeName = self.nodeNameFormat % nodeNum
                blade = Blade(blade_params)
                slotInt = int(slotNum)
                slotParams = {
                    'num': slotInt,
                    'nodeName': nodeName,
                    'chassisDocId': self.docId,
                    'bladeDocId': blade.mac0,
                    'bladeInstalled': bladesInstalled[slotInt - 1],
                    'bladeCommunicating': bladesCommunicating[slotInt - 1],
                }
                slot = Slot(slotParams)
                # This appears to be pointless. We shouldn't save any
                # information about blades that are not available
                # if blade.mac0 == 'not available':
                #     blade.mac0 = '%s-%s' % (blade.mac0, slot.docId)
                slot.persist(couch)
                if blade.validMac0:
                    blade.persist(couch)
        # NOTE(review): original indentation was ambiguous; persisting
        # the chassis once after SNMP collection — confirm placement
        self.persist(couch)
def collectAndClearEventLog(self, couch):
    """Collect the chassis event log via async SNMP walks, persist each
    completed log entry to CouchDB, then clear the hardware log."""
    log = {}
    # OIDs to retrieve
    oids = [
        'BLADE-MIB::readEnhancedEventLogAttribute',
        'BLADE-MIB::readEnhancedEventLogMessage'
    ]
    # Get the mapping of blade serial numbers to blade document ids
    serialNumberToDocId = Blade.serialNumberToDocId(couch)
    for oid in oids:
        # Start async snmpwalk. This call returns immediately
        # and spawns a background thread to perform the SNMPWalk
        snmpWalk = SNMPWalk.withConfigFile(self.host, oid)
        # While the snmpwalk is running
        while not snmpWalk.eof:
            # Get snmp oid/value pairs off the queue
            while not snmpWalk.queue.empty():
                (oid, value) = snmpWalk.queue.get()
                (mibName, oidBase, lastOctet) = snmpWalk.extractOidParts(oid)
                if oidBase != 'readEnhancedEventLogNumber':
                    # Get the existing log entry, or start a fresh one;
                    # 'entry' replaces the original name, which
                    # shadowed the dict builtin, and .get replaces the
                    # Py2-only has_key()
                    entry = log.get(lastOctet, {})
                    # Update the entry with this line from the snmpwalk
                    if oidBase == 'readEnhancedEventLogAttribute':
                        entry.update(
                            self.parseEventLogAttribute(
                                value, serialNumberToDocId))
                    else:
                        match = re.search('^Text:(.*)$', value)
                        if match:
                            value = match.group(1)
                        entry.update({'message': value})
                    # Update the log entry list
                    log[lastOctet] = entry
                    # On the final snmp walk command, create CouchDB objects
                    if entry and oidBase == 'readEnhancedEventLogMessage':
                        logEntry = LogEntry(entry)
                        logEntry.persist(couch)
        # Join snmpwalk background thread
        snmpWalk.join()
    snmp = SNMP.withConfigFile(self.host)
    snmp.set('BLADE-MIB::clearEventLog.0', 'i', '1')
    Log.info('%s system log entries collected from %s' % (len(log), self.name))
def collectInfoAndPersist(self, couch):
    """Collect blade info for this chassis via SNMP, and persist those
    blades and slots and this Chassis object to CouchDB when done"""
    if not self.ping():
        Log.info('%s not pingable, not collecting SNMP info' % self.host)
    else:
        Log.debug(5, '%s is pingable' % self.host)
        self.isPingable = 1
        snmp = SNMP.withConfigFile(self.host)
        self.collectChassisInfo(snmp)
        self.collectFanPackInfo()
        bladesCommunicating = bitarray(self.bladesCommunicating)
        bladesInstalled = bitarray(self.bladesInstalled)
        oids = {'mac0': 'BLADE-MIB::bladeMACAddress1Vpd',
                'mac1': 'BLADE-MIB::bladeMACAddress2Vpd',
                'biosVersion': 'BLADE-MIB::bladeBiosVpdRevision',
                'bmcVersion': 'BLADE-MIB::bladeSysMgmtProcVpdRevision',
                'diagVersion': 'BLADE-MIB::bladeDiagsVpdRevision',
                'serialNumber': 'BLADE-MIB::bladeBiosVpdName',
                'healthState': 'BLADE-MIB::bladeHealthState',
                'healthSummarySeverity':
                    'BLADE-MIB::bladeHealthSummarySeverity',
                'healthSummaryDescription':
                    'BLADE-MIB::bladeHealthSummaryDescription',
                }
        # Collected SNMP values, keyed by slot number; renamed from
        # 'blade', which was later rebound to the Blade object
        slot_info = {}
        for (attr, oid) in oids.items():
            values = self.bladeInfo(snmp, oid)
            if values:
                for (slot, value) in values:
                    if slot not in slot_info:  # has_key is Py2-only
                        slot_info[slot] = {}
                    slot_info[slot][attr] = value
        if len(slot_info) > 0:
            self.collectedSNMP = 1
            for (slotNum, blade_params) in slot_info.items():
                Log.debug(10, 'chassis %d slot %s blade params: %s'
                          % (self.num, slotNum, blade_params))
                nodeNum = (int(slotNum) - 1) + self.firstNode
                nodeName = self.nodeNameFormat % nodeNum
                blade = Blade(blade_params)
                slotInt = int(slotNum)
                slotParams = {'num': slotInt,
                              'nodeName': nodeName,
                              'chassisDocId': self.docId,
                              'bladeDocId': blade.mac0,
                              'bladeInstalled': bladesInstalled[slotInt - 1],
                              'bladeCommunicating':
                                  bladesCommunicating[slotInt - 1],
                              }
                slot = Slot(slotParams)
                # This appears to be pointless. We shouldn't save any
                # information about blades that are not available
                # if blade.mac0 == 'not available':
                #     blade.mac0 = '%s-%s' % (blade.mac0, slot.docId)
                slot.persist(couch)
                if blade.validMac0:
                    blade.persist(couch)
        # NOTE(review): collapsed source made the original indentation
        # ambiguous; the chassis is persisted once at the end — confirm
        self.persist(couch)
def create_iscsi_target(self, params):
    """Create an iSCSI target

    Parameters
    ----------
    params : dict
        Dictionary of parameters
        wwn: The World Wide Name of the share, eg, the IQN
        device: the backing device
        initiators: list of initiators
        ip: portal address (default '0.0.0.0')
        port: portal port (default 3260)

    Returns
    -------
    True if any component had to be created; None when the complete
    target configuration already existed (no action taken).
    """
    wwn = params.get('wwn', None)
    device = params.get('device', None)
    # Robustness fix: a missing initiators list used to raise TypeError
    # in the loop below; treat it as "no initiators"
    initiators = params.get('initiators') or []
    ip = params.get('ip', '0.0.0.0')
    port = params.get('port', 3260)
    # Something outside this library lowercases the wwn, so
    # we lowercase the input to stay consistent
    if wwn is not None:
        wwn = wwn.lower()
    # Becomes True as soon as any step had to create something, so the
    # caller can tell whether this call was a no-op
    created = None
    # Create blockstore, if needed
    blockstore = self.get_block_store(wwn)
    if blockstore is None:
        blockstore = self.create_block_store(wwn, device)
        created = True
    else:
        Log.info('block backstore %s already exists, not creating' % (wwn))
    # Create target
    target = self.get_target(wwn)
    if target is None:
        target = self.create_target(wwn)
        created = True
    else:
        Log.info('target %s already exists, not creating' % (wwn))
    # Create TPG
    tag = 1
    tpg = self.get_tpg(target, tag)
    if tpg is None:
        tpg = self.create_tpg(target, tag)
        created = True
    else:
        Log.info('tpg (%s, %s) already exists, not creating' % (target, tag))
    # Create LUN. Only one LUN is supported; iterate all existing LUNs
    # so the last one wins, mirroring the original (more than one LUN
    # is undefined behavior)
    lun = None
    for lun in tpg.luns:
        pass
    if lun is None:
        lun = self.create_lun(tpg, blockstore)
        created = True
    else:
        Log.info('lun %s already exists, not creating' % (blockstore.name))
    # Create portal
    portal = self.get_portal(tpg, ip, port)
    if portal is None:
        portal = self.create_portal(tpg, ip, port)
        created = True
    else:
        portal_id = self.get_portal_id(ip, port)
        Log.info('portal %s already exists, not creating' % (portal_id))
    # Set up ACLs and mapped LUNs
    for initiator in initiators:
        # Create ACL
        acl = self.get_acl(tpg, initiator)
        if acl is None:
            acl = self.create_acl(tpg, initiator)
            created = True
        else:
            Log.info('acl (%s, %s) already exists, not creating'
                     % (tpg, initiator))
        # Map LUN 0. As with LUNs, only one mapped lun is supported;
        # take the last existing one, if any
        num = 0
        mapped_lun = None
        for mapped_lun in acl.mapped_luns:
            pass
        if mapped_lun is None:
            mapped_lun = self.create_mapped_lun(acl, num, lun)
            created = True
        else:
            Log.info('mapped lun (%s, %s, %s) already exists'
                     % (acl, num, lun))
    return created
def create_iscsi_target(self, params):
    """Create an iSCSI target

    Parameters
    ----------
    params : dict
        Dictionary of parameters
        wwn: The World Wide Name of the share, eg, the IQN
        device: the backing device
        initiators: list of initiators
        ip: portal address (default '0.0.0.0')
        port: portal port (default 3260)

    Returns
    -------
    True when any component was created, None when everything already
    existed and no action was taken.
    """
    wwn = params.get('wwn', None)
    device = params.get('device', None)
    # Robustness fix: default a missing initiators list to [] so the
    # ACL loop below does not raise TypeError on None
    initiators = params.get('initiators') or []
    ip = params.get('ip', '0.0.0.0')
    port = params.get('port', 3260)
    # Something outside this library lowercases the wwn, so
    # we lowercase the input to stay consistent
    if wwn is not None:
        wwn = wwn.lower()
    # Tracks whether any creation was needed; calling this for an
    # already-existing iscsi target is a no-op
    created = None
    # Create blockstore, if needed
    blockstore = self.get_block_store(wwn)
    if blockstore is None:
        blockstore = self.create_block_store(wwn, device)
        created = True
    else:
        Log.info('block backstore %s already exists, not creating' % (wwn))
    # Create target
    target = self.get_target(wwn)
    if target is None:
        target = self.create_target(wwn)
        created = True
    else:
        Log.info('target %s already exists, not creating' % (wwn))
    # Create TPG
    tag = 1
    tpg = self.get_tpg(target, tag)
    if tpg is None:
        tpg = self.create_tpg(target, tag)
        created = True
    else:
        Log.info('tpg (%s, %s) already exists, not creating' % (target, tag))
    # Create LUN. More than one LUN is not supported; iterate over all
    # (eg, the one) LUNs so the last one is used if any exist
    lun = None
    for lun in tpg.luns:
        pass
    if lun is None:
        lun = self.create_lun(tpg, blockstore)
        created = True
    else:
        Log.info('lun %s already exists, not creating' % (blockstore.name))
    # Create portal
    portal = self.get_portal(tpg, ip, port)
    if portal is None:
        portal = self.create_portal(tpg, ip, port)
        created = True
    else:
        portal_id = self.get_portal_id(ip, port)
        Log.info('portal %s already exists, not creating' % (portal_id))
    # Set up ACLs and mapped LUNs
    for initiator in initiators:
        # Create ACL
        acl = self.get_acl(tpg, initiator)
        if acl is None:
            acl = self.create_acl(tpg, initiator)
            created = True
        else:
            Log.info('acl (%s, %s) already exists, not creating'
                     % (tpg, initiator))
        # Map LUN. Like with LUNs, only one mapped lun is supported;
        # use the last one in the list, if any exist
        num = 0
        mapped_lun = None
        for mapped_lun in acl.mapped_luns:
            pass
        if mapped_lun is None:
            mapped_lun = self.create_mapped_lun(acl, num, lun)
            created = True
        else:
            Log.info('mapped lun (%s, %s, %s) already exists'
                     % (acl, num, lun))
    return created