def run(self, node):
    # here you can allocate resources for your VM in the rocks DB
    # node is of type rocks.db.mappings.base.Node
    if not node.vm_defs.physNode or len(node.vm_defs.disks) <= 0:
        raise rocks.util.CommandError("Unable to allocate " + \
            "storage for " + node.name)
    disk = node.vm_defs.disks[0]
    phys = node.vm_defs.physNode.name

    # sync is enabled only if both the physical container and,
    # when set, the virtual node have img_sync turned on
    sync = self.owner.str2bool(self.db.getHostAttr(phys, 'img_sync'))
    sync_virt = self.db.getHostAttr(node.name, 'img_sync')
    if sync_virt is not None:
        sync = sync and self.owner.str2bool(sync_virt)

    remotepool = self.db.getHostAttr(phys, 'vm_container_zpool')
    size = str(disk.size)
    volume = node.name + '-vol'
    if not (disk.img_nas_server and disk.img_nas_server.server_name):
        # the node does not use the img-storage system
        return
    nas_name = disk.img_nas_server.server_name
    zpool_name = disk.img_nas_server.zpool_name

    # map the zvol to the physical node:
    # callAddHostStoragemap(nas, zpool, volume, remotehost, remotepool,
    #                       size, sync, initiator)
    launcher = CommandLauncher()
    initiator = launcher.callListInitiator(phys)
    device = launcher.callAddHostStoragemap(nas_name, zpool_name, volume,
        phys, remotepool, size, sync, initiator)

    # record the returned block device in the VM disk definition
    disk.vbd_type = "phy"
    disk.prefix = os.path.dirname(device)
    disk.name = os.path.basename(device)
    print nas_name + ":" + volume + " mapped to " + phys + ":" + device
    return
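# Illustrative sketch only, not part of the plugin above: how the device path
# returned by callAddHostStoragemap ends up in the disk definition. The path
# used here is a hypothetical example value.
import os

device = '/dev/mapper/example-vol'        # assumed return value, illustration only
prefix = os.path.dirname(device)          # '/dev/mapper'  -> disk.prefix
name = os.path.basename(device)           # 'example-vol'  -> disk.name
assert (prefix, name) == ('/dev/mapper', 'example-vol')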
def run(self, params, args):
    (args, nas, zpool, volume, remotehost, size) = self.fillPositionalArgs(
        ('nas', 'zpool', 'volume', 'remotehost', 'size'))
    (sync, remotepool) = self.fillParams(
        [('img_sync', None), ('remotepool', None)])
    if not nas or not zpool or not volume or not remotehost or not size:
        self.abort("you must pass 5 arguments: nas zpool volume remotehost size")

    # if img_sync was not given on the command line fall back to the
    # host attribute of the remote host
    if sync is None:
        sync = self.db.getHostAttr(remotehost, 'img_sync')
    sync = False if sync is None else self.str2bool(sync)
    if sync and remotepool is None:
        self.abort("img_sync was true but no remotepool was specified")

    # debugging output
    print "mapping ", nas, ":", zpool, "/", volume, " on ", remotehost

    launcher = CommandLauncher()
    initiator = launcher.callListInitiator(remotehost)
    device = launcher.callAddHostStoragemap(nas, zpool, volume, remotehost,
        remotepool, size, sync, initiator)
    self.beginOutput()
    self.addOutput(nas, device)
    self.endOutput(padChar='')
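# Presumed command-line usage (an assumption: the command name is inferred
# from the callAddHostStoragemap launcher call; only the positional arguments
# and parameter names come from the code above, the host and size values are
# hypothetical):
#
#   rocks add host storagemap nas-0-0 tank example-vol vm-container-0-0 36 \
#       img_sync=true remotepool=tank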
def run(self, params, args):
    if len(args) == 0:
        self.abort('Must supply at least one host name')
    self.beginOutput()
    for host in self.newdb.getNodesfromNames(args, preload=['membership']):
        attrs = CommandLauncher().callListAttrs(host.name)
        for k in attrs.keys():
            self.addOutput(host.name, (k, attrs[k]))
    headers = ['nas', 'attr', 'value']
    self.endOutput(headers)
def run(self, params, args):
    (args, nas) = self.fillPositionalArgs(('nas'))
    if not nas:
        self.abort("you must enter the nas name")

    list = CommandLauncher().callListHostStoragemap(nas)
    self.beginOutput()
    for d in list:
        # derive a human readable state for the mapping
        state = 'mapped'
        if d['remotehost'] == None:
            state = 'unmapped'
        elif d['is_sending'] == 1:
            state = 'NAS->VM'
        elif d['is_sending'] == 0:
            state = 'NAS<-VM'
            if d['is_delete_remote'] == 0:
                state += ' sched'
        self.addOutput(nas, (d['zvol'], d['zpool'], d['remotehost'],
            d['remotepool'], d['iscsi_target'], state,
            str(datetime.timedelta(seconds=(int(time.time() - d.get('time')))))
                if d.get('time') else None,
            time.strftime("%a %H:%M.%S",
                time.localtime(int(d.get('nextsync'))))
                if d.get('nextsync') and state == "mapped" else None,
            True if d['locked'] else None))
    headers = ['nas', 'zvol', 'zpool', 'remotehost', 'remotepool',
        'target', 'state', 'time', 'nextsync', 'locked']
    self.endOutput(headers)
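# Illustrative note, not part of the command: how the two time columns above
# are rendered. The timedelta gives the elapsed time since the record's 'time'
# field, strftime gives the wall-clock moment of the next scheduled sync.
import datetime
import time

print str(datetime.timedelta(seconds=3661))            # '1:01:01'
print time.strftime("%a %H:%M.%S", time.localtime())   # e.g. 'Mon 14:30.05'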
def run(self, params, args):
    (args, host) = self.fillPositionalArgs(('host'))
    if not host:
        self.abort("you must enter the host name")
    response = CommandLauncher().callListHostStoragedev(host)
    map = response['body']
    self.beginOutput()
    for volume in map.keys():
        self.addOutput(host, (volume,
            map[volume].get('sync'),
            map[volume].get('target'),
            map[volume].get('device'),
            map[volume].get('status'),
            map[volume].get('size'),
            map[volume].get('bdev'),
            map[volume].get('started'),
            map[volume].get('synced'),
            str(datetime.timedelta(
                seconds=(int(time.time() - map[volume].get('time')))))
                if map[volume].get('time') else None))
    headers = ['host', 'volume', 'sync', 'target', 'device', 'status',
        'size (GB)', 'block dev', 'is started', 'synced', 'time']
    self.endOutput(headers)
def run(self, params, args):
    if len(args) != 3:
        self.abort('Must supply a (nas, attr, value) tuple')
    (args, nas, attr, value) = self.fillPositionalArgs(('nas', 'attr', 'value'))
    # the literal string "none" clears the attribute
    if value.lower() == "none":
        value = None
    setDict = {attr: value}
    CommandLauncher().callSetAttrs(nas, setDict)
def run(self, params, args):
    (args, nas, volume) = self.fillPositionalArgs(('nas', 'volume'))
    if not (nas and volume):
        self.abort("2 arguments are required for this command: nas volume")
    # debugging output
    print "unmapping ", nas, ":", volume
    CommandLauncher().callDelHostStoragemap(nas, volume)
    self.beginOutput()
    self.addOutput(nas, "Success")
    self.endOutput(padChar='')
def run(self, params, args):
    (args, nas, zpool, volume) = self.fillPositionalArgs(
        ('nas', 'zpool', 'volume'))
    if not (nas and zpool and volume):
        self.abort("3 arguments are required for this command: nas zpool volume")
    # debugging output
    print "removing ", nas, ":", zpool, "/", volume
    CommandLauncher().callDelHostStorageimg(nas, zpool, volume)
    self.beginOutput()
    self.addOutput(nas, "Success")
    self.endOutput(padChar='')
def run(self, node):
    # here you can deallocate the resources used by your VM
    # in the rocks DB
    # node is of type rocks.db.mappings.base.Node
    if not node.vm_defs.physNode or len(node.vm_defs.disks) <= 0:
        raise rocks.util.CommandError("Unable to release " + \
            "storage for " + node.name)
    disk = node.vm_defs.disks[0]
    volume = node.name + '-vol'
    if not (disk.img_nas_server and disk.img_nas_server.server_name):
        # the node does not use the img-storage system
        return
    nas_name = disk.img_nas_server.server_name
    CommandLauncher().callDelHostStoragemap(nas_name, volume)
    return
def run(self, params, args):
    (args, nas, zvol) = self.fillPositionalArgs(('nas', 'zvol'))
    if not zvol:
        self.abort("you must enter the zvol name")
    self.beginOutput()
    attrs = CommandLauncher().callListZvolAttrs(nas, zvol)
    fields = ['zvol', 'frequency', 'nextsync', 'uploadspeed', 'downloadspeed']
    line = []
    for f in fields:
        line.append(attrs[f])
    self.addOutput(nas, line)
    headers = ['nas']
    headers.extend(fields)
    self.endOutput(headers)
def run(self, params, args):
    (args, nas, volume) = self.fillPositionalArgs(('nas', 'volume'))
    (nodetype, ) = self.fillParams([('nodetype', '')])
    if not (nas and volume):
        self.abort("2 arguments are required for this " + \
            "command: nas and volume")

    if nodetype:
        # ok we have to clear either a vmc or a nas
        if nodetype == 'vmc' and \
                os.path.exists('/etc/init.d/img-storage-vm'):
            print " -- clearing VM Container --"
            self.clean_vm(nas, volume)
        elif nodetype == 'vmc':
            self.abort("/etc/init.d/img-storage-vm is missing, "
                "are you sure this is a virtual machine "
                "container?")
        elif nodetype == 'nas' and \
                os.path.exists('/etc/init.d/img-storage-nas'):
            print " -- clearing NAS --"
            self.clean_nas(nas, volume)
        elif nodetype == 'nas':
            self.abort("/etc/init.d/img-storage-nas is missing, "
                "are you sure this is a nas enabled for "
                "serving VM disk images?")
        else:
            self.abort("nodetype can only be nas or vmc (%s)" % nodetype)
    else:
        # no nodetype specified so we need to query the nas
        list = CommandLauncher().callListHostStoragemap(nas)
        entry = [d for d in list if d['zvol'] == volume]
        if len(entry) == 0:
            self.abort('Unable to find volume %s on nas %s'
                % (volume, nas))
        elif len(entry) > 1:
            self.abort('Major failure: found %d volumes with '
                'the same %s name' % (len(entry), volume))
        entry = entry[0]
        if entry['remotehost'] == None:
            self.abort('Volume %s is unmapped' % volume)
        if entry['is_sending'] == 1 or (entry['is_sending'] == 0
                and entry['is_delete_remote'] != 0):
            self.abort('Volume %s is currently being transferred, '
                'please wait' % volume)

        # ok we are good to go, we can destroy the mapping
        # all possible error situations have been cleared
        # clear the remote host first
        cmdline = 'rocks clean host storagemap %s %s nodetype=' % (nas, volume)
        print "cleaning ", entry['remotehost']
        self.command('run.host', [str(entry['remotehost']), cmdline + 'vmc'])
        # then clean the nas
        print "cleaning ", nas
        self.command('run.host', [nas, cmdline + 'nas'])
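# Note on the fan-out above (illustration only, host and volume names are
# hypothetical): with no nodetype= the command re-invokes itself through
# 'rocks run host' on both ends of the mapping, so each branch runs where its
# init script lives. For volume 'example-vol' mapped from nas 'nas-0-0' to
# 'vm-container-0-0', the remote invocations amount to:
#
#   on vm-container-0-0:  rocks clean host storagemap nas-0-0 example-vol nodetype=vmc
#   on nas-0-0:           rocks clean host storagemap nas-0-0 example-vol nodetype=nas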