def action(machineid, disks, iotune):
    """Apply block-device I/O tuning to a domain's disks via ``virsh blkdeviotune``.

    machineid -- domain identifier passed to virsh
    disks     -- iterable of disk URLs to tune
    iotune    -- dict of blkdeviotune option name -> value (None values are skipped)

    Returns True, or None when the domain does not exist.
    """
    from CloudscalerLibcloud.utils.libvirtutil import LibvirtUtil
    import libvirt

    connection = LibvirtUtil()
    domain = connection.get_domain_obj(machineid)
    if domain is None:
        return
    domaindisks = list(connection.get_domain_disks(domain.XMLDesc()))
    # Persist the tuning in the domain config and/or apply it live,
    # depending on the domain's persistence and run state.
    flags = []
    if domain.isPersistent():
        flags.append("--config")
    if domain.state()[0] == libvirt.VIR_DOMAIN_RUNNING:
        flags.append("--live")
    for diskurl in disks:
        dev = connection.get_domain_disk(diskurl, domaindisks)
        if not dev:
            continue
        cmd = ["virsh", "blkdeviotune", str(machineid), str(dev)]
        for option, value in iotune.items():
            if value is not None:
                cmd.extend(["--%s" % option, str(value)])
        cmd.extend(flags)
        j.system.process.execute(" ".join(cmd))
    return True
def action(domainid, temppath, name, storageparameters):
    """Back up every disk file of a domain to an object store (S3 or rados).

    domainid          -- libvirt domain UUID string
    temppath          -- local scratch directory for exported disk images
    name              -- backup name used as the metadata key
    storageparameters -- dict with 'storage_type' ('S3' or rados), 'bucket',
                         'mdbucketname' and, for S3, the connection keys.

    Returns {'files': <per-disk metadata list>, 'timestamp': <epoch float>}.

    Fix: dropped the unused ``import ujson``.
    """
    import time
    from JumpScale.lib.backuptools import object_store
    from JumpScale.lib.backuptools import backup
    from CloudscalerLibcloud.utils.libvirtutil import LibvirtUtil
    from CloudscalerLibcloud.utils.qcow2 import Qcow2

    connection = LibvirtUtil()
    store = object_store.ObjectStore(storageparameters['storage_type'])
    bucketname = storageparameters['bucket']
    mdbucketname = storageparameters['mdbucketname']
    domain = connection.connection.lookupByUUIDString(domainid)
    files = connection._getDomainDiskFiles(domain)
    if storageparameters['storage_type'] == 'S3':
        store.conn.connect(storageparameters['aws_access_key'],
                           storageparameters['aws_secret_key'],
                           storageparameters['host'],
                           is_secure=storageparameters['is_secure'])
    else:
        # rados has its config on the local cpu node
        store.conn.connect()
    backupmetadata = []
    if not j.system.fs.exists(temppath):
        j.system.fs.createDir(temppath)
    for f in files:
        basefile = j.system.fs.getBaseName(f)
        tempfilepath = j.system.fs.joinPaths(temppath, basefile)
        # Export the qcow2 into the scratch dir before uploading
        # (presumably a standalone copy -- see Qcow2.export), then clean up.
        q2 = Qcow2(f)
        q2.export(tempfilepath)
        metadata = backup.backup(store, bucketname, tempfilepath)
        j.system.fs.remove(tempfilepath)
        backupmetadata.append(metadata)
    backup.store_metadata(store, mdbucketname, name, backupmetadata)
    return {'files': backupmetadata, 'timestamp': time.time()}
def action(xml, machineid, ipcidr=None, vlan=None):
    """Attach the device described by ``xml`` to domain ``machineid``.

    ipcidr -- optional external IP (CIDR) to protect on the domain afterwards
    vlan   -- optional vlan id; when set, a network entry is handed to
              NetworkTool so the required bridge exists for the attach.

    Returns the domain's XML description, False when libvirt reports the
    device config as unsupported, or None when the domain does not exist.
    """
    import libvirt
    from CloudscalerLibcloud.utils.libvirtutil import LibvirtUtil
    from CloudscalerLibcloud.utils.network import Network, NetworkTool
    connection = LibvirtUtil()
    netinfo = []
    if vlan:
        # NOTE(review): 'type' is set to the vlan *id* here, not a type
        # string -- looks suspicious; confirm against NetworkTool's contract.
        netinfo.append({'id': vlan, 'type': vlan})
    try:
        with NetworkTool(netinfo, connection):
            domain = connection.get_domain_obj(machineid)
            if domain is None:
                return
            # Apply live and/or persistently depending on domain state.
            flags = 0
            if domain.state()[0] in (libvirt.VIR_DOMAIN_RUNNING, libvirt.VIR_DOMAIN_PAUSED):
                flags |= libvirt.VIR_DOMAIN_DEVICE_MODIFY_LIVE
            if domain.isPersistent():
                flags |= libvirt.VIR_DOMAIN_DEVICE_MODIFY_CONFIG
            if flags != 0:
                try:
                    domain.attachDeviceFlags(xml, flags)
                except libvirt.libvirtError as e:
                    # unsupported device config is reported, everything else re-raised
                    if e.get_error_code() == libvirt.VIR_ERR_CONFIG_UNSUPPORTED:
                        return False
                    raise
            if ipcidr:
                network = Network(connection)
                network.protect_external(domain, ipcidr)
            return domain.XMLDesc()
    finally:
        connection.close()
def action(machineid):
    """Back up a machine to the cephfs mount at /mnt/cephfs.

    Raises RuntimeError when nothing is mounted there, so a backup is never
    written onto the node's local filesystem by accident.

    Fix: ``os`` was used without being imported in this function; every other
    action in this file imports its own dependencies locally, so rely on a
    local import instead of an unseen module-level one.
    """
    import os

    backuppath = '/mnt/cephfs'
    if not os.path.ismount(backuppath):
        raise RuntimeError("No device mounted on %s" % backuppath)
    from CloudscalerLibcloud.utils.libvirtutil import LibvirtUtil
    connection = LibvirtUtil()
    return connection.backup_machine_to_filesystem(machineid, backuppath)
def action(xml, domainid):
    """Redefine a domain from ``xml``, undefining any existing definition first.

    Returns True.
    """
    from CloudscalerLibcloud.utils.libvirtutil import LibvirtUtil
    connection = LibvirtUtil()
    existing = connection._get_domain(domainid)
    if existing:
        # drop the old definition before registering the new XML
        existing.undefineFlags(0)
    connection.connection.defineXML(xml)
    return True
def __init__(self):
    """Discover the CPU topology and seed the per-core bookkeeping.

    Cores below ``host_count`` are reserved for the host; the rest are
    tracked in ``self.cpus`` and handed out for quarantine, preferring
    the highest-numbered cores first.
    """
    self.phy_count = cpu_count()
    self.host_count = self.get_cpu_host_count()
    vm_cores = range(self.host_count, self.phy_count)
    self.cpus = dict((core, CPU(core)) for core in vm_cores)
    # quarantine allocation walks the cores from highest index down
    self.quarantine_prio = list(reversed(vm_cores))
    self.connection = LibvirtUtil()
    self.init_quarantine()
def action(machineid, force=False):
    """Shut a machine down, releasing its external network protection first.

    Returns whatever LibvirtUtil.shutdown returns.
    """
    from CloudscalerLibcloud.utils.libvirtutil import LibvirtUtil
    from CloudscalerLibcloud.utils.network import Network

    connection = LibvirtUtil()
    network = Network(connection)
    machine = connection.get_domain_obj(machineid)
    if machine:
        # release external network rules before taking the machine down
        network.cleanup_external(machine)
    return connection.shutdown(machineid, force)
def action(machineids, rate, burst):
    """Apply (or remove) a NIC rate limit on every given machine.

    rate/burst are in kb; a falsy ``rate`` removes any existing limit.
    """
    from CloudscalerLibcloud.utils.libvirtutil import LibvirtUtil
    connection = LibvirtUtil()
    for vmid in machineids:
        info = connection.get_domain(vmid)
        for nic in connection.get_domain_nics(info['XMLDesc']):
            if rate:
                j.system.qos.limitNic(nic, '%skb' % rate, '%skb' % burst)
            else:
                j.system.qos.removeLimit(nic)
def action(networkid):
    """Ensure the vxlan backend bridge for ``networkid`` exists as a libvirt network.

    Returns {'networkname': <bridge name>}.
    """
    from CloudscalerLibcloud.utils.libvirtutil import LibvirtUtil
    connection = LibvirtUtil()
    existing = connection.connection.listNetworks()
    # presumably imported for its side effect of extending j.system -- confirm
    from JumpScale.lib import ovsnetconfig
    vxnet = j.system.ovsnetconfig.ensureVXNet(networkid, 'vxbackend')
    bridgename = vxnet.bridge.name
    if bridgename not in existing:
        # create the libvirt network for the bridge if it does not exist
        connection.createNetwork(bridgename, bridgename)
    return {'networkname': bridgename}
def action(ovs_connection, size, diskguid, disk_info=None):
    """Extend an OVS vdisk to ``size`` GiB and, when attached, grow it live.

    ovs_connection -- dict with OVS restapi connection info, e.g.
        {'ips': ['ip1', 'ip2', 'ip3'], 'client_id': '...', 'client_secret': '...'}
    size      -- new disk size in GiB
    diskguid  -- guid of the vdisk to extend
    disk_info -- optional dict with 'machineRefId' and 'referenceId'; when the
                 disk is attached, used to resize the live block device too.

    Returns True when the resize is fully applied, False when the machine must
    be restarted for the new size to become visible.
    Raises Exception when the OVS extend task fails.

    Fixes: the old header comment wrongly said this *creates* a disk from a
    template; the bare ``except:`` is narrowed so KeyboardInterrupt/SystemExit
    are no longer swallowed (best-effort behaviour is preserved).
    """
    import json

    ovs = j.clients.openvstorage.get(
        ips=ovs_connection["ips"],
        credentials=(ovs_connection["client_id"], ovs_connection["client_secret"]),
    )
    # Ask OVS to extend the vdisk and wait for the task to finish.
    path = "/vdisks/{}/extend".format(diskguid)
    res = True
    new_size = size * 1024 ** 3  # GiB -> bytes
    params = dict(new_size=new_size)
    taskguid = ovs.post(path, data=json.dumps(params))
    success, result = ovs.wait_for_task(taskguid)
    if not success:
        raise Exception("Could not update disk:\n{}".format(result))
    if disk_info is not None and disk_info["machineRefId"] is not None:
        # leave import here as this is only relevant when executed on a cpu node
        from CloudscalerLibcloud.utils.libvirtutil import LibvirtUtil
        import libvirt

        connection = LibvirtUtil()
        domain = connection.get_domain_obj(disk_info["machineRefId"])
        if domain:
            domaindisks = list(connection.get_domain_disks(domain.XMLDesc()))
            dev = connection.get_domain_disk(disk_info["referenceId"], domaindisks)
            try:
                domain.blockResize(dev, new_size, libvirt.VIR_DOMAIN_BLOCK_RESIZE_BYTES)
            except Exception:
                # live resize failed; a restart will pick up the new size
                res = False
    return res
def action(networkid):
    """Destroy the RouterOS domain of a network and clean up its resources.

    Looks up the domain ``routeros_<networkid hex>``, removes its gateway
    management and external network config, destroys and undefines it, and
    deletes its qcow2 image. The network itself is always cleaned up in the
    ``finally`` block.

    Returns True when the domain is gone (or was already), False on libvirt
    errors.

    Fix: the image path was formatted with ``'{0:04x}'.format(networkid)``,
    which raises for a string networkid even though the rest of the function
    deliberately does ``int(networkid)``. The id is now coerced once and the
    hex string reused for both the path and the domain name.
    """
    import libvirt
    from CloudscalerLibcloud.utils.network import Network
    from CloudscalerLibcloud.utils.libvirtutil import LibvirtUtil

    libvirtutil = LibvirtUtil()
    network = Network(libvirtutil)
    bridges = []
    con = libvirtutil.connection
    # coerce once -- networkid may arrive as a string
    network_id_hex = '%04x' % int(networkid)
    destination = '/var/lib/libvirt/images/routeros/%s/routeros.qcow2' % network_id_hex
    try:
        name = 'routeros_%s' % network_id_hex
        try:
            domain = con.lookupByName(name)
            if domain:
                bridges = list(network.libvirtutil._get_domain_bridges(domain))
                network.cleanup_gwmgmt(domain)
                network.cleanup_external(domain)
                domain.destroy()
                domain.undefine()
                j.system.fs.remove(destination)
            return True
        except libvirt.libvirtError:
            return False
    finally:
        network.libvirtutil.cleanupNetwork(networkid, bridges)
        con.close()
def action(machineid, vcpus):
    """Set the number of vcpus on a domain.

    Returns True on success, False when libvirt rejects the value, and None
    when the domain does not exist. The connection is always closed.

    Fix: the error check compared against the magic number 8; use the named
    constant ``libvirt.VIR_ERR_INVALID_ARG`` (value 8), consistent with the
    named constants used by the other actions in this file.
    """
    import libvirt
    from CloudscalerLibcloud.utils.libvirtutil import LibvirtUtil

    connection = LibvirtUtil()
    try:
        domain = connection.get_domain_obj(machineid)
        if domain is None:
            return
        try:
            domain.setVcpus(vcpus)
        except libvirt.libvirtError as e:
            # an invalid vcpu count is reported as False, anything else re-raised
            if e.get_error_code() == libvirt.VIR_ERR_INVALID_ARG:
                return False
            raise
        return True
    finally:
        connection.close()
def action(vlan):
    """Ensure the external bridge/network for ``vlan`` exists; return its name.

    A vlan of None or 0 maps to the shared 'public' bridge.
    """
    from CloudscalerLibcloud.utils.libvirtutil import LibvirtUtil
    connection = LibvirtUtil()
    bridgename = 'public' if vlan is None or vlan == 0 else 'ext-%04x' % vlan
    nics = j.system.net.getNics()
    if bridgename not in nics:
        # prefer the dedicated external bridge; fall back to backplane1
        parent = 'ext-bridge' if 'ext-bridge' in nics else 'backplane1'
        j.system.ovsnetconfig.newVlanBridge(bridgename, parent, vlan)
    if not connection.checkNetwork(bridgename):
        connection.createNetwork(bridgename, bridgename)
    return bridgename
def action(networkid, domainxml=None):
    """Release network resources for ``networkid`` and remove its RouterOS dir.

    domainxml -- optional domain XML; when given, gateway-management and
                 external network configuration derived from it is cleaned
                 up as well.
    """
    from CloudscalerLibcloud.utils.network import Network
    from CloudscalerLibcloud.utils.libvirtutil import LibvirtUtil
    j.system.ovsnetconfig.cleanupIfUnused(networkid)
    if domainxml:
        libvirtutil = LibvirtUtil()
        network = Network(libvirtutil)
        # NOTE(review): other call sites pass a domain object to these
        # helpers; this one passes raw XML -- confirm Network accepts both.
        network.cleanup_gwmgmt(domainxml)
        network.cleanup_external(domainxml)
    # remove the RouterOS image directory for this network
    # NOTE(review): the {0:04x} format requires ``networkid`` to be an int
    destination = '/var/lib/libvirt/images/routeros/{0:04x}/'.format(networkid)
    j.system.fs.removeDirTree(destination)
def action(name, networkId):
    """Ensure the space network for ``networkId`` exists and that the domain
    ``name`` is running (starting it if necessary)."""
    networkname = 'space_%s' % networkId
    from CloudscalerLibcloud.utils.libvirtutil import LibvirtUtil
    connection = LibvirtUtil()
    networks = connection.connection.listNetworks()
    if networkname not in networks:
        # the libvirt network is missing: build the vxlan bridge and define it
        from JumpScale.lib import ovsnetconfig
        vxnet = j.system.ovsnetconfig.ensureVXNet(networkId, 'vxbackend')
        connection.createNetwork(networkname, vxnet.bridge.name)
    import libvirt
    con = libvirt.open()
    try:
        dom = con.lookupByName(name)
        if dom.state()[0] != libvirt.VIR_DOMAIN_RUNNING:
            dom.create()
    finally:
        con.close()
def action(xml, machineid):
    """Detach the device described by ``xml`` from domain ``machineid``.

    Returns the domain's XML description afterwards, or None when the machine
    is not available anymore.

    Fix: the lookup used a bare ``except:`` which also swallowed
    KeyboardInterrupt/SystemExit; narrowed to ``libvirt.libvirtError``, which
    is what lookupByUUIDString raises for a missing domain.
    """
    import libvirt
    from CloudscalerLibcloud.utils.libvirtutil import LibvirtUtil

    connection = LibvirtUtil()
    try:
        domain = connection.connection.lookupByUUIDString(machineid)
    except libvirt.libvirtError:
        return None  # machine not available anymore
    # Detach live and/or persistently depending on domain state.
    flags = 0
    if domain.state()[0] in (libvirt.VIR_DOMAIN_RUNNING, libvirt.VIR_DOMAIN_PAUSED):
        flags |= libvirt.VIR_DOMAIN_DEVICE_MODIFY_LIVE
    if domain.isPersistent():
        flags |= libvirt.VIR_DOMAIN_DEVICE_MODIFY_CONFIG
    if flags != 0:
        domain.detachDeviceFlags(xml, flags)
    return domain.XMLDesc()
def action(machineid):
    """Report whether a storage action is currently running for the machine."""
    from CloudscalerLibcloud.utils.libvirtutil import LibvirtUtil
    return LibvirtUtil().isCurrentStorageAction(machineid)
def action(machineid, backupname, location, emailaddress):
    """Upload a machine's qcow2 disk chain plus its XML to ``location`` and
    mail the outcome to ``emailaddress``.

    machineid    -- libvirt domain UUID string
    backupname   -- name of the backup (destination subdirectory)
    location     -- base destination URL/path for j.cloud.system.fs.copyFile
    emailaddress -- recipient of the success/failure notification

    Fixes:
    - ``smtp.sendmail(sender, receivers, ...)`` referenced the undefined name
      ``receivers`` (NameError on every send); it is now ``receiver``.
    - ``diskxml.find('backingStore/path')`` can return None for volumes
      without a backing store, which crashed on ``.text``; now guarded.
    - py2-only ``print`` statement replaced with the parenthesized form
      (valid in both py2 and py3).
    """
    from CloudscalerLibcloud.utils.libvirtutil import LibvirtUtil
    from xml.etree import ElementTree
    import os
    import libvirt
    import time
    from email.mime.text import MIMEText
    import smtplib

    libvirtconn = libvirt.openReadOnly()
    pool = libvirtconn.storagePoolLookupByName("VMStor")
    poolvolumes = pool.listVolumes()
    connection = LibvirtUtil()
    root_disks = []
    to_upload = []
    domain = libvirtconn.lookupByUUIDString(machineid)
    disks = connection._get_domain_disk_file_names(domain)
    print('ROOTDISKS %s' % str(disks))
    for disk in disks:
        name, ext = os.path.splitext(disk)
        if ext == '.qcow2':
            root_disks.append(disk)

    def get_dependency_disk(diskpath):
        # Resolve the backing file of a qcow2 pool volume; None when the path
        # is not a pool volume or the volume has no backing store.
        if os.path.basename(diskpath) not in poolvolumes:
            return None
        volume = libvirtconn.storageVolLookupByPath(diskpath)
        if not volume:
            return None
        diskxml = ElementTree.fromstring(volume.XMLDesc(0))
        xmlbacking = diskxml.find('backingStore/path')
        # guard: base images have no backingStore/path element at all
        if xmlbacking is not None and xmlbacking.text:
            return xmlbacking.text
        return None

    def send_mail(body, subject, sender, receiver):
        # Best-effort notification mail; the SMTP server is configured here.
        msg = MIMEText(body, 'html')
        msg['Subject'] = subject
        msg['From'] = sender
        msg['To'] = receiver
        server = ' '
        smtp = None
        try:
            smtp = smtplib.SMTP(server, timeout=5)
            smtp.sendmail(sender, receiver, msg.as_string())
        finally:
            if smtp:
                smtp.quit()
        return True

    # Walk each root disk's backing chain so base images are uploaded too.
    for disk in root_disks:
        backendfile = disk
        to_upload.append(backendfile)
        while backendfile:
            backendfile = get_dependency_disk(backendfile)
            if backendfile:
                to_upload.append(backendfile)

    # start uploading files to location
    timestamp = time.time()
    locations = 'Backup Locations: \n'
    try:
        for upload in to_upload:
            source = 'file://%s' % upload
            destination_file = os.path.basename(upload)
            destination = '%s/%s/%s' % (location, backupname, destination_file)
            locations = locations + destination + '\n'
            j.cloud.system.fs.copyFile(source, destination)
        # also store the domain XML next to the disks
        destination_xml = '%s/%s/machine.xml' % (location, backupname)
        local_source_xml = '/tmp/%s_%s' % (backupname, timestamp)
        domainxml = domain.XMLDesc(0)
        j.system.fs.writeFile(local_source_xml, domainxml)
        source_xml = 'file://%s' % local_source_xml
        j.cloud.system.fs.copyFile(source_xml, destination_xml)
        j.system.fs.remove(local_source_xml)
        send_mail(locations, 'Upload Successfull', '*****@*****.**', emailaddress)
    except Exception:
        send_mail('Upload of backup has failed', 'Upload Failed', '*****@*****.**', emailaddress)
def action():
    """Return the list of domains known to the local libvirt."""
    from CloudscalerLibcloud.utils.libvirtutil import LibvirtUtil
    return LibvirtUtil().list_domains()
def action(machineid, xml=None):
    """Suspend (pause) the machine.

    ``xml`` is accepted for call-site compatibility but is not used.
    """
    from CloudscalerLibcloud.utils.libvirtutil import LibvirtUtil
    return LibvirtUtil().suspend(machineid)
def action(machineid):
    """Reboot the machine."""
    from CloudscalerLibcloud.utils.libvirtutil import LibvirtUtil
    return LibvirtUtil().reboot(machineid)
def action(machineid, machinexml):
    """Delete the machine identified by ``machineid``/``machinexml``."""
    from CloudscalerLibcloud.utils.libvirtutil import LibvirtUtil
    return LibvirtUtil().delete_machine(machineid, machinexml)
class Dispatcher(object):
    """Pins VM vcpus to physical cores.

    Normal VMs float over all non-host cores; "quarantined" VMs get pinned
    to dedicated high-numbered cores, with at most OVERSUPSCRIPTION vcpus
    packed per physical core. Pinning is applied with ``virsh vcpupin``.
    """

    # max vcpus packed onto one quarantined physical core
    # (name keeps the original spelling; renaming would break callers)
    OVERSUPSCRIPTION = 4

    def __init__(self):
        # total physical cores on this node
        self.phy_count = cpu_count()
        # cores reserved for the host itself
        self.host_count = self.get_cpu_host_count()
        # per-core usage bookkeeping for the VM cores
        self.cpus = {i: CPU(i) for i in range(self.host_count, self.phy_count)}
        # quarantine allocation prefers the highest-numbered cores first
        self.quarantine_prio = [
            i for i in reversed(range(self.host_count, self.phy_count))
        ]
        self.connection = LibvirtUtil()
        self.init_quarantine()

    @classmethod
    def get_cpu_host_count(cls):
        """Number of cores reserved for the host, scaled with machine size."""
        count = cpu_count()
        if count <= 16:
            return 1
        elif count <= 32:
            return 2
        else:
            return 4

    @classmethod
    def get_cpu_virsh_num(cls):
        # number of cores available for VM pinning
        return cpu_count() - cls.get_cpu_host_count()

    @staticmethod
    def is_quarantined(vmid):
        """True when the VM's first vcpu is pinned to a single core.

        A '-' in the cpulist means a range (floating), i.e. not quarantined.
        """
        _, out = j.system.process.execute('virsh vcpupin "%s" --live' % (vmid),
                                          ignoreErrorOutput=True)
        # parse the "VCPU: CPU Affinity" table into [vcpu, cpulist] pairs,
        # skipping the two header lines
        vals = [
            map(lambda y: y.strip(), x.split(':'))
            for x in out.split('\n')[2:] if x
        ]
        if not vals or '-' in vals[0][1]:
            return False
        return True

    @staticmethod
    def get_quarantined_vm_pins(vmid):
        """Return {vcpu: pcpu} for a quarantined VM, {} otherwise."""
        _, out = j.system.process.execute('virsh vcpupin "%s" --live' % (vmid),
                                          ignoreErrorOutput=True)
        vals = [
            map(lambda y: y.strip(), x.split(':'))
            for x in out.split('\n')[2:] if x
        ]
        if not Dispatcher.is_quarantined(vmid):
            return {}
        else:
            # NOTE: ``j`` here is the comprehension variable, shadowing the
            # JumpScale global inside the comprehension's scope
            return {int(i): int(j) for i, j in vals}

    def init_quarantine(self):
        # Rebuild the per-core usage counters from currently-running domains.
        for domain in self.connection.list_domains():
            if domain['state'] == libvirt.VIR_DOMAIN_RUNNING:
                vmid = domain['id']
                pins = Dispatcher.get_quarantined_vm_pins(domain['id'])
                for vcpu, pcpu in pins.items():
                    self.cpus[pcpu].incr(vmid)

    def alloc(self, vmid, vcpus):
        """Non-quarantine allocation: every vcpu floats over all VM cores."""
        cpus = '%s-%s' % (self.host_count, self.phy_count - 1)
        return {i: cpus for i in range(vcpus)}

    def dealloc(self, vmid):
        self.defrag()

    def defrag(self):
        # no-op for floating allocations
        pass

    def alloc_quarantine(self, vmid, vcpus):
        """Reserve dedicated cores for ``vcpus`` vcpus.

        Returns {vcpu index: cpulist string}; raises RuntimeError when the
        quarantine cores cannot satisfy the request.
        """
        # TODO: locking should be done here
        results = []
        req = vcpus
        for i in self.quarantine_prio:
            cpu = self.cpus[i]
            avail = Dispatcher.OVERSUPSCRIPTION - cpu.curr
            if avail > 0:
                results.append((cpu, avail))
                req -= avail
                if req <= 0:
                    # in case that we now have extra cpus
                    if req < 0:
                        results[-1] = (results[-1][0], results[-1][1] + req)
                    break
        else:
            # for/else: loop exhausted without break -> not enough capacity
            raise RuntimeError("Don't have enough cpu for the required vcpus")
        res = {}
        i = 0
        for cpu, avail in results:
            cpu.alloc(vmid, avail)
            for _ in range(avail):
                res[i] = str(cpu.index)
                i += 1
        return res

    def dealloc_quarantine(self, vmid):
        """Release a VM's quarantine pins and compact the remaining ones."""
        removed = []
        for i in self.quarantine_prio:
            cpu = self.cpus[i]
            res = cpu.dealloc(vmid)
            if res:
                removed.append(i)
        self.defrag_quarantine()

    def defrag_quarantine(self):
        # Compact quarantined pins: move vcpus from the tail of the priority
        # list onto head cores that still have capacity.
        # NOTE: local ``j`` below shadows the JumpScale global in this method.
        i = 0
        j = len(self.quarantine_prio) - 1
        while i < j:
            to = self.cpus[self.quarantine_prio[i]]
            from_ = self.cpus[self.quarantine_prio[j]]
            avail = Dispatcher.OVERSUPSCRIPTION - to.curr
            present = from_.curr
            if not avail:
                i += 1
                continue
            if not present:
                j -= 1
                continue
            to_move = min(avail, present)
            # NOTE(review): from_.vms is mutated via incr() while being
            # iterated -- confirm CPU.incr does not add/remove keys here.
            for vmid, count in from_.vms.items():
                to_move_vm = min(to_move, count)
                from_.incr(vmid, -to_move_vm)
                to.incr(vmid, to_move_vm)
                to_move -= to_move_vm
                Dispatcher.move_cpus(vmid, to_move_vm, from_.index, to.index)
                if to_move == 0:
                    break

    def quarantine_vm(self, vmid):
        """Move a VM from floating allocation onto dedicated quarantine cores."""
        if not Dispatcher.is_quarantined(vmid):
            vm = self.connection._get_domain(vmid)
            if vm is None:
                raise RuntimeError("cannot get the machine with id %s" % (vmid))
            vcpus = vm.vcpusFlags()
            self.dealloc(vmid)
            cpus = self.alloc_quarantine(vmid, vcpus)
            Dispatcher.set_cpu(vm.UUIDString(), cpus)

    def unquarantine_vm(self, vmid):
        """Move a VM from quarantine cores back to floating allocation."""
        if Dispatcher.is_quarantined(vmid):
            vm = self.connection._get_domain(vmid)
            if vm is None:
                raise RuntimeError("cannot get the machine with id %s" % (vmid))
            vcpus = vm.vcpusFlags()
            self.dealloc_quarantine(vmid)
            cpus = self.alloc(vmid, vcpus)
            Dispatcher.set_cpu(vm.UUIDString(), cpus)

    @staticmethod
    def set_cpu(vmid, pcpus):
        # Apply {vcpu: cpulist} pins live via virsh.
        for i in pcpus:
            j.system.process.execute(
                "virsh vcpupin %s --vcpu '%d' --cpulist '%s' --live"
                % (vmid, i, pcpus[i]),
                ignoreErrorOutput=True)

    @staticmethod
    def move_cpus(vmid, count, from_, to):
        """Re-pin ``count`` of the VM's vcpus from core ``from_`` to ``to``.

        Raises RuntimeError when fewer than ``count`` vcpus are pinned to
        ``from_``.
        """
        pins = Dispatcher.get_quarantined_vm_pins(vmid)
        for i, k in pins.items():
            if k == from_:
                count -= 1
                j.system.process.execute(
                    "virsh vcpupin %s --vcpu '%d' --cpulist '%s' --live"
                    % (vmid, i, to),
                    ignoreErrorOutput=True)
                if count == 0:
                    return
        raise RuntimeError("cannot move this number of vcores")
def action(machineid):
    """Back up the machine via the cephfs backup path."""
    from CloudscalerLibcloud.utils.libvirtutil import LibvirtUtil
    return LibvirtUtil().backup_machine_cephfs(machineid)