Example #1
def action():
    from CloudscalerLibcloud.utils.gridconfig import GridConfig
    import time
    ccl = j.clients.osis.getNamespace('cloudbroker')
    current_time = time.time()
    disks = ccl.disk.search({'status': 'TOBEDELETED'}, size=0)[1:]
    scl = j.clients.osis.getNamespace('system')
    acl = j.clients.agentcontroller.get()
    grid_info = {}
    for disk in disks:
        deletion_time = disk['deletionTime']
        grid_id = disk['gid']
        grid = scl.grid.get(grid_id)
        grid_config = GridConfig(grid)
        retention_period = grid_config.get('delete_retention_period')
        if current_time >= (deletion_time + retention_period):
            if grid_id not in grid_info:
                ovs_cred = grid_config.settings['ovs_credentials']
                ovs_connection = {'ips': ovs_cred['ips'],
                                  'client_id': ovs_cred['client_id'],
                                  'client_secret': ovs_cred['client_secret']}
                grid_info[grid_id] = {'ovs_connection': ovs_connection, 'diskguids': [], 'diskids': []}
            # record every expired disk, not only the first one seen per grid
            grid_info[grid_id]['diskids'].append(disk['id'])
            diskparts = disk['referenceId'].split('@')
            if len(diskparts) == 2:
                grid_info[grid_id]['diskguids'].append(diskparts[1])
    for gid, grid in grid_info.items():
        args = {'diskguids': grid['diskguids'], 'ovs_connection': grid['ovs_connection']}
        acl.executeJumpscript(organization='greenitglobe', name='deletedisks', role='storagedriver', gid=gid, args=args)
        ccl.disk.updateSearch({'id': {'$in': grid['diskids']}}, {'$set': {'status': 'DESTROYED'}})
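
A note on Example #1: the destroy gate is simply "now >= deletionTime + delete_retention_period". A minimal sketch of that check in isolation (field names mirror the example; the sample values are hypothetical):

import time

def is_past_retention(deletion_time, retention_period, now=None):
    # True once a soft-deleted record's retention window has elapsed
    now = time.time() if now is None else now
    return now >= deletion_time + retention_period

# deleted 8 days ago, 7-day retention window
deleted_at = time.time() - 8 * 24 * 3600
print(is_past_retention(deleted_at, 7 * 24 * 3600))  # True
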
Example #2
def action():
    from CloudscalerLibcloud.utils.gridconfig import GridConfig
    import time

    ccl = j.clients.osis.getNamespace("cloudbroker")
    current_time = time.time()
    images = ccl.image.search({"status": "DELETED"}, size=0)[1:]
    scl = j.clients.osis.getNamespace("system")
    pcl = j.clients.portal.getByInstance("main")
    for image in images:
        references = ccl.vmachine.count({
            "imageId": image["id"],
            "status": {
                "$ne": "DESTROYED"
            }
        })
        if references:
            # skip images still referenced by live machines; don't abort the job
            continue
        deletion_time = image["deletionTime"]
        grid_id = image["gid"]
        grid = scl.grid.get(grid_id)
        grid_config = GridConfig(grid)
        retention_period = grid_config.get("delete_retention_period")
        if current_time >= (deletion_time + retention_period):
            pcl.actors.cloudbroker.image.delete(image["id"],
                                                reason="Cleanup job",
                                                permanently=True)
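
The reference check above guards against deleting images that non-destroyed machines still use; note that it skips such images instead of aborting the whole sweep. A standalone sketch of the guard, with hypothetical in-memory records standing in for the OSIS models:

machines = [{"imageId": 1, "status": "RUNNING"},
            {"imageId": 2, "status": "DESTROYED"}]
images = [{"id": 1}, {"id": 2}]

def count_references(image_id):
    # mirrors ccl.vmachine.count({"imageId": ..., "status": {"$ne": "DESTROYED"}})
    return sum(1 for m in machines
               if m["imageId"] == image_id and m["status"] != "DESTROYED")

for image in images:
    if count_references(image["id"]):
        continue  # image 1 is still referenced and is skipped
    print("image", image["id"], "is eligible for permanent deletion")
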
Example #3
def __init__(self):
    self.connection = libvirt.open()
    self.readonly = libvirt.openReadOnly()
    self.basepath = "/mnt/vmstor"
    self.templatepath = "/mnt/vmstor/templates"
    self.env = Environment(
        loader=PackageLoader("CloudscalerLibcloud", "templates"))
    self.config = GridConfig()
Example #4
def action():
    from CloudscalerLibcloud.utils.gridconfig import GridConfig
    import time

    ccl = j.clients.osis.getNamespace("cloudbroker")
    current_time = time.time()
    machines = ccl.vmachine.search({"status": "DELETED"}, size=0)[1:]
    acl = j.clients.agentcontroller.get()
    scl = j.clients.osis.getNamespace("system")
    grid_info = {}
    for machine in machines:
        deletion_time = machine["deletionTime"]
        cloudspace = ccl.cloudspace.get(machine["cloudspaceId"])
        grid_id = cloudspace.gid
        grid = scl.grid.get(grid_id)
        grid_config = GridConfig(grid)
        retention_period = grid_config.get("delete_retention_period")
        if current_time >= (deletion_time + retention_period):
            ccl.vmachine.updateSearch(
                {"id": machine["id"]},
                {
                    "$set": {
                        "status": "DESTROYED",
                        "updateTime": int(time.time())
                    }
                },
            )
            query = {"id": {"$in": machine["disks"]}}
            if grid_id not in grid_info:
                ovs_cred = grid_config.settings["ovs_credentials"]
                ovs_connection = {
                    "ips": ovs_cred["ips"],
                    "client_id": ovs_cred["client_id"],
                    "client_secret": ovs_cred["client_secret"],
                }
                grid_info[grid_id] = {
                    "ovs_connection": ovs_connection,
                    "diskguids": []
                }
            for disk in ccl.disk.search(query)[1:]:
                diskparts = disk["referenceId"].split("@")
                if len(diskparts) == 2:
                    grid_info[grid_id]["diskguids"].append(diskparts[1])
            ccl.disk.updateSearch(query, {"$set": {"status": "DESTROYED"}})
    for gid, grid in grid_info.items():
        args = {
            "diskguids": grid["diskguids"],
            "ovs_connection": grid["ovs_connection"],
        }
        acl.executeJumpscript(
            organization="greenitglobe",
            name="deletedisks",
            role="storagedriver",
            gid=gid,
            args=args,
        )
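
The grid_info accumulator in Example #4 batches disk GUIDs per grid so that one deletedisks jumpscript runs per grid rather than one per disk. The grouping itself is plain dict bookkeeping; a sketch with hypothetical disk records:

disks = [{"gid": 1, "referenceId": "openvstorage+tcp://sr1/vm1/disk@guid-a"},
         {"gid": 1, "referenceId": "openvstorage+tcp://sr1/vm1/meta@guid-b"},
         {"gid": 2, "referenceId": "openvstorage+tcp://sr2/vm2/disk@guid-c"}]
grid_info = {}
for disk in disks:
    info = grid_info.setdefault(disk["gid"], {"diskguids": []})
    parts = disk["referenceId"].split("@")
    if len(parts) == 2:
        info["diskguids"].append(parts[1])
print(grid_info)
# {1: {'diskguids': ['guid-a', 'guid-b']}, 2: {'diskguids': ['guid-c']}}
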
Example #5
def getStackCapacity(self, stack, grid, nodesbyid):
    # search for all vms running on the stacks
    usedvms = models.vmachine.search({
        "$fields": ["id", "memory"],
        "$query": {
            "stackId": stack["id"],
            "status": {
                "$nin": resourcestatus.Machine.NON_CONSUMING_STATES
            },
        },
    })[1:]
    stack["usedvms"] = len(usedvms)
    if usedvms:
        stack["usedmemory"] = sum(vm["memory"] for vm in usedvms)
    else:
        stack["usedmemory"] = 0
    # add vfws
    nodeid = int(stack["referenceId"])
    roscount = self.vcl.virtualfirewall.count({
        "gid": stack["gid"],
        "nid": nodeid
    })
    stack["usedmemory"] += roscount * 128
    stack["usedros"] = roscount
    stack["totalmemory"] = nodesbyid[nodeid]
    reservedmemory = (GridConfig(
        grid, stack["totalmemory"] / 1024.).get("reserved_mem") or 0)
    stack["reservedmemory"] = reservedmemory
    stack["freememory"] = (stack["totalmemory"] - stack["usedmemory"] -
                           reservedmemory)
Example #6
def __init__(self, stack):
    self._rndrbn_vnc = 0
    self.id = int(stack.referenceId)
    self.gid = stack.gid
    self.name = "libvirt"
    self.uri = stack.apiUrl
    self.stack = stack
    self.env = env
    self.scl = j.clients.osis.getNamespace("system")
    grid = self.scl.grid.get(self.gid)
    self.node = self.scl.node.get(self.id)
    self.config = GridConfig(grid, self.node.memory / 1024.)
    # preload ovs_credentials and ovs_connection
    # this is to detect errors earlier if there is
    # some misconfiguration
    self.ovs_connection
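
The trailing self.ovs_connection in this __init__ is a fail-fast trick: evaluating the property dereferences the ovs_credentials keys immediately, so a misconfigured grid raises at driver construction instead of on first use (the full property appears in Example #13). A minimal sketch of the idea, with hypothetical settings:

class Driver(object):
    def __init__(self, settings):
        self.settings = settings
        self.ovs_connection  # touch the property now; raise early if misconfigured

    @property
    def ovs_connection(self):
        cred = self.settings["ovs_credentials"]  # KeyError here means bad config
        return {"ips": cred["ips"],
                "client_id": cred["client_id"],
                "client_secret": cred["client_secret"]}

Driver({"ovs_credentials": {"ips": [], "client_id": "x", "client_secret": "y"}})
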
Example #7
def action():
    from CloudscalerLibcloud.utils.gridconfig import GridConfig
    import time
    ccl = j.clients.osis.getNamespace('cloudbroker')
    current_time = time.time()
    cloudspaces = ccl.cloudspace.search({'status': 'DELETED'}, size=0)[1:]
    scl = j.clients.osis.getNamespace('system')
    pcl = j.clients.portal.getByInstance('main')
    for cloudspace in cloudspaces:
        deletion_time = cloudspace['deletionTime']
        grid_id = cloudspace['gid']
        grid = scl.grid.get(grid_id)
        grid_config = GridConfig(grid)
        retention_period = grid_config.get('delete_retention_period')
        if current_time >= (deletion_time + retention_period):
            pcl.actors.cloudbroker.cloudspace.destroy(
                cloudspaceId=cloudspace['id'],
                permanently=True,
                reason='Cleanup job')
Example #8
def action():
    from CloudscalerLibcloud.utils.gridconfig import GridConfig
    import time

    ccl = j.clients.osis.getNamespace("cloudbroker")
    current_time = time.time()
    cloudspaces = ccl.cloudspace.search({"status": "DELETED"}, size=0)[1:]
    scl = j.clients.osis.getNamespace("system")
    pcl = j.clients.portal.getByInstance("main")
    for cloudspace in cloudspaces:
        deletion_time = cloudspace["deletionTime"]
        grid_id = cloudspace["gid"]
        grid = scl.grid.get(grid_id)
        grid_config = GridConfig(grid)
        retention_period = grid_config.get("delete_retention_period")
        if current_time >= (deletion_time + retention_period):
            pcl.actors.cloudbroker.cloudspace.destroy(
                cloudspaceId=cloudspace["id"],
                permanently=True,
                reason="Cleanup job")
Example #9
def getStackCapacity(self, stack, grid, nodesbyid):
    # search for all vms running on the stacks
    usedvms = models.vmachine.search({'$fields': ['id', 'memory'],
                                      '$query': {'stackId': stack['id'],
                                                 'status': {'$nin': resourcestatus.Machine.NON_CONSUMING_STATES}}
                                      }
                                     )[1:]
    stack['usedvms'] = len(usedvms)
    if usedvms:
        stack['usedmemory'] = sum(vm['memory'] for vm in usedvms)
    else:
        stack['usedmemory'] = 0
    # add vfws
    nodeid = int(stack['referenceId'])
    roscount = self.vcl.virtualfirewall.count({'gid': stack['gid'], 'nid': nodeid})
    stack['usedmemory'] += roscount * 128
    stack['usedros'] = roscount
    stack['totalmemory'] = nodesbyid[nodeid]
    reservedmemory = GridConfig(grid, stack['totalmemory']/1024.).get('reserved_mem') or 0
    stack['reservedmemory'] = reservedmemory
    stack['freememory'] = stack['totalmemory'] - stack['usedmemory'] - reservedmemory
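
getStackCapacity reduces to arithmetic: used memory is the sum over VMs in consuming states plus 128 MiB per virtual firewall, and free memory is total minus used minus the grid's reserved_mem. Worked through with hypothetical numbers:

total_mb = 131072              # node RAM in MiB (128 GiB)
vm_mem = [2048, 4096, 8192]    # memory of VMs in consuming states
roscount = 3                   # virtual firewalls at 128 MiB each

used = sum(vm_mem) + roscount * 128   # 14720
reserved = 4096                       # hypothetical reserved_mem value
free = total_mb - used - reserved     # 112256
print(used, reserved, free)
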
Example #10
def action():
    from CloudscalerLibcloud.compute.drivers.libvirt_driver import OpenvStorageVolume, OpenvStorageISO, convertchar
    from CloudscalerLibcloud.utils.gridconfig import GridConfig
    from xml.etree import ElementTree

    def get_volume(disk):
        volumeclass = OpenvStorageVolume
        if disk['type'] == 'M':
            volumeclass = OpenvStorageISO
        driver = None
        volume = volumeclass(
            id=disk['referenceId'],
            name=disk['name'],
            size=disk['sizeMax'],
            driver=driver,
            iotune=disk.get('iotune', {}),
        )
        return volume

    def get_domain_disks(dom):
        if isinstance(dom, ElementTree.Element):
            xml = dom
        elif isinstance(dom, basestring):
            xml = ElementTree.fromstring(dom)
        else:
            raise TypeError('dom must be an Element or an XML string')
        disks = xml.findall('devices/disk')
        for disk in disks:
            if disk.attrib['device'] in ('disk', 'cdrom'):
                yield disk

    def get_disk_by_name(disks, name):
        for disk, volume in disks:
            if volume.name == name:
                return disk, volume

    ccl = j.clients.osis.getNamespace('cloudbroker')
    config = GridConfig()
    ovs_credentials = config.get('ovs_credentials')
    connection = {
        'ips': ovs_credentials['ips'],
        'client_id': ovs_credentials['client_id'],
        'client_secret': ovs_credentials['client_secret']
    }
    acl = j.clients.agentcontroller.get()

    def get_disk_guid(gid, name):
        devicename = '/{}.raw'.format(name)
        args = {'ovs_connection': connection, 'diskpath': devicename}
        job = acl.executeJumpscript('greenitglobe',
                                    'lookup_disk_by_path',
                                    gid=gid,
                                    role='storagemaster',
                                    args=args)
        if job['state'] != 'OK':
            return False
        return job['result']

    lcl = j.clients.osis.getNamespace('libcloud')
    machines = ccl.vmachine.search(
        {'status': {
            '$nin': ['ERROR', 'DESTROYED']
        }}, size=0)[1:]
    for machine in machines:
        j.console.info('Updating machine {}'.format(machine['name']))
        xmlkey = 'domain_{}'.format(machine['referenceId'])
        if not lcl.libvirtdomain.exists(xmlkey):
            continue
        xml = lcl.libvirtdomain.get(xmlkey)
        xmldisks = list(get_domain_disks(xml))
        vmdisks = [
            (disk, get_volume(disk))
            for disk in ccl.disk.search({'id': {
                '$in': machine['disks']
            }})[1:]
        ]
        firstdisk, firstvolume = vmdisks[0]
        vmchanges = False
        for xmldisk in xmldisks:
            ismeta = xmldisk.attrib['device'] == 'cdrom'
            source = xmldisk.find('source')
            name = source.attrib['name']
            diskpair = get_disk_by_name(vmdisks, name)
            if not diskpair:
                if not ismeta:
                    # what is this?
                    continue

                diskguid = get_disk_guid(firstdisk['gid'], name)
                if diskguid is False:
                    continue

                vmchanges = True
                disk = ccl.disk.new()
                disk.name = 'Metadata iso'
                disk.type = 'M'
                disk.stackId = firstdisk['stackId']
                disk.accountId = firstdisk['accountId']
                disk.gid = firstdisk['gid']
                disk.referenceId = firstdisk['referenceId'].replace(
                    firstvolume.vdiskguid,
                    diskguid).replace(firstvolume.name, name)
                diskid = ccl.disk.set(disk)[0]
                machine['disks'].append(diskid)
            else:
                # lets fix the disk order
                target = xmldisk.find('target')
                dev = target.attrib['dev']
                disk, volume = diskpair
                disk['order'] = convertchar(dev[2:])
                ccl.disk.set(disk)
        for nic in machine['nics']:
            if nic.get('target') and not nic['deviceName']:
                nic['deviceName'] = nic['target']
                vmchanges = True

        if vmchanges:
            ccl.vmachine.set(machine)
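
The get_domain_disks helper walks the libvirt domain XML for <disk> devices of type "disk" or "cdrom". A self-contained sketch against a toy domain definition (note this script is Python 2; basestring becomes str on Python 3):

from xml.etree import ElementTree

DOMAIN_XML = """
<domain>
  <devices>
    <disk device="disk"><source name="vm-1/bootdisk-vm-1"/></disk>
    <disk device="cdrom"><source name="vm-1/cloud-init-vm-1"/></disk>
    <disk device="floppy"/>
  </devices>
</domain>
"""

def get_domain_disks(dom):
    xml = dom if isinstance(dom, ElementTree.Element) else ElementTree.fromstring(dom)
    for disk in xml.findall("devices/disk"):
        if disk.attrib["device"] in ("disk", "cdrom"):
            yield disk

for disk in get_domain_disks(DOMAIN_XML):
    print(disk.attrib["device"], disk.find("source").attrib["name"])
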
Example #11
class LibvirtUtil(object):
    def __init__(self):
        self.connection = libvirt.open()
        self.readonly = libvirt.openReadOnly()
        self.basepath = "/mnt/vmstor"
        self.templatepath = "/mnt/vmstor/templates"
        self.env = Environment(
            loader=PackageLoader("CloudscalerLibcloud", "templates"))
        self.config = GridConfig()

    def _get_domain(self, id):
        try:
            domain = self.connection.lookupByUUIDString(id)
        except libvirt.libvirtError as e:
            if e.get_error_code() == libvirt.VIR_ERR_NO_DOMAIN:
                return None
            raise  # unexpected libvirt errors must propagate
        return domain

    def close(self):
        self.connection.close()
        self.readonly.close()

    def get_domain_obj(self, id):
        return self._get_domain(id)

    def modXML(self, xml):
        root = ElementTree.fromstring(xml)
        vcpu = root.find("vcpu")
        vcpu.set(
            "cpuset",
            "{startcpu}-{cpulimit}".format(startcpu=RESERVED_CPUS,
                                           cpulimit=CPU_COUNT - 1),
        )
        xml = ElementTree.tostring(root)
        return xml

    def defineXML(self, xml):
        xml = self.modXML(xml)
        return self.connection.defineXML(xml)

    def create(self, id, xml):
        if isLocked(id):
            raise Exception("Can't start a locked machine")
        domain = self._get_domain(id)
        if domain:
            state = domain.state()[0]
            if state == libvirt.VIR_DOMAIN_RUNNING:
                return domain.XMLDesc()
            elif state == libvirt.VIR_DOMAIN_PAUSED:
                domain.resume()
        else:
            xml = self.modXML(xml)
            domain = self.connection.createXML(xml)
        return domain.XMLDesc()

    def shutdown(self, id, force=False):
        if isLocked(id):
            raise Exception("Can't stop a locked machine")
        domain = self._get_domain(id)
        if domain:
            isPersistent = domain.isPersistent()
            networkid = self._get_domain_networkid(domain)
            bridges = list(self._get_domain_bridges(domain))
            if domain.state()[0] not in [
                    libvirt.VIR_DOMAIN_SHUTDOWN,
                    libvirt.VIR_DOMAIN_SHUTOFF,
                    libvirt.VIR_DOMAIN_CRASHED,
            ]:
                if force:
                    domain.destroy()
                else:
                    if not domain.shutdown() == 0:
                        return False
                    try:
                        self.waitForAction(
                            id,
                            timeout=30000,
                            events=[libvirt.VIR_DOMAIN_EVENT_STOPPED])
                    except TimeoutError as e:
                        j.errorconditionhandler.processPythonExceptionObject(e)
                        domain.destroy()
            if isPersistent:
                domain.undefine()
            if networkid or bridges:
                self.cleanupNetwork(networkid, bridges)
        return True
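
    # waitForAction below drives libvirt's default event loop until the domain
    # with UUID `domid` emits one of `events` (e.g. VIR_DOMAIN_EVENT_STOPPED)
    # or the timeout callback fires, in which case TimeoutError is raised;
    # shutdown() above relies on this to wait for a graceful stop.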

    def waitForAction(self, domid, timeout=None, events=None):
        libvirt.virEventRegisterDefaultImpl()
        rocon = libvirt.openReadOnly()
        run = {"state": True, "timeout": False}

        def timecb(timerid, opaque):
            run["state"] = False
            run["timeout"] = True

        def callback(con, domain, event, detail, opaque):
            if domain.UUIDString() == domid:
                if events is not None and event in events:
                    run["state"] = False
                    return True

        if timeout:
            libvirt.virEventAddTimeout(timeout, timecb, None)
        rocon.domainEventRegisterAny(None,
                                     libvirt.VIR_DOMAIN_EVENT_ID_LIFECYCLE,
                                     callback, rocon)
        while run["state"]:
            libvirt.virEventRunDefaultImpl()

        if run["timeout"]:
            raise TimeoutError("Failed to wait for state")

    def reboot(self, id, xml):
        if isLocked(id):
            raise Exception("Can't reboot a locked machine")
        domain = self._get_domain(id)
        if domain:
            if domain.state()[0] in [
                    libvirt.VIR_DOMAIN_SHUTDOWN,
                    libvirt.VIR_DOMAIN_SHUTOFF,
                    libvirt.VIR_DOMAIN_CRASHED,
            ]:
                domain.create()
            else:
                domain.reboot()
        else:
            self.create(id, xml)
        return True

    def suspend(self, id):
        if isLocked(id):
            raise Exception("Can't suspend a locked machine")
        domain = self._get_domain(id)
        if domain.state()[0] == libvirt.VIR_DOMAIN_PAUSED:
            return True
        return domain.suspend() == 0

    def resume(self, id):
        if isLocked(id):
            raise Exception("Can't resume a locked machine")
        domain = self._get_domain(id)
        if domain.state()[0] == libvirt.VIR_DOMAIN_RUNNING:
            return True
        return domain.resume() == 0

    def delete_machine(self, machineid, machinexml):
        if isLocked(machineid):
            raise Exception("Can't delete a locked machine")
        try:
            domain = self.connection.lookupByUUIDString(machineid)
            xml = ElementTree.fromstring(domain.XMLDesc())
        except libvirt.libvirtError:
            domain = None
            xml = ElementTree.fromstring(machinexml)
        networkid = self._get_domain_networkid(xml)
        bridges = self._get_domain_bridges(xml)
        if domain:
            if domain.state()[0] != libvirt.VIR_DOMAIN_SHUTOFF:
                domain.destroy()
            try:
                domain.undefine()
            except libvirt.libvirtError:
                pass  # non-persistent vms don't need to be undefined
        if networkid or bridges:
            self.cleanupNetwork(networkid, bridges)
        name = xml.find("name").text
        poolpath = os.path.join(self.basepath, name)
        return True

    def get_domain_disks(self, dom):
        if isinstance(dom, ElementTree.Element):
            xml = dom
        elif isinstance(dom, basestring):
            xml = ElementTree.fromstring(dom)
        else:
            xml = ElementTree.fromstring(dom.XMLDesc(0))
        disks = xml.findall("devices/disk")
        for disk in disks:
            if disk.attrib["device"] in ("disk", "cdrom"):
                yield disk

    def get_domain_disk(self, referenceId, domaindisks):
        url = urlparse.urlparse(referenceId)
        name = url.path.split("@")[0].strip("/").split(":")[0]
        for disk in domaindisks:
            source = disk.find("source")
            if source is not None:
                if source.attrib.get("name", "").strip("/") == name:
                    target = disk.find("target")
                    return target.attrib["dev"]

    def get_domain_nics(self, dom):
        xml = self._get_xml_dom(dom)
        for target in xml.findall("devices/interface/target"):
            yield target.attrib["dev"]

    def get_domain_nics_info(self, dom):
        xml = self._get_xml_dom(dom)
        for interface in xml.findall("devices/interface"):
            nic = {}
            nic["mac"] = interface.find("mac").attrib["address"]
            nic["name"] = interface.find("target").attrib["dev"]
            source = interface.find("source")
            nic["bridge"] = (source.attrib["bridge"]
                             if source.attrib.get("bridge") else
                             source.attrib["network"])
            yield nic

    def _get_xml_dom(self, dom):
        if isinstance(dom, ElementTree.Element):
            return dom
        elif isinstance(dom, basestring):
            return ElementTree.fromstring(dom)
        else:
            return ElementTree.fromstring(dom.XMLDesc(0))

    def _get_domain_disk_file_names(self, dom):
        diskfiles = list()
        for disk in self.get_domain_disks(dom):
            source = disk.find("source")
            if source is not None:
                if source.attrib.get("protocol") == "openvstorage":
                    diskfiles.append(
                        os.path.join(self.basepath,
                                     source.attrib["name"] + ".raw"))
                else:
                    if "dev" in source.attrib:
                        diskfiles.append(source.attrib["dev"])
                    if "file" in source.attrib:
                        diskfiles.append(source.attrib["file"])
        return diskfiles

    def _get_domain_bridges(self, dom):
        xml = self._get_xml_dom(dom)
        interfaces = xml.findall("devices/interface/source")
        for interface in interfaces:
            for network_key in ["bridge", "network"]:
                if network_key in interface.attrib:
                    yield interface.attrib[network_key]

    def _get_domain_networkid(self, dom):
        for bridge in self._get_domain_bridges(dom):
            if bridge.startswith("space_"):
                networkid = bridge.partition("_")[-1]
                return int(networkid, 16)
        return None

    def check_disk(self, diskxml):
        return True

    def memory_usage(self):
        ids = self.readonly.listDomainsID()
        hostmem = self.readonly.getInfo()[1]
        totalmax = 0
        totalrunningmax = 0
        for id in ids:
            dom = self.readonly.lookupByID(id)
            machinestate, maxmem, mem = dom.info()[0:3]
            totalmax += mem / 1024
            if machinestate == libvirt.VIR_DOMAIN_RUNNING:
                totalrunningmax += maxmem / 1024
        return (hostmem, totalmax, totalrunningmax)

    def check_machine(self, machinexml, reserved_mem=None):
        if reserved_mem is None:
            reserved_mem = self.config.get("reserved_mem")
        xml = ElementTree.fromstring(machinexml)
        memory = int(xml.find("currentMemory").text)
        hostmem, totalmax, totalrunningmax = self.memory_usage()
        if (totalrunningmax + memory) > (hostmem - reserved_mem):
            return False
        return True

    def snapshot(self, id, xml, snapshottype):
        if isLocked(id):
            raise Exception("Can't snapshot a locked machine")
        domain = self._get_domain(id)
        flags = (0 if snapshottype == "internal" else
                 libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY)
        snap = domain.snapshotCreateXML(xml, flags)
        return {"name": snap.getName(), "xml": snap.getXMLDesc()}

    def listSnapshots(self, id):
        domain = self._get_domain(id)
        results = list()
        for snapshot in domain.listAllSnapshots():
            xml = ElementTree.fromstring(snapshot.getXMLDesc())
            snap = {
                "name": xml.find("name").text,
                "epoch": int(xml.find("creationTime").text),
            }
            results.append(snap)
        return results

    def deleteVolume(self, path):
        vol = self.connection.storageVolLookupByPath(path)
        return vol.delete(0)

    def getSnapshot(self, domain, name):
        domain = self._get_domain(domain)
        snap = domain.snapshotLookupByName(name, 0)
        xml = ElementTree.fromstring(snap.getXMLDesc())
        return {
            "name": snap.getName(),
            "epoch": int(xml.find("creationTime").text),
        }

    def _isRootVolume(self, domain, file):
        diskfiles = self._get_domain_disk_file_names(domain)
        if file in diskfiles:
            return True
        return False

    def _renameSnapshot(self, id, name, newname):
        domain = self._get_domain(id)
        snapshot = domain.snapshotLookupByName(name, 0)
        xml = snapshot.getXMLDesc()
        newxml = xml.replace("<name>%s</name>" % name,
                             "<name>%s</name>" % newname)
        domain.snapshotCreateXML(
            newxml,
            (libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_REDEFINE
             | libvirt.VIR_DOMAIN_SNAPSHOT_CREATE_DISK_ONLY),
        )
        snapshot.delete(libvirt.VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA)
        return True

    def deleteSnapshot(self, id, name):
        if isLocked(id):
            raise Exception("Can't delete a snapshot from a locked machine")
        newname = "%s_%s" % (name, "DELETING")
        self._renameSnapshot(id, name, newname)
        name = newname
        domain = self._get_domain(id)
        snapshot = domain.snapshotLookupByName(name, 0)
        snapshotfiles = self._getSnapshotDisks(id, name)
        volumes = []
        todelete = []
        for snapshotfile in snapshotfiles:
            is_root_volume = self._isRootVolume(domain,
                                                snapshotfile["file"].path)
            if not is_root_volume:
                print("Blockcommit from %s to %s" % (
                    snapshotfile["file"].path,
                    snapshotfile["file"].backing_file_path,
                ))
                domain.blockCommit(
                    snapshotfile["name"],
                    snapshotfile["file"].backing_file_path,
                    snapshotfile["file"].path,
                )
                todelete.append(snapshotfile["file"].path)
                volumes.append(snapshotfile["name"])
            else:
                # we can't use blockcommit on topsnapshots
                new_base = Qcow2(
                    snapshotfile["file"].backing_file_path).backing_file_path
                todelete.append(snapshotfile["file"].backing_file_path)
                if not new_base:
                    continue
                print("Blockrebase from %s" % new_base)
                flags = (libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY
                         | libvirt.VIR_DOMAIN_BLOCK_REBASE_REUSE_EXT
                         | libvirt.VIR_DOMAIN_BLOCK_REBASE_SHALLOW)
                domain.blockRebase(snapshotfile["name"], new_base, flags)
                volumes.append(snapshotfile["name"])

        while not self._block_job_domain_info(domain, volumes):
            time.sleep(0.5)

        # we can undefine the snapshot
        snapshot.delete(flags=libvirt.VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA)
        for disk in todelete:
            if os.path.exists(disk):
                os.remove(disk)
        return True

    def _block_job_domain_info(self, domain, paths):
        for path in paths:
            done = self._block_job_info(domain, path)
            if not done:
                return False
        return True

    def _block_job_info(self, domain, path):
        status = domain.blockJobInfo(path, 0)
        print(status)
        try:
            cur = status.get("cur", 0)
            end = status.get("end", 0)
            if cur != 0 and end != 0:
                per = int((cur / float(end)) * 100)
                j.logger.log(
                    "Copy progress %s" % per,
                    1,
                    "progress",
                    tags="id:%s per:%s" % (domain.UUIDString(), per),
                )
            if cur == end:
                return True
        except Exception:
            return True
        else:
            return False

    def rollbackSnapshot(self, id, name, deletechildren=True):
        if isLocked(id):
            raise Exception("Can't rollback a locked machine")
        domain = self._get_domain(id)
        snapshot = domain.snapshotLookupByName(name, 0)
        snapshotdomainxml = ElementTree.fromstring(snapshot.getXMLDesc(0))
        domainxml = snapshotdomainxml.find("domain")
        newxml = ElementTree.tostring(domainxml)
        self.defineXML(newxml)
        if deletechildren:
            children = snapshot.listAllChildren(1)
            for child in children:
                snapshotfiles = self._getSnapshotDisks(id, child.getName())
                for snapshotfile in snapshotfiles:
                    os.remove(snapshotfile["file"].path)
                child.delete(libvirt.VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA)
            snapshotfiles = self._getSnapshotDisks(id, name)
            for snapshotfile in snapshotfiles:
                os.remove(snapshotfile["file"].path)
            snapshot.delete(libvirt.VIR_DOMAIN_UNDEFINE_SNAPSHOTS_METADATA)
        return True

    def getProgressLogger(self, id, tmpl="%s"):
        def wrapper(per):
            j.logger.log(tmpl % per,
                         1,
                         "progress",
                         tags="id:%s per:%s" % (id, per))

        return wrapper

    @lockedAction
    def _clone(self, id, filename, clonefrom):
        domain = self.connection.lookupByUUIDString(id)
        domainconfig = domain.XMLDesc()
        destination_path = os.path.join(self.templatepath, filename)
        if domain.state()[0] in [
                libvirt.VIR_DOMAIN_SHUTDOWN,
                libvirt.VIR_DOMAIN_SHUTOFF,
                libvirt.VIR_DOMAIN_CRASHED,
                libvirt.VIR_DOMAIN_PAUSED,
        ] or not self._isRootVolume(domain, clonefrom):
            size = int(
                j.system.platform.qemu_img.info(clonefrom,
                                                unit="")["virtual size"])
            fd = os.open(destination_path, os.O_RDWR | os.O_CREAT)
            try:
                os.ftruncate(fd, size)
            finally:
                os.close(fd)
            j.system.platform.qemu_img.convert(clonefrom,
                                               "raw",
                                               destination_path,
                                               "raw",
                                               createTarget=False)
        else:
            domain.undefine()
            try:
                flags = (libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY
                         | libvirt.VIR_DOMAIN_BLOCK_REBASE_COPY_RAW)
                domain.blockRebase(clonefrom, destination_path, 0, flags)
                rebasedone = False
                while not rebasedone:
                    rebasedone = self._block_job_info(domain, clonefrom)
            finally:
                self.defineXML(domainconfig)
        return destination_path

    def exportToTemplate(self, id, name, clonefrom, filename):
        if isLocked(id):
            raise Exception("Can't export a locked machine")
        domain = self.connection.lookupByUUIDString(id)
        if not clonefrom:
            domaindisks = self._get_domain_disk_file_names(domain)
            if len(domaindisks) > 0:
                clonefrom = domaindisks[0]
            else:
                raise Exception("Node image found for this machine")
        else:
            snapshotfiles = self._getSnapshotDisks(id, name)
            # we just take the first one at this moment
            if len(snapshotfiles) > 0:
                clonefrom = snapshotfiles[0]["file"].backing_file_path
            else:
                raise Exception("No snapshot found")
        destination_path = self._clone(id, filename, clonefrom)
        return destination_path

    def create_disk(self, diskxml, poolname):
        pool = self._get_pool(poolname)
        return pool.createXML(diskxml, 0)

    def _getSnapshotDisks(self, id, name):
        domain = self._get_domain(id)
        snapshot = domain.snapshotLookupByName(name, 0)
        snapshotxml = ElementTree.fromstring(snapshot.getXMLDesc(0))
        snapshotfiles = []
        disks = snapshotxml.findall("disks/disk")
        for disk in disks:
            source = disk.find("source")
            if source is not None and disk.attrib["snapshot"] == "external":
                snapshotfiles.append({
                    "name": disk.attrib["name"],
                    "file": Qcow2(source.attrib["file"])
                })
        return snapshotfiles

    def _get_pool(self, poolname):
        storagepool = self.connection.storagePoolLookupByName(poolname)
        return storagepool

    def check_storagepool(self, poolname):
        if poolname not in self.connection.listStoragePools():
            poolpath = os.path.join(self.basepath, poolname)
            if not os.path.exists(poolpath):
                os.makedirs(poolpath)
                cmd = "chattr +C %s " % poolpath
                j.system.process.execute(
                    cmd,
                    dieOnNonZeroExitCode=False,
                    outputToStdout=False,
                    useShell=False,
                    ignoreErrorOutput=False,
                )
            pool = self.env.get_template("pool.xml").render(
                poolname=poolname, basepath=self.basepath)
            self.connection.storagePoolCreateXML(pool, 0)
        return True

    def create_machine(self, machinexml):
        xml = self.modXML(machinexml)
        domain = self.connection.createXML(xml)
        return self._to_node(domain)

    def _to_node(self, domain):
        state, max_mem, memory, vcpu_count, used_cpu_time = domain.info()
        locked = isLocked(domain.UUIDString())
        extra = {
            "uuid": domain.UUIDString(),
            "os_type": domain.OSType(),
            "types": self.connection.getType(),
            "used_memory": memory / 1024,
            "vcpu_count": vcpu_count,
            "used_cpu_time": used_cpu_time,
            "locked": locked,
        }
        return {
            "id": domain.UUIDString(),
            "name": domain.name(),
            "state": state,
            "extra": extra,
            "XMLDesc": domain.XMLDesc(0),
        }

    def _to_node_list(self, domain):
        state, max_mem, memory, vcpu_count, used_cpu_time = domain.info()
        extra = {
            "uuid": domain.UUIDString(),
            "os_type": domain.OSType(),
            "types": self.connection.getType(),
            "used_memory": memory / 1024,
            "vcpu_count": vcpu_count,
            "used_cpu_time": used_cpu_time,
        }
        return {
            "id": domain.UUIDString(),
            "name": domain.name(),
            "state": state,
            "extra": extra,
        }

    def get_domain(self, uuid):
        try:
            domain = self.connection.lookupByUUIDString(uuid)
        except libvirt.libvirtError:
            return None
        return self._to_node(domain)

    def list_domains(self):
        nodes = []
        for x in self.connection.listAllDomains(0):
            nodes.append(self._to_node_list(x))
        return nodes

    def _getPool(self, domain):
        # poolname is by definition the machine name
        return self.readonly.storagePoolLookupByName(domain.name())

    def _getTemplatePool(self, templatepoolname=None):
        if not templatepoolname:
            templatepoolname = "VMStor"
        return self.readonly.storagePoolLookupByName(templatepoolname)

    def createNetwork(self, networkname, bridge):
        networkxml = self.env.get_template("network.xml").render(
            networkname=networkname, bridge=bridge)
        network = self.connection.networkDefineXML(networkxml)
        network.create()
        network.setAutostart(True)

    def checkNetwork(self, networkname):
        return networkname in self.connection.listNetworks()

    def cleanupNetwork(self, networkid, bridges):
        def destroyNetwork(name):
            try:
                # use the `name` argument; the old code read the enclosing
                # `networkname`, which is undefined when only bridges are cleaned
                network = self.connection.networkLookupByName(name)
                try:
                    network.destroy()
                except libvirt.libvirtError:
                    pass
                try:
                    network.undefine()
                except libvirt.libvirtError:
                    pass
            except libvirt.libvirtError:
                # network does not exist
                pass

        if networkid and j.system.ovsnetconfig.cleanupIfUnused(networkid):
            networkname = netclasses.VXBridge(networkid).name
            destroyNetwork(networkname)

        for bridge in bridges:
            if not bridge.startswith("ext-"):
                continue
            if j.system.ovsnetconfig.cleanupIfUnusedVlanBridge(bridge):
                destroyNetwork(bridge)

    def createVMStorSnapshot(self, name):
        vmstor_snapshot_path = j.system.fs.joinPaths(self.basepath,
                                                     "snapshots")
        if not j.system.fs.exists(vmstor_snapshot_path):
            j.system.btrfs.subvolumeCreate(self.basepath, "snapshots")
        vmstorsnapshotpath = j.system.fs.joinPaths(vmstor_snapshot_path, name)
        j.system.btrfs.snapshotReadOnlyCreate(self.basepath,
                                              vmstorsnapshotpath)
        return True

    def deleteVMStorSnapshot(self, name):
        vmstor_snapshot_path = j.system.fs.joinPaths(self.basepath,
                                                     "snapshots")
        j.system.btrfs.subvolumeDelete(vmstor_snapshot_path, name)
        return True

    def listVMStorSnapshots(self):
        vmstor_snapshot_path = j.system.fs.joinPaths(self.basepath,
                                                     "snapshots")
        return j.system.btrfs.subvolumeList(vmstor_snapshot_path)

    def reset(self, id, xml):
        if isLocked(id):
            raise Exception("Can't reboot a locked machine")
        domain = self._get_domain(id)
        if domain:
            if domain.state()[0] in [
                    libvirt.VIR_DOMAIN_SHUTDOWN,
                    libvirt.VIR_DOMAIN_SHUTOFF,
                    libvirt.VIR_DOMAIN_CRASHED,
            ]:
                domain.create()
            else:
                domain.reset()
        else:
            self.create(id, xml)
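
check_machine in the class above is an admission test: a new VM may start only if its memory fits within host memory after subtracting what running domains already claim and the configured reserved_mem. The decision in isolation, with hypothetical numbers in MiB:

def fits(host_mb, running_max_mb, reserved_mb, new_vm_mb):
    # mirrors: reject when (totalrunningmax + memory) > (hostmem - reserved_mem)
    return (running_max_mb + new_vm_mb) <= (host_mb - reserved_mb)

print(fits(host_mb=131072, running_max_mb=98304, reserved_mb=4096, new_vm_mb=16384))  # True
print(fits(host_mb=131072, running_max_mb=98304, reserved_mb=4096, new_vm_mb=32768))  # False
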
Example #12
def action():
    from CloudscalerLibcloud.compute.drivers.libvirt_driver import (
        OpenvStorageVolume,
        OpenvStorageISO,
        convertchar,
    )
    from CloudscalerLibcloud.utils.gridconfig import GridConfig
    from xml.etree import ElementTree

    def get_volume(disk):
        volumeclass = OpenvStorageVolume
        if disk["type"] == "M":
            volumeclass = OpenvStorageISO
        driver = None
        volume = volumeclass(
            id=disk["referenceId"],
            name=disk["name"],
            size=disk["sizeMax"],
            driver=driver,
            iotune=disk.get("iotune", {}),
        )
        return volume

    def get_domain_disks(dom):
        if isinstance(dom, ElementTree.Element):
            xml = dom
        elif isinstance(dom, basestring):
            xml = ElementTree.fromstring(dom)
        else:
            raise TypeError("dom must be an Element or an XML string")
        disks = xml.findall("devices/disk")
        for disk in disks:
            if disk.attrib["device"] in ("disk", "cdrom"):
                yield disk

    def get_disk_by_name(disks, name):
        for disk, volume in disks:
            if volume.name == name:
                return disk, volume

    ccl = j.clients.osis.getNamespace("cloudbroker")
    config = GridConfig()
    ovs_credentials = config.get("ovs_credentials")
    connection = {
        "ips": ovs_credentials["ips"],
        "client_id": ovs_credentials["client_id"],
        "client_secret": ovs_credentials["client_secret"],
    }
    acl = j.clients.agentcontroller.get()

    def get_disk_guid(gid, name):
        devicename = "/{}.raw".format(name)
        args = {"ovs_connection": connection, "diskpath": devicename}
        job = acl.executeJumpscript(
            "greenitglobe",
            "lookup_disk_by_path",
            gid=gid,
            role="storagemaster",
            args=args,
        )
        if job["state"] != "OK":
            return False
        return job["result"]

    lcl = j.clients.osis.getNamespace("libcloud")
    machines = ccl.vmachine.search(
        {"status": {
            "$nin": ["ERROR", "DESTROYED"]
        }}, size=0)[1:]
    for machine in machines:
        j.console.info("Updating machine {}".format(machine["name"]))
        xmlkey = "domain_{}".format(machine["referenceId"])
        if not lcl.libvirtdomain.exists(xmlkey):
            continue
        xml = lcl.libvirtdomain.get(xmlkey)
        xmldisks = list(get_domain_disks(xml))
        vmdisks = [
            (disk, get_volume(disk))
            for disk in ccl.disk.search({"id": {
                "$in": machine["disks"]
            }})[1:]
        ]
        firstdisk, firstvolume = vmdisks[0]
        vmchanges = False
        for xmldisk in xmldisks:
            ismeta = xmldisk.attrib["device"] == "cdrom"
            source = xmldisk.find("source")
            name = source.attrib["name"]
            diskpair = get_disk_by_name(vmdisks, name)
            if not diskpair:
                if not ismeta:
                    # what is this?
                    continue

                diskguid = get_disk_guid(firstdisk["gid"], name)
                if diskguid is False:
                    continue

                vmchanges = True
                disk = ccl.disk.new()
                disk.name = "Metadata iso"
                disk.type = "M"
                disk.stackId = firstdisk["stackId"]
                disk.accountId = firstdisk["accountId"]
                disk.gid = firstdisk["gid"]
                disk.referenceId = (firstdisk["referenceId"].replace(
                    firstvolume.vdiskguid,
                    diskguid).replace(firstvolume.name, name))
                diskid = ccl.disk.set(disk)[0]
                machine["disks"].append(diskid)
            else:
                # lets fix the disk order
                target = xmldisk.find("target")
                dev = target.attrib["dev"]
                disk, volume = diskpair
                disk["order"] = convertchar(dev[2:])
                ccl.disk.set(disk)
        for nic in machine["nics"]:
            if nic.get("target") and not nic["deviceName"]:
                nic["deviceName"] = nic["target"]
                vmchanges = True

        if vmchanges:
            ccl.vmachine.set(machine)
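
Both variants of this job key disks on Open vStorage referenceIds of the form protocol://host:port/path@vdiskguid (Example #11's get_domain_disk parses the same shape with urlparse). A sketch of pulling the pieces apart; the sample URL is hypothetical:

try:
    from urllib.parse import urlparse  # Python 3
except ImportError:
    from urlparse import urlparse      # Python 2

ref = "openvstorage+tcp://10.0.0.5:26203/myvm/bootdisk-myvm@1234-abcd"
name = urlparse(ref).path.split("@")[0].strip("/").split(":")[0]
vdiskguid = ref.split("@")[1] if "@" in ref else None
print(name, vdiskguid)  # myvm/bootdisk-myvm 1234-abcd
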
Example #13
class CSLibvirtNodeDriver(object):

    _ovsdata = {}
    type = "CSLibvirt"

    NODE_STATE_MAP = {
        0: NodeState.TERMINATED,
        1: NodeState.RUNNING,
        2: NodeState.PENDING,
        3: NodeState.TERMINATED,  # paused
        4: NodeState.TERMINATED,  # shutting down
        5: NodeState.TERMINATED,
        6: NodeState.UNKNOWN,  # crashed
        7: NodeState.UNKNOWN,  # last
    }

    def __init__(self, stack):
        self._rndrbn_vnc = 0
        self.id = int(stack.referenceId)
        self.gid = stack.gid
        self.name = "libvirt"
        self.uri = stack.apiUrl
        self.stack = stack
        self.env = env
        self.scl = j.clients.osis.getNamespace("system")
        grid = self.scl.grid.get(self.gid)
        self.node = self.scl.node.get(self.id)
        self.config = GridConfig(grid, self.node.memory / 1024.)
        # preload ovs_credentials and ovs_connection
        # this is to detect errors earlier if there is
        # some misconfiguration
        self.ovs_connection

    backendconnection = connection.DummyConnection()

    @property
    def ovs_credentials(self):
        cachekey = "credentials_{}".format(self.gid)
        if cachekey not in self._ovsdata:
            credentials = self.config.get("ovs_credentials")
            self._ovsdata[cachekey] = credentials
        return self._ovsdata[cachekey]

    @property
    def ovs_connection(self):
        cachekey = "ovs_connection_{}".format(self.gid)
        if cachekey not in self._ovsdata:
            connection = {
                "ips": self.ovs_credentials["ips"],
                "client_id": self.ovs_credentials["client_id"],
                "client_secret": self.ovs_credentials["client_secret"],
            }
            self._ovsdata[cachekey] = connection
        return self._ovsdata[cachekey]

    @property
    def ovs_settings(self):
        cachekey = "ovs_settings_{}".format(self.gid)
        if cachekey not in self._ovsdata:
            grid_settings = self.config.get("ovs_settings", dict())
            settings = dict(
                vpool_vmstor_metadatacache=grid_settings.get(
                    "vpool_vmstor_metadatacache", 20
                ),
                vpool_data_metadatacache=grid_settings.get(
                    "vpool_data_metadatacache", 20
                ),
            )
            self._ovsdata[cachekey] = settings
        return self._ovsdata[cachekey]

    def getVolumeId(self, vdiskguid, edgeclient, name):
        username = self.ovs_credentials.get("edgeuser")
        password = self.ovs_credentials.get("edgepassword")
        return getOpenvStroageVolumeId(
            edgeclient["storageip"],
            edgeclient["edgeport"],
            name,
            vdiskguid,
            edgeclient.get("protocol", "tcp"),
            username,
            password,
        )

    @property
    def all_edgeclients(self):
        return self._execute_agent_job(
            "listedgeclients", role="storagemaster", ovs_connection=self.ovs_connection
        )

    def list_vdisks(self, storagerouterguid):
        return self._execute_agent_job(
            "listvdisks",
            role="storagemaster",
            ovs_connection=self.ovs_connection,
            storagerouterguid=storagerouterguid,
        )

    @property
    def edgeclients(self):
        edgeclients = filter(
            lambda client: client["status"] == "OK", self.all_edgeclients
        )

        activesessions = (
            self.backendconnection.agentcontroller_client.listActiveSessions()
        )
        activenodes = self.scl.node.search(
            {"status": "ENABLED", "gid": self.gid, "roles": "storagedriver"}
        )[1:]

        def get_active_node(storageip):
            for activenode in activenodes:
                if storageip in activenode["ipaddr"]:
                    return activenode
            return None

        def filter_clients(client):
            node = get_active_node(client["storageip"])
            if node is None:
                return False
            client["nid"] = node["id"]
            return (node["gid"], node["id"]) in activesessions

        return filter(filter_clients, edgeclients)

    def getNextEdgeClient(self, vpool, edgeclients=None):
        clients = edgeclients or self.edgeclients[:]
        clients = filter(lambda x: x["vpool"] == vpool, clients)
        if not clients:
            raise exceptions.ServiceUnavailable(
                "No storagerouter available for vpool {}".format(vpool)
            )
        return sorted(clients, key=lambda client: client["vdiskcount"])[0]

    def getEdgeClientFromVolume(self, volume):
        edgeclients = self.edgeclients[:]
        for edgeclient in edgeclients:
            if (
                volume.edgehost == edgeclient["storageip"]
                and volume.edgeport == edgeclient["edgeport"]
            ):
                return edgeclient, edgeclients

    def getBestDataVpool(self):
        edgeclients = self.edgeclients[:]
        diskspervpool = {}
        for edgeclient in edgeclients:
            diskspervpool[edgeclient["vpool"]] = (
                diskspervpool.setdefault(edgeclient["vpool"], 0)
                + edgeclient["vdiskcount"]
            )
        if len(diskspervpool) > 1:
            for vpool in list(diskspervpool.keys()):
                if not vpool.startswith("data"):
                    diskspervpool.pop(vpool)
        # get vpool with least vdiskcount
        return (
            sorted(diskspervpool.items(), key=lambda vpool: vpool[1])[0][0],
            edgeclients,
        )

    def set_backend(self, connection):
        """
        Set a connection to the cloudbroker backend, this is used
        to get all the supported images and sizes
        """
        self.backendconnection = connection

    def _execute_agent_job(
        self, name_, id=None, wait=True, queue=None, role=None, timeout=600, **kwargs
    ):
        if not id and not role:
            id = int(self.id)

        elif id is None:
            id = 0
        else:
            id = id and int(id)

        tags = getJobTags()
        job = self.backendconnection.agentcontroller_client.executeJumpscript(
            "greenitglobe",
            name_,
            nid=id,
            role=role,
            gid=self.gid,
            wait=wait,
            queue=queue,
            args=kwargs,
            tags=tags,
        )
        if wait and job["state"] != "OK":
            if job["state"] == "NOWORK":
                j.errorconditionhandler.raiseOperationalWarning(
                    "Could not find agent with nid:%s" % id
                )
            elif job["state"] == "TIMEOUT":
                j.errorconditionhandler.raiseOperationalWarning(
                    "Job failed to execute on time"
                )
            else:
                j.errorconditionhandler.raiseOperationalWarning(
                    "Could not execute %s for nid:%s, error was:%s"
                    % (name_, id, job["result"])
                )

            raise exceptions.ServiceUnavailable(
                "Could not perform action: {name} at this time".format(name=name_)
            )
        if wait:
            return job["result"]
        else:
            return job

    def _create_disk(self, vm_id, disksize, image, disk_role="base"):
        edgeclient = self.getNextEdgeClient("vmstor")

        diskname = "{0}/bootdisk-{0}".format(vm_id)
        kwargs = {
            "ovs_connection": self.ovs_connection,
            "storagerouterguid": edgeclient["storagerouterguid"],
            "size": disksize,
            "templateguid": image.referenceId,
            "diskname": diskname,
            "pagecache_ratio": self.ovs_settings["vpool_vmstor_metadatacache"],
        }

        try:
            vdiskguid = self._execute_agent_job(
                "creatediskfromtemplate", role="storagedriver", **kwargs
            )
        except (Exception, exceptions.ServiceUnavailable) as ex:
            raise StorageException(ex.message, ex)

        volumeid = self.getVolumeId(
            vdiskguid=vdiskguid, edgeclient=edgeclient, name=diskname
        )
        return (
            OpenvStorageVolume(id=volumeid, name=diskname, size=disksize, driver=self),
            edgeclient,
        )

    def create_volume(self, size, name, data=True, dev=""):
        if data:
            vpoolname, edgeclients = self.getBestDataVpool()
            edgeclient = self.getNextEdgeClient(vpoolname, edgeclients)
            diskname = "volumes/volume_{}".format(name)
        else:
            edgeclient = self.getNextEdgeClient("vmstor")
            diskname = name
        kwargs = {
            "ovs_connection": self.ovs_connection,
            "vpoolguid": edgeclient["vpoolguid"],
            "storagerouterguid": edgeclient["storagerouterguid"],
            "diskname": diskname,
            "size": size,
            "pagecache_ratio": self.ovs_settings["vpool_data_metadatacache"],
        }
        try:
            vdiskguid = self._execute_agent_job(
                "createdisk", role="storagedriver", **kwargs
            )
        except (Exception, exceptions.ServiceUnavailable) as ex:
            raise StorageException(ex.message, ex)
        volumeid = self.getVolumeId(
            vdiskguid=vdiskguid, edgeclient=edgeclient, name=diskname
        )
        stvol = OpenvStorageVolume(id=volumeid, size=size, name=diskname, driver=self)
        stvol.dev = dev
        return stvol

    def create_volumes(self, volumes):
        stvolumes = []
        for volume in volumes:
            stvol = self.create_volume(
                volume["size"],
                volume["name"],
                volume.get("data", True),
                volume.get("dev", ""),
            )
            stvolumes.append(stvol)
        return stvolumes

    def attach_volume(self, node, volume):
        self._execute_agent_job(
            "attach_device", queue="hypervisor", xml=str(volume), machineid=node.id
        )
        return True

    def destroy_volume(self, volume):
        return self.destroy_volumes_by_guid([volume.vdiskguid])

    def get_volume_from_xml(self, xmldom, volume):
        devices = xmldom.find("devices")
        for disk in devices.iterfind("disk"):
            if disk.attrib["device"] != "disk":
                continue
            source = disk.find("source")
            if source.attrib.get("dev", source.attrib.get("name")) == volume.name:
                return devices, disk
        return None, None

    def detach_volume(self, volume):
        node = volume.extra["node"]
        self._execute_agent_job(
            "detach_device", queue="hypervisor", xml=str(volume), machineid=node.id
        )
        return node

    def _create_metadata_iso(self, edgeclient, name, password, type, userdata=None):
        customuserdata = userdata or {}
        if isinstance(customuserdata, basestring):
            customuserdata = yaml.load(customuserdata)
        if type not in ["WINDOWS", "Windows"]:
            memrule = 'SUBSYSTEM=="memory", ACTION=="add", TEST=="state", ATTR{state}=="offline", ATTR{state}="online"'
            cpurule = 'SUBSYSTEM=="cpu", ACTION=="add", TEST=="online", ATTR{online}=="0", ATTR{online}="1"'
            runcmds = []
            runcmds.append(
                "echo '{}' > /etc/udev/rules.d/66-hotplug.rules".format(memrule)
            )
            runcmds.append(
                "echo '{}' >> /etc/udev/rules.d/66-hotplug.rules".format(cpurule)
            )
            runcmds.append(["udevadm", "control", "-R"])

            userdata = {
                "password": password,
                "users": [
                    {
                        "name": "cloudscalers",
                        "plain_text_passwd": password,
                        "lock-passwd": False,
                        "shell": "/bin/bash",
                        "sudo": "ALL=(ALL) ALL",
                    }
                ],
                "ssh_pwauth": True,
                "runcmd": runcmds,
                "manage_etc_hosts": True,
                "chpasswd": {"expire": False},
            }
            metadata = {"local-hostname": name}
            if "users" in customuserdata:
                users = customuserdata.pop("users", [])
                userdata["users"].extend(users)
            userdata.update(customuserdata)
        else:
            userdata = {}
            metadata = {"admin_pass": password, "hostname": name}

        diskpath = "{0}/cloud-init-{0}".format(name)
        kwargs = {
            "ovs_connection": self.ovs_connection,
            "vpoolguid": edgeclient["vpoolguid"],
            "storagerouterguid": edgeclient["storagerouterguid"],
            "diskname": diskpath,
            "size": 0.1,
            "pagecache_ratio": self.ovs_settings["vpool_data_metadatacache"],
        }
        try:
            vdiskguid = self._execute_agent_job(
                "createdisk", role="storagedriver", **kwargs
            )
        except Exception as ex:
            raise StorageException(ex.message, ex)

        volumeid = self.getVolumeId(
            vdiskguid=vdiskguid, edgeclient=edgeclient, name=diskpath
        )
        isovolume = OpenvStorageISO(id=volumeid, name=diskpath, size=0, driver=self)
        try:
            volumeid = self._execute_agent_job(
                "createmetaiso",
                role="storagedriver",
                ovspath=volumeid,
                metadata=metadata,
                userdata=userdata,
                type=type,
            )
        except Exception as ex:
            raise StorageException(ex.message, ex, volumes=[isovolume])
        return isovolume
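
    # A sketch of the cloud-init user-data the non-Windows branch above ships
    # on the config ISO (values hypothetical; the dicts are serialized by the
    # createmetaiso jumpscript, presumably into "#cloud-config" YAML plus a
    # meta-data file holding local-hostname):
    #
    #     #cloud-config
    #     password: <password>
    #     ssh_pwauth: true
    #     manage_etc_hosts: true
    #     chpasswd: {expire: false}
    #     users:
    #       - name: cloudscalers
    #         plain_text_passwd: <password>
    #         shell: /bin/bash
    #         sudo: ALL=(ALL) ALL
    #     runcmd:
    #       - echo '<memrule>' > /etc/udev/rules.d/66-hotplug.rules
    #       - echo '<cpurule>' >> /etc/udev/rules.d/66-hotplug.rules
    #       - [udevadm, control, -R]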

    def generate_password_hash(self, password):
        def generate_salt():
            salt_set = (
                "abcdefghijklmnopqrstuvwxyz" "ABCDEFGHIJKLMNOPQRSTUVWXYZ" "0123456789./"
            )
            salt = 16 * " "
            return "".join([random.choice(salt_set) for c in salt])

        salt = generate_salt()
        return crypt.crypt(password, "$6$" + salt)
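
    # Quick check of the SHA-512 crypt scheme used above (Unix-only): the
    # resulting hash embeds its own salt, so re-crypting with the hash as
    # the salt argument verifies a password:
    #
    #     hashed = driver.generate_password_hash("secret")  # "$6$<salt>$..."
    #     crypt.crypt("secret", hashed) == hashed           # -> True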

    def get_host_memory(self):
        return self.node.memory - self.config.get("reserved_mem")

    def init_node(
        self,
        name,
        size,
        networkid=None,
        volumes=None,
        imagetype="",
        boottype="bios",
        machineId=None,
    ):
        volumes = volumes or []
        macaddress = self.backendconnection.getMacAddress(self.gid)

        networkname = "space_{:04x}".format(networkid)
        nodeid = str(uuid.uuid4())
        interfaces = [
            NetworkInterface(
                macaddress, "{}-{:04x}".format(name, networkid), "bridge", networkname
            )
        ]
        netinfo = [{"id": networkid, "type": "vxlan"}]
        extra = {
            "volumes": volumes,
            "ifaces": interfaces,
            "imagetype": imagetype,
            "size": size,
            "bootdev": "hd",
            "boottype": boottype,
            "machineId": machineId,
        }
        node = Node(
            id=nodeid,
            name=name,
            state=NodeState.PENDING,
            public_ips=[],
            private_ips=[],
            driver=self,
            extra=extra,
        )
        machinexml = self.get_xml(node)

        # 0 means default behaviour, i.e. the machine is auto-started.
        result = self._execute_agent_job(
            "createmachine",
            queue="hypervisor",
            machinexml=machinexml,
            vmlog_dir=vmlog_dir,
            netinfo=netinfo,
        )
        if not result or result == -1:
            # The agent is not registered with the agentcontroller, or the
            # machine could not be provisioned (e.g. not enough resources);
            # in the latter case clean up the half-created machine.
            if result == -1:
                self._execute_agent_job(
                    "deletemachine",
                    queue="hypervisor",
                    machineid=None,
                    machinexml=machinexml,
                )
            raise NotEnoughResources("Failed to create machine", volumes)

        node = self._from_agent_to_node(result, volumes=volumes)
        return node
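
    # The bridge and interface names built above are hex-padded on the
    # network id, e.g. for networkid=300 (0x12c):
    #
    #     "space_{:04x}".format(300)         # -> "space_012c" (bridge)
    #     "{}-{:04x}".format("vm-42", 300)   # -> "vm-42-012c" (iface target)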

    def ex_create_template(self, node, name, new_vdiskguid):
        bootvolume = node.extra["volumes"][0]
        kwargs = {
            "ovs_connection": self.ovs_connection,
            "diskguid": bootvolume.vdiskguid,
            "new_vdiskguid": new_vdiskguid,
            "template_name": name,
        }
        image_path = self._execute_agent_job(
            "createtemplate", queue="io", role="storagedriver", **kwargs
        )
        return image_path

    def ex_delete_template(self, templateid):
        kwargs = {
            "ovs_connection": self.ovs_connection,
            "diskguid": str(uuid.UUID(templateid)),
        }
        self._execute_agent_job(
            "deletetemplate", queue="io", role="storagedriver", **kwargs
        )

    def ex_get_node_details(self, node_id):
        driver = DummyNodeDriver(0)
        node = Node(
            id=node_id,
            name="",
            state=NodeState.RUNNING,
            public_ips=[],
            private_ips=[],
            driver=driver,
        )
        agentnode = self._get_domain_for_node(node)
        if agentnode is None:
            return None
        node = self._from_agent_to_node(agentnode)
        return node

    def get_disk_guids(self, node, type=None):
        diskguids = []
        for volume in node.extra["volumes"]:
            if (type is not None and volume.type != type) or isinstance(
                volume, PhysicalVolume
            ):
                continue
            diskguids.append(volume.vdiskguid)
        return diskguids

    def ex_create_snapshot(self, node, name):
        diskguids = self.get_disk_guids(node, type="disk")
        kwargs = {
            "diskguids": diskguids,
            "ovs_connection": self.ovs_connection,
            "name": name,
        }
        return self._execute_agent_job(
            "createsnapshots", role="storagedriver", **kwargs
        )

    def ex_list_snapshots(self, node):
        diskguids = self.get_disk_guids(node, type="disk")
        kwargs = {"diskguids": diskguids, "ovs_connection": self.ovs_connection}
        return self._execute_agent_job("listsnapshots", role="storagedriver", **kwargs)

    def ex_delete_snapshot(self, node, timestamp=None, name=None):
        diskguids = self.get_disk_guids(node, type="disk")
        kwargs = {
            "diskguids": diskguids,
            "ovs_connection": self.ovs_connection,
            "timestamp": timestamp,
            "name": name,
        }
        return self._execute_agent_job(
            "deletesnapshot", wait=False, role="storagedriver", **kwargs
        )

    def ex_rollback_snapshot(self, node, timestamp, name):
        diskguids = self.get_disk_guids(node, type="disk")
        kwargs = {
            "diskguids": diskguids,
            "timestamp": timestamp,
            "name": name,
            "ovs_connection": self.ovs_connection,
        }
        return self._execute_agent_job(
            "rollbacksnapshot", role="storagedriver", **kwargs
        )

    def _get_domain_disk_file_names(self, dom, disktype="disk"):
        if isinstance(dom, ElementTree.Element):
            xml = dom
        elif isinstance(dom, basestring):
            xml = ElementTree.fromstring(dom)
        else:
            raise RuntimeError("Invalid type %s for parameter dom" % type(dom))
        disks = xml.findall("devices/disk")
        diskfiles = list()
        for disk in disks:
            if disktype is None or disk.attrib["device"] == disktype:
                source = disk.find("source")
                if source is not None:
                    if source.attrib.get("protocol") == "openvstorage":
                        ovsdisk = OpenvStorageVolumeFromXML(disk, self)
                        diskfiles.append(ovsdisk.vdiskguid)
                    elif "dev" in source.attrib:
                        diskfiles.append(source.attrib["dev"])
                    elif "file" in source.attrib:
                        diskfiles.append(source.attrib["file"])
        return diskfiles

    def _get_snapshot_disk_file_names(self, xml):
        xml = ElementTree.fromstring(xml)
        domain = xml.findall("domain")[0]
        return self._get_domain_disk_file_names(domain)

    def destroy_node(self, node):
        xml = self.get_xml(node)
        self._execute_agent_job(
            "deletemachine", queue="hypervisor", machineid=node.id, machinexml=xml
        )
        return True

    def ex_limitio(self, volume):
        node = volume.extra["node"]
        if node.state == LibvirtState.RUNNING:
            return self._execute_agent_job(
                "limitdiskio",
                queue="hypervisor",
                machineid=node.id,
                disks=[volume.id],
                iotune=volume.iotune,
            )

    def destroy_volumes_by_guid(self, diskguids):
        kwargs = {"diskguids": diskguids, "ovs_connection": self.ovs_connection}
        try:
            self._execute_agent_job("deletedisks", role="storagedriver", **kwargs)
        except exceptions.ServiceUnavailable as rError:
            j.errorconditionhandler.processPythonExceptionObject(
                rError,
                message="Failed to delete disks may be they are deleted from the storage node",
            )

    def ex_get_console_url(self, node):
        urls = self.backendconnection.listVNC(self.gid)
        id_ = self._rndrbn_vnc % len(urls)
        url = urls[id_]
        self._rndrbn_vnc += 1
        token = self.backendconnection.storeInfo(self.ex_get_console_output(node), 300)
        return url + "%s" % token

    def list_nodes(self):
        noderesult = []
        nodes = self.backendconnection.listNodes()
        result = self._execute_agent_job("listmachines", queue="default")
        for x in result:
            if x["id"] in nodes:
                ipaddress = nodes[x["id"]]["ipaddress"]
            else:
                ipaddress = ""
            noderesult.append(self._from_agent_to_node(x, ipaddress))
        return noderesult

    def ex_stop_node(self, node, force=False):
        machineid = node.id
        return self._execute_agent_job(
            "stopmachine", queue="hypervisor", machineid=machineid, force=force
        )

    def ex_suspend_node(self, node):
        machineid = node.id
        return self._execute_agent_job(
            "suspendmachine", queue="hypervisor", machineid=machineid
        )

    def ex_resume_node(self, node):
        machineid = node.id
        return self._execute_agent_job(
            "resumemachine", queue="hypervisor", machineid=machineid
        )

    def ex_pause_node(self, node):
        machineid = node.id
        return self._execute_agent_job(
            "pausemachine", queue="hypervisor", machineid=machineid
        )

    def ex_unpause_node(self, node):
        machineid = node.id
        return self._execute_agent_job(
            "unpausemachine", queue="hypervisor", machineid=machineid
        )

    def ex_soft_reboot_node(self, node):
        xml = self.get_xml(node)
        netinfo = self.get_net_info(node)
        return self._execute_agent_job(
            "softrebootmachine",
            queue="hypervisor",
            machineid=node.id,
            xml=xml,
            netinfo=netinfo,
        )

    def ex_hard_reboot_node(self, node):
        xml = self.get_xml(node)
        netinfo = self.get_net_info(node)
        return self._execute_agent_job(
            "hardrebootmachine",
            queue="hypervisor",
            machineid=node.id,
            xml=xml,
            netinfo=netinfo,
        )

    def get_net_info(self, node):
        netinfo = []
        for interface in node.extra["ifaces"]:
            if interface.type == "private":
                netinfo.append({"type": "vxlan", "id": interface.networkId})
            else:
                netinfo.append({"type": "vlan", "id": interface.networkId})
        return netinfo

    def get_xml(self, node):
        machinetemplate = self.env.get_template("machine.xml")
        hostmemory = self.get_host_memory()
        timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
        logfile = "{logdir}/{id}.{timestamp}.log".format(
            logdir=vmlog_dir,
            id=node.extra.get("machineId", node.id),
            timestamp=timestamp,
        )
        machinexml = machinetemplate.render(
            {"node": node, "hostmemory": hostmemory, "logfile": logfile}
        )
        return machinexml

    def ex_start_node(self, node):
        machinexml = self.get_xml(node)
        netinfo = self.get_net_info(node)
        self._execute_agent_job(
            "startmachine",
            queue="hypervisor",
            machineid=node.id,
            xml=machinexml,
            vmlog_dir=vmlog_dir,
            netinfo=netinfo,
        )
        return True

    def ex_get_console_output(self, node):
        domain = self._get_domain_for_node(node=node)
        xml = ElementTree.fromstring(domain["XMLDesc"])
        graphics = xml.find("devices/graphics")
        info = dict()
        info["port"] = int(graphics.attrib["port"])
        info["type"] = graphics.attrib["type"]
        info["ipaddress"] = self._get_connection_ip()
        return info
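
    # Sketch of the domain XML fragment parsed above (shape assumed):
    #
    #     <devices>
    #       <graphics type="vnc" port="5901" listen="0.0.0.0"/>
    #     </devices>
    #
    # which would yield {"port": 5901, "type": "vnc",
    # "ipaddress": <netloc of self.uri>}.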

    def ex_import(self, size, vmid, networkid, disks):
        name = "vm-%s" % vmid
        volumes = []
        for i, disk in enumerate(disks):
            path = disk["path"]
            parsedurl = urlparse.urlparse(path)
            if parsedurl.netloc == "":
                path = path.replace(
                    "{}:".format(parsedurl.scheme), "{}://".format(parsedurl.scheme)
                )
            volume = OpenvStorageVolume(
                id="%s@%s" % (path, disk["guid"]),
                name="N/A",
                size=disk["size"],
                driver=self,
            )
            volume.dev = "vd%s" % convertnumber(i + 1)
            volumes.append(volume)
        return self.init_node(
            name, size, networkid=networkid, volumes=volumes, machineId=vmid
        )
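
    # The netloc fix-up above upgrades scheme-only disk references to real
    # URLs, e.g. (Python 2 urlparse; path hypothetical):
    #
    #     path = "openvstorage+tcp:10.0.0.1:26203/vm-1/bootdisk-vm-1"
    #     urlparse.urlparse(path).netloc   # -> "" (no "//" after the scheme)
    #     # after the replace:
    #     # "openvstorage+tcp://10.0.0.1:26203/vm-1/bootdisk-vm-1"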

    def ex_clone_disks(self, diskmapping, disks_snapshots=None):
        disks_snapshots = disks_snapshots or {}
        disks = []
        diskvpool = {}
        for volume, diskname in diskmapping:
            source_edgeclient, edgeclients = self.getEdgeClientFromVolume(volume)
            edgeclient = self.getNextEdgeClient(source_edgeclient["vpool"], edgeclients)
            diskinfo = {
                "clone_name": diskname,
                "diskguid": volume.vdiskguid,
                "storagerouterguid": edgeclient["storagerouterguid"],
            }
            if disks_snapshots.get(volume.vdiskguid, None):
                diskinfo["snapshotguid"] = disks_snapshots[volume.vdiskguid]
            diskvpool[volume.vdiskguid] = edgeclient
            disks.append(diskinfo)

        kwargs = {"ovs_connection": self.ovs_connection, "disks": disks}
        newdisks = self._execute_agent_job("clonedisks", role="storagedriver", **kwargs)
        volumes = []
        for idx, diskinfo in enumerate(disks):
            newdiskguid, vpoolguid = newdisks[idx]
            edgeclient = diskvpool[diskinfo["diskguid"]]
            volumeid = self.getVolumeId(newdiskguid, edgeclient, diskinfo["clone_name"])
            volume = OpenvStorageVolume(id=volumeid, name="N/A", size=-1, driver=self)
            volume.dev = "vd%s" % convertnumber(idx)
            volume.edgeclient = edgeclient
            volumes.append(volume)
        return volumes

    def ex_clone(
        self,
        node,
        password,
        imagetype,
        size,
        vmid,
        networkid,
        diskmapping,
        disks_snapshots=None,
    ):
        disks_snapshots = disks_snapshots or {}
        name = "vm-%s" % vmid
        volumes = self.ex_clone_disks(diskmapping, disks_snapshots)
        volumes.append(
            self._create_metadata_iso(volumes[0].edgeclient, name, password, imagetype)
        )
        return self.init_node(
            name,
            size,
            networkid=networkid,
            volumes=volumes,
            imagetype=imagetype,
            machineId=vmid,
        )

    def ex_extend_disk(self, diskguid, newsize, disk_info=None):
        if disk_info is None:
            disk_info = {"machineRefId": None}
        res = self._execute_agent_job(
            "extend_disk",
            ovs_connection=self.ovs_connection,
            size=newsize,
            diskguid=diskguid,
            disk_info=disk_info,
        )
        return res

    def ex_export(self, node, exportname, uncpath, emailaddress):
        machineid = node.id
        return self._execute_agent_job(
            "backupmachine",
            wait=False,
            machineid=machineid,
            backupname=exportname,
            location=uncpath,
            emailaddress=emailaddress,
        )

    def ex_is_storage_action_running(self, node):
        """
        Check whether a storage action that interacts with the machine's
        disks is currently running
        """
        machineid = node.id
        return self._execute_agent_job(
            "checkstorageaction", wait=True, machineid=machineid
        )

    def _get_connection_ip(self):
        uri = urlparse.urlparse(self.uri)
        return uri.netloc

    def _get_domain_for_node(self, node):
        return self._execute_agent_job(
            "getmachine", queue="hypervisor", machineid=node.id
        )

    def _from_agent_to_node(self, domain, publicipaddress="", volumes=None):
        xml = domain.get("XMLDesc")
        node = Node(
            id=domain["id"],
            public_ips=[],
            name=domain["name"],
            private_ips=[],
            state=domain["state"],
            driver=self,
        )
        if xml:
            node = self._from_xml_to_node(xml, node)
        node.state = domain["state"]
        extra = domain["extra"]
        node.extra.update(extra)
        if volumes:
            node.extra["volumes"] = volumes
        if publicipaddress:
            node.public_ips.append(publicipaddress)
        return node

    def _from_xml_to_node(self, xml, node=None):
        dom = ElementTree.fromstring(xml)
        state = NodeState.UNKNOWN
        volumes = list()
        ifaces = list()
        for disk in dom.findall("devices/disk"):
            source = disk.find("source")
            if disk.attrib["device"] != "disk" or source.attrib.get("dev"):
                continue
            volume = OpenvStorageVolumeFromXML(disk, self)
            volumes.append(volume)
        for nic in dom.findall("devices/interface"):
            mac = None
            macelement = nic.find("mac")
            source = nic.find("source")
            if macelement is not None:
                mac = macelement.attrib["address"]
            target = nic.find("target").attrib["dev"]
            bridgename = (
                source.attrib["bridge"]
                if source.attrib.get("bridge")
                else source.attrib["network"]
            )
            if bridgename.startswith(("ext-", "public")):
                bridgetype = "PUBLIC"
            else:
                bridgetype = "bridge"
            ifaces.append(
                NetworkInterface(
                    mac=mac, target=target, type=bridgetype, bridgename=bridgename
                )
            )
        name = dom.find("name").text
        bootdev = dom.find("os/boot").attrib["dev"]
        extra = {"volumes": volumes, "ifaces": ifaces, "bootdev": bootdev}
        if node is None:
            id = dom.find("uuid").text
            node = Node(
                id=id,
                name=name,
                state=state,
                public_ips=[],
                private_ips=[],
                driver=self,
                extra=extra,
            )
        else:
            node.extra.update(extra)
        return node

    def ex_snapshots_can_be_deleted_while_running(self):
        """
        For libvirt, a snapshot can't be deleted while the machine is running
        """
        return False

    def attach_public_network(self, node, vlan, ipcidr):
        """
        Attach Virtual machine to the cpu node public network
        """
        macaddress = self.backendconnection.getMacAddress(self.gid)
        target = "%s-ext" % (node.name)
        bridgename = j.system.ovsnetconfig.getVlanBridge(vlan)
        interface = NetworkInterface(
            mac=macaddress,
            target=target,
            type="PUBLIC",
            bridgename=bridgename,
            networkId=vlan,
        )
        self._execute_agent_job(
            "attach_device",
            queue="hypervisor",
            xml=str(interface),
            machineid=node.id,
            ipcidr=ipcidr,
        )
        return interface

    def detach_public_network(self, node):
        for iface in node.extra["ifaces"]:
            if iface.type == "PUBLIC":
                self._execute_agent_job(
                    "detach_device",
                    queue="hypervisor",
                    xml=str(iface),
                    machineid=node.id,
                )

    def ex_resize(self, node, extramem, vcpus):
        machinetemplate = self.env.get_template("memory.xml")
        result = True
        if extramem > 0:
            memory = machinetemplate.render({"memory": extramem})
            result = (
                self._execute_agent_job(
                    "attach_device", queue="hypervisor", xml=memory, machineid=node.id
                )
                is not False
            )
        if vcpus is not None:
            result &= self._execute_agent_job(
                "change_vcpus", queue="hypervisor", vcpus=vcpus, machineid=node.id
            )

        if result is False:
            return False
        return True

    def ex_migrate(self, node, sourceprovider, force=False):
        domainxml = self.get_xml(node)
        netinfo = self.get_net_info(node)
        return self._execute_agent_job(
            "vm_livemigrate",
            vm_id=node.id,
            sourceurl=sourceprovider.uri,
            force=force,
            domainxml=domainxml,
            vmlog_dir=vmlog_dir,
            netinfo=netinfo,
        )
Exemplo n.º 14
0
def action(ovs_connection, diskguid, new_vdiskguid, template_name):
    # Creates a template from a vdisk: snapshot it, clone the snapshot,
    # convert the clone onto the new disk and mark that disk as a template.
    #
    # ovs_connection: dict holding connection info for the OVS REST API
    #   eg: { ips: ['ip1', 'ip2', 'ip3'], client_id: 'dsfgfs', client_secret: 'sadfafsdf'}
    # diskguid: guid of the disk we are creating a template from
    # new_vdiskguid: guid of the pre-created disk that will hold the template
    #
    # returns the edge path of the new template disk
    from CloudscalerLibcloud import openvstorage
    from CloudscalerLibcloud.utils.gridconfig import GridConfig
    config = GridConfig()
    username = config.settings['ovs_credentials'].get('edgeuser', '')
    password = config.settings['ovs_credentials'].get('edgepassword', '')

    ovs = j.clients.openvstorage.get(
        ips=ovs_connection['ips'],
        credentials=(ovs_connection['client_id'],
                     ovs_connection['client_secret']))
    disk = ovs.get('/vdisks/{}'.format(diskguid))

    storage_drivers = ovs.get('/storagedrivers',
                              params={'contents': 'storagedriver_id'})['data']

    def getDiskPath(disk):
        storagedriver_id = disk['storagedriver_id']
        for sd in storage_drivers:
            if sd['storagedriver_id'] == storagedriver_id:
                break
        else:
            raise Exception(
                'Could not find storagedriver {}'.format(storagedriver_id))
        storage_ip = sd['storage_ip']
        edge_port = sd['ports']['edge']
        path = 'openvstorage+tcp:{}:{}{}'.format(
            storage_ip, edge_port, disk['devicename'].split('.')[0])
        if username and password:
            path = '{}:username={}:password={}'.format(path, username,
                                                       password)
        return path

    def cleanup(snapshot_guid=None, cloned_diskguid=None):
        try:
            if cloned_diskguid:
                path = '/vdisks/{}/delete'
                taskguid = ovs.post(path.format(cloned_diskguid))
                success, result = ovs.wait_for_task(taskguid)
                if not success:
                    raise Exception(
                        "Could not delete disk:\n{}".format(result))

            if snapshot_guid:
                path_delete_snapshot = '/vdisks/{}/remove_snapshot'
                taskguid = ovs.post(path_delete_snapshot.format(diskguid),
                                    params=dict(snapshot_id=snapshot_guid))
                success, result = ovs.wait_for_task(taskguid)
                if not success:
                    raise Exception(
                        "Could not delete snapshot:\n{}".format(result))
        except Exception:
            # best-effort cleanup; never mask the original error
            pass

    # create snapshot
    path = '/vdisks/{}/create_snapshot'
    params = dict(name=template_name, sticky=True)
    taskguid = ovs.post(path.format(diskguid), params=params)
    success, snapshot_guid = ovs.wait_for_task(taskguid)
    if not success:
        raise Exception(
            "Could not create snapshot:\n{}".format(snapshot_guid))

    # clone the snapshot
    clone_path = '/vdisks/{}/clone'
    # Create clone
    taskguid = ovs.post(clone_path.format(diskguid),
                        params=dict(
                            name=template_name,
                            storagerouter_guid=disk['storagerouter_guid'],
                            snapshot_id=snapshot_guid))
    success, result = ovs.wait_for_task(taskguid)
    if not success:
        cleanup(snapshot_guid=snapshot_guid)
        raise Exception("Could not create clone:\n{}".format(result))
    cloned_diskguid = result['vdisk_guid']
    cloned_disk = ovs.get('/vdisks/{}'.format(cloned_diskguid))
    new_disk = ovs.get('/vdisks/{}'.format(new_vdiskguid))
    src = getDiskPath(cloned_disk)
    dest = getDiskPath(new_disk)
    try:
        j.system.platform.qemu_img.convert(src,
                                           None,
                                           dest,
                                           'raw',
                                           createTarget=False)
    except:
        cleanup(snapshot_guid=snapshot_guid, cloned_diskguid=cloned_diskguid)
        raise

    # Set the new disk as template
    path = '/vdisks/{}/set_as_template'.format(new_vdiskguid)
    taskguid = ovs.post(path)
    success, result = ovs.wait_for_task(taskguid)
    if not success:
        cleanup(snapshot_guid=snapshot_guid, cloned_diskguid=cloned_diskguid)
        raise Exception("Could not create a template:\n{}".format(result))
    # delete the snapshot and cloned_disk
    cleanup(snapshot_guid=snapshot_guid, cloned_diskguid=cloned_diskguid)

    return dest
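
A minimal invocation sketch, assuming placeholder guids and a reachable OVS
cluster; the signature matches the kwargs the driver's ex_create_template
sends to the 'createtemplate' jumpscript:

# ovs_connection = {'ips': ['10.0.0.10', '10.0.0.11'],
#                   'client_id': 'ovs-client',
#                   'client_secret': 's3cret'}
# image_path = action(ovs_connection,
#                     diskguid='<boot disk guid>',
#                     new_vdiskguid='<pre-created template disk guid>',
#                     template_name='ubuntu-16.04-template')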
Exemplo n.º 15
0
class CSLibvirtNodeDriver(object):

    _ovsdata = {}
    type = 'CSLibvirt'

    NODE_STATE_MAP = {
        0: NodeState.TERMINATED,
        1: NodeState.RUNNING,
        2: NodeState.PENDING,
        3: NodeState.TERMINATED,  # paused
        4: NodeState.TERMINATED,  # shutting down
        5: NodeState.TERMINATED,
        6: NodeState.UNKNOWN,  # crashed
        7: NodeState.UNKNOWN,  # last
    }
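
    # The integer keys mirror libvirt's virDomainState values (NOSTATE,
    # RUNNING, BLOCKED, PAUSED, SHUTDOWN, SHUTOFF, CRASHED, PMSUSPENDED);
    # ids outside this map are best treated as NodeState.UNKNOWN.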

    def __init__(self, stack):
        self._rndrbn_vnc = 0
        self.id = int(stack.referenceId)
        self.gid = stack.gid
        self.name = 'libvirt'
        self.uri = stack.apiUrl
        self.stack = stack
        self.env = env
        self.scl = j.clients.osis.getNamespace('system')
        grid = self.scl.grid.get(self.gid)
        self.node = self.scl.node.get(self.id)
        self.config = GridConfig(grid, self.node.memory / 1024.)
        # preload ovs_credentials and ovs_connection
        # this is to detect errors earlier if there is
        # some misconfiguration
        self.ovs_connection

    backendconnection = connection.DummyConnection()

    @property
    def ovs_credentials(self):
        cachekey = 'credentials_{}'.format(self.gid)
        if cachekey not in self._ovsdata:
            credentials = self.config.get('ovs_credentials')
            self._ovsdata[cachekey] = credentials
        return self._ovsdata[cachekey]

    @property
    def ovs_connection(self):
        cachekey = 'ovs_connection_{}'.format(self.gid)
        if cachekey not in self._ovsdata:
            connection = {
                'ips': self.ovs_credentials['ips'],
                'client_id': self.ovs_credentials['client_id'],
                'client_secret': self.ovs_credentials['client_secret']
            }
            self._ovsdata[cachekey] = connection
        return self._ovsdata[cachekey]
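
    # Assumed shape of the 'ovs_credentials' grid setting (keys inferred
    # from their usage in this class; edgeuser/edgepassword are optional):
    #
    #     {'ips': ['192.168.1.10', '192.168.1.11'],
    #      'client_id': 'ovs-client',
    #      'client_secret': 's3cret',
    #      'edgeuser': 'edge',
    #      'edgepassword': 'edge-s3cret'}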

    @property
    def ovs_settings(self):
        cachekey = 'ovs_settings_{}'.format(self.gid)
        if cachekey not in self._ovsdata:
            grid_settings = self.config.get('ovs_settings', dict())
            settings = dict(vpool_vmstor_metadatacache=grid_settings.get(
                'vpool_vmstor_metadatacache', 20),
                            vpool_data_metadatacache=grid_settings.get(
                                'vpool_data_metadatacache', 20))
            self._ovsdata[cachekey] = settings
        return self._ovsdata[cachekey]

    def getVolumeId(self, vdiskguid, edgeclient, name):
        username = self.ovs_credentials.get('edgeuser')
        password = self.ovs_credentials.get('edgepassword')
        return getOpenvStroageVolumeId(edgeclient['storageip'],
                                       edgeclient['edgeport'], name, vdiskguid,
                                       edgeclient.get('protocol', 'tcp'),
                                       username, password)

    @property
    def all_edgeclients(self):
        return self._execute_agent_job('listedgeclients',
                                       role='storagemaster',
                                       ovs_connection=self.ovs_connection)

    def list_vdisks(self, storagerouterguid):
        return self._execute_agent_job('listvdisks',
                                       role='storagemaster',
                                       ovs_connection=self.ovs_connection,
                                       storagerouterguid=storagerouterguid)

    @property
    def edgeclients(self):
        edgeclients = filter(lambda client: client['status'] == 'OK',
                             self.all_edgeclients)

        activesessions = (
            self.backendconnection.agentcontroller_client.listActiveSessions())
        activenodes = self.scl.node.search({
            'status': 'ENABLED',
            'gid': self.gid,
            'roles': 'storagedriver'
        })[1:]

        def get_active_node(storageip):
            for activenode in activenodes:
                if storageip in activenode['ipaddr']:
                    return activenode
            return None

        def filter_clients(client):
            node = get_active_node(client['storageip'])
            if node is None:
                return False
            client['nid'] = node['id']
            return (node['gid'], node['id']) in activesessions

        return filter(filter_clients, edgeclients)

    def getNextEdgeClient(self, vpool, edgeclients=None):
        clients = edgeclients or self.edgeclients[:]
        clients = filter(lambda x: x['vpool'] == vpool, clients)
        if not clients:
            raise exceptions.ServiceUnavailable(
                "No storagerouter available for vpool {}".format(vpool))
        return sorted(clients, key=lambda client: client['vdiskcount'])[0]
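
    # Selection sketch with hypothetical data: filter on vpool, then pick
    # the least-loaded client by vdiskcount:
    #
    #     clients = [{'vpool': 'vmstor', 'vdiskcount': 12},
    #                {'vpool': 'vmstor', 'vdiskcount': 3},
    #                {'vpool': 'data01', 'vdiskcount': 7}]
    #     getNextEdgeClient('vmstor', clients)  # -> the vdiskcount=3 entry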

    def getEdgeClientFromVolume(self, volume):
        edgeclients = self.edgeclients[:]
        for edgeclient in edgeclients:
            if (volume.edgehost == edgeclient['storageip']
                    and volume.edgeport == edgeclient['edgeport']):
                return edgeclient, edgeclients

    def getBestDataVpool(self):
        edgeclients = self.edgeclients[:]
        diskspervpool = {}
        for edgeclient in edgeclients:
            diskspervpool[edgeclient['vpool']] = diskspervpool.setdefault(
                edgeclient['vpool'], 0) + edgeclient['vdiskcount']
        if len(diskspervpool) > 1:
            for vpool in list(diskspervpool.keys()):
                if not vpool.startswith('data'):
                    diskspervpool.pop(vpool)
        # get vpool with least vdiskcount
        return sorted(diskspervpool.items(),
                      key=lambda vpool: vpool[1])[0][0], edgeclients

    def set_backend(self, connection):
        """
        Set a connection to the cloudbroker backend; this is used
        to get all the supported images and sizes
        """
        self.backendconnection = connection

    def _execute_agent_job(self,
                           name_,
                           id=None,
                           wait=True,
                           queue=None,
                           role=None,
                           timeout=600,
                           **kwargs):
        if not id and not role:
            id = int(self.id)

        elif id is None:
            id = 0
        else:
            id = id and int(id)

        tags = getJobTags()
        job = self.backendconnection.agentcontroller_client.executeJumpscript(
            'greenitglobe',
            name_,
            nid=id,
            role=role,
            gid=self.gid,
            wait=wait,
            queue=queue,
            args=kwargs,
            tags=tags)
        if wait and job['state'] != 'OK':
            if job['state'] == 'NOWORK':
                j.errorconditionhandler.raiseOperationalWarning(
                    'Could not find agent with nid:%s' % id)
            elif job['state'] == 'TIMEOUT':
                j.errorconditionhandler.raiseOperationalWarning(
                    'Job did not finish in time')
            else:
                j.errorconditionhandler.raiseOperationalWarning(
                    "Could not execute %s for nid:%s, error was:%s" %
                    (name_, id, job['result']))

            raise exceptions.ServiceUnavailable(
                'Could not perform action: {name} at this time'.format(
                    name=name_))
        if wait:
            return job['result']
        else:
            return job
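
    # Usage sketch (calls as used elsewhere in this class): wait=True returns
    # the jumpscript result, wait=False returns the raw job record:
    #
    #     result = self._execute_agent_job('listmachines', queue='default')
    #     job = self._execute_agent_job('deletesnapshot', wait=False,
    #                                   role='storagedriver', **kwargs)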

    def _create_disk(self, vm_id, disksize, image, disk_role='base'):
        edgeclient = self.getNextEdgeClient('vmstor')

        diskname = '{0}/bootdisk-{0}'.format(vm_id)
        kwargs = {
            'ovs_connection': self.ovs_connection,
            'storagerouterguid': edgeclient['storagerouterguid'],
            'size': disksize,
            'templateguid': image.referenceId,
            'diskname': diskname,
            'pagecache_ratio': self.ovs_settings['vpool_vmstor_metadatacache']
        }

        try:
            vdiskguid = self._execute_agent_job('creatediskfromtemplate',
                                                role='storagedriver',
                                                **kwargs)
        except (Exception, exceptions.ServiceUnavailable) as ex:
            raise StorageException(ex.message, ex)

        volumeid = self.getVolumeId(vdiskguid=vdiskguid,
                                    edgeclient=edgeclient,
                                    name=diskname)
        return OpenvStorageVolume(id=volumeid,
                                  name=diskname,
                                  size=disksize,
                                  driver=self), edgeclient

    def create_volume(self, size, name, data=True, dev=''):
        if data:
            vpoolname, edgeclients = self.getBestDataVpool()
            edgeclient = self.getNextEdgeClient(vpoolname, edgeclients)
            diskname = 'volumes/volume_{}'.format(name)
        else:
            edgeclient = self.getNextEdgeClient('vmstor')
            diskname = name
        kwargs = {
            'ovs_connection': self.ovs_connection,
            'vpoolguid': edgeclient['vpoolguid'],
            'storagerouterguid': edgeclient['storagerouterguid'],
            'diskname': diskname,
            'size': size,
            'pagecache_ratio': self.ovs_settings['vpool_data_metadatacache']
        }
        try:
            vdiskguid = self._execute_agent_job('createdisk',
                                                role='storagedriver',
                                                **kwargs)
        except (Exception, exceptions.ServiceUnavailable) as ex:
            raise StorageException(ex.message, ex)
        volumeid = self.getVolumeId(vdiskguid=vdiskguid,
                                    edgeclient=edgeclient,
                                    name=diskname)
        stvol = OpenvStorageVolume(id=volumeid,
                                   size=size,
                                   name=diskname,
                                   driver=self)
        stvol.dev = dev
        return stvol

    def create_volumes(self, volumes):
        stvolumes = []
        for volume in volumes:
            stvol = self.create_volume(volume['size'], volume['name'],
                                       volume.get('data', True),
                                       volume.get('dev', ''))
            stvolumes.append(stvol)
        return stvolumes

    def attach_volume(self, node, volume):
        self._execute_agent_job('attach_device',
                                queue='hypervisor',
                                xml=str(volume),
                                machineid=node.id)
        return True

    def destroy_volume(self, volume):
        return self.destroy_volumes_by_guid([volume.vdiskguid])

    def get_volume_from_xml(self, xmldom, volume):
        devices = xmldom.find('devices')
        for disk in devices.iterfind('disk'):
            if disk.attrib['device'] != 'disk':
                continue
            source = disk.find('source')
            if source.attrib.get('dev',
                                 source.attrib.get('name')) == volume.name:
                return devices, disk
        return None, None

    def detach_volume(self, volume):
        node = volume.extra['node']
        self._execute_agent_job('detach_device',
                                queue='hypervisor',
                                xml=str(volume),
                                machineid=node.id)
        return node

    def _create_metadata_iso(self,
                             edgeclient,
                             name,
                             password,
                             type,
                             userdata=None):
        customuserdata = userdata or {}
        if isinstance(customuserdata, basestring):
            customuserdata = yaml.safe_load(customuserdata)
        if type not in ['WINDOWS', 'Windows']:
            memrule = 'SUBSYSTEM=="memory", ACTION=="add", TEST=="state", ATTR{state}=="offline", ATTR{state}="online"'
            cpurule = 'SUBSYSTEM=="cpu", ACTION=="add", TEST=="online", ATTR{online}=="0", ATTR{online}="1"'
            runcmds = []
            runcmds.append(
                "echo '{}' > /etc/udev/rules.d/66-hotplug.rules".format(
                    memrule))
            runcmds.append(
                "echo '{}' >> /etc/udev/rules.d/66-hotplug.rules".format(
                    cpurule))
            runcmds.append(['udevadm', 'control', '-R'])

            userdata = {
                'password': password,
                'users': [{
                    'name': 'cloudscalers',
                    'plain_text_passwd': password,
                    'lock-passwd': False,
                    'shell': '/bin/bash',
                    'sudo': 'ALL=(ALL) ALL'
                }],
                'ssh_pwauth': True,
                'runcmd': runcmds,
                'manage_etc_hosts': True,
                'chpasswd': {
                    'expire': False
                }
            }
            metadata = {'local-hostname': name}
            if 'users' in customuserdata:
                users = customuserdata.pop('users', [])
                userdata['users'].extend(users)
            userdata.update(customuserdata)
        else:
            userdata = {}
            metadata = {'admin_pass': password, 'hostname': name}

        diskpath = "{0}/cloud-init-{0}".format(name)
        kwargs = {
            'ovs_connection': self.ovs_connection,
            'vpoolguid': edgeclient['vpoolguid'],
            'storagerouterguid': edgeclient['storagerouterguid'],
            'diskname': diskpath,
            'size': 0.1,
            'pagecache_ratio': self.ovs_settings['vpool_data_metadatacache']
        }
        try:
            vdiskguid = self._execute_agent_job('createdisk',
                                                role='storagedriver',
                                                **kwargs)
        except Exception as ex:
            raise StorageException(ex.message, ex)

        volumeid = self.getVolumeId(vdiskguid=vdiskguid,
                                    edgeclient=edgeclient,
                                    name=diskpath)
        isovolume = OpenvStorageISO(id=volumeid,
                                    name=diskpath,
                                    size=0,
                                    driver=self)
        try:
            volumeid = self._execute_agent_job('createmetaiso',
                                               role='storagedriver',
                                               ovspath=volumeid,
                                               metadata=metadata,
                                               userdata=userdata,
                                               type=type)
        except Exception as ex:
            raise StorageException(ex.message, ex, volumes=[isovolume])
        return isovolume

    def generate_password_hash(self, password):
        def generate_salt():
            salt_set = ('abcdefghijklmnopqrstuvwxyz'
                        'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
                        '0123456789./')
            return ''.join(random.choice(salt_set) for _ in range(16))

        salt = generate_salt()
        return crypt.crypt(password, '$6$' + salt)

    def get_host_memory(self):
        return self.node.memory - self.config.get('reserved_mem')

    def init_node(self,
                  name,
                  size,
                  networkid=None,
                  volumes=None,
                  imagetype='',
                  boottype='bios',
                  machineId=None):
        volumes = volumes or []
        macaddress = self.backendconnection.getMacAddress(self.gid)

        networkname = 'space_{:04x}'.format(networkid)
        nodeid = str(uuid.uuid4())
        interfaces = [
            NetworkInterface(macaddress, '{}-{:04x}'.format(name, networkid),
                             'bridge', networkname)
        ]
        netinfo = [{'id': networkid, 'type': 'vxlan'}]
        extra = {
            'volumes': volumes,
            'ifaces': interfaces,
            'imagetype': imagetype,
            'size': size,
            'bootdev': 'hd',
            'boottype': boottype,
            'machineId': machineId
        }
        node = Node(id=nodeid,
                    name=name,
                    state=NodeState.PENDING,
                    public_ips=[],
                    private_ips=[],
                    driver=self,
                    extra=extra)
        machinexml = self.get_xml(node)

        # 0 means default behaviour, i.e. the machine is auto-started.
        result = self._execute_agent_job('createmachine',
                                         queue='hypervisor',
                                         machinexml=machinexml,
                                         vmlog_dir=vmlog_dir,
                                         netinfo=netinfo)
        if not result or result == -1:
            # The agent is not registered with the agentcontroller, or the
            # machine could not be provisioned (e.g. not enough resources);
            # in the latter case clean up the half-created machine.
            if result == -1:
                self._execute_agent_job('deletemachine',
                                        queue='hypervisor',
                                        machineid=None,
                                        machinexml=machinexml)
            raise NotEnoughResources("Failed to create machine", volumes)

        node = self._from_agent_to_node(result, volumes=volumes)
        return node

    def ex_create_template(self, node, name, new_vdiskguid):
        bootvolume = node.extra['volumes'][0]
        kwargs = {
            'ovs_connection': self.ovs_connection,
            'diskguid': bootvolume.vdiskguid,
            'new_vdiskguid': new_vdiskguid,
            'template_name': name
        }
        image_path = self._execute_agent_job('createtemplate',
                                             queue='io',
                                             role='storagedriver',
                                             **kwargs)
        return image_path

    def ex_delete_template(self, templateid):
        kwargs = {
            'ovs_connection': self.ovs_connection,
            'diskguid': str(uuid.UUID(templateid))
        }
        self._execute_agent_job('deletetemplate',
                                queue='io',
                                role='storagedriver',
                                **kwargs)

    def ex_get_node_details(self, node_id):
        driver = DummyNodeDriver(0)
        node = Node(id=node_id,
                    name='',
                    state=NodeState.RUNNING,
                    public_ips=[],
                    private_ips=[],
                    driver=driver)
        agentnode = self._get_domain_for_node(node)
        if agentnode is None:
            return None
        node = self._from_agent_to_node(agentnode)
        return node

    def get_disk_guids(self, node, type=None):
        diskguids = []
        for volume in node.extra['volumes']:
            if (type is not None and volume.type != type) or isinstance(
                    volume, PhysicalVolume):
                continue
            diskguids.append(volume.vdiskguid)
        return diskguids

    def ex_create_snapshot(self, node, name):
        diskguids = self.get_disk_guids(node, type='disk')
        kwargs = {
            'diskguids': diskguids,
            'ovs_connection': self.ovs_connection,
            'name': name
        }
        return self._execute_agent_job('createsnapshots',
                                       role='storagedriver',
                                       **kwargs)

    def ex_list_snapshots(self, node):
        diskguids = self.get_disk_guids(node, type='disk')
        kwargs = {
            'diskguids': diskguids,
            'ovs_connection': self.ovs_connection
        }
        return self._execute_agent_job('listsnapshots',
                                       role='storagedriver',
                                       **kwargs)

    def ex_delete_snapshot(self, node, timestamp=None, name=None):
        diskguids = self.get_disk_guids(node, type='disk')
        kwargs = {
            'diskguids': diskguids,
            'ovs_connection': self.ovs_connection,
            'timestamp': timestamp,
            'name': name
        }
        return self._execute_agent_job('deletesnapshot',
                                       wait=False,
                                       role='storagedriver',
                                       **kwargs)

    def ex_rollback_snapshot(self, node, timestamp, name):
        diskguids = self.get_disk_guids(node, type='disk')
        kwargs = {
            'diskguids': diskguids,
            'timestamp': timestamp,
            'name': name,
            'ovs_connection': self.ovs_connection
        }
        return self._execute_agent_job('rollbacksnapshot',
                                       role='storagedriver',
                                       **kwargs)

    def _get_domain_disk_file_names(self, dom, disktype='disk'):
        if isinstance(dom, ElementTree.Element):
            xml = dom
        elif isinstance(dom, basestring):
            xml = ElementTree.fromstring(dom)
        else:
            raise RuntimeError('Invalid type %s for parameter dom' % type(dom))
        disks = xml.findall('devices/disk')
        diskfiles = list()
        for disk in disks:
            if disktype is None or disk.attrib['device'] == disktype:
                source = disk.find('source')
                if source is not None:
                    if source.attrib.get('protocol') == 'openvstorage':
                        ovsdisk = OpenvStorageVolumeFromXML(disk, self)
                        diskfiles.append(ovsdisk.vdiskguid)
                    elif 'dev' in source.attrib:
                        diskfiles.append(source.attrib['dev'])
                    elif 'file' in source.attrib:
                        diskfiles.append(source.attrib['file'])
        return diskfiles

    def _get_snapshot_disk_file_names(self, xml):
        xml = ElementTree.fromstring(xml)
        domain = xml.findall('domain')[0]
        return self._get_domain_disk_file_names(domain)

    def destroy_node(self, node):
        xml = self.get_xml(node)
        self._execute_agent_job('deletemachine',
                                queue='hypervisor',
                                machineid=node.id,
                                machinexml=xml)
        return True

    def ex_limitio(self, volume):
        node = volume.extra['node']
        if node.state == LibvirtState.RUNNING:
            return self._execute_agent_job('limitdiskio',
                                           queue='hypervisor',
                                           machineid=node.id,
                                           disks=[volume.id],
                                           iotune=volume.iotune)

    def destroy_volumes_by_guid(self, diskguids):
        kwargs = {
            'diskguids': diskguids,
            'ovs_connection': self.ovs_connection
        }
        try:
            self._execute_agent_job('deletedisks',
                                    role='storagedriver',
                                    **kwargs)
        except exceptions.ServiceUnavailable as rError:
            j.errorconditionhandler.processPythonExceptionObject(
                rError,
                message=
                "Failed to delete disks; they may already have been deleted from the storage node"
            )

    def ex_get_console_url(self, node):
        urls = self.backendconnection.listVNC(self.gid)
        id_ = self._rndrbn_vnc % len(urls)
        url = urls[id_]
        self._rndrbn_vnc += 1
        token = self.backendconnection.storeInfo(
            self.ex_get_console_output(node), 300)
        return url + "%s" % token

    def list_nodes(self):
        noderesult = []
        nodes = self.backendconnection.listNodes()
        result = self._execute_agent_job('listmachines', queue='default')
        for x in result:
            if x['id'] in nodes:
                ipaddress = nodes[x['id']]['ipaddress']
            else:
                ipaddress = ''
            noderesult.append(self._from_agent_to_node(x, ipaddress))
        return noderesult

    def ex_stop_node(self, node, force=False):
        machineid = node.id
        return self._execute_agent_job('stopmachine',
                                       queue='hypervisor',
                                       machineid=machineid,
                                       force=force)

    def ex_suspend_node(self, node):
        machineid = node.id
        return self._execute_agent_job('suspendmachine',
                                       queue='hypervisor',
                                       machineid=machineid)

    def ex_resume_node(self, node):
        machineid = node.id
        return self._execute_agent_job('resumemachine',
                                       queue='hypervisor',
                                       machineid=machineid)

    def ex_pause_node(self, node):
        machineid = node.id
        return self._execute_agent_job('pausemachine',
                                       queue='hypervisor',
                                       machineid=machineid)

    def ex_unpause_node(self, node):
        machineid = node.id
        return self._execute_agent_job('unpausemachine',
                                       queue='hypervisor',
                                       machineid=machineid)

    def ex_soft_reboot_node(self, node):
        xml = self.get_xml(node)
        netinfo = self.get_net_info(node)
        return self._execute_agent_job('softrebootmachine',
                                       queue='hypervisor',
                                       machineid=node.id,
                                       xml=xml,
                                       netinfo=netinfo)

    def ex_hard_reboot_node(self, node):
        xml = self.get_xml(node)
        netinfo = self.get_net_info(node)
        return self._execute_agent_job('hardrebootmachine',
                                       queue='hypervisor',
                                       machineid=node.id,
                                       xml=xml,
                                       netinfo=netinfo)

    def get_net_info(self, node):
        netinfo = []
        for interface in node.extra['ifaces']:
            if interface.type == 'private':
                netinfo.append({'type': 'vxlan', 'id': interface.networkId})
            else:
                netinfo.append({'type': 'vlan', 'id': interface.networkId})
        return netinfo

    def get_xml(self, node):
        machinetemplate = self.env.get_template("machine.xml")
        hostmemory = self.get_host_memory()
        timestamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        logfile = '{logdir}/{id}.{timestamp}.log'.format(logdir=vmlog_dir,
                                                         id=node.extra.get(
                                                             'machineId',
                                                             node.id),
                                                         timestamp=timestamp)
        machinexml = machinetemplate.render({
            'node': node,
            'hostmemory': hostmemory,
            'logfile': logfile,
        })
        return machinexml

    def ex_start_node(self, node):
        machinexml = self.get_xml(node)
        netinfo = self.get_net_info(node)
        self._execute_agent_job('startmachine',
                                queue='hypervisor',
                                machineid=node.id,
                                xml=machinexml,
                                vmlog_dir=vmlog_dir,
                                netinfo=netinfo)
        return True

    def ex_get_console_output(self, node):
        domain = self._get_domain_for_node(node=node)
        xml = ElementTree.fromstring(domain['XMLDesc'])
        graphics = xml.find('devices/graphics')
        info = dict()
        info['port'] = int(graphics.attrib['port'])
        info['type'] = graphics.attrib['type']
        info['ipaddress'] = self._get_connection_ip()
        return info

    def ex_import(self, size, vmid, networkid, disks):
        name = 'vm-%s' % vmid
        volumes = []
        for i, disk in enumerate(disks):
            path = disk['path']
            parsedurl = urlparse.urlparse(path)
            if parsedurl.netloc == '':
                path = path.replace('{}:'.format(parsedurl.scheme),
                                    '{}://'.format(parsedurl.scheme))
            volume = OpenvStorageVolume(id='%s@%s' % (path, disk['guid']),
                                        name='N/A',
                                        size=disk['size'],
                                        driver=self)
            volume.dev = 'vd%s' % convertnumber(i + 1)
            volumes.append(volume)
        return self.init_node(name,
                              size,
                              networkid=networkid,
                              volumes=volumes,
                              machineId=vmid)

    def ex_clone_disks(self, diskmapping, disks_snapshots=None):
        disks_snapshots = disks_snapshots or {}
        disks = []
        diskvpool = {}
        for volume, diskname in diskmapping:
            source_edgeclient, edgeclients = self.getEdgeClientFromVolume(
                volume)
            edgeclient = self.getNextEdgeClient(source_edgeclient['vpool'],
                                                edgeclients)
            diskinfo = {
                'clone_name': diskname,
                'diskguid': volume.vdiskguid,
                'storagerouterguid': edgeclient['storagerouterguid']
            }
            if disks_snapshots.get(volume.vdiskguid, None):
                diskinfo['snapshotguid'] = disks_snapshots[volume.vdiskguid]
            diskvpool[volume.vdiskguid] = edgeclient
            disks.append(diskinfo)

        kwargs = {'ovs_connection': self.ovs_connection, 'disks': disks}
        newdisks = self._execute_agent_job('clonedisks',
                                           role='storagedriver',
                                           **kwargs)
        volumes = []
        for idx, diskinfo in enumerate(disks):
            newdiskguid, vpoolguid = newdisks[idx]
            edgeclient = diskvpool[diskinfo['diskguid']]
            volumeid = self.getVolumeId(newdiskguid, edgeclient,
                                        diskinfo['clone_name'])
            volume = OpenvStorageVolume(id=volumeid,
                                        name='N/A',
                                        size=-1,
                                        driver=self)
            volume.dev = 'vd%s' % convertnumber(idx)
            volume.edgeclient = edgeclient
            volumes.append(volume)
        return volumes
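
    # diskmapping is a sequence of (OpenvStorageVolume, clone_name) pairs.
    # A minimal sketch of a call (names hypothetical):
    #
    #   clones = driver.ex_clone_disks([(bootvolume, 'vm-43-boot')])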

    def ex_clone(self,
                 node,
                 password,
                 imagetype,
                 size,
                 vmid,
                 networkid,
                 diskmapping,
                 disks_snapshots=None):
        disks_snapshots = disks_snapshots or {}
        name = 'vm-%s' % vmid
        volumes = self.ex_clone_disks(diskmapping, disks_snapshots)
        volumes.append(
            self._create_metadata_iso(volumes[0].edgeclient, name, password,
                                      imagetype))
        return self.init_node(name,
                              size,
                              networkid=networkid,
                              volumes=volumes,
                              imagetype=imagetype,
                              machineId=vmid)

    def ex_extend_disk(self, diskguid, newsize, disk_info=None):
        if disk_info is None:
            disk_info = {'machineRefId': None}
        return self._execute_agent_job('extend_disk',
                                       ovs_connection=self.ovs_connection,
                                       size=newsize,
                                       diskguid=diskguid,
                                       disk_info=disk_info)
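
    # Usage sketch (guid hypothetical; newsize uses whatever unit the
    # 'extend_disk' jumpscript expects):
    #
    #   driver.ex_extend_disk('1f2e3d4c-...', newsize=100)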

    def ex_export(self, node, exportname, uncpath, emailaddress):
        machineid = node.id
        return self._execute_agent_job('backupmachine',
                                       wait=False,
                                       machineid=machineid,
                                       backupname=exportname,
                                       location=uncpath,
                                       emailaddress=emailaddress)

    def ex_is_storage_action_running(self, node):
        """
        Check if an action is being running that is doing some interactions
        with the disk
        """
        machineid = node.id
        return self._execute_agent_job('checkstorageaction',
                                       wait=True,
                                       machineid=machineid)

    def _get_connection_ip(self):
        uri = urlparse.urlparse(self.uri)
        return uri.netloc

    def _get_domain_for_node(self, node):
        return self._execute_agent_job('getmachine',
                                       queue='hypervisor',
                                       machineid=node.id)

    def _from_agent_to_node(self, domain, publicipaddress='', volumes=None):
        xml = domain.get('XMLDesc')
        node = Node(id=domain['id'],
                    public_ips=[],
                    name=domain['name'],
                    private_ips=[],
                    state=domain['state'],
                    driver=self)
        if xml:
            node = self._from_xml_to_node(xml, node)
        node.state = domain['state']
        extra = domain['extra']
        node.extra.update(extra)
        if volumes:
            node.extra['volumes'] = volumes
        if publicipaddress:
            node.public_ips.append(publicipaddress)
        return node

    def _from_xml_to_node(self, xml, node=None):
        dom = ElementTree.fromstring(xml)
        state = NodeState.UNKNOWN
        volumes = list()
        ifaces = list()
        for disk in dom.findall('devices/disk'):
            source = disk.find('source')
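            # skip non-disk devices (e.g. cdroms) and disks backed by a
            # local block device instead of a volume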
            if disk.attrib['device'] != 'disk' or source.attrib.get('dev'):
                continue
            volume = OpenvStorageVolumeFromXML(disk, self)
            volumes.append(volume)
        for nic in dom.findall('devices/interface'):
            mac = None
            macelement = nic.find('mac')
            source = nic.find('source')
            if macelement is not None:
                mac = macelement.attrib['address']
            target = nic.find('target').attrib['dev']
            bridgename = source.attrib.get('bridge') or source.attrib['network']
            if bridgename.startswith(('ext-', 'public')):
                bridgetype = 'PUBLIC'
            else:
                bridgetype = 'bridge'
            ifaces.append(
                NetworkInterface(mac=mac,
                                 target=target,
                                 type=bridgetype,
                                 bridgename=bridgename))
        name = dom.find('name').text
        bootdev = dom.find('os/boot').attrib['dev']
        extra = {'volumes': volumes, 'ifaces': ifaces, 'bootdev': bootdev}
        if node is None:
            id = dom.find('uuid').text
            node = Node(id=id,
                        name=name,
                        state=state,
                        public_ips=[],
                        private_ips=[],
                        driver=self,
                        extra=extra)
        else:
            node.extra.update(extra)
        return node
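
    # Minimal sketch of the domain XML consumed above (only the elements
    # actually queried; values and extra attributes hypothetical):
    #
    #   <domain>
    #     <uuid>...</uuid>
    #     <name>vm-42</name>
    #     <os><boot dev='hd'/></os>
    #     <devices>
    #       <disk device='disk'><source protocol='openvstorage' .../></disk>
    #       <interface type='bridge'>
    #         <mac address='52:54:00:...'/>
    #         <source bridge='ext-0123'/>
    #         <target dev='vnet0'/>
    #       </interface>
    #     </devices>
    #   </domain>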

    def ex_snapshots_can_be_deleted_while_running(self):
        """
        FOR LIBVIRT A SNAPSHOT CAN'T BE DELETED WHILE MACHINE RUNNGIN
        """
        return False

    def attach_public_network(self, node, ipcidr, interface):
        """
        Attach Virtual machine to the cpu node public network
        """
        self._execute_agent_job("attach_device",
                                queue="hypervisor",
                                xml=str(interface),
                                machineid=node.id,
                                ipcidr=ipcidr,
                                vlan=interface.networkId)
        return interface

    def detach_public_network(self, node):
        for iface in node.extra['ifaces']:
            if iface.type == 'PUBLIC':
                self._execute_agent_job('detach_device',
                                        queue='hypervisor',
                                        xml=str(iface),
                                        machineid=node.id)

    def ex_resize(self, node, extramem, vcpus):
        machinetemplate = self.env.get_template("memory.xml")
        result = True
        if extramem > 0:
            memory = machinetemplate.render({'memory': extramem})
            result = self._execute_agent_job('attach_device',
                                             queue='hypervisor',
                                             xml=memory,
                                             machineid=node.id) is not False
        if vcpus is not None:
            # the agent job may return a non-boolean result, so normalize
            # it to a bool before combining
            result = result and self._execute_agent_job(
                'change_vcpus',
                queue='hypervisor',
                vcpus=vcpus,
                machineid=node.id) is not False
        return result
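
    # Usage sketch (values hypothetical; extramem is rendered into the
    # memory.xml template, so its unit is whatever that template expects):
    #
    #   driver.ex_resize(node, extramem=1024, vcpus=4)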

    def ex_migrate(self, node, sourceprovider, force=False):
        domainxml = self.get_xml(node)
        netinfo = self.get_net_info(node)
        return self._execute_agent_job('vm_livemigrate',
                                       vm_id=node.id,
                                       sourceurl=sourceprovider.uri,
                                       force=force,
                                       domainxml=domainxml,
                                       vmlog_dir=vmlog_dir,
                                       netinfo=netinfo)