Example #1
    def add_vdisk_to_vm(self,
                        inst_name,
                        storage_name,
                        size,
                        disk_type="qcow2"):
        """
        @param inst_name: the name of VM
        @param storage_name: which storage repository the virtual disk put
        @param size: the disk size
        @param disk_type: qcow, qcow2, vmdk, raw, vhd, fat, ext2/3/4, etc.
        """
        if not self._hypervisor_handler:
            self._hypervisor_handler = self.get_handler()
        try:
            pool = self._hypervisor_handler.storagePoolLookupByName(
                storage_name)
        except libvirtError as error:
            log.error("No storage named: %s", storage_name)
            return False
        # pool.info() returns a list: [pool state, capacity, allocation, available]
        pool_free = pool.info()[-1]
        if int(size) * (2**30) > pool_free:
            log.error("Not enough free space in storage %s", storage_name)
            return False

        pool_xml_tree = xmlEtree.fromstring(pool.XMLDesc())
        pool_path = pool_xml_tree.find("target/path")
        if pool_path is None:
            path_str = "/var/lib/libvirt/images"  #  the default path for pool in kvm
        else:
            path_str = pool_path.text
        disk_name = "".join(
            [self._get_available_vdisk_name(inst_name), ".", disk_type])
        target_vol_path = os.path.join(path_str, disk_name)

        # volume type: file, block, dir, network, netdir
        #'GB' (gigabytes, 10^9 bytes), 'G' or 'GiB' (gibibytes, 2^30 bytes)
        storage_vol_xml = """
        <volume type="file">
            <name>%s</name>
            <allocation>0</allocation>
            <capacity unit="G">%s</capacity>
            <target>
                <path>%s</path>
                <format type='%s'/>
                <permissions>
                    <owner>107</owner>
                    <group>107</group>
                    <mode>0644</mode>
                    <label>virt_image_t</label>
                </permissions>
            </target>
        </volume>"""

        storage_vol_xml = storage_vol_xml % (disk_name, size, target_vol_path,
                                             disk_type)
        vol_obj = pool.createXML(storage_vol_xml)

        return self.attach_disk_to_domain(inst_name, target_vol_path,
                                          disk_type)

    def set_vm_memory_live(self, inst_name, memory_target):
        """
        :param inst_name: VM name
        :param memory_target: memory in GB; set dynamic_max and dynamic_min to the target size
        :return:
        """
        if self._hypervisor_handler is None:
            self._hypervisor_handler = self.get_handler()

        if not self.is_instance_running(inst_name=inst_name):
            log.error("Set live memory need VM to be running.")
            return False
        try:
            vm_ref = self._hypervisor_handler.xenapi.VM.get_by_name_label(
                inst_name)[0]
            gb = 1024.0 * 1024.0 * 1024.0
            memory_size = int(gb * float(memory_target))
            # set_memory_target_live has been deprecated
            # self._hypervisor_handler.xenapi.VM.set_memory_target_live(vm_ref, str(memory_size))
            self._hypervisor_handler.xenapi.VM.set_memory_dynamic_range(
                vm_ref, str(memory_size), str(memory_size))

            return True
        except Exception as error:
            log.exception("Exception raise when set live memory: %s", error)
            return False
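As a side note on the volume XML used in add_vdisk_to_vm above, here is a minimal standalone sketch of creating a volume through libvirt's pool.createXML(); it assumes a reachable qemu:///system daemon and an existing pool named "default", and all names and sizes are illustrative.

import libvirt

conn = libvirt.open("qemu:///system")              # assumes a local libvirt daemon
pool = conn.storagePoolLookupByName("default")     # assumes a pool named "default"

vol_xml = """
<volume type="file">
    <name>demo-disk.qcow2</name>
    <allocation>0</allocation>
    <capacity unit="G">5</capacity>
    <target>
        <path>/var/lib/libvirt/images/demo-disk.qcow2</path>
        <format type='qcow2'/>
    </target>
</volume>"""

vol = pool.createXML(vol_xml, 0)                   # raises libvirtError on failure
print(vol.path())                                  # path of the newly created volume
conn.close()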
Example #3
    def config_vcpus(self, inst_name, vcpu_nums=None, vcpu_max=None):
        """
        :param inst_name: VM name
        :param vcpu_nums: the current vcpu number
        :param vcpu_max: the max vcpu  number
        :return:
        """
        if not vcpu_max and not vcpu_nums:
            return True

        log.info("Start to configure the VCPU in VM [%s].", inst_name)

        if vcpu_nums and self.virt_driver.is_instance_running(
                inst_name=inst_name):
            ret = self.virt_driver.set_vm_vcpu_live(inst_name=inst_name,
                                                    vcpu_num=vcpu_nums)

        elif vcpu_max and self.virt_driver.is_instance_halted(
                inst_name=inst_name):
            ret = self.virt_driver.set_vm_vcpu_max(inst_name=inst_name,
                                                   vcpu_num=vcpu_max)

        else:
            log.error(
                "Only setting live vCPUs on a running VM or max vCPUs on a halted VM is supported."
            )
            return False
        # setting vcpu max also lowers the startup vcpu count when max < the live vcpu number
        if ret:
            # Don't need to check db sync ret, because there is crontab to sync it
            self.update_database_info(inst_name=inst_name)

        return ret

    def get_vif_bridge_name(self, inst_name, vif_index):
        """
        :param inst_name: VM name
        :param vif_index: vif device index
        :return: the bridge name which the vif is attached to
        """
        vif_list = self._get_dom_interfaces_elements_list(inst_name)
        try:
            tree = vif_list[int(vif_index)]
        except (IndexError, ValueError):
            log.error("No vif with index: %s", vif_index)
            return None

        source_element = tree.find('source')
        try:
            bridge_name = source_element.get('bridge', None)
            if bridge_name is None:
                network = source_element.get("network", None)
                if network:
                    network_dom = self._hypervisor_handler.networkLookupByName(
                        network)
                    bridge_name = network_dom.bridgeName()

            return bridge_name
        except AttributeError:
            log.error("No interface with index %s on domain: %s", vif_index,
                      inst_name)
            return None
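get_vif_bridge_name above handles two <source> forms: a direct bridge attribute, and a libvirt network that has to be resolved to its bridge. A minimal sketch of both paths, assuming a reachable qemu:///system daemon and a network named "default"; the XML fragments are illustrative.

import xml.etree.ElementTree as xmlEtree
import libvirt

# form 1: the interface names the bridge directly
bridge_iface = xmlEtree.fromstring(
    "<interface type='bridge'><source bridge='br0'/></interface>")
print(bridge_iface.find('source').get('bridge'))        # -> 'br0'

# form 2: the interface names a libvirt network, which owns the bridge
network_iface = xmlEtree.fromstring(
    "<interface type='network'><source network='default'/></interface>")
conn = libvirt.open("qemu:///system")
net = conn.networkLookupByName(network_iface.find('source').get('network'))
print(net.bridgeName())                                 # e.g. 'virbr0'
conn.close()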
Example #5
    def get_vif_ip(self, inst_name, vif_index):
        """
        Note: in Xenserver the device index is not same as the eth index in guest internal.eg: 0,3 will be eth0, eth1
        :param inst_name:
        :param vif_index: device index, not the eth index in guest internal
        :return: None or IP
        """
        if self._hypervisor_handler is None:
            self._hypervisor_handler = self.get_handler()

        try:
            vm_ref = self._hypervisor_handler.xenapi.VM.get_by_name_label(inst_name)[0]
        except IndexError:
            log.error("No VM with name [%s].", inst_name)
            return None

        try:
            all_vifs = self.get_all_vifs_indexes(inst_name)
            guest_metrics_ref = self._hypervisor_handler.xenapi.VM.get_guest_metrics(vm_ref)
            network_dict = self._hypervisor_handler.xenapi.VM_guest_metrics.get_networks(guest_metrics_ref)
        except Exception as error:
            log.debug("Except in get_vif_ip: %s", error)
            return None

        if str(vif_index) not in all_vifs:
            log.error("Vif index does not exist.")
            return None

        # Observed in testing: with vifs 0,1,3 each holding an IP, the dict looks like "2/ip: 192.168.1.122;  1/ip: 192.168.1.200;  0/ip: 10.143.248.253;"
        # with vifs 0,1,2,3 and IPs attached to 0,1,3, the dict looks like "3/ip: 192.168.1.122; 1/ip: 192.168.1.200;  0/ip: 10.143.248.253;"
        # if device 2 is removed, the IPs stay the same until reboot; after reboot the device indexes remain 0,1,3 but the keys shift to "2/ip:, 1/ip:, 0/ip:"
        # Assume vif devices are added in sequence and the device index matches the eth index in the VM, so use the device index to look up the IP.
        return network_dict.get(str(vif_index)+"/ip", None)

    def unplug_vif_from_vm(self, inst_name, vif_index):
        """
        @description Hot-unplug the specified VIF from the running VM
        @param inst_name: VM name
        @param vif_index: virtual interface index
        @return: True if success else False
        """
        vif_list = self._get_dom_interfaces_elements_list(inst_name)
        try:
            vif = vif_list[int(vif_index)]
        except (IndexError, ValueError):
            log.error("No vif with index %s found in domain %s", vif_index,
                      inst_name)
            return False

        dom = self._get_domain_handler(domain_name=inst_name)
        try:
            if dom.isActive():
                ret = dom.detachDeviceFlags(xmlEtree.tostring(vif),
                                            libvirt.VIR_DOMAIN_AFFECT_LIVE)
            else:
                # ret = dom.detachDeviceFlags(xmlEtree.tostring(vif))
                return True
        except libvirtError as error:
            log.error("Exceptions when unplug vif: %s", error)
            return False

        return ret == 0

    def destroy_vif(self, inst_name, vif_index):
        """
        To stay consistent with Xen, remove the vif from the persistent config as well
        @param inst_name: VM name
        @param vif_index: index of the virtual interface in the guest VM
        """
        vif_list = self._get_dom_interfaces_elements_list(inst_name)
        try:
            vif = vif_list[int(vif_index)]
        except (IndexError, ValueError):
            log.error("No vif with index %s found in domain %s", vif_index,
                      inst_name)
            return False

        dom = self._get_domain_handler(domain_name=inst_name)
        try:
            if dom.isActive():
                ret = dom.detachDeviceFlags(xmlEtree.tostring(vif),
                                            libvirt.VIR_DOMAIN_AFFECT_CONFIG)
            else:
                ret = dom.detachDeviceFlags(xmlEtree.tostring(vif))
        except libvirtError as error:
            log.error("Exceptions when destroy vif: %s", error)
            return False

        return ret == 0

    def _get_dom_interfaces_elements_list(self, inst_name):
        """
        :param inst_name: VM name
        :return: a list of interface elements, sorted by the interface's PCI slot
        """
        if not self._hypervisor_handler:
            self._hypervisor_handler = self.get_handler()

        domain = self._get_domain_handler(domain_name=inst_name)
        if not domain:
            log.error(
                "Domain %s doesn't exist, can not get interfaces information.",
                inst_name)
            return []

        interface_dict = {}
        tree = xmlEtree.fromstring(
            domain.XMLDesc(libvirt.VIR_DOMAIN_XML_INACTIVE))
        interface_list = tree.findall('devices/interface')
        for interface in interface_list:
            address_element = interface.find('address')
            slot = address_element.attrib.get('slot', None)
            if slot:
                interface_dict[int(slot, 16)] = interface
        # a list of interface element, sorted by interface/address/slot.
        return [interface_dict[key] for key in sorted(interface_dict)]
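The slot-based ordering in _get_dom_interfaces_elements_list can be exercised on a hand-written XML fragment; a minimal sketch (the addresses below are illustrative):

import xml.etree.ElementTree as xmlEtree

devices_xml = """
<devices>
  <interface type='bridge'>
    <source bridge='br1'/>
    <address type='pci' domain='0x0000' bus='0x00' slot='0x04' function='0x0'/>
  </interface>
  <interface type='bridge'>
    <source bridge='br0'/>
    <address type='pci' domain='0x0000' bus='0x00' slot='0x03' function='0x0'/>
  </interface>
</devices>"""

tree = xmlEtree.fromstring(devices_xml)
interface_dict = {}
for interface in tree.findall('interface'):
    slot = interface.find('address').attrib.get('slot')
    if slot:
        interface_dict[int(slot, 16)] = interface       # '0x03' -> 3, '0x04' -> 4

ordered = [interface_dict[key] for key in sorted(interface_dict)]
print([iface.find('source').get('bridge') for iface in ordered])   # ['br0', 'br1']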
Example #9
    def _ipv4_address_check(cls, ipv4_address, prop_string):
        """
        check the ipv4 address
        :return:
        """
        if ipv4_address.is_multicast:
            log.error("%s cannot be a multicast address, see RFC 3171",
                      prop_string)
            return False

        if ipv4_address.is_loopback:
            log.error("%s cannot be a loopback address, see RFC 3330",
                      prop_string)
            return False

        if ipv4_address.is_link_local:
            log.error("%s cannot be a link-local address, see RFC 3927",
                      prop_string)
            return False

        if ipv4_address.is_unspecified:
            log.error("%s cannot be a unspecified address, see RFC 5735",
                      prop_string)
            return False

        if ipv4_address.is_reserved:
            log.error("%s is other IETF reserved", prop_string)
            return False

        return True
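The checks above map directly onto the property flags of Python's standard ipaddress module, which is presumably where ipv4_address comes from; a quick sketch with an illustrative multicast address:

import ipaddress

addr = ipaddress.IPv4Address("224.0.0.1")
print(addr.is_multicast)      # True  -> rejected per RFC 3171
print(addr.is_loopback)       # False
print(addr.is_link_local)     # False
print(addr.is_unspecified)    # False
print(addr.is_reserved)       # False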
Example #10
    def get_device_infor(self, device_name=None, pif_ref=None):
        """
        @param pif_ref: reference to a PIF object
        @param device_name: name of interface in host
        @return: return a dict with key: DNS,IP,MTU,MAC,netmask,gateway,network, etc.
        """
        if self._hypervisor_handler is None:
            self._hypervisor_handler = self.get_handler()

        if device_name is not None:
            pif_ref = self._get_PIF_by_device(device_name)
            if not pif_ref:
                log.error(
                    "Can not get device information with the given device name: %s.",
                    device_name)
                return {}
            pif_record = self._hypervisor_handler.xenapi.PIF.get_record(
                pif_ref)
        elif pif_ref is not None:
            pif_record = self._hypervisor_handler.xenapi.PIF.get_record(
                pif_ref)
        else:
            log.error("Please specify a device name or a PIF reference to get device information.")
            return {}

        default_infor = {}
        default_infor.setdefault('device', pif_record.get('device', None))
        default_infor.setdefault('IP', pif_record.get('IP', None))
        default_infor.setdefault('DNS', pif_record.get('DNS', None))
        default_infor.setdefault('MAC', pif_record.get('MAC', None))
        default_infor.setdefault('gateway', pif_record.get('gateway', None))
        default_infor.setdefault('netmask', pif_record.get('netmask', None))
        return default_infor
Example #11
    def delete_instance(self, inst_name):
        '''
        undefine: if the domain is running, it is converted to a transient domain without being stopped.
        If the domain is inactive, the domain configuration is removed.
        '''
        domain = self._get_domain_handler(inst_name)
        if not domain:
            return True

        if domain.isActive():
            # destroy() force-shuts-down the domain; if it is already shut down, a libvirtError is raised
            domain.destroy()

        try:
            ret = domain.undefine()
            if ret == 0:
                target_disk = VM_HOUSE + inst_name + ".qcow2"
                cmd = "rm -f %s" % target_disk
                log.debug("remove the disk file for %s: %s", inst_name, cmd)
                p = subprocess.Popen(cmd,
                                     stdout=subprocess.PIPE,
                                     stderr=subprocess.PIPE,
                                     shell=True)
                _, perr = p.communicate()
                if perr:
                    log.error("Deleting the disk for vm %s meet an error:%s",
                              inst_name, perr)
                    return False

            return ret == 0
        except Exception as error:
            log.exception(error)
            return False
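delete_instance removes the disk by shelling out to rm with shell=True. Below is a minimal sketch of the same cleanup with os.remove, which avoids the shell entirely; the helper name and arguments here are illustrative, not part of the original class.

import logging
import os

log = logging.getLogger(__name__)


def remove_vm_disk(vm_house, inst_name):
    """Delete <vm_house>/<inst_name>.qcow2 without spawning a shell (a sketch)."""
    target_disk = os.path.join(vm_house, inst_name + ".qcow2")
    if not os.path.exists(target_disk):
        return True                    # already gone, nothing to do
    try:
        os.remove(target_disk)
        return True
    except OSError as error:
        log.error("Deleting the disk for vm %s met an error: %s", inst_name, error)
        return False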
Example #12
    def set_vm_dynamic_memory(self, inst_name, memory_max=None, memory_min=None):
        """
        set memory for a domain: if it is active, apply the change live and to the config file; if it is inactive, only the config file is updated
        :param inst_name:
        :param memory_max:
        :param memory_min:
        :return:
        """
        dom = self._get_domain_handler(domain_name=inst_name)
        if dom is None:
            return False
        gigabyte = 1024 * 1024  # one GiB expressed in KiB, the unit libvirt expects
        if memory_max:
            memory_size = int(memory_max) * gigabyte
        elif memory_min:
            log.info("Don't support min memory set.")
            return True
        else:
            log.error("Neither maxMemory nor minMemory is supplied.")
            return False
        try:
            if dom.isActive():
                ret = dom.setMemoryFlags(memory_size, libvirt.VIR_DOMAIN_AFFECT_LIVE | libvirt.VIR_DOMAIN_AFFECT_CONFIG)
            else:
                ret = dom.setMemoryFlags(memory_size)  # dom.setMemory need dom to be active
        except libvirtError as error:
            log.exception("Exception: %s", error)
            return False

        return ret == 0
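setMemoryFlags takes the size in kibibytes, which is why the snippet multiplies the GB value by 1024 * 1024; a quick worked check with an illustrative value:

memory_max_gb = 4
memory_size_kib = int(memory_max_gb) * 1024 * 1024   # 4 GiB -> 4194304 KiB
assert memory_size_kib == 4194304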
Example #13
    def destroy_old_vif(self, inst_name, vif_index):
        """
        Destroy old vif whose index is vif_index
        @param inst_name: Vm name
        @param vif_index: vif index
        """
        log.info("Start to delete the old interface device [%s] from VM [%s].",
                 vif_index, inst_name)

        if not self.vnet_driver.is_vif_exist(inst_name, vif_index):
            log.info("No old vif with index [%s], don't need to destroy.",
                     vif_index)
            return True

        if self.virt_driver.is_instance_running(inst_name):
            ret = self.vnet_driver.unplug_vif_from_vm(inst_name, vif_index)
            if not ret:
                log.error(
                    "Failed to unplug the virtual interface device [%s] from VM.",
                    vif_index)
                return False
        ret = self.vnet_driver.destroy_vif(inst_name, vif_index)
        if not ret:
            log.error("Failed to destroy the virtual interface device [%s].",
                      vif_index)
            return False

        self.delete_ip_info_from_database(inst_name, vif_index)

        return True
Example #14
    def set_vm_static_memory(self, inst_name, memory_max=None, memory_min=None):
        """
        set memory for a inactive domain
        :param inst_name:
        :param memory_max: size of GB
        :param memory_min: size of GB
        :return:
        """
        # dom.setMaxMemory() need dom to be inactive
        dom = self._get_domain_handler(domain_name=inst_name)
        if dom is None:
            return False
        if dom.isActive():
            log.error("Set domain max memory need it to be stopped.")
            return False

        gigabyte = 1024 * 1024  # one GiB expressed in KiB, the unit libvirt expects
        if memory_max:
            memory_size = int(memory_max) * gigabyte
        elif memory_min:
            log.info("Don't support min memory set.")
            return True
        else:
            log.error("Neither maxMemory nor minMemory is supplied.")
            return False
        # dom.setMemoryFlags(memory_size, libvirt.VIR_DOMAIN_AFFECT_CURRENT|libvirt.VIR_DOMAIN_MEM_MAXIMUM) also OK
        try:
            ret = dom.setMaxMemory(memory_size)
        except libvirtError as error:
            log.exception("Exception: %s", error)
            return False

        return ret == 0
Example #15
    def plug_vif_to_vm(self, inst_name, vif_index):
        """
        Hotplug the specified VIF, dynamically attaching it to the running VM
        @param vif_index: virtual interface index
        """
        if self._hypervisor_handler is None:
            self._hypervisor_handler = self.get_handler()

        vif_ref = self._get_vif_by_index(inst_name, vif_index)
        if vif_ref is None:
            log.error("No vif found with index [%s] when try to attach vif.",
                      vif_index)
            return False

        vm_ref = self._hypervisor_handler.xenapi.VIF.get_VM(vif_ref)
        power_status = self._hypervisor_handler.xenapi.VM.get_record(
            vm_ref)['power_state']
        allowed_opera = self._hypervisor_handler.xenapi.VIF.get_record(
            vif_ref)['allowed_operations']
        if 'plug' not in allowed_opera and power_status == 'Running':
            log.info("VIF [%s] is already pluged.", vif_index)
            return True

        try:
            self._hypervisor_handler.xenapi.VIF.plug(vif_ref)
        except Exception as error:
            log.error("Exception raised when hot-plug a VIF:%s.", error)
            return False
        return True
Example #16
    def power_on_vm(self, inst_name):
        """
        @summary: power on vm with name label inst_name
        """
        log.debug("Start power on VM [%s].", inst_name)
        if self.is_instance_running(inst_name):
            log.info("VM [%s] is already running.", inst_name)
            return True

        handler = self.get_handler()
        if handler is not None:
            vm_ref = handler.xenapi.VM.get_by_name_label(inst_name)[0]
            vm_state = handler.xenapi.VM.get_record(vm_ref)['power_state']
            try:
                if vm_state == "Suspended":
                    handler.xenapi.VM.resume(
                        vm_ref, False,
                        True)  # start_paused = False; force = True
                elif vm_state == "Paused":
                    handler.xenapi.VM.unpause(vm_ref)
                else:  # vm_state == "Halted"
                    handler.xenapi.VM.start(vm_ref, False, True)
                time.sleep(1)
            except Exception as error:
                log.error("Raise exception:'%s' while power on vm:%s", error,
                          inst_name)
                return False

    def get_device_infor(self, device_name=None):
        """
        @param device_name: name of interface in host
        @return: return a dict with key: DNS,IP,MTU,MAC,netmask,gateway,network, etc.
        """
        if self._hypervisor_handler is None:
            self._hypervisor_handler = self.get_handler()
        try:
            device_dom = self._hypervisor_handler.interfaceLookupByName(
                device_name)
        except libvirtError as error:
            log.error("Exception when get device infor: %s", error)
            return {}

        default_infor = {}

        device_tree = xmlEtree.fromstring(device_dom.XMLDesc())
        ip_element = device_tree.find("protocol[@family='ipv4']/ip")
        if ip_element is not None:
            prefix = ip_element.attrib.get('prefix')
            ip = ip_element.attrib.get('address', None)
            default_infor.setdefault('IP', ip)
            default_infor.setdefault('netmask',
                                     str(IPv4Address._make_netmask(prefix)[0]))
        else:
            default_infor.setdefault('IP', None)
            default_infor.setdefault('netmask', None)

        default_infor.setdefault('device', device_name)
        default_infor.setdefault('DNS', None)
        default_infor.setdefault('MAC', device_dom.MACString())
        default_infor.setdefault('gateway', None)

        return default_infor
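The netmask above is derived with the private IPv4Address._make_netmask helper; the same prefix-to-netmask conversion can be done with the public ipaddress API, sketched below with an illustrative prefix:

import ipaddress

prefix = 24                                                   # e.g. taken from the <ip prefix='24' .../> attribute
netmask = str(ipaddress.ip_network("0.0.0.0/{}".format(prefix)).netmask)
print(netmask)                                                # '255.255.255.0'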
Example #18
    def destroy_vif(self, inst_name, vif_index):
        """
        @param vif_index: index of virtual interface in guest VM
        """
        if self._hypervisor_handler is None:
            self._hypervisor_handler = self.get_handler()

        vif_ref = self._get_vif_by_index(inst_name, vif_index)
        if vif_ref is None:
            log.error(
                "No virtual interface device found with index [%s] when try to destroy vif.",
                vif_index)
            return False

        # 'unplug' in allowed_operations means the vif is currently plugged into the VM
        if 'unplug' in self._hypervisor_handler.xenapi.VIF.get_record(
                vif_ref)['allowed_operations']:
            log.error(
                "Error when destroying the VIF: please unplug it first or power off the VM."
            )
            return False

        try:
            self._hypervisor_handler.xenapi.VIF.destroy(vif_ref)
        except Exception as error:
            log.exception("Exceptions raised when destroy VIF:%s", error)
            return False
        return True
Example #19
def find_default_server(hosts, role, config_dict):
    """
    :param hosts: list of server IPs
    :return: the IP of the chosen default server, or None
    """
    server_infors = get_server_infors(hosts)
    # each item is a tuple (server_ip, [logicFreeNoOverCommit, physic-free-mem, logic-free-mem, physic-free-disk, logic-free-disk])
    # sort servers by (logicFreeNoOverCommit, physic-free-mem, logic-free-mem), descending
    sorted_servers = sorted(server_infors.items(),
                            key=lambda item: operator.itemgetter(0, 1, 2)(item[1]),
                            reverse=True)
    for item in sorted_servers:
        log.debug("%s", item)

    for ip, info in sorted_servers:
        # pick a server with at least 10 GB of free physical memory to start a new vm,
        # whose logical free memory (with overcommit) can hold the new vm's logical memory size,
        # and whose disk pool has at least 100 GB free
        if (info[1] > 10) and (info[2] - config_dict[role]['memory'] > 0) and (info[3] > 100):
            default_server = ip
            break
    else:
        log.error("No server is available for the new vm, please confirm it.")
        return None

    log.info("Schedual to server: %s", default_server)

    return default_server
Example #20
    def __get_disk_elements_list(self, inst_name):
        """
        :param inst_name:
        :return: a dict with key is disk index and value is disk xml element
        """
        if not self._hypervisor_handler:
            self._hypervisor_handler = self.get_handler()

        disk_dict = {}
        domain = self._get_domain_handler(domain_name=inst_name)
        if not domain:
            log.error(
                "Domain %s doesn't exist, can not get disk information.",
                inst_name)
            return []

        tree = xmlEtree.fromstring(
            domain.XMLDesc(libvirt.VIR_DOMAIN_XML_INACTIVE))
        disk_list = tree.findall("devices/disk[@device='disk']")
        for disk in disk_list:
            device_name = disk.find('target').get('dev')
            if device_name:
                disk_dict[device_name] = disk
        # a list of disk elements, sorted by disk/target/dev.
        return [disk_dict[key] for key in sorted(disk_dict)]
Example #21
    def __init__(self, user="******", passwd="admin"):
        super(HostDbDriver, self).__init__(user, passwd)
        self.db_host = DB_HOST
        self.login_url = LOGIN_URL
        self.logout_url = LOGOUT_URL
        self.url = None

        login_data = {'username': self.user, 'password': self.passwd}

        try:
            self.session = requests.Session()
            login_res = self.session.post(self.login_url, data=login_data)
            res_content = json.loads(login_res.content)

            if res_content['status'] == 1:  # the success check depends on the login response format
                log.debug("Login url [%s] check with username [%s] success.",
                          self.db_host, self.user)
            else:
                log.error("Login url [%s] check with username [%s] failed.",
                          self.db_host, self.user)
                self.session = None
        except requests.exceptions.ConnectionError as connerror:
            log.exception("Connection exception: %s", connerror)
            self.session = None
        except Exception as error:
            log.exception("Exception when init session: %s", error)
            self.session = None
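Once the constructor above has logged in, later calls presumably reuse self.session so the login cookie is sent automatically. A minimal sketch of that pattern; the host and the /api/vm/ path are hypothetical, not endpoints defined by this snippet.

import json
import requests

session = requests.Session()
session.post("http://db-host.example/login/",                  # hypothetical login URL
             data={"username": "admin", "password": "admin"})
resp = session.get("http://db-host.example/api/vm/")           # hypothetical endpoint
records = json.loads(resp.content)                             # same parsing style as above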
Example #22
    def get_handler(self):
        '''
        return the handler of the virt_driver
        '''
        if self._hypervisor_handler:
            return self._hypervisor_handler

        old = signal.signal(signal.SIGALRM, self.timeout_handler)
        signal.alarm(4)  # connection timeout set to 4 secs

        try:
            if self.hostname is None:
                url = DEFAULT_HV
                self._hypervisor_handler = libvirt.open(url)
            else:
                url = "{0}{1}{2}".format('qemu+tls://', self.hostname, '/system')
                self._hypervisor_handler = libvirt.openAuth(url, self._auth, 0)
        except Exception as error:
            log.debug("Can not connect to url: %s, error: %s. Retrying...", url, error)
            signal.alarm(4)
            try:
                url = "{0}{1}{2}".format('qemu+tcp://', self.hostname, '/system')
                self._hypervisor_handler = libvirt.openAuth(url, self._auth, 0)
            except Exception as error:
                log.error("Can not connect to url: %s, error: %s ", url, error)
                return None
        finally:
            signal.alarm(0)
            signal.signal(signal.SIGALRM, old)

        if not self._hypervisor_handler:
            return None

        return self._hypervisor_handler
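The 4-second timeout in get_handler relies on a self.timeout_handler callback that is not shown in this example. Below is a minimal sketch of what such a handler typically looks like, raising an exception so the blocking libvirt.open() call is interrupted; this is an assumption about the missing helper, not its actual code.

import signal


def timeout_handler(signum, frame):
    # signal handlers receive the signal number and the current stack frame
    raise TimeoutError("connection attempt timed out")


old = signal.signal(signal.SIGALRM, timeout_handler)
signal.alarm(4)                          # deliver SIGALRM after 4 seconds
try:
    pass                                 # a blocking call such as libvirt.open(url) would go here
finally:
    signal.alarm(0)                      # cancel any pending alarm
    signal.signal(signal.SIGALRM, old)   # restore the previous handler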
Example #23
    def unplug_vif_from_vm(self, inst_name, vif_index):
        """
        Hot-unplug the specified VIF, dynamically detaching it from the running VM
        @param vif_index: virtual interface index
        @note It should check the power_state before use this API
        """
        if self._hypervisor_handler is None:
            self._hypervisor_handler = self.get_handler()

        vif_ref = self._get_vif_by_index(inst_name, vif_index)
        if vif_ref is None:
            log.error("No vif found with index [%s] when try to detach vif.",
                      vif_index)
            return False

        vm_ref = self._hypervisor_handler.xenapi.VIF.get_VM(vif_ref)
        power_status = self._hypervisor_handler.xenapi.VM.get_record(
            vm_ref)['power_state']
        allowed_opera = self._hypervisor_handler.xenapi.VIF.get_record(
            vif_ref)['allowed_operations']
        if 'unplug' not in allowed_opera and power_status == 'Running':
            log.info("VIF [%s] is already unpluged.", vif_index)
            return True

        try:
            self._hypervisor_handler.xenapi.VIF.unplug(vif_ref)
        except Exception as error:
            log.exception("Exceptions raised when unplug a VIF:%s", error)
            return False
        return True
Example #24
    def power_off_vm(self, inst_name):
        """
        @see: void shutdown (session ref session_id, VM ref vm); it attempts a clean
        shutdown of the VM first and, if that fails, performs a hard shutdown.
        """
        log.debug("Start power off vm [%s].", inst_name)
        if self.is_instance_halted(inst_name):
            log.info("VM [%s] is already not running.", inst_name)
            return True

        handler = self.get_handler()
        if handler is None:
            log.error("Can not get handler when try to power off VM [%s].",
                      inst_name)
            return False

        vm_ref = handler.xenapi.VM.get_by_name_label(inst_name)[0]

        try:
            handler.xenapi.VM.shutdown(vm_ref)
            time.sleep(0.5)
        except Exception as error:
            log.exception("Exception raised: %s when shutdown VM [%s].", error,
                          inst_name)
            return False
Example #25
    def set_vm_vcpu_live(self, inst_name, vcpu_num):
        """
        set the vcpu numbers for a running VM; and set vcpus in the config file when domain is deactive
        :param inst_name:
        :param vcpu_num: should be str of a int number
        :return: True or False
        """
        dom = self._get_domain_handler(domain_name=inst_name)
        if dom is None:
            return False

        vcpu_num = int(vcpu_num)
        if vcpu_num > self.get_vm_vcpu_max(inst_name):
            log.error("vCpus number [%s] exceed the limit of max vcpus: %s", vcpu_num, dom.maxVcpus())
            return False

        try:
            if dom.isActive():
                # dom.setVcpus(vcpu_num) only affects the live domain; the setting is lost after power off
                ret = dom.setVcpusFlags(vcpu_num, libvirt.VIR_DOMAIN_AFFECT_LIVE | libvirt.VIR_DOMAIN_AFFECT_CONFIG)
            else:
                ret = dom.setVcpusFlags(vcpu_num, libvirt.VIR_DOMAIN_AFFECT_CURRENT)
        except libvirtError as error:
            log.exception("Exceptions when set vcpu lively: %s", error)
            return False

        return ret == 0
Example #26
    def _get_root_handler(self):
        """
        Return the root handler of libvirt
        """
        if self._hypervisor_root_handler:
            return self._hypervisor_root_handler

        if self.hostname is None:
            hostname = "localhost"
        else:
            hostname = self.hostname
        url = "{0}{1}{2}".format('qemu+tls://', hostname, '/system')
        old = signal.signal(signal.SIGALRM, self.timeout_handler)
        signal.alarm(4)  # connection timeout set to 4 secs
        try:
            self._hypervisor_root_handler = libvirt.openAuth(url, self._auth, 0)
        except Exception as error:
            log.debug("Can not connect to %s, error: %s. Retrying...", url, error)
            url = "{0}{1}{2}".format('qemu+tcp://', hostname, '/system')
            signal.alarm(4)
            try:
                self._hypervisor_root_handler = libvirt.openAuth(url, self._auth, 0)
            except Exception as error:
                log.error("Can not connect to url:%s, error: %s", url, error)
                return None

        finally:
            signal.alarm(0)
            signal.signal(signal.SIGALRM, old)

        if self._hypervisor_root_handler:
            return self._hypervisor_root_handler

        return None
Example #27
    def get_disk_size(self, inst_name, device_num):
        """
        :param inst_name: VM name
        :param device_num: the disk index number
        :return: return size in GB, or 0 if no device found
        """
        disk_list = self.__get_disk_elements_list(inst_name)
        try:
            disk_element = disk_list[int(device_num)]
        except IndexError:
            log.error("No disk found with device number: %s", device_num)
            return 0

        source = disk_element.find("source")
        if source is None:
            return 0

        file_path = source.get("file", None)
        try:
            volume_obj = self._hypervisor_handler.storageVolLookupByPath(file_path)
            # volume_obj.info() returns [type, capacity, allocation(used)]
            return volume_obj.info()[1] / 1024.0 / 1024.0 / 1024.0
        except (TypeError, IndexError) as error:
            log.exception("Exceptions raise when get disk size: %s", error)
            return 0
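volume_obj.info() reports capacity in bytes, so dividing by 1024 three times yields GiB; a quick worked check with an illustrative capacity:

capacity_bytes = 21474836480                               # a 20 GiB volume, for illustration
size_gb = capacity_bytes / 1024.0 / 1024.0 / 1024.0
assert size_gb == 20.0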
Example #28
    def create_instance(self, vm_name, reference_vm, storage_pool=None):
        """
        :param vm_name: new vm name
        :param reference_vm: template name
        :param storage_pool: pool name in which the new vm disk will be put
        :return:
        """
        log.info("enter create_instance %s", vm_name)
        if self.is_instance_exists(vm_name):
            log.error("Already exist domain: %s", vm_name)
            return False

        hv_handler = self.get_handler()
        template_dom = self._get_domain_handler(domain_name=reference_vm)
        template_xml = template_dom.XMLDesc(libvirt.VIR_DOMAIN_XML_SECURE)
        tree = xmlEtree.fromstring(template_xml)
        name = tree.find("name")
        name.text = vm_name
        uuid = tree.find('uuid')
        tree.remove(uuid)

        pool_path = None
        if storage_pool:
            pool_path = self.get_target_path_via_pool(storage_pool)

        # remove MAC for interface
        for interface in tree.findall("devices/interface"):
            elm = interface.find("mac")
            interface.remove(elm)

        # clone disk for new domain
        for disk in tree.findall("devices/disk[@device='disk']"):
            source_elm = disk.find('source')
            source_file = source_elm.get('file')
            if source_file:
                suffix = str(os.path.basename(source_file)).split(".")[-1]
                target_file = ".".join([self._get_available_vdisk_name(vm_name), suffix])
                # set the clone path under the storage_pool base path so the cloned disk ends up there
                if pool_path is not None:
                    clone_path = os.path.join(pool_path, target_file)
                    self.clone_disk_in_pool(source_file, target_file, storage_pool)
                else:
                    base_path, new_pool_name = self.get_target_path_via_file(source_file)
                    clone_path = os.path.join(base_path, target_file)
                    self.clone_disk_in_pool(source_file, target_file, new_pool_name)
                log.info("Cloned disk from %s to %s", source_file, clone_path)
                source_elm.set('file', clone_path)

        try:
            # if failed it will raise libvirtError, return value is always a Domain object
            new_dom = hv_handler.defineXML(xmlEtree.tostring(tree))
        except libvirtError:
            log.error("Create domain %s failed when define by xml.", vm_name)
            return False

        new_dom.setAutostart(1)

        return True
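create_instance delegates the actual copy to clone_disk_in_pool, which is not shown in this example. One common way to implement that kind of in-pool cloning is libvirt's createXMLFrom; the sketch below is an assumption about how such a helper could look, not the project's actual code, and the qcow2 format is illustrative.

import libvirt


def clone_volume(conn, pool_name, source_path, target_name):
    """Clone an existing volume into the named pool via createXMLFrom (a sketch)."""
    pool = conn.storagePoolLookupByName(pool_name)
    source_vol = conn.storageVolLookupByPath(source_path)
    capacity_bytes = source_vol.info()[1]          # info() returns [type, capacity, allocation]
    vol_xml = """
    <volume>
        <name>%s</name>
        <capacity unit="bytes">%d</capacity>
        <target><format type='qcow2'/></target>
    </volume>""" % (target_name, capacity_bytes)
    return pool.createXMLFrom(vol_xml, source_vol, 0)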
Example #29
    def create_new_vif(self,
                       inst_name,
                       vif_index,
                       device_name=None,
                       network=None,
                       ip=None):
        """
        create a new virtual interface on the target VM
        @param inst_name: VM name
        @param device_name: vswitch (with the host-device attached) which the vif attaches to
        @param vif_index: vif index
        @param network: bridge name
        @param ip: IPv4 address used to derive the vif's MAC address
        """
        log.info(
            "Start to add a new virtual interface device with index:[%s] to VM [%s]",
            vif_index, inst_name)

        if ip:
            mac_strs = ['%02x' % int(num) for num in ip.split(".")]
            mac_addr = VM_MAC_PREFIX + ":%s:%s:%s:%s" % tuple(mac_strs)
        else:
            mac_addr = None

        log.debug("Create VIF [%s] with IP: %s,  MAC: %s.", vif_index, ip,
                  mac_addr)
        new_vif = self.vnet_driver.create_new_vif(inst_name,
                                                  vif_index,
                                                  device_name,
                                                  network,
                                                  MAC=mac_addr)
        if new_vif is not None:

            self.update_ip_infor_to_database(inst_name,
                                             vif_index=vif_index,
                                             ip=ip)

            if self.virt_driver.is_instance_running(inst_name):
                ret = self.vnet_driver.plug_vif_to_vm(inst_name, vif_index)
                if ret:
                    log.info(
                        "New virtual interface device [%s] attached to VM [%s] successfully.",
                        vif_index, inst_name)
                    return True
                else:
                    log.error(
                        "New virtual interface device attached failed to VM [%s].",
                        inst_name)
                    return False
            else:
                log.info(
                    "New virtual interface device created successfully, but it was not plugged in because the VM is powered off."
                )
                return True

        log.error("Can not create new virtual interface device [%s].",
                  vif_index)
        return False
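The MAC address above is built by formatting each IPv4 octet as two hex digits and appending them to VM_MAC_PREFIX; a quick worked example with an illustrative two-byte prefix:

VM_MAC_PREFIX = "52:54"                        # illustrative prefix, not the project's actual value
ip = "192.168.1.122"
mac_strs = ['%02x' % int(num) for num in ip.split(".")]
mac_addr = VM_MAC_PREFIX + ":%s:%s:%s:%s" % tuple(mac_strs)
print(mac_addr)                                # 52:54:c0:a8:01:7a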
Example #30
    def __nonzero__(self):
        if self.virt_driver and self.vnet_driver and self.db_driver:
            return True
        else:
            if not self.virt_driver or not self.vnet_driver:
                log.error("Can not connect to virtual driver.")
            if not self.db_driver:
                log.error("Can not connect to DB driver.")
            return False
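__nonzero__ is only consulted by Python 2 truth testing; under Python 3 the equivalent hook is __bool__. If this class is ever run on Python 3, a one-line alias inside the same class body (an addition assumed here, not present in the original) keeps `if driver_obj:` working:

    __bool__ = __nonzero__     # Python 3 uses __bool__ for truth testing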