Example #1
def __init__(self):
    self.host_instance = XendNode.instance()
    self.host_cpus = self.host_instance.get_host_cpu_refs()

    pif_refs = self.host_instance.get_PIF_refs()
    self.host_pifs = []
    for pif_ref in pif_refs:
        pif = XendAPIStore.get(pif_ref, "PIF")
        self.host_pifs.append(pif)
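A minimal usage sketch for the constructor above; the wrapper class name HostInfo is hypothetical, since the page only shows the method body:

host = HostInfo()  # hypothetical class containing the __init__ above
print "host CPU refs:", len(host.host_cpus)
for pif in host.host_pifs:
    print "cached PIF object:", pif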
Example #2
    def getHostRecord(self):
#         self.login()
# #         hosts = self.proxy.host.get_all(self.session).get('Value', '')
#         hostRecord = self.proxy.host.get_record_lite(self.session).get('Value', {})
# #         print self.proxy.host.get_record_lite(self.session, host)
#         return hostRecord
        host_uuid = XendNode.instance().uuid
        in_pool = BNPoolAPI.get_in_pool()
        return {'uuid' : host_uuid,
                'in_pool' : in_pool
                }
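A hedged sketch of consuming the record; api stands in for an instance of the enclosing class and is an assumed name:

record = api.getHostRecord()  # api: hypothetical instance of the class above
print "host uuid:", record['uuid']
print "in pool:", record['in_pool']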
Example #3
    def get_memory_free(self):
        node = XendNode.instance()
        xendom = XendDomain.instance()
        doms = xendom.list()
        doms_mem_total = 0
        for dom in doms:
            if cmp(dom.get_uuid(), DOM0_UUID) == 0:
                continue
            dominfo = xendom.get_vm_by_uuid(dom.get_uuid())
            doms_mem_total += dominfo.get_memory_dynamic_max()

        # Use the XendNode singleton fetched above rather than relying on a
        # self.host_instance attribute that this snippet never sets.
        return (node.xc.physinfo()["total_memory"] * 1024 - doms_mem_total) / 1024
Example #4
    def _process_event(self, udev_event):
        try:
            if (udev_event.get('SUBSYSTEM', None) == 'pci'):
                pci_name = udev_event.get('PCI_SLOT_NAME', None)
                if (udev_event['ACTION'] == 'add'):
                    log.info("Adding pci device %s", pci_name)
                    XendNode.instance().add_PPCI(pci_name)
                elif (udev_event['ACTION'] == 'remove'):
                    log.info("Removing pci device %s", pci_name)
                    XendNode.instance().remove_PPCI(pci_name)

            elif (udev_event.get('SUBSYSTEM', None) == 'scsi'):
                hctl = None
                devpath = udev_event.get('DEVPATH', None)
                if devpath:
                    hctl = devpath.split('/')[-1]
                    if len(hctl.split(':')) != 4:
                        hctl = None
                if hctl is None:
                    # If an HCTL could not be extracted from the udev
                    # event, ignore the event.
                    log.warn("Invalid udev event about scsi received")
                    return

                if (udev_event['ACTION'] == 'add'):
                    log.info("Adding scsi device %s", hctl)
                    XendNode.instance().add_PSCSI(hctl)
                elif (udev_event['ACTION'] == 'remove'):
                    log.info("Removing scsi device %s", hctl)
                    XendNode.instance().remove_PSCSI(hctl)

            elif (udev_event.get('SUBSYSTEM', None) == 'net'):
                interface = udev_event.get('INTERFACE', None)
                if (udev_event['ACTION'] == 'add'):
                    log.info("Adding net device %s", interface)
                    XendNode.instance().add_network(interface)
                elif (udev_event['ACTION'] == 'remove'):
                    log.info("Removing net device %s", interface)
                    XendNode.instance().remove_network(interface)

        except Exception, e:
            log.warn("error while processing udev event(): %s" % str(e))
Example #5
def get_host(self):
    from xen.xend import XendNode
    return XendNode.instance().get_uuid()
Example #6
def get_host(self):
    from xen.xend import XendNode
    return XendNode.instance().get_uuid()
Example #7
    if dominfo:
        dominfo.resume()
    else:
        dominfo = xd.restore_(vmconfig)

    # Repin domain vcpus if a target node number was specified.
    # This is done prior to memory allocation to aid in memory
    # distribution on NUMA systems.
    nodenr = -1
    for i,l in enumerate(vmconfig):
        if type(l) == type([]):
            if l[0] == 'node':
                nodenr = int(l[1])

    if nodenr >= 0:
        node_to_cpu = XendNode.instance().xc.physinfo()['node_to_cpu']
        if nodenr < len(node_to_cpu):
            for v in range(0, dominfo.info['VCPUs_max']):
                xc.vcpu_setaffinity(dominfo.domid, v, node_to_cpu[nodenr])

    store_port   = dominfo.getStorePort()
    console_port = dominfo.getConsolePort()

    assert store_port
    assert console_port

    # if hvm, pass mem size to calculate the store_mfn
    image_cfg = dominfo.info.get('image', {})
    is_hvm = dominfo.info.is_hvm()
    if is_hvm:
        apic = int(dominfo.info['platform'].get('apic', 0))
Example #8
def __init__(self):
    SrvDir.__init__(self)
    self.xn = XendNode.instance()
    self.add('dmesg', 'SrvDmesg')
    self.add('log', 'SrvXendLog')
Example #9
def restore(xd, fd, dominfo=None, paused=False, relocating=False):
    signature = read_exact(fd, len(SIGNATURE),
                           "not a valid guest state file: signature read")
    if signature != SIGNATURE:
        raise XendError("not a valid guest state file: found '%s'" % signature)

    l = read_exact(fd, sizeof_int,
                   "not a valid guest state file: config size read")
    vmconfig_size = unpack("!i", l)[0]
    vmconfig_buf = read_exact(fd, vmconfig_size,
                              "not a valid guest state file: config read")

    p = sxp.Parser()
    p.input(vmconfig_buf)
    if not p.ready:
        raise XendError("not a valid guest state file: config parse")

    vmconfig = p.get_val()

    if not relocating:
        domconfig = XendConfig(sxp_obj=vmconfig)
        othervm = xd.domain_lookup_nr(domconfig["name_label"])
        if othervm is None or othervm.domid is None:
            othervm = xd.domain_lookup_nr(domconfig["uuid"])
        if othervm is not None and othervm.domid is not None:
            raise VmError("Domain '%s' already exists with ID '%d'" %
                          (domconfig["name_label"], othervm.domid))

    if dominfo:
        dominfo.resume()
    else:
        dominfo = xd.restore_(vmconfig)

    # Repin domain vcpus if a target node number was specified.
    # This is done prior to memory allocation to aid in memory
    # distribution on NUMA systems.
    nodenr = -1
    for i, l in enumerate(vmconfig):
        if type(l) == type([]):
            if l[0] == 'node':
                nodenr = int(l[1])

    if nodenr >= 0:
        node_to_cpu = XendNode.instance().xc.physinfo()['node_to_cpu']
        if nodenr < len(node_to_cpu):
            for v in range(0, dominfo.info['VCPUs_max']):
                xc.vcpu_setaffinity(dominfo.domid, v, node_to_cpu[nodenr])

    store_port = dominfo.getStorePort()
    console_port = dominfo.getConsolePort()

    assert store_port
    assert console_port

    # if hvm, pass mem size to calculate the store_mfn
    image_cfg = dominfo.info.get('image', {})
    is_hvm = dominfo.info.is_hvm()
    if is_hvm:
        apic = int(dominfo.info['platform'].get('apic', 0))
        pae = int(dominfo.info['platform'].get('pae', 0))
        log.info("restore hvm domain %d, apic=%d, pae=%d", dominfo.domid, apic,
                 pae)
    else:
        apic = 0
        pae = 0

    try:
        restore_image = image.create(dominfo, dominfo.info)
        memory = restore_image.getRequiredAvailableMemory(
            dominfo.info['memory_dynamic_max'] / 1024)
        maxmem = restore_image.getRequiredAvailableMemory(
            dominfo.info['memory_static_max'] / 1024)
        shadow = restore_image.getRequiredShadowMemory(
            dominfo.info['shadow_memory'] * 1024,
            dominfo.info['memory_static_max'] / 1024)

        log.debug("restore:shadow=0x%x, _static_max=0x%x, _static_min=0x%x, ",
                  dominfo.info['shadow_memory'],
                  dominfo.info['memory_static_max'],
                  dominfo.info['memory_static_min'])

        # Round shadow up to a multiple of a MiB, as shadow_mem_control
        # takes MiB and we must not round down and end up under-providing.
        shadow = ((shadow + 1023) / 1024) * 1024
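        # Worked example: shadow = 1500 (KiB) gives (1500 + 1023) / 1024 = 2
        # under Python 2 integer division, so shadow becomes 2048 KiB (2 MiB).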

        # set memory limit
        xc.domain_setmaxmem(dominfo.getDomid(), maxmem)

        balloon.free(memory + shadow)

        shadow_cur = xc.shadow_mem_control(dominfo.getDomid(), shadow / 1024)
        dominfo.info['shadow_memory'] = shadow_cur

        cmd = map(str, [
            xen.util.auxbin.pathTo(XC_RESTORE), fd,
            dominfo.getDomid(), store_port, console_port,
            int(is_hvm), pae, apic
        ])
        log.debug("[xc_restore]: %s", string.join(cmd))

        handler = RestoreInputHandler()

        forkHelper(cmd, fd, handler.handler, True)

        # We don't want to pass this fd to any other children -- we
        # might need to recover the disk space that backs it.
        try:
            flags = fcntl.fcntl(fd, fcntl.F_GETFD)
            flags |= fcntl.FD_CLOEXEC
            fcntl.fcntl(fd, fcntl.F_SETFD, flags)
        except:
            pass

        if handler.store_mfn is None:
            raise XendError('Could not read store MFN')

        if not is_hvm and handler.console_mfn is None:
            raise XendError('Could not read console MFN')

        # Get qemu state and create a tmp file for dm restore.
        # Even PV guests may have QEMU state, but it's not currently
        # used, so only bother with HVM for now.
        if is_hvm:
            qemu_signature = read_exact(fd, len(QEMU_SIGNATURE),
                                        "invalid device model signature read")
            if qemu_signature != QEMU_SIGNATURE:
                raise XendError("not a valid device model state: found '%s'" %
                                qemu_signature)
            qemu_fd = os.open("/var/lib/xen/qemu-save.%d" % dominfo.getDomid(),
                              os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
            while True:
                buf = os.read(fd, dm_batch)
                if len(buf):
                    write_exact(qemu_fd, buf,
                                "could not write dm state to tmp file")
                else:
                    break
            os.close(qemu_fd)

        restore_image.setCpuid()

        os.read(fd, 1)  # Wait for source to close connection

        dominfo.completeRestore(handler.store_mfn, handler.console_mfn)

        #
        # We shouldn't hold the domains_lock over a waitForDevices
        # As this function sometime gets called holding this lock,
        # we must release it and re-acquire it appropriately
        #
        from xen.xend import XendDomain

        lock = True
        try:
            XendDomain.instance().domains_lock.release()
        except:
            lock = False

        try:
            dominfo.waitForDevices()  # Wait for backends to set up
        except Exception, exn:
            log.exception(exn)

        if lock:
            XendDomain.instance().domains_lock.acquire()

        if not paused:
            dominfo.unpause()

        return dominfo
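A hedged invocation sketch matching the signature above; the checkpoint path is illustrative, not taken from the page:

import os
from xen.xend import XendDomain

xd = XendDomain.instance()
fd = os.open("/var/lib/xen/save/mydomain.chk", os.O_RDONLY)  # path assumed
try:
    dominfo = restore(xd, fd, paused=False)
finally:
    os.close(fd)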
Example #10
def __init__(self):
    SrvDir.__init__(self)
    self.xn = XendNode.instance()
    self.add('dmesg', 'SrvDmesg')
    self.add('log', 'SrvXendLog')
Example #11
def restore(xd, fd, dominfo = None, paused = False, relocating = False):
    signature = read_exact(fd, len(SIGNATURE),
        "not a valid guest state file: signature read")
    if signature != SIGNATURE:
        raise XendError("not a valid guest state file: found '%s'" %
                        signature)

    l = read_exact(fd, sizeof_int,
                   "not a valid guest state file: config size read")
    vmconfig_size = unpack("!i", l)[0]
    vmconfig_buf = read_exact(fd, vmconfig_size,
        "not a valid guest state file: config read")

    p = sxp.Parser()
    p.input(vmconfig_buf)
    if not p.ready:
        raise XendError("not a valid guest state file: config parse")

    vmconfig = p.get_val()

    if not relocating:
        domconfig = XendConfig(sxp_obj = vmconfig)
        othervm = xd.domain_lookup_nr(domconfig["name_label"])
        if othervm is None or othervm.domid is None:
            othervm = xd.domain_lookup_nr(domconfig["uuid"])
        if othervm is not None and othervm.domid is not None:
            raise VmError("Domain '%s' already exists with ID '%d'" %
                          (domconfig["name_label"], othervm.domid))

    if dominfo:
        dominfo.resume()
    else:
        dominfo = xd.restore_(vmconfig)

    # Repin domain vcpus if a target node number was specified.
    # This is done prior to memory allocation to aid in memory
    # distribution on NUMA systems.
    nodenr = -1
    for i,l in enumerate(vmconfig):
        if type(l) == type([]):
            if l[0] == 'node':
                nodenr = int(l[1])

    if nodenr >= 0:
        node_to_cpu = XendNode.instance().xc.physinfo()['node_to_cpu']
        if nodenr < len(node_to_cpu):
            for v in range(0, dominfo.info['VCPUs_max']):
                xc.vcpu_setaffinity(dominfo.domid, v, node_to_cpu[nodenr])

    store_port   = dominfo.getStorePort()
    console_port = dominfo.getConsolePort()

    assert store_port
    assert console_port

    # if hvm, pass mem size to calculate the store_mfn
    image_cfg = dominfo.info.get('image', {})
    is_hvm = dominfo.info.is_hvm()
    if is_hvm:
        apic = int(dominfo.info['platform'].get('apic', 0))
        pae  = int(dominfo.info['platform'].get('pae',  0))
        log.info("restore hvm domain %d, apic=%d, pae=%d",
                 dominfo.domid, apic, pae)
    else:
        apic = 0
        pae  = 0

    try:
        restore_image = image.create(dominfo, dominfo.info)
        memory = restore_image.getRequiredAvailableMemory(
            dominfo.info['memory_dynamic_max'] / 1024)
        maxmem = restore_image.getRequiredAvailableMemory(
            dominfo.info['memory_static_max'] / 1024)
        shadow = restore_image.getRequiredShadowMemory(
            dominfo.info['shadow_memory'] * 1024,
            dominfo.info['memory_static_max'] / 1024)

        log.debug("restore:shadow=0x%x, _static_max=0x%x, _static_min=0x%x, ",
                  dominfo.info['shadow_memory'],
                  dominfo.info['memory_static_max'],
                  dominfo.info['memory_static_min'])

        # Round shadow up to a multiple of a MiB, as shadow_mem_control
        # takes MiB and we must not round down and end up under-providing.
        shadow = ((shadow + 1023) / 1024) * 1024

        # set memory limit
        xc.domain_setmaxmem(dominfo.getDomid(), maxmem)

        balloon.free(memory + shadow)

        shadow_cur = xc.shadow_mem_control(dominfo.getDomid(), shadow / 1024)
        dominfo.info['shadow_memory'] = shadow_cur

        cmd = map(str, [xen.util.auxbin.pathTo(XC_RESTORE),
                        fd, dominfo.getDomid(),
                        store_port, console_port, int(is_hvm), pae, apic])
        log.debug("[xc_restore]: %s", string.join(cmd))

        handler = RestoreInputHandler()

        forkHelper(cmd, fd, handler.handler, True)

        # We don't want to pass this fd to any other children -- we 
        # might need to recover the disk space that backs it.
        try:
            flags = fcntl.fcntl(fd, fcntl.F_GETFD)
            flags |= fcntl.FD_CLOEXEC
            fcntl.fcntl(fd, fcntl.F_SETFD, flags)
        except:
            pass

        if handler.store_mfn is None:
            raise XendError('Could not read store MFN')

        if not is_hvm and handler.console_mfn is None:
            raise XendError('Could not read console MFN')        

        # Get qemu state and create a tmp file for dm restore.
        # Even PV guests may have QEMU state, but it's not currently
        # used, so only bother with HVM for now.
        if is_hvm:
            qemu_signature = read_exact(fd, len(QEMU_SIGNATURE),
                                        "invalid device model signature read")
            if qemu_signature != QEMU_SIGNATURE:
                raise XendError("not a valid device model state: found '%s'" %
                                qemu_signature)
            qemu_fd = os.open("/var/lib/xen/qemu-save.%d" % dominfo.getDomid(),
                              os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
            while True:
                buf = os.read(fd, dm_batch)
                if len(buf):
                    write_exact(qemu_fd, buf,
                                "could not write dm state to tmp file")
                else:
                    break
            os.close(qemu_fd)

        restore_image.setCpuid()

        os.read(fd, 1)           # Wait for source to close connection
        
        dominfo.completeRestore(handler.store_mfn, handler.console_mfn)

        #
        # We shouldn't hold the domains_lock over a waitForDevices
        # As this function sometime gets called holding this lock,
        # we must release it and re-acquire it appropriately
        #
        from xen.xend import XendDomain

        lock = True
        try:
            XendDomain.instance().domains_lock.release()
        except:
            lock = False

        try:
            dominfo.waitForDevices() # Wait for backends to set up
        except Exception, exn:
            log.exception(exn)

        if lock:
            XendDomain.instance().domains_lock.acquire()

        if not paused:
            dominfo.unpause()

        return dominfo
Example #12
    if dominfo:
        dominfo.resume()
    else:
        dominfo = xd.restore_(vmconfig)

    # Repin domain vcpus if a target node number was specified.
    # This is done prior to memory allocation to aid in memory
    # distribution on NUMA systems.
    nodenr = -1
    for i, l in enumerate(vmconfig):
        if type(l) == type([]):
            if l[0] == 'node':
                nodenr = int(l[1])

    if nodenr >= 0:
        node_to_cpu = XendNode.instance().xc.physinfo()['node_to_cpu']
        if nodenr < len(node_to_cpu):
            for v in range(0, dominfo.info['VCPUs_max']):
                xc.vcpu_setaffinity(dominfo.domid, v, node_to_cpu[nodenr])

    store_port = dominfo.getStorePort()
    console_port = dominfo.getConsolePort()

    assert store_port
    assert console_port

    # if hvm, pass mem size to calculate the store_mfn
    image_cfg = dominfo.info.get('image', {})
    is_hvm = dominfo.info.is_hvm()
    if is_hvm:
        apic = int(dominfo.info['platform'].get('apic', 0))
Example #13
    def _process_event(self, udev_event):
        try:
            if (udev_event.get('SUBSYSTEM', None) == 'pci'):
                pci_name = udev_event.get('PCI_SLOT_NAME', None)
                if (udev_event['ACTION'] == 'add'):
                    log.info("Adding pci device %s", pci_name)
                    XendNode.instance().add_PPCI(pci_name)
                elif (udev_event['ACTION'] == 'remove'):
                    log.info("Removing pci device %s", pci_name)
                    XendNode.instance().remove_PPCI(pci_name)

            elif (udev_event.get('SUBSYSTEM', None) == 'scsi'):
                hctl = None
                devpath = udev_event.get('DEVPATH', None)
                if devpath:
                    hctl = devpath.split('/')[-1]
                    if len(hctl.split(':')) != 4:
                        hctl = None
                if hctl is None:
                    # If an HCTL could not be extracted from the udev
                    # event, ignore the event.
                    log.warn("Invalid udev event about scsi received")
                    return

                if (udev_event['ACTION'] == 'add'):
                    log.info("Adding scsi device %s", hctl)
                    XendNode.instance().add_PSCSI(hctl)
                elif (udev_event['ACTION'] == 'remove'):
                    log.info("Removing scsi device %s", hctl)
                    XendNode.instance().remove_PSCSI(hctl)

            elif (udev_event.get('SUBSYSTEM', None) == 'net'):
                interface = udev_event.get('INTERFACE', None)
                if (udev_event['ACTION'] == 'add'):
                    log.info("Adding net device %s", interface)
                    XendNode.instance().add_network(interface)
                elif (udev_event['ACTION'] == 'remove'):
                    log.info("Removing net device %s", interface)
                    XendNode.instance().remove_network(interface)

        except Exception, e:
            log.warn("error while processing udev event(): %s" % str(e))
Example #14
from tools import *
from default_record import *
from xen.xend import uuid
from xen.xend import XendDomain, XendNode
from xen.xend import BNVMAPI, BNStorageAPI
from xen.xend.server.netif import randomMAC
from xen.xend.ConfigUtil import getConfigVar
from xen.xend.XendAPIConstants import *
from xen.xend.XendAuthSessions import instance as auth_manager
from xen.xend.XendLogging import log_unittest, init

init("/var/log/xen/unittest.log", "DEBUG", log_unittest)
log = log_unittest

MB = 1024 * 1024
XEND_NODE = XendNode.instance()
XEND_DOMAIN = XendDomain.instance()
VMAPI = BNVMAPI.instance()
STORAGEAPI = BNStorageAPI.instance()
SESSION = "SessionForTest"
# SESSION = VMAPI.session_login_with_password('root', 'onceas').get('Value')
SR_TYPE = 'ocfs2'
ISO_SR_TYPE = 'gpfs_iso'
VM_VDI_MAP = {}

if getConfigVar('compute', 'VM', 'disk_limit'):
    DISK_LIMIT = int(getConfigVar('compute', 'VM', 'disk_limit'))
else:
    DISK_LIMIT = 6

if getConfigVar('compute', 'VM', 'interface_limit'):
    INTERFACE_LIMIT = int(getConfigVar('compute', 'VM', 'interface_limit'))
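A hedged sketch of exercising these module-level singletons in such a test; the credentials are placeholders, and session_login_with_password is taken from the commented line above:

session = VMAPI.session_login_with_password('root', 'placeholder').get('Value')
log.debug("testing against host %s", XEND_NODE.uuid)
log.debug("disk limit for new VMs: %d", DISK_LIMIT)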