Example #1
    def set_CPU_Affinity(self, vcpu, cpumap):
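        """Pin the given vcpu (or every vcpu, if vcpu is the keyword 'all')
        to the physical CPUs listed in cpumap."""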
        domid = self.xend_domain_instance.getDomid()
        dominfo = self.xend_domain_instance
        if not dominfo:
            raise XendInvalidDomain(str(domid))

        # if vcpu is keyword 'all', apply the cpumap to all vcpus
        if str(vcpu).lower() == "all":
            vcpus = range(0, int(dominfo.getVCpuCount()))
        else:
            vcpus = [int(vcpu)]

        # set the same cpumask for all vcpus
        rc = 0
        cpus = dominfo.getCpus()
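        # cpumap arrives as a comma-separated string of CPU numbers, e.g. "0,2".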
        cpumap = map(int, cpumap.split(","))
        for v in vcpus:
            try:
                if dominfo._stateGet() in (DOM_STATE_RUNNING,
                                           DOM_STATE_PAUSED):
                    rc = xc.vcpu_setaffinity(domid, v, cpumap)
                cpus[v] = cpumap
            except Exception, ex:
                log.exception(ex)
                raise XendError("Cannot pin vcpu: %d to cpu: %s - %s" % \
                                (v, cpumap, str(ex)))
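
A minimal usage sketch (hypothetical caller: dom stands for whatever object
exposes set_CPU_Affinity; the vcpu and CPU numbers are illustrative):

    # Pin vcpu 1 to physical CPUs 0 and 2, then pin every vcpu to CPU 0.
    dom.set_CPU_Affinity(1, "0,2")
    dom.set_CPU_Affinity("all", "0")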
Example #2
    def domain_pincpu(self, domid, vcpu, cpumap):
        """Set which cpus vcpu can use

        @param cpumap:  string repr of list of usable cpus
        """
        dominfo = self.domain_lookup_by_name_or_id_nr(domid)
        if not dominfo:
            raise XendInvalidDomain(str(domid))

        try:
            return xc.vcpu_setaffinity(dominfo.getDomid(), vcpu, cpumap)
        except Exception, ex:
            raise XendError(str(ex))
Example #3
        dominfo = xd.restore_(vmconfig)

    # Repin domain vcpus if a target node number was specified.
    # This is done prior to memory allocation to aid in memory
    # distribution for NUMA systems.
    nodenr = -1
    for i,l in enumerate(vmconfig):
        if type(l) == type([]):
            if l[0] == 'node':
                nodenr = int(l[1])

    if nodenr >= 0:
        node_to_cpu = XendNode.instance().xc.physinfo()['node_to_cpu']
        if nodenr < len(node_to_cpu):
            for v in range(0, dominfo.info['VCPUs_max']):
                xc.vcpu_setaffinity(dominfo.domid, v, node_to_cpu[nodenr])

    store_port   = dominfo.getStorePort()
    console_port = dominfo.getConsolePort()

    assert store_port
    assert console_port

    # if hvm, pass mem size to calculate the store_mfn
    image_cfg = dominfo.info.get('image', {})
    is_hvm = dominfo.info.is_hvm()
    if is_hvm:
        apic = int(dominfo.info['platform'].get('apic', 0))
        pae  = int(dominfo.info['platform'].get('pae',  0))
        log.info("restore hvm domain %d, apic=%d, pae=%d",
                 dominfo.domid, apic, pae)
Example #4
def restore(xd, fd, dominfo=None, paused=False, relocating=False):
    signature = read_exact(fd, len(SIGNATURE),
                           "not a valid guest state file: signature read")
    if signature != SIGNATURE:
        raise XendError("not a valid guest state file: found '%s'" % signature)

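    # The config size is serialized as a big-endian ("network order") int, "!i".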
    l = read_exact(fd, sizeof_int,
                   "not a valid guest state file: config size read")
    vmconfig_size = unpack("!i", l)[0]
    vmconfig_buf = read_exact(fd, vmconfig_size,
                              "not a valid guest state file: config read")

    p = sxp.Parser()
    p.input(vmconfig_buf)
    if not p.ready:
        raise XendError("not a valid guest state file: config parse")

    vmconfig = p.get_val()

    if not relocating:
        domconfig = XendConfig(sxp_obj=vmconfig)
        othervm = xd.domain_lookup_nr(domconfig["name_label"])
        if othervm is None or othervm.domid is None:
            othervm = xd.domain_lookup_nr(domconfig["uuid"])
        if othervm is not None and othervm.domid is not None:
            raise VmError("Domain '%s' already exists with ID '%d'" %
                          (domconfig["name_label"], othervm.domid))

    if dominfo:
        dominfo.resume()
    else:
        dominfo = xd.restore_(vmconfig)

    # Repin domain vcpus if a target node number was specified.
    # This is done prior to memory allocation to aid in memory
    # distribution for NUMA systems.
    nodenr = -1
    for i, l in enumerate(vmconfig):
        if type(l) == type([]):
            if l[0] == 'node':
                nodenr = int(l[1])

    if nodenr >= 0:
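        # physinfo()['node_to_cpu'] maps each node number to the CPUs it contains.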
        node_to_cpu = XendNode.instance().xc.physinfo()['node_to_cpu']
        if nodenr < len(node_to_cpu):
            for v in range(0, dominfo.info['VCPUs_max']):
                xc.vcpu_setaffinity(dominfo.domid, v, node_to_cpu[nodenr])

    store_port = dominfo.getStorePort()
    console_port = dominfo.getConsolePort()

    assert store_port
    assert console_port

    # if hvm, pass mem size to calculate the store_mfn
    image_cfg = dominfo.info.get('image', {})
    is_hvm = dominfo.info.is_hvm()
    if is_hvm:
        apic = int(dominfo.info['platform'].get('apic', 0))
        pae = int(dominfo.info['platform'].get('pae', 0))
        log.info("restore hvm domain %d, apic=%d, pae=%d", dominfo.domid, apic,
                 pae)
    else:
        apic = 0
        pae = 0

    try:
        restore_image = image.create(dominfo, dominfo.info)
        memory = restore_image.getRequiredAvailableMemory(
            dominfo.info['memory_dynamic_max'] / 1024)
        maxmem = restore_image.getRequiredAvailableMemory(
            dominfo.info['memory_static_max'] / 1024)
        shadow = restore_image.getRequiredShadowMemory(
            dominfo.info['shadow_memory'] * 1024,
            dominfo.info['memory_static_max'] / 1024)

        log.debug("restore:shadow=0x%x, _static_max=0x%x, _static_min=0x%x, ",
                  dominfo.info['shadow_memory'],
                  dominfo.info['memory_static_max'],
                  dominfo.info['memory_static_min'])

        # Round shadow up to a multiple of a MiB, as shadow_mem_control
        # takes MiB and we must not round down and end up under-providing.
        shadow = ((shadow + 1023) / 1024) * 1024
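        # e.g. shadow = 1500 KiB rounds up to 2048 KiB; "/" floors on Python 2 ints.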

        # set memory limit
        xc.domain_setmaxmem(dominfo.getDomid(), maxmem)

        balloon.free(memory + shadow)

        shadow_cur = xc.shadow_mem_control(dominfo.getDomid(), shadow / 1024)
        dominfo.info['shadow_memory'] = shadow_cur

        cmd = map(str, [
            xen.util.auxbin.pathTo(XC_RESTORE), fd,
            dominfo.getDomid(), store_port, console_port,
            int(is_hvm), pae, apic
        ])
        log.debug("[xc_restore]: %s", string.join(cmd))

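        # RestoreInputHandler picks the store and console MFNs out of the
        # xc_restore helper's output.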
        handler = RestoreInputHandler()

        forkHelper(cmd, fd, handler.handler, True)

        # We don't want to pass this fd to any other children -- we
        # might need to recover the disk space that backs it.
        try:
            flags = fcntl.fcntl(fd, fcntl.F_GETFD)
            flags |= fcntl.FD_CLOEXEC
            fcntl.fcntl(fd, fcntl.F_SETFD, flags)
        except:
            pass

        if handler.store_mfn is None:
            raise XendError('Could not read store MFN')

        if not is_hvm and handler.console_mfn is None:
            raise XendError('Could not read console MFN')

        # Get qemu state and create a tmp file for dm restore.
        # Even PV guests may have QEMU state, but it's not currently
        # used, so only bother with HVM for now.
        if is_hvm:
            qemu_signature = read_exact(fd, len(QEMU_SIGNATURE),
                                        "invalid device model signature read")
            if qemu_signature != QEMU_SIGNATURE:
                raise XendError("not a valid device model state: found '%s'" %
                                qemu_signature)
            qemu_fd = os.open("/var/lib/xen/qemu-save.%d" % dominfo.getDomid(),
                              os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
            while True:
                buf = os.read(fd, dm_batch)
                if len(buf):
                    write_exact(qemu_fd, buf,
                                "could not write dm state to tmp file")
                else:
                    break
            os.close(qemu_fd)

        restore_image.setCpuid()

        os.read(fd, 1)  # Wait for source to close connection

        dominfo.completeRestore(handler.store_mfn, handler.console_mfn)

        #
        # We shouldn't hold the domains_lock over a waitForDevices call.
        # As this function sometimes gets called while holding this lock,
        # we must release it and re-acquire it appropriately.
        #
        from xen.xend import XendDomain

        lock = True
        try:
            XendDomain.instance().domains_lock.release()
        except:
            lock = False

        try:
            dominfo.waitForDevices()  # Wait for backends to set up
        except Exception, exn:
            log.exception(exn)

        if lock:
            XendDomain.instance().domains_lock.acquire()

        if not paused:
            dominfo.unpause()

        return dominfo
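
A minimal sketch of invoking restore() on a saved guest image (the file path
is illustrative and the call site is an assumption; per the signature above,
xd is the XendDomain instance and fd an open, readable file descriptor):

    fd = os.open("/var/lib/xen/save/mydomain.chk", os.O_RDONLY)
    try:
        dominfo = restore(XendDomain.instance(), fd)
    finally:
        os.close(fd)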
Example #5
def restore(xd, fd, dominfo = None, paused = False, relocating = False):
    signature = read_exact(fd, len(SIGNATURE),
        "not a valid guest state file: signature read")
    if signature != SIGNATURE:
        raise XendError("not a valid guest state file: found '%s'" %
                        signature)

    l = read_exact(fd, sizeof_int,
                   "not a valid guest state file: config size read")
    vmconfig_size = unpack("!i", l)[0]
    vmconfig_buf = read_exact(fd, vmconfig_size,
        "not a valid guest state file: config read")

    p = sxp.Parser()
    p.input(vmconfig_buf)
    if not p.ready:
        raise XendError("not a valid guest state file: config parse")

    vmconfig = p.get_val()

    if not relocating:
        domconfig = XendConfig(sxp_obj = vmconfig)
        othervm = xd.domain_lookup_nr(domconfig["name_label"])
        if othervm is None or othervm.domid is None:
            othervm = xd.domain_lookup_nr(domconfig["uuid"])
        if othervm is not None and othervm.domid is not None:
            raise VmError("Domain '%s' already exists with ID '%d'" %
                          (domconfig["name_label"], othervm.domid))

    if dominfo:
        dominfo.resume()
    else:
        dominfo = xd.restore_(vmconfig)

    # Repin domain vcpus if a target node number was specified.
    # This is done prior to memory allocation to aid in memory
    # distribution for NUMA systems.
    nodenr = -1
    for i,l in enumerate(vmconfig):
        if type(l) == type([]):
            if l[0] == 'node':
                nodenr = int(l[1])

    if nodenr >= 0:
        node_to_cpu = XendNode.instance().xc.physinfo()['node_to_cpu']
        if nodenr < len(node_to_cpu):
            for v in range(0, dominfo.info['VCPUs_max']):
                xc.vcpu_setaffinity(dominfo.domid, v, node_to_cpu[nodenr])

    store_port   = dominfo.getStorePort()
    console_port = dominfo.getConsolePort()

    assert store_port
    assert console_port

    # if hvm, pass mem size to calculate the store_mfn
    image_cfg = dominfo.info.get('image', {})
    is_hvm = dominfo.info.is_hvm()
    if is_hvm:
        apic = int(dominfo.info['platform'].get('apic', 0))
        pae  = int(dominfo.info['platform'].get('pae',  0))
        log.info("restore hvm domain %d, apic=%d, pae=%d",
                 dominfo.domid, apic, pae)
    else:
        apic = 0
        pae  = 0

    try:
        restore_image = image.create(dominfo, dominfo.info)
        memory = restore_image.getRequiredAvailableMemory(
            dominfo.info['memory_dynamic_max'] / 1024)
        maxmem = restore_image.getRequiredAvailableMemory(
            dominfo.info['memory_static_max'] / 1024)
        shadow = restore_image.getRequiredShadowMemory(
            dominfo.info['shadow_memory'] * 1024,
            dominfo.info['memory_static_max'] / 1024)

        log.debug("restore:shadow=0x%x, _static_max=0x%x, _static_min=0x%x, ",
                  dominfo.info['shadow_memory'],
                  dominfo.info['memory_static_max'],
                  dominfo.info['memory_static_min'])

        # Round shadow up to a multiple of a MiB, as shadow_mem_control
        # takes MiB and we must not round down and end up under-providing.
        shadow = ((shadow + 1023) / 1024) * 1024

        # set memory limit
        xc.domain_setmaxmem(dominfo.getDomid(), maxmem)

        balloon.free(memory + shadow)

        shadow_cur = xc.shadow_mem_control(dominfo.getDomid(), shadow / 1024)
        dominfo.info['shadow_memory'] = shadow_cur

        cmd = map(str, [xen.util.auxbin.pathTo(XC_RESTORE),
                        fd, dominfo.getDomid(),
                        store_port, console_port, int(is_hvm), pae, apic])
        log.debug("[xc_restore]: %s", string.join(cmd))

        handler = RestoreInputHandler()

        forkHelper(cmd, fd, handler.handler, True)

        # We don't want to pass this fd to any other children -- we 
        # might need to recover the disk space that backs it.
        try:
            flags = fcntl.fcntl(fd, fcntl.F_GETFD)
            flags |= fcntl.FD_CLOEXEC
            fcntl.fcntl(fd, fcntl.F_SETFD, flags)
        except:
            pass

        if handler.store_mfn is None:
            raise XendError('Could not read store MFN')

        if not is_hvm and handler.console_mfn is None:
            raise XendError('Could not read console MFN')        

        # Get qemu state and create a tmp file for dm restore.
        # Even PV guests may have QEMU state, but it's not currently
        # used, so only bother with HVM for now.
        if is_hvm:
            qemu_signature = read_exact(fd, len(QEMU_SIGNATURE),
                                        "invalid device model signature read")
            if qemu_signature != QEMU_SIGNATURE:
                raise XendError("not a valid device model state: found '%s'" %
                                qemu_signature)
            qemu_fd = os.open("/var/lib/xen/qemu-save.%d" % dominfo.getDomid(),
                              os.O_WRONLY | os.O_CREAT | os.O_TRUNC)
            while True:
                buf = os.read(fd, dm_batch)
                if len(buf):
                    write_exact(qemu_fd, buf,
                                "could not write dm state to tmp file")
                else:
                    break
            os.close(qemu_fd)

        restore_image.setCpuid()

        os.read(fd, 1)           # Wait for source to close connection
        
        dominfo.completeRestore(handler.store_mfn, handler.console_mfn)

        #
        # We shouldn't hold the domains_lock over a waitForDevices call.
        # As this function sometimes gets called while holding this lock,
        # we must release it and re-acquire it appropriately.
        #
        from xen.xend import XendDomain

        lock = True
        try:
            XendDomain.instance().domains_lock.release()
        except:
            lock = False

        try:
            dominfo.waitForDevices() # Wait for backends to set up
        except Exception, exn:
            log.exception(exn)

        if lock:
            XendDomain.instance().domains_lock.acquire()

        if not paused:
            dominfo.unpause()

        return dominfo