Example #1
def restore(xd, fd):
    signature = read_exact(fd, len(SIGNATURE),
        "not a valid guest state file: signature read")
    if signature != SIGNATURE:
        raise XendError("not a valid guest state file: found '%s'" %
                        signature)

    l = read_exact(fd, sizeof_int,
                   "not a valid guest state file: config size read")
    vmconfig_size = unpack("!i", l)[0]
    vmconfig_buf = read_exact(fd, vmconfig_size,
        "not a valid guest state file: config read")

    p = sxp.Parser()
    p.input(vmconfig_buf)
    if not p.ready:
        raise XendError("not a valid guest state file: config parse")

    vmconfig = p.get_val()

    dominfo = xd.restore_(vmconfig)

    store_port   = dominfo.getStorePort()
    console_port = dominfo.getConsolePort()

    assert store_port
    assert console_port

    try:
        l = read_exact(fd, sizeof_unsigned_long,
                       "not a valid guest state file: pfn count read")
        nr_pfns = unpack("L", l)[0]    # native sizeof long
        if nr_pfns > 16*1024*1024:     # XXX 
            raise XendError(
                "not a valid guest state file: pfn count out of range")

        balloon.free(xc.pages_to_kib(nr_pfns))

        cmd = map(str, [xen.util.auxbin.pathTo(XC_RESTORE),
                        xc.handle(), fd, dominfo.getDomid(), nr_pfns,
                        store_port, console_port])
        log.debug("[xc_restore]: %s", string.join(cmd))

        handler = RestoreInputHandler()

        forkHelper(cmd, fd, handler.handler, True)

        if handler.store_mfn is None or handler.console_mfn is None:
            raise XendError('Could not read store/console MFN')

        dominfo.unpause()

        dominfo.completeRestore(handler.store_mfn, handler.console_mfn)

        return dominfo
    except:
        dominfo.destroy()
        raise
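Example #1 leans on a read_exact helper that the snippet does not show. Below is a minimal sketch of the contract it has to satisfy (exact-length reads with a descriptive failure), assuming a raw file descriptor and the XendError class from xen.xend.XendError; the real xend helper may differ in detail.

import os

from xen.xend.XendError import XendError

def read_exact(fd, size, errmsg):
    # Sketch only: os.read may return fewer bytes than requested, so
    # loop until exactly `size` bytes have arrived; EOF is fatal.
    buf = ''
    while len(buf) < size:
        chunk = os.read(fd, size - len(buf))
        if chunk == '':
            raise XendError(errmsg)
        buf += chunk
    return buf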
Example #2
 def decrease_untouched_memory(self, value):
     if not self.is_enabled():
         return 
     elif self.untouched_memory < value: 
         raise VmError(('I need %d KiB of untouched memory, but only have %d KiB untouched in the Memory Pool') % (value, self.untouched_memory))
     else:
         self.untouched_memory -= value
         log.debug("MemoryPool: decrease_untouched_memory: untouched_memory %d KiB" %self.untouched_memory)
     return
Example #3
 def increase_memory(self, value):
     if not self.is_enabled():
         return  
     else:
         self.reserve_memory += value
         if self.reserve_memory > self.default_reserved_memory:
             raise VmError(('the max size of the memory pool is %d KiB, but the current size is %d KiB') % (self.default_reserved_memory, self.reserve_memory))
         log.debug("MemoryPool: increase_memory:%d, reserved_memory %d KiB" %(value,self.reserve_memory))
     return
Example #4
 def decrease_memory(self, value):
     if not self.is_enabled() or value <= 4096: #4M for PV guest kernel and ramdisk unzip
         return 
     elif self.reserve_memory < value: 
         raise VmError(('I need %d KiB, but only have %d KiB in the Memory Pool') % (value, self.reserve_memory))
     else:
         self.reserve_memory -=  value
         log.debug("MemoryPool:  decrease_memory: decrease: %d reserved_memory %d KiB" %(value,self.reserve_memory))
     return
Example #5
 def increase_memory(self, value):
     if not self.is_enabled():
         return
     else:
         self.reserve_memory += value
         if self.reserve_memory > self.default_reserved_memory:
             raise VmError(
                 ("the max size of the memory pool is %d KiB, but the current size is %d KiB")
                 % (self.default_reserved_memory, self.reserve_memory)
             )
         log.debug("MemoryPool: increase_memory:%d, reserved_memory %d KiB" % (value, self.reserve_memory))
     return
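A purely hypothetical usage sketch (not taken from xend) of how the increase/decrease pair above is meant to be used: take memory out of the pool while a reservation is consumed, and hand it back afterwards. MemoryPool.instance() is the singleton accessor that appears in Example #35; build_domain() is a made-up stand-in.

pool = MemoryPool.instance()

def build_domain():
    pass  # hypothetical consumer of the reserved memory

pool.decrease_memory(8 * 1024)      # take 8 MiB (value is in KiB) from the pool
try:
    build_domain()
finally:
    pool.increase_memory(8 * 1024)  # return it on completion or failure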
Example #6
 def add_usbdev(self, busid):
     # If the USB device being added should be owned by usbback
     # but is currently claimed by another driver, seize it!
     bus, intf = busid.split(':')
     buses = vusb_util.get_assigned_buses()
     if str(bus) in buses:
         if not vusb_util.usb_intf_is_binded(busid):
             log.debug("add_usb(): %s is bound to another driver" % busid)
             vusb_util.unbind_usb_device(busid)
             vusb_util.bind_usb_device(busid)
     return
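For reference, busid here is assumed to follow the sysfs USB interface naming, e.g. '1-2:1.0' for interface 1.0 of device 1-2, which is what makes the split(':') above work:

busid = "1-2:1.0"             # assumed sysfs-style interface id
bus, intf = busid.split(':')  # bus == "1-2", intf == "1.0"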
Example #7
 def decrease_memory(self, value):
     if not self.is_enabled() or value <= 4096:  # 4M for PV guest kernel and ramdisk unzip
         return
     elif self.reserve_memory < value:
         raise VmError(("I need %d KiB, but only have %d KiB in the Memory Pool") % (value, self.reserve_memory))
     else:
         self.reserve_memory -= value
         log.debug(
             "MemoryPool:  decrease_memory: decrease: %d reserved_memory %d KiB" % (value, self.reserve_memory)
         )
     return
Example #8
 def add_usbdev(self, busid):
     # If the USB device being added should be owned by usbback
     # but is currently claimed by another driver, seize it!
     bus, intf = busid.split(':')
     buses = vusb_util.get_assigned_buses()
     if str(bus) in buses:
         if not vusb_util.usb_intf_is_binded(busid):
             log.debug("add_usb(): %s is bound to another driver" % busid)
             vusb_util.unbind_usb_device(busid)
             vusb_util.bind_usb_device(busid)
     return
Example #9
 def decrease_untouched_memory(self, value):
     if not self.is_enabled():
         return
     elif self.untouched_memory < value:
         raise VmError(
             ("I need %d KiB of untouched memory, but only have %d KiB untouched in the Memory Pool")
             % (value, self.untouched_memory)
         )
     else:
         self.untouched_memory -= value
         log.debug("MemoryPool: decrease_untouched_memory: untouched_memory %d KiB" % self.untouched_memory)
     return
Example #10
def save(fd, dominfo, live):
    write_exact(fd, SIGNATURE, "could not write guest state file: signature")

    config = sxp.to_string(dominfo.sxpr())

    domain_name = dominfo.getName()
    # Rename the domain temporarily, so that we don't get a name clash if this
    # domain is migrating (live or non-live) to the local host.  Doing such a
    # thing is useful for debugging.
    dominfo.setName('migrating-' + domain_name)

    try:
        write_exact(fd, pack("!i", len(config)),
                    "could not write guest state file: config len")
        write_exact(fd, config, "could not write guest state file: config")

        # xc_save takes three customization parameters: maxit, max_f, and
        # flags.  The last controls whether or not the save is 'live', while
        # the first two further customize behaviour when 'live' save is
        # enabled.  Passing "0" simply uses the defaults compiled into
        # libxenguest; see the comments and/or code in xc_linux_save() for
        # more information.
        cmd = [xen.util.auxbin.pathTo(XC_SAVE), str(xc.handle()), str(fd),
               str(dominfo.getDomid()), "0", "0", str(int(live)) ]
        log.debug("[xc_save]: %s", string.join(cmd))

        def saveInputHandler(line, tochild):
            log.debug("In saveInputHandler %s", line)
            if line == "suspend":
                log.debug("Suspending %d ...", dominfo.getDomid())
                dominfo.shutdown('suspend')
                dominfo.waitForShutdown()
                log.info("Domain %d suspended.", dominfo.getDomid())
                tochild.write("done\n")
                tochild.flush()
                log.debug('Written done')

        forkHelper(cmd, fd, saveInputHandler, False)

        dominfo.destroyDomain()

    except Exception, exn:
        log.exception("Save failed on domain %s (%d).", domain_name,
                      dominfo.getDomid())
        try:
            dominfo.setName(domain_name)
        except:
            log.exception("Failed to reset the migrating domain's name")
        raise exn
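save() relies on write_exact, the mirror image of the read_exact helper sketched after Example #1. Under the same assumptions (raw fd, XendError for failures), it presumably looks something like this:

import os

from xen.xend.XendError import XendError

def write_exact(fd, buf, errmsg):
    # Sketch only: os.write may accept fewer bytes than offered, so
    # keep writing until the whole buffer has been pushed out.
    written = 0
    while written < len(buf):
        n = os.write(fd, buf[written:])
        if n <= 0:
            raise XendError(errmsg)
        written += n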
Example #11
    def _init_PIFs(self):
        # Initialise PIFs
        # First configure ones off disk
        saved_pifs = self.state_store.load_state('pif')
        if saved_pifs:
            for pif_uuid, pif in saved_pifs.items():
                try:
                    XendPIF.recreate(pif, pif_uuid)
                except CreateUnspecifiedAttributeError:
                    log.warn("Error recreating PIF %s", pif_uuid)
        
        # Next discover any existing PIFs and check
        # they are not already configured
        configured_pifs = [XendAPIStore.get(pif_uuid, "PIF").get_interface_name()
                           for pif_uuid in XendPIF.get_all()]
        unconfigured_pifs = [(name, mtu, mac)
                             for name, mtu, mac in linux_get_phy_ifaces()
                             if name not in configured_pifs]

        # Get a mapping from interface to bridge          
        if_to_br = dict([(i,b)
                         for (b,ifs) in Brctl.get_state().items()
                             for i in ifs])

        for name, mtu, mac in unconfigured_pifs:
            # Check PIF is on bridge
            # if not, ignore
            bridge_name = if_to_br.get(name, None)
            if bridge_name is not None:
                # Translate bridge name to network uuid
                for network_uuid in XendNetwork.get_all():
                    network = XendAPIStore.get(
                        network_uuid, 'network')
                    if network.get_name_label() == bridge_name:
                        XendPIF.create_phy(network_uuid, name,
                                           mac, mtu)
                        break
                else:
                    log.debug("Cannot find network for bridge %s "
                              "when configuring PIF %s",
                              (bridge_name, name))     
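The else on the for loop above is easy to misread: it runs only when the loop finishes without hitting break, which is exactly what makes it a 'not found' branch here. A standalone illustration:

for network in ['net-a', 'net-b']:
    if network == 'net-b':
        break      # found a match, so the else below is skipped
else:
    print 'no matching network'   # runs only if the loop never broke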
Example #12
 def init(self):
     xoptions = XendOptions.instance()
     self.default_reserved_memory = xoptions.get_reserved_memory() * 1024 * 1024  # KiB
     if self.default_reserved_memory <= 0:
         return
     self.enable_memory_pool = 1
     self.dom0_ballooning = xoptions.get_enable_dom0_ballooning()
     if not self.dom0_ballooning:
         return
     self.reserve_memory = 0
     self.untouched_memory = 0
     # init reserved memory
     # if not reserve_memory_size:
     xc = xen.lowlevel.xc.xc()
     physinfo = xc.physinfo()
     total_mem = physinfo["total_memory"]
     if total_mem < self.default_reserved_memory:
         self.default_reserved_memory = total_mem
     self.reserve_memory = self.default_reserved_memory
     self.untouched_memory = self.default_reserved_memory
     log.debug("MemoryPool: init reserved_memory %d KiB" % self.reserve_memory)
Example #13
 def init(self):
     xoptions = XendOptions.instance()
     self.default_reserved_memory = xoptions.get_reserved_memory() * 1024 * 1024 #KiB
     if self.default_reserved_memory <= 0:
         return
     self.enable_memory_pool = 1   
     self.dom0_ballooning = xoptions.get_enable_dom0_ballooning() 
     if not self.dom0_ballooning:
         return
     self.reserve_memory = 0 
     self.untouched_memory = 0
     #init reserved memory
     #if not reserve_memory_size: 
     xc = xen.lowlevel.xc.xc()
     physinfo = xc.physinfo()
     total_mem = physinfo['total_memory'] 
     if total_mem < self.default_reserved_memory:
         self.default_reserved_memory = total_mem
     self.reserve_memory = self.default_reserved_memory 
     self.untouched_memory = self.default_reserved_memory 
     log.debug("MemoryPool: init reserved_memory %d KiB" %self.reserve_memory)
Example #14
    def _init_PIFs(self):
        # Initialise PIFs
        # First configure ones off disk
        saved_pifs = self.state_store.load_state('pif')
        if saved_pifs:
            for pif_uuid, pif in saved_pifs.items():
                try:
                    XendPIF.recreate(pif, pif_uuid)
                except CreateUnspecifiedAttributeError:
                    log.warn("Error recreating PIF %s", pif_uuid)

        # Next discover any existing PIFs and check
        # they are not already configured
        configured_pifs = [
            XendAPIStore.get(pif_uuid, "PIF").get_interface_name()
            for pif_uuid in XendPIF.get_all()
        ]
        unconfigured_pifs = [(name, mtu, mac)
                             for name, mtu, mac in linux_get_phy_ifaces()
                             if name not in configured_pifs]

        # Get a mapping from interface to bridge
        if_to_br = dict([(i, b) for (b, ifs) in Brctl.get_state().items()
                         for i in ifs])

        for name, mtu, mac in unconfigured_pifs:
            # Check PIF is on bridge
            # if not, ignore
            bridge_name = if_to_br.get(name, None)
            if bridge_name is not None:
                # Translate bridge name to network uuid
                for network_uuid in XendNetwork.get_all():
                    network = XendAPIStore.get(network_uuid, 'network')
                    if network.get_name_label() == bridge_name:
                        XendPIF.create_phy(network_uuid, name, mac, mtu)
                        break
                else:
                    log.debug(
                        "Cannot find network for bridge %s "
                        "when configuring PIF %s", bridge_name, name)
Example #15
def forkHelper(cmd, fd, inputHandler, closeToChild):
    child = xPopen3(cmd, True, -1, [fd, xc.handle()])

    if closeToChild:
        child.tochild.close()

    thread = threading.Thread(target = slurp, args = (child.childerr,))
    thread.start()

    try:
        try:
            while 1:
                line = child.fromchild.readline()
                if line == "":
                    break
                else:
                    line = line.rstrip()
                    log.debug('%s', line)
                    inputHandler(line, child.tochild)

            thread.join()

        except IOError, exn:
            raise XendError('Error reading from child process for %s: %s' %
                            (cmd, exn))
    finally:
        child.fromchild.close()
        child.childerr.close()
        if not closeToChild:
            child.tochild.close()

    status = child.wait()
    if status >> 8 == 127:
        raise XendError("%s failed: popen failed" % string.join(cmd))
    elif status != 0:
        raise XendError("%s failed" % string.join(cmd))
Example #16
 def saveInputHandler(line, tochild):
     log.debug("In saveInputHandler %s", line)
     if line == "suspend":
         log.debug("Suspending %d ...", dominfo.getDomid())
         dominfo.shutdown('suspend')
         dominfo.waitForShutdown()
         log.info("Domain %d suspended.", dominfo.getDomid())
         tochild.write("done\n")
         tochild.flush()
         log.debug('Written done')
Example #17
    def _init_PSCSIs(self):
        # Initialise PSCSIs and PSCSI_HBAs
        saved_pscsis = self.state_store.load_state('pscsi')
        saved_pscsi_table = {}
        if saved_pscsis:
            for pscsi_uuid, pscsi_record in saved_pscsis.items():
                try:
                    saved_pscsi_table[pscsi_record['scsi_id']] = pscsi_uuid
                except KeyError:
                    pass

        saved_pscsi_HBAs = self.state_store.load_state('pscsi_HBA')
        saved_pscsi_HBA_table = {}
        if saved_pscsi_HBAs:
            for pscsi_HBA_uuid, pscsi_HBA_record in saved_pscsi_HBAs.items():
                try:
                    physical_host = int(pscsi_HBA_record['physical_host'])
                    saved_pscsi_HBA_table[physical_host] = pscsi_HBA_uuid
                except (KeyError, ValueError):
                    pass

        pscsi_table = {}
        pscsi_HBA_table = {}

        pscsi_records = []
        for pscsi_mask in xendoptions().get_pscsi_device_mask():
            pscsi_records += vscsi_util.get_all_scsi_devices(pscsi_mask)
        log.debug("pscsi record count: %s" % len(pscsi_records))

        for pscsi_record in pscsi_records:
            scsi_id = pscsi_record['scsi_id']
            if scsi_id:
                saved_HBA_uuid = None

                pscsi_uuid = saved_pscsi_table.get(scsi_id, None)
                if pscsi_uuid is None:
                    pscsi_uuid = uuid.createString()
                    saved_pscsi_table[scsi_id] = pscsi_uuid
                else:
                    try:
                        saved_HBA_uuid = saved_pscsis[pscsi_uuid].get('HBA', None)
                    except KeyError:
                        log.warn("Multi-path SCSI devices are not supported for XenAPI")
                        return

                physical_host = int(pscsi_record['physical_HCTL'].split(':')[0])
                if pscsi_HBA_table.has_key(physical_host):
                    pscsi_HBA_uuid = pscsi_HBA_table[physical_host]
                elif saved_pscsi_HBA_table.has_key(physical_host):
                    pscsi_HBA_uuid = saved_pscsi_HBA_table[physical_host]
                    pscsi_HBA_table[physical_host] = pscsi_HBA_uuid
                else:
                    pscsi_HBA_uuid = uuid.createString()
                    pscsi_HBA_table[physical_host] = pscsi_HBA_uuid

                if saved_HBA_uuid is not None and \
                   saved_HBA_uuid != pscsi_HBA_uuid:
                    log.debug('The PSCSI(%s) host number was changed', scsi_id)
                pscsi_record['HBA'] = pscsi_HBA_uuid
                pscsi_table[pscsi_uuid] = pscsi_record

        for pscsi_uuid, pscsi_record in pscsi_table.items():
            XendPSCSI(pscsi_uuid, pscsi_record)

        for physical_host, pscsi_HBA_uuid in pscsi_HBA_table.items():
            XendPSCSI_HBA(pscsi_HBA_uuid, {'physical_host': physical_host})
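The physical_HCTL parsing above assumes the usual SCSI Host:Channel:Target:LUN identifier, so taking the field before the first colon yields the HBA's host number. A worked example:

hctl = '2:0:0:1'                         # Host:Channel:Target:LUN
physical_host = int(hctl.split(':')[0])  # == 2, the host (HBA) number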
Example #18
def free(required):
    """Balloon out memory from the privileged domain so that there is the
    specified required amount (in KiB) free.
    """

    # We check whether there is enough free memory, and if not, instruct dom0
    # to balloon out to free some up.  Memory freed by a destroyed domain may
    # not appear in the free_memory field immediately, because it needs to be
    # scrubbed before it can be released to the free list, which is done
    # asynchronously by Xen; ballooning is asynchronous also.  No matter where
    # we expect the free memory to come from, therefore, we need to wait for
    # it to become available.
    #
    # We are not allowed to balloon below dom0_min_mem, or if dom0_min_mem
    # is 0, we cannot balloon at all.  Memory can still become available
    # through a rebooting domain, however.
    #
    # Eventually, we time out (presumably because there really isn't enough
    # free memory).
    #
    # We don't want to set the memory target (triggering a watch) when that
    # has already been done, but we do want to respond to changing memory
    # usage, so we recheck the required alloc each time around the loop, but
    # track the last used value so that we don't trigger too many watches.

    need_mem = (required + 1023) / 1024 + BALLOON_OUT_SLACK

    xroot = XendRoot.instance()
    xc = xen.lowlevel.xc.xc()

    try:
        dom0_min_mem = xroot.get_dom0_min_mem()

        retries = 0
        sleep_time = SLEEP_TIME_GROWTH
        last_new_alloc = None
        while retries < RETRY_LIMIT:
            free_mem = xc.physinfo()['free_memory']

            if free_mem >= need_mem:
                log.debug("Balloon: free %d; need %d; done.", free_mem,
                          need_mem)
                return

            if retries == 0:
                log.debug("Balloon: free %d; need %d.", free_mem, need_mem)

            if dom0_min_mem > 0:
                dom0_alloc = get_dom0_current_alloc()
                new_alloc = dom0_alloc - (need_mem - free_mem)

                if (new_alloc >= dom0_min_mem and new_alloc != last_new_alloc):
                    log.debug("Balloon: setting dom0 target to %d.", new_alloc)
                    dom0 = XendDomain.instance().privilegedDomain()
                    dom0.setMemoryTarget(new_alloc)
                    last_new_alloc = new_alloc
                    # Continue to retry, waiting for ballooning.

            time.sleep(sleep_time)
            retries += 1
            sleep_time += SLEEP_TIME_GROWTH

        # Not enough memory; diagnose the problem.
        if dom0_min_mem == 0:
            raise VmError(('Not enough free memory and dom0_min_mem is 0, so '
                           'I cannot release any more.  I need %d MiB but '
                           'only have %d.') % (need_mem, free_mem))
        elif new_alloc < dom0_min_mem:
            raise VmError(
                ('I need %d MiB, but dom0_min_mem is %d and shrinking to '
                 '%d MiB would leave only %d MiB free.') %
                (need_mem, dom0_min_mem, dom0_min_mem,
                 free_mem + dom0_alloc - dom0_min_mem))
        else:
            raise VmError('The privileged domain did not balloon!')

    finally:
        del xc
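The need_mem computation at the top of free() is a KiB-to-MiB round-up in Python 2 integer arithmetic, plus a safety margin (BALLOON_OUT_SLACK is a module constant not shown in these snippets). A worked example:

required = 262145                # KiB requested, just over 256 MiB
mib = (required + 1023) / 1024   # == 257: the +1023 makes the division round up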
Example #19
 def get_PIF_refs(self):
     log.debug(XendPIF.get_all())
     return XendPIF.get_all()
Example #20
 def remove_network(self, interface):
     # TODO
     log.debug("remove_network(): Not implemented.")
Example #21
 def add_network(self, interface):
     # TODO
     log.debug("add_network(): Not implemented.")
Example #22
def free(required):
    """Balloon out memory from the privileged domain so that there is the
    specified required amount (in KiB) free.
    """

    # We check whether there is enough free memory, and if not, instruct dom0
    # to balloon out to free some up.  Memory freed by a destroyed domain may
    # not appear in the free_memory field immediately, because it needs to be
    # scrubbed before it can be released to the free list, which is done
    # asynchronously by Xen; ballooning is asynchronous also.  No matter where
    # we expect the free memory to come from, therefore, we need to wait for
    # it to become available.
    #
    # We are not allowed to balloon below dom0_min_mem, or if dom0_min_mem
    # is 0, we cannot balloon at all.  Memory can still become available
    # through a rebooting domain, however.
    #
    # Eventually, we time out (presumably because there really isn't enough
    # free memory).
    #
    # We don't want to set the memory target (triggering a watch) when that
    # has already been done, but we do want to respond to changing memory
    # usage, so we recheck the required alloc each time around the loop, but
    # track the last used value so that we don't trigger too many watches.

    need_mem = (required + 1023) / 1024 + BALLOON_OUT_SLACK

    xroot = XendRoot.instance()
    xc = xen.lowlevel.xc.xc()

    try:
        dom0_min_mem = xroot.get_dom0_min_mem()

        retries = 0
        sleep_time = SLEEP_TIME_GROWTH
        last_new_alloc = None
        while retries < RETRY_LIMIT:
            free_mem = xc.physinfo()['free_memory']

            if free_mem >= need_mem:
                log.debug("Balloon: free %d; need %d; done.", free_mem,
                          need_mem)
                return

            if retries == 0:
                log.debug("Balloon: free %d; need %d.", free_mem, need_mem)

            if dom0_min_mem > 0:
                dom0_alloc = get_dom0_current_alloc()
                new_alloc = dom0_alloc - (need_mem - free_mem)

                if (new_alloc >= dom0_min_mem and
                    new_alloc != last_new_alloc):
                    log.debug("Balloon: setting dom0 target to %d.",
                              new_alloc)
                    dom0 = XendDomain.instance().privilegedDomain()
                    dom0.setMemoryTarget(new_alloc)
                    last_new_alloc = new_alloc
                    # Continue to retry, waiting for ballooning.

            time.sleep(sleep_time)
            retries += 1
            sleep_time += SLEEP_TIME_GROWTH

        # Not enough memory; diagnose the problem.
        if dom0_min_mem == 0:
            raise VmError(('Not enough free memory and dom0_min_mem is 0, so '
                           'I cannot release any more.  I need %d MiB but '
                           'only have %d.') %
                          (need_mem, free_mem))
        elif new_alloc < dom0_min_mem:
            raise VmError(
                ('I need %d MiB, but dom0_min_mem is %d and shrinking to '
                 '%d MiB would leave only %d MiB free.') %
                (need_mem, dom0_min_mem, dom0_min_mem,
                 free_mem + dom0_alloc - dom0_min_mem))
        else:
            raise VmError('The privileged domain did not balloon!')

    finally:
        del xc
Example #23
def free(need_mem):
    """Balloon out memory from the privileged domain so that there is the
    specified required amount (in KiB) free.
    """

    # We check whether there is enough free memory, and if not, instruct dom0
    # to balloon out to free some up.  Memory freed by a destroyed domain may
    # not appear in the free_memory field immediately, because it needs to be
    # scrubbed before it can be released to the free list, which is done
    # asynchronously by Xen; ballooning is asynchronous also.  Such memory
    # does, however, need to be accounted for when calculating how much dom0
    # needs to balloon.  No matter where we expect the free memory to come
    # from, we need to wait for it to become available.
    #
    # We are not allowed to balloon below dom0_min_mem, or if dom0_min_mem
    # is 0, we cannot balloon at all.  Memory can still become available
    # through a rebooting domain, however.
    #
    # Eventually, we time out (presumably because there really isn't enough
    # free memory).
    #
    # We don't want to set the memory target (triggering a watch) when that
    # has already been done, but we do want to respond to changing memory
    # usage, so we recheck the required alloc each time around the loop, but
    # track the last used value so that we don't trigger too many watches.

    xoptions = XendOptions.instance()
    dom0 = XendDomain.instance().privilegedDomain()
    xc = xen.lowlevel.xc.xc()
    dom0_start_alloc_mb = get_dom0_current_alloc() / 1024

    try:
        dom0_min_mem = xoptions.get_dom0_min_mem() * 1024
        dom0_alloc = get_dom0_current_alloc()

        retries = 0
        sleep_time = SLEEP_TIME_GROWTH
        new_alloc = 0
        last_new_alloc = None
        last_free = None
        rlimit = RETRY_LIMIT

        # If an unreasonable amount of memory is required, we give up
        # waiting for ballooning or scrubbing, as if we had already
        # exhausted our retries.
        physinfo = xc.physinfo()
        free_mem = physinfo['free_memory']
        scrub_mem = physinfo['scrub_memory']
        total_mem = physinfo['total_memory']
        if dom0_min_mem > 0:
            max_free_mem = total_mem - dom0_min_mem
        else:
            max_free_mem = total_mem - dom0_alloc
        if need_mem >= max_free_mem:
            retries = rlimit

        while retries < rlimit:
            physinfo = xc.physinfo()
            free_mem = physinfo['free_memory']
            scrub_mem = physinfo['scrub_memory']

            if free_mem >= need_mem:
                log.debug("Balloon: %d KiB free; need %d; done.", free_mem,
                          need_mem)
                return

            if retries == 0:
                rlimit += (
                    (need_mem - free_mem) / 1024 / 1024) * RETRY_LIMIT_INCR
                log.debug(
                    "Balloon: %d KiB free; %d to scrub; need %d; retries: %d.",
                    free_mem, scrub_mem, need_mem, rlimit)

            if dom0_min_mem > 0:
                dom0_alloc = get_dom0_current_alloc()
                new_alloc = dom0_alloc - (need_mem - free_mem - scrub_mem)

                if free_mem + scrub_mem >= need_mem:
                    if last_new_alloc is None:
                        log.debug("Balloon: waiting on scrubbing")
                        last_new_alloc = dom0_alloc
                else:
                    if (new_alloc >= dom0_min_mem
                            and new_alloc != last_new_alloc):
                        new_alloc_mb = new_alloc / 1024  # Round down
                        log.debug("Balloon: setting dom0 target to %d MiB.",
                                  new_alloc_mb)
                        dom0.setMemoryTarget(new_alloc_mb)
                        last_new_alloc = new_alloc
                # Continue to retry, waiting for ballooning or scrubbing.

            time.sleep(sleep_time)
            if retries < 2 * RETRY_LIMIT:
                sleep_time += SLEEP_TIME_GROWTH
            if last_free is not None and last_free >= free_mem + scrub_mem:
                retries += 1
            last_free = free_mem + scrub_mem

        # Not enough memory; diagnose the problem.
        if dom0_min_mem == 0:
            raise VmError(('Not enough free memory and dom0_min_mem is 0, so '
                           'I cannot release any more.  I need %d KiB but '
                           'only have %d.') % (need_mem, free_mem))
        elif new_alloc < dom0_min_mem:
            raise VmError(
                ('I need %d KiB, but dom0_min_mem is %d and shrinking to '
                 '%d KiB would leave only %d KiB free.') %
                (need_mem, dom0_min_mem, dom0_min_mem,
                 free_mem + scrub_mem + dom0_alloc - dom0_min_mem))
        else:
            dom0.setMemoryTarget(dom0_start_alloc_mb)
            raise VmError(('Not enough memory is available, and dom0 cannot'
                           ' be shrunk any further'))

    finally:
        del xc
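The retry-budget growth on the first pass of the loop above is easiest to see with numbers: every GiB of shortfall buys RETRY_LIMIT_INCR extra retries (RETRY_LIMIT and RETRY_LIMIT_INCR are module constants not shown here). For example:

need_mem = 5 * 1024 * 1024                    # want 5 GiB free (units are KiB)
free_mem = 1 * 1024 * 1024                    # only 1 GiB free right now
extra = (need_mem - free_mem) / 1024 / 1024   # == 4, the shortfall in GiB
# rlimit += extra * RETRY_LIMIT_INCR, i.e. 4 * RETRY_LIMIT_INCR more retries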
Example #24
        args = [blexec]
        if kernel:
            args.append("--kernel=%s" % kernel)
        if ramdisk:
            args.append("--ramdisk=%s" % ramdisk)
        if kernel_args:
            args.append("--args=%s" % kernel_args)
        if quiet:
            args.append("-q")
        args.append("--output=%s" % fifo)
        if blargs:
            args.extend(shlex.split(blargs))
        args.append(disk)

        try:
            log.debug("Launching bootloader as %s." % str(args))
            env = os.environ.copy()
            env['TERM'] = 'vt100'
            oshelp.close_fds()
            os.execvpe(args[0], args, env)
        except OSError, e:
            print e
            pass
        os._exit(1)

    # record that this domain is bootloading
    dom.bootloader_pid = child

    # On Solaris, the master pty side does not have terminal semantics,
    # so don't try to set any attributes, as it will fail.
    if os.uname()[0] != 'SunOS':
Example #25
    def __init__(self):
        """Initalises the state of all host specific objects such as

        * host
        * host_CPU
        * host_metrics
        * PIF
        * PIF_metrics
        * network
        * Storage Repository
        * PPCI
        """

        self.xc = xen.lowlevel.xc.xc()
        self.state_store = XendStateStore(xendoptions().get_xend_state_path())
        self.monitor = XendMonitor()
        self.monitor.start()

        # load host state from XML file
        saved_host = self.state_store.load_state('host')
        if saved_host and len(saved_host.keys()) == 1:
            self.uuid = saved_host.keys()[0]
            host = saved_host[self.uuid]
            self.name = host.get('name_label', socket.gethostname())
            self.desc = host.get('name_description', '')
            self.host_metrics_uuid = host.get('metrics_uuid',
                                              uuid.createString())
            try:
                self.other_config = eval(host['other_config'])
            except:
                self.other_config = {}
            self.cpus = {}
        else:
            self.uuid = uuid.createString()
            self.name = socket.gethostname()
            self.desc = ''
            self.other_config = {}
            self.cpus = {}
            self.host_metrics_uuid = uuid.createString()

        # put some arbitrary params in other_config as this
        # is directly exposed via XenAPI
        self.other_config["xen_pagesize"] = self.xeninfo_dict()["xen_pagesize"]
        self.other_config["platform_params"] = self.xeninfo_dict(
        )["platform_params"]

        # load CPU UUIDs
        saved_cpus = self.state_store.load_state('cpu')
        for cpu_uuid, cpu in saved_cpus.items():
            self.cpus[cpu_uuid] = cpu

        cpuinfo = osdep.get_cpuinfo()
        physinfo = self.physinfo_dict()
        cpu_count = physinfo['nr_cpus']
        cpu_features = physinfo['hw_caps']
        virt_caps = physinfo['virt_caps']

        # If the number of CPUs doesn't match, we should just reinitialise
        # the CPU UUIDs.
        if cpu_count != len(self.cpus):
            self.cpus = {}
            for i in range(cpu_count):
                u = uuid.createString()
                self.cpus[u] = {'uuid': u, 'number': i}

        for u in self.cpus.keys():
            number = self.cpus[u]['number']
            # We can run off the end of the cpuinfo list if domain0 does not
            # have #vcpus == #pcpus. In that case we just replicate one that's
            # in the hash table.
            if not cpuinfo.has_key(number):
                number = cpuinfo.keys()[0]
            if arch.type == "x86":
                self.cpus[u].update({
                    'host':
                    self.uuid,
                    'features':
                    cpu_features,
                    'virt_caps':
                    virt_caps,
                    'speed':
                    int(float(cpuinfo[number]['cpu MHz'])),
                    'vendor':
                    cpuinfo[number]['vendor_id'],
                    'modelname':
                    cpuinfo[number]['model name'],
                    'stepping':
                    cpuinfo[number]['stepping'],
                    'flags':
                    cpuinfo[number]['flags'],
                })
            elif arch.type == "ia64":
                self.cpus[u].update({
                    'host':
                    self.uuid,
                    'features':
                    cpu_features,
                    'speed':
                    int(float(cpuinfo[number]['cpu MHz'])),
                    'vendor':
                    cpuinfo[number]['vendor'],
                    'modelname':
                    cpuinfo[number]['family'],
                    'stepping':
                    cpuinfo[number]['model'],
                    'flags':
                    cpuinfo[number]['features'],
                })
            else:
                self.cpus[u].update({
                    'host': self.uuid,
                    'features': cpu_features,
                })

        self.srs = {}

        # Initialise networks
        # First configure ones off disk
        saved_networks = self.state_store.load_state('network')
        if saved_networks:
            for net_uuid, network in saved_networks.items():
                try:
                    XendNetwork.recreate(network, net_uuid)
                except CreateUnspecifiedAttributeError:
                    log.warn("Error recreating network %s", net_uuid)

        # Next discover any existing bridges and check
        # they are not already configured
        bridges = Brctl.get_state().keys()
        configured_bridges = [
            XendAPIStore.get(network_uuid, "network").get_name_label()
            for network_uuid in XendNetwork.get_all()
        ]
        unconfigured_bridges = [
            bridge for bridge in bridges if bridge not in configured_bridges
        ]
        for unconfigured_bridge in unconfigured_bridges:
            XendNetwork.create_phy(unconfigured_bridge)

        # Initialise PIFs
        # First configure ones off disk
        saved_pifs = self.state_store.load_state('pif')
        if saved_pifs:
            for pif_uuid, pif in saved_pifs.items():
                try:
                    XendPIF.recreate(pif, pif_uuid)
                except CreateUnspecifiedAttributeError:
                    log.warn("Error recreating PIF %s", pif_uuid)

        # Next discover any existing PIFs and check
        # they are not already configured
        configured_pifs = [
            XendAPIStore.get(pif_uuid, "PIF").get_interface_name()
            for pif_uuid in XendPIF.get_all()
        ]
        unconfigured_pifs = [(name, mtu, mac)
                             for name, mtu, mac in linux_get_phy_ifaces()
                             if name not in configured_pifs]

        # Get a mapping from interface to bridge
        if_to_br = dict([(i, b) for (b, ifs) in Brctl.get_state().items()
                         for i in ifs])

        for name, mtu, mac in unconfigured_pifs:
            # Check PIF is on bridge
            # if not, ignore
            bridge_name = if_to_br.get(name, None)
            if bridge_name is not None:
                # Translate bridge name to network uuid
                for network_uuid in XendNetwork.get_all():
                    network = XendAPIStore.get(network_uuid, 'network')
                    if network.get_name_label() == bridge_name:
                        XendPIF.create_phy(network_uuid, name, mac, mtu)
                        break
                else:
                    log.debug(
                        "Cannot find network for bridge %s "
                        "when configuring PIF %s", bridge_name, name)

        # initialise storage
        saved_srs = self.state_store.load_state('sr')
        if saved_srs:
            for sr_uuid, sr_cfg in saved_srs.items():
                if sr_cfg['type'] == 'qcow_file':
                    self.srs[sr_uuid] = XendQCoWStorageRepo(sr_uuid)
                elif sr_cfg['type'] == 'local':
                    self.srs[sr_uuid] = XendLocalStorageRepo(sr_uuid)

        # Create missing SRs if they don't exist
        if not self.get_sr_by_type('local'):
            image_sr_uuid = uuid.createString()
            self.srs[image_sr_uuid] = XendLocalStorageRepo(image_sr_uuid)

        if not self.get_sr_by_type('qcow_file'):
            qcow_sr_uuid = uuid.createString()
            self.srs[qcow_sr_uuid] = XendQCoWStorageRepo(qcow_sr_uuid)

        saved_pbds = self.state_store.load_state('pbd')
        if saved_pbds:
            for pbd_uuid, pbd_cfg in saved_pbds.items():
                try:
                    XendPBD.recreate(pbd_uuid, pbd_cfg)
                except CreateUnspecifiedAttributeError:
                    log.warn("Error recreating PBD %s", pbd_uuid)

        # Initialise PPCIs
        saved_ppcis = self.state_store.load_state('ppci')
        saved_ppci_table = {}
        if saved_ppcis:
            for ppci_uuid, ppci_record in saved_ppcis.items():
                try:
                    saved_ppci_table[ppci_record['name']] = ppci_uuid
                except KeyError:
                    pass

        for pci_dev in PciUtil.get_all_pci_devices():
            ppci_record = {
                'domain': pci_dev.domain,
                'bus': pci_dev.bus,
                'slot': pci_dev.slot,
                'func': pci_dev.func,
                'vendor_id': pci_dev.vendor,
                'vendor_name': pci_dev.vendorname,
                'device_id': pci_dev.device,
                'device_name': pci_dev.devicename,
                'revision_id': pci_dev.revision,
                'class_code': pci_dev.classcode,
                'class_name': pci_dev.classname,
                'subsystem_vendor_id': pci_dev.subvendor,
                'subsystem_vendor_name': pci_dev.subvendorname,
                'subsystem_id': pci_dev.subdevice,
                'subsystem_name': pci_dev.subdevicename,
                'driver': pci_dev.driver
            }
            # If saved uuid exists, use it. Otherwise create one.
            ppci_uuid = saved_ppci_table.get(pci_dev.name, uuid.createString())
            XendPPCI(ppci_uuid, ppci_record)
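One small idiom from the PIF section above is worth spelling out: Brctl.get_state() maps bridge -> [interfaces], and the nested comprehension flips that into interface -> bridge. With made-up data:

state = {'xenbr0': ['eth0'], 'xenbr1': ['eth1', 'eth2']}
if_to_br = dict([(i, b) for (b, ifs) in state.items() for i in ifs])
# if_to_br == {'eth0': 'xenbr0', 'eth1': 'xenbr1', 'eth2': 'xenbr1'}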
Example #26
def free(need_mem, dominfo):
    """Balloon out memory from the privileged domain so that there is the
    specified required amount (in KiB) free.
    """

    # We check whether there is enough free memory, and if not, instruct dom0
    # to balloon out to free some up.  Memory freed by a destroyed domain may
    # not appear in the free_memory field immediately, because it needs to be
    # scrubbed before it can be released to the free list, which is done
    # asynchronously by Xen; ballooning is asynchronous also.  Such memory
    # does, however, need to be accounted for when calculating how much dom0
    # needs to balloon.  No matter where we expect the free memory to come
    # from, we need to wait for it to become available.
    #
    # We are not allowed to balloon below dom0_min_mem, or if dom0_ballooning
    # is False, we cannot balloon at all.  Memory can still become available
    # through a rebooting domain, however.
    #
    # Eventually, we time out (presumably because there really isn't enough
    # free memory).
    #
    # We don't want to set the memory target (triggering a watch) when that
    # has already been done, but we do want to respond to changing memory
    # usage, so we recheck the required alloc each time around the loop, but
    # track the last used value so that we don't trigger too many watches.

    xoptions = XendOptions.instance()
    dom0 = XendDomain.instance().privilegedDomain()
    xc = xen.lowlevel.xc.xc()

    try:
        dom0_min_mem = xoptions.get_dom0_min_mem() * 1024
        dom0_ballooning = xoptions.get_enable_dom0_ballooning()
        dom0_alloc = get_dom0_current_alloc()

        retries = 0
        sleep_time = SLEEP_TIME_GROWTH
        new_alloc = 0
        last_new_alloc = None
        last_free = None
        rlimit = RETRY_LIMIT

        # stop tmem from absorbing any more memory (must THAW when done!)
        xc.tmem_control(0, TMEMC_FREEZE, -1, 0, 0, "")

        # If an unreasonable amount of memory is required, we give up
        # waiting for ballooning or scrubbing, as if we had already
        # exhausted our retries.
        physinfo = xc.physinfo()
        free_mem = physinfo['free_memory']
        scrub_mem = physinfo['scrub_memory']
        total_mem = physinfo['total_memory']
        if dom0_ballooning:
            max_free_mem = total_mem - dom0_min_mem
        else:
            max_free_mem = total_mem - dom0_alloc
        if need_mem >= max_free_mem:
            retries = rlimit

        freeable_mem = free_mem + scrub_mem
        if freeable_mem < need_mem and need_mem < max_free_mem:
            # flush memory from tmem to scrub_mem and reobtain physinfo
            need_tmem_kb = need_mem - freeable_mem
            tmem_kb = xc.tmem_control(0, TMEMC_FLUSH, -1, need_tmem_kb, 0, "")
            log.debug("Balloon: tmem relinquished %d KiB of %d KiB requested.",
                      tmem_kb, need_tmem_kb)
            physinfo = xc.physinfo()
            free_mem = physinfo['free_memory']
            scrub_mem = physinfo['scrub_memory']

        # Check whether the current machine is a NUMA system and the newly
        # created HVM guest has all of its VCPUs within a single node.  If
        # both conditions hold, wait until all pages on the scrub list have
        # been freed (but stop waiting if it takes too long, roughly 20s).
        if physinfo['nr_nodes'] > 1 and retries == 0:
            oldnode = -1
            waitscrub = 1
            vcpus = dominfo.info['cpus'][0]
            for vcpu in vcpus:
                nodenum = 0
                for node in physinfo['node_to_cpu']:
                    for cpu in node:
                        if vcpu == cpu:
                            if oldnode == -1:
                                oldnode = nodenum
                            elif oldnode != nodenum:
                                waitscrub = 0
                    nodenum = nodenum + 1

            if waitscrub == 1 and scrub_mem > 0:
                log.debug("wait for scrub %s", scrub_mem)
                while scrub_mem > 0 and retries < rlimit:
                    time.sleep(sleep_time)
                    physinfo = xc.physinfo()
                    free_mem = physinfo['free_memory']
                    scrub_mem = physinfo['scrub_memory']
                    retries += 1
                    sleep_time += SLEEP_TIME_GROWTH
                log.debug("scrub for %d times", retries)

            retries = 0
            sleep_time = SLEEP_TIME_GROWTH

        while retries < rlimit:
            physinfo = xc.physinfo()
            free_mem = physinfo['free_memory']
            scrub_mem = physinfo['scrub_memory']

            if free_mem >= need_mem:
                log.debug("Balloon: %d KiB free; need %d; done.",
                          free_mem, need_mem)
                return

            if retries == 0:
                rlimit += ((need_mem - free_mem)/1024/1024) * RETRY_LIMIT_INCR
                log.debug("Balloon: %d KiB free; %d to scrub; need %d; retries: %d.",
                          free_mem, scrub_mem, need_mem, rlimit)

            if dom0_ballooning:
                dom0_alloc = get_dom0_current_alloc()
                new_alloc = dom0_alloc - (need_mem - free_mem - scrub_mem)

                if free_mem + scrub_mem >= need_mem:
                    if last_new_alloc is None:
                        log.debug("Balloon: waiting on scrubbing")
                        last_new_alloc = dom0_alloc
                else:
                    if (new_alloc >= dom0_min_mem and
                        new_alloc != last_new_alloc):
                        new_alloc_mb = new_alloc / 1024  # Round down
                        log.debug("Balloon: setting dom0 target to %d MiB.",
                                  new_alloc_mb)
                        dom0.setMemoryTarget(new_alloc_mb)
                        last_new_alloc = new_alloc
                # Continue to retry, waiting for ballooning or scrubbing.

            time.sleep(sleep_time)
            if retries < 2 * RETRY_LIMIT:
                sleep_time += SLEEP_TIME_GROWTH
            if last_free is not None and last_free >= free_mem + scrub_mem:
                retries += 1
            last_free = free_mem + scrub_mem

        # Not enough memory; diagnose the problem.
        if not dom0_ballooning:
            raise VmError(('Not enough free memory and enable-dom0-ballooning '
                           'is False, so I cannot release any more.  '
                           'I need %d KiB but only have %d.') %
                          (need_mem, free_mem))
        elif new_alloc < dom0_min_mem:
            raise VmError(
                ('I need %d KiB, but dom0_min_mem is %d and shrinking to '
                 '%d KiB would leave only %d KiB free.') %
                (need_mem, dom0_min_mem, dom0_min_mem,
                 free_mem + scrub_mem + dom0_alloc - dom0_min_mem))
        else:
            dom0_start_alloc_mb = get_dom0_current_alloc() / 1024
            dom0.setMemoryTarget(dom0_start_alloc_mb)
            raise VmError(
                ('Not enough memory is available, and dom0 cannot'
                 ' be shrunk any further'))

    finally:
        # allow tmem to accept pages again
        xc.tmem_control(0, TMEMC_THAW, -1, 0, 0, "")
        del xc
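The nested VCPU/node loops above compute a single boolean: do all of the guest's VCPUs live on one NUMA node? Assuming node_to_cpu is a list of per-node CPU lists, as xc.physinfo() returns it here, the same check can be written more directly; a sketch, not a drop-in replacement:

def all_vcpus_on_one_node(vcpus, node_to_cpu):
    # node_to_cpu: indexed by node number, each entry a list of CPUs.
    nodes = set()
    for nodenum, cpus in enumerate(node_to_cpu):
        if set(vcpus) & set(cpus):
            nodes.add(nodenum)
    return len(nodes) <= 1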
Example #27
    def _init_PSCSIs(self):
        # Initialise PSCSIs and PSCSI_HBAs
        saved_pscsis = self.state_store.load_state('pscsi')
        saved_pscsi_table = {}
        if saved_pscsis:
            for pscsi_uuid, pscsi_record in saved_pscsis.items():
                try:
                    saved_pscsi_table[pscsi_record['scsi_id']] = pscsi_uuid
                except KeyError:
                    pass

        saved_pscsi_HBAs = self.state_store.load_state('pscsi_HBA')
        saved_pscsi_HBA_table = {}
        if saved_pscsi_HBAs:
            for pscsi_HBA_uuid, pscsi_HBA_record in saved_pscsi_HBAs.items():
                try:
                    physical_host = int(pscsi_HBA_record['physical_host'])
                    saved_pscsi_HBA_table[physical_host] = pscsi_HBA_uuid
                except (KeyError, ValueError):
                    pass

        pscsi_table = {}
        pscsi_HBA_table = {}

        pscsi_records = []
        for pscsi_mask in xendoptions().get_pscsi_device_mask():
            pscsi_records += vscsi_util.get_all_scsi_devices(pscsi_mask)
        log.debug("pscsi record count: %s" % len(pscsi_records))

        for pscsi_record in pscsi_records:
            scsi_id = pscsi_record['scsi_id']
            if scsi_id:
                saved_HBA_uuid = None

                pscsi_uuid = saved_pscsi_table.get(scsi_id, None)
                if pscsi_uuid is None:
                    pscsi_uuid = uuid.createString()
                    saved_pscsi_table[scsi_id] = pscsi_uuid
                else:
                    try:
                        saved_HBA_uuid = saved_pscsis[pscsi_uuid].get(
                            'HBA', None)
                    except KeyError:
                        log.warn(
                            "Multi-path SCSI devices are not supported for XenAPI"
                        )
                        return

                physical_host = int(
                    pscsi_record['physical_HCTL'].split(':')[0])
                if pscsi_HBA_table.has_key(physical_host):
                    pscsi_HBA_uuid = pscsi_HBA_table[physical_host]
                elif saved_pscsi_HBA_table.has_key(physical_host):
                    pscsi_HBA_uuid = saved_pscsi_HBA_table[physical_host]
                    pscsi_HBA_table[physical_host] = pscsi_HBA_uuid
                else:
                    pscsi_HBA_uuid = uuid.createString()
                    pscsi_HBA_table[physical_host] = pscsi_HBA_uuid

                if saved_HBA_uuid is not None and \
                   saved_HBA_uuid != pscsi_HBA_uuid:
                    log.debug('The PSCSI(%s) host number was changed', scsi_id)
                pscsi_record['HBA'] = pscsi_HBA_uuid
                pscsi_table[pscsi_uuid] = pscsi_record

        for pscsi_uuid, pscsi_record in pscsi_table.items():
            XendPSCSI(pscsi_uuid, pscsi_record)

        for physical_host, pscsi_HBA_uuid in pscsi_HBA_table.items():
            XendPSCSI_HBA(pscsi_HBA_uuid, {'physical_host': physical_host})
Example #28
 def add_network(self, interface):
     # TODO
     log.debug("add_network(): Not implemented.")
Example #29
 def get_PIF_refs(self):
     log.debug(XendPIF.get_all())
     return XendPIF.get_all()
Example #30
 def remove_usbdev(self, busid):
     log.debug("remove_usbdev(): Not implemented.")
Example #31
def free(need_mem, dominfo):
    """Balloon out memory from the privileged domain so that there is the
    specified required amount (in KiB) free.
    """

    # We check whether there is enough free memory, and if not, instruct dom0
    # to balloon out to free some up.  Memory freed by a destroyed domain may
    # not appear in the free_memory field immediately, because it needs to be
    # scrubbed before it can be released to the free list, which is done
    # asynchronously by Xen; ballooning is asynchronous also.  Such memory
    # does, however, need to be accounted for when calculating how much dom0
    # needs to balloon.  No matter where we expect the free memory to come
    # from, we need to wait for it to become available.
    #
    # We are not allowed to balloon below dom0_min_mem, or if dom0_ballooning
    # is False, we cannot balloon at all.  Memory can still become available
    # through a rebooting domain, however.
    #
    # Eventually, we time out (presumably because there really isn't enough
    # free memory).
    #
    # We don't want to set the memory target (triggering a watch) when that
    # has already been done, but we do want to respond to changing memory
    # usage, so we recheck the required alloc each time around the loop, but
    # track the last used value so that we don't trigger too many watches.

    xoptions = XendOptions.instance()
    dom0 = XendDomain.instance().privilegedDomain()
    xc = xen.lowlevel.xc.xc()

    try:
        dom0_min_mem = xoptions.get_dom0_min_mem() * 1024
        dom0_ballooning = xoptions.get_enable_dom0_ballooning()
        dom0_alloc = get_dom0_current_alloc()

        retries = 0
        sleep_time = SLEEP_TIME_GROWTH
        new_alloc = 0
        last_new_alloc = None
        last_free = None
        rlimit = RETRY_LIMIT

        # If an unreasonable amount of memory is required, we give up
        # waiting for ballooning or scrubbing, as if we had already
        # exhausted our retries.
        physinfo = xc.physinfo()
        free_mem = physinfo["free_memory"]
        scrub_mem = physinfo["scrub_memory"]
        total_mem = physinfo["total_memory"]
        if dom0_ballooning:
            max_free_mem = total_mem - dom0_min_mem
        else:
            max_free_mem = total_mem - dom0_alloc
        if need_mem >= max_free_mem:
            retries = rlimit

        # Check whether the current machine is a NUMA system and the newly
        # created HVM guest has all of its VCPUs within a single node.  If
        # both conditions hold, wait until all pages on the scrub list have
        # been freed (but stop waiting if it takes too long, roughly 20s).
        if physinfo["nr_nodes"] > 1 and retries == 0:
            oldnode = -1
            waitscrub = 1
            vcpus = dominfo.info["cpus"][0]
            for vcpu in vcpus:
                nodenum = 0
                for node in physinfo["node_to_cpu"]:
                    for cpu in node:
                        if vcpu == cpu:
                            if oldnode == -1:
                                oldnode = nodenum
                            elif oldnode != nodenum:
                                waitscrub = 0
                    nodenum = nodenum + 1

            if waitscrub == 1 and scrub_mem > 0:
                log.debug("wait for scrub %s", scrub_mem)
                while scrub_mem > 0 and retries < rlimit:
                    time.sleep(sleep_time)
                    physinfo = xc.physinfo()
                    free_mem = physinfo["free_memory"]
                    scrub_mem = physinfo["scrub_memory"]
                    retries += 1
                    sleep_time += SLEEP_TIME_GROWTH
                log.debug("scrub for %d times", retries)

            retries = 0
            sleep_time = SLEEP_TIME_GROWTH

        while retries < rlimit:
            physinfo = xc.physinfo()
            free_mem = physinfo["free_memory"]
            scrub_mem = physinfo["scrub_memory"]

            if free_mem >= need_mem:
                log.debug("Balloon: %d KiB free; need %d; done.", free_mem, need_mem)
                return

            if retries == 0:
                rlimit += ((need_mem - free_mem) / 1024 / 1024) * RETRY_LIMIT_INCR
                log.debug(
                    "Balloon: %d KiB free; %d to scrub; need %d; retries: %d.", free_mem, scrub_mem, need_mem, rlimit
                )

            if dom0_ballooning:
                dom0_alloc = get_dom0_current_alloc()
                new_alloc = dom0_alloc - (need_mem - free_mem - scrub_mem)

                if free_mem + scrub_mem >= need_mem:
                    if last_new_alloc is None:
                        log.debug("Balloon: waiting on scrubbing")
                        last_new_alloc = dom0_alloc
                else:
                    if new_alloc >= dom0_min_mem and new_alloc != last_new_alloc:
                        new_alloc_mb = new_alloc / 1024  # Round down
                        log.debug("Balloon: setting dom0 target to %d MiB.", new_alloc_mb)
                        dom0.setMemoryTarget(new_alloc_mb)
                        last_new_alloc = new_alloc
                # Continue to retry, waiting for ballooning or scrubbing.

            time.sleep(sleep_time)
            if retries < 2 * RETRY_LIMIT:
                sleep_time += SLEEP_TIME_GROWTH
            if last_free is not None and last_free >= free_mem + scrub_mem:
                retries += 1
            last_free = free_mem + scrub_mem

        # Not enough memory; diagnose the problem.
        if not dom0_ballooning:
            raise VmError(
                (
                    "Not enough free memory and enable-dom0-ballooning "
                    "is False, so I cannot release any more.  "
                    "I need %d KiB but only have %d."
                )
                % (need_mem, free_mem)
            )
        elif new_alloc < dom0_min_mem:
            raise VmError(
                ("I need %d KiB, but dom0_min_mem is %d and shrinking to " "%d KiB would leave only %d KiB free.")
                % (need_mem, dom0_min_mem, dom0_min_mem, free_mem + scrub_mem + dom0_alloc - dom0_min_mem)
            )
        else:
            dom0_start_alloc_mb = get_dom0_current_alloc() / 1024
            dom0.setMemoryTarget(dom0_start_alloc_mb)
            raise VmError(("Not enough memory is available, and dom0 cannot" " be shrunk any further"))

    finally:
        del xc
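# (Editor's annotation, not part of the original xend source.)  The nested
# loop near the top of this example decides whether every vcpu of the domain
# sits on a single NUMA node; only then is waiting for scrubbing worthwhile,
# since memory freed on another node would not help.  A minimal sketch of the
# same check, assuming a physinfo-style "node_to_cpu" list of per-node cpu
# lists:
def vcpus_on_single_node(vcpus, node_to_cpu):
    nodes = set()
    for nodenum, cpus in enumerate(node_to_cpu):
        for vcpu in vcpus:
            if vcpu in cpus:
                nodes.add(nodenum)
    return len(nodes) <= 1

# Example: vcpus 0 and 1 both live on node 0 here, so this returns True:
#   vcpus_on_single_node([0, 1], [[0, 1, 2, 3], [4, 5, 6, 7]])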
Example #32
0
 def remove_network(self, interface):
     # TODO
     log.debug("remove_network(): Not implemented.")
Example #33
0
        args = [ blexec ]
        if kernel:
            args.append("--kernel=%s" % kernel)
        if ramdisk:
            args.append("--ramdisk=%s" % ramdisk)
        if kernel_args:
            args.append("--args=%s" % kernel_args)
        if quiet:
            args.append("-q")
        args.append("--output=%s" % fifo)
        if blargs:
            args.extend(shlex.split(blargs))
        args.append(disk)

        try:
            log.debug("Launching bootloader as %s." % str(args))
            env = os.environ.copy()
            env['TERM'] = 'vt100'
            oshelp.close_fds()
            os.execvpe(args[0], args, env)
        except OSError, e:
            print e
            pass
        os._exit(1)

    # record that this domain is bootloading
    dom.bootloader_pid = child

    # On Solaris, the master pty side does not have terminal semantics,
    # so don't try to set any attributes, as it will fail.
    if os.uname()[0] != 'SunOS':
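# (Editor's annotation, not part of the original xend source.)  The fragment
# above runs in the child of a fork: it execs the bootloader with TERM forced
# to vt100 and _exit()s if the exec fails, while the parent records the
# child's pid.  A minimal, self-contained sketch of that fork/exec pattern,
# using a hypothetical spawn() helper:
import os

def spawn(args):
    pid = os.fork()
    if pid == 0:
        # Child: exec the bootloader; on failure, _exit without running
        # any of the parent's cleanup handlers.
        env = os.environ.copy()
        env['TERM'] = 'vt100'
        try:
            os.execvpe(args[0], args, env)
        except OSError:
            pass
        os._exit(1)
    # Parent: hand back the bootloader's pid for later bookkeeping.
    return pid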
Example #34
0
 def remove_usbdev(self, busid):
     log.debug("remove_usbdev(): Not implemented.")
Example #35
0
def free(need_mem, dominfo):
    """Balloon out memory from the privileged domain so that there is the
    specified required amount (in KiB) free.
    """

    # We check whether there is enough free memory, and if not, instruct dom0
    # to balloon out to free some up.  Memory freed by a destroyed domain may
    # not appear in the free_memory field immediately, because it needs to be
    # scrubbed before it can be released to the free list, which is done
    # asynchronously by Xen; ballooning is asynchronous also.  Such memory
    # does, however, need to be accounted for when calculating how much dom0
    # needs to balloon.  No matter where we expect the free memory to come
    # from, we need to wait for it to become available.
    #
    # We are not allowed to balloon below dom0_min_mem, or if dom0_ballooning
    # is False, we cannot balloon at all.  Memory can still become available
    # through a rebooting domain, however.
    #
    # Eventually, we time out (presumably because there really isn't enough
    # free memory).
    #
    # We don't want to set the memory target (triggering a watch) when that
    # has already been done, but we do want to respond to changing memory
    # usage, so we recheck the required alloc each time around the loop, but
    # track the last used value so that we don't trigger too many watches.

    xoptions = XendOptions.instance()
    dom0 = XendDomain.instance().privilegedDomain()
    xc = xen.lowlevel.xc.xc()
    memory_pool = MemoryPool.instance() 
    try:
        dom0_min_mem = xoptions.get_dom0_min_mem() * 1024
        dom0_ballooning = xoptions.get_enable_dom0_ballooning()
        guest_size = 0
        hvm = dominfo.info.is_hvm()
        if memory_pool.is_enabled() and dominfo.domid:
            if not hvm:
                if need_mem <= 4 * 1024:
                    guest_size = 32
                else:
                    guest_size = dominfo.image.getBitSize()
            if guest_size == 32:
                dom0_ballooning = 0
        else:
            # memory pool not in use for this domain; keep the configured
            # dom0 ballooning setting
            dom0_ballooning = xoptions.get_enable_dom0_ballooning()
        dom0_alloc = get_dom0_current_alloc()

        retries = 0
        sleep_time = SLEEP_TIME_GROWTH
        new_alloc = 0
        last_new_alloc = None
        last_free = None
        rlimit = RETRY_LIMIT
        mem_need_balloon = 0
        left_memory_pool = 0
        mem_target = 0
        untouched_memory_pool = 0
        real_need_mem = need_mem

        # stop tmem from absorbing any more memory (must THAW when done!)
        xc.tmem_control(0, TMEMC_FREEZE, -1, 0, 0, 0, "")

        # If an unreasonable amount of memory is required, we give up waiting
        # for ballooning or scrubbing, as if we had already retried.
        physinfo = xc.physinfo()
        free_mem = physinfo['free_memory']
        scrub_mem = physinfo['scrub_memory']
        total_mem = physinfo['total_memory']
        if memory_pool.is_enabled() and dominfo.domid:
            if guest_size != 32 or hvm:
                if need_mem > 4 * 1024: 
                    dominfo.alloc_mem = need_mem
                left_memory_pool = memory_pool.get_left_memory()
                if need_mem > left_memory_pool:
                    dominfo.alloc_mem = 0
                    raise VmError(('Not enough free memory,'
                                   ' so I cannot release any more.  '
                                   'I need %d KiB but only have %d in the pool.') %
                                   (need_mem, memory_pool.get_left_memory()))
                else:
                    untouched_memory_pool = memory_pool.get_untouched_memory()
                    if (left_memory_pool - untouched_memory_pool) > need_mem:
                        dom0_ballooning = 0
                    else:
                        mem_need_balloon = need_mem - left_memory_pool + untouched_memory_pool
                        need_mem = free_mem + scrub_mem + mem_need_balloon

        if dom0_ballooning:
            max_free_mem = total_mem - dom0_min_mem
        else:
            max_free_mem = total_mem - dom0_alloc
        if need_mem >= max_free_mem:
            retries = rlimit

        freeable_mem = free_mem + scrub_mem
        if freeable_mem < need_mem and need_mem < max_free_mem:
            # flush memory from tmem to scrub_mem and reobtain physinfo
            need_tmem_kb = need_mem - freeable_mem
            tmem_kb = xc.tmem_control(0, TMEMC_FLUSH, -1, need_tmem_kb, 0, 0, "")
            log.debug("Balloon: tmem relinquished %d KiB of %d KiB requested.",
                      tmem_kb, need_tmem_kb)
            physinfo = xc.physinfo()
            free_mem = physinfo['free_memory']
            scrub_mem = physinfo['scrub_memory']

        while retries < rlimit:
            physinfo = xc.physinfo()
            free_mem = physinfo['free_memory']
            scrub_mem = physinfo['scrub_memory']
            if free_mem >= need_mem:
                if (guest_size != 32 or hvm) and dominfo.domid:
                    memory_pool.decrease_untouched_memory(mem_need_balloon)
                    memory_pool.decrease_memory(real_need_mem)
                else:
                    log.debug("Balloon: %d KiB free; need %d; done.",
                              free_mem, need_mem)
                return

            if retries == 0:
                rlimit += ((need_mem - free_mem)/1024/1024) * RETRY_LIMIT_INCR
                log.debug("Balloon: %d KiB free; %d to scrub; need %d; retries: %d.",
                          free_mem, scrub_mem, need_mem, rlimit)

            if dom0_ballooning:
                dom0_alloc = get_dom0_current_alloc()
                new_alloc = dom0_alloc - (need_mem - free_mem - scrub_mem)
                if free_mem + scrub_mem >= need_mem:
                    if last_new_alloc == None:
                        log.debug("Balloon: waiting on scrubbing")
                        last_new_alloc = dom0_alloc
                else:
                    if (new_alloc >= dom0_min_mem and
                        new_alloc != last_new_alloc):
                        new_alloc_mb = new_alloc / 1024  # Round down
                        log.debug("Balloon: setting dom0 target to %d MiB.",
                                  new_alloc_mb)
                        dom0.setMemoryTarget(new_alloc_mb)
                        last_new_alloc = new_alloc
                # Continue to retry, waiting for ballooning or scrubbing.

            time.sleep(sleep_time)
            if retries < 2 * RETRY_LIMIT:
                sleep_time += SLEEP_TIME_GROWTH
            if last_free != None and last_free >= free_mem + scrub_mem:
                retries += 1
            last_free = free_mem + scrub_mem

        # Not enough memory; diagnose the problem.
        if not dom0_ballooning:
            dominfo.alloc_mem = 0 
            raise VmError(('Not enough free memory and enable-dom0-ballooning '
                           'is False, so I cannot release any more.  '
                           'I need %d KiB but only have %d.') %
                          (need_mem, free_mem))
        elif new_alloc < dom0_min_mem:
            dominfo.alloc_mem = 0 
            raise VmError(
                ('I need %d KiB, but dom0_min_mem is %d and shrinking to '
                 '%d KiB would leave only %d KiB free.') %
                (need_mem, dom0_min_mem, dom0_min_mem,
                 free_mem + scrub_mem + dom0_alloc - dom0_min_mem))
        else:
            dom0_start_alloc_mb = get_dom0_current_alloc() / 1024
            dom0.setMemoryTarget(dom0_start_alloc_mb)
            dominfo.alloc_mem = 0 
            raise VmError(
                ('Not enough memory is available, and dom0 cannot'
                 ' be shrunk any further'))

    finally:
        # allow tmem to accept pages again
        xc.tmem_control(0, TMEMC_THAW, -1, 0, 0, 0, "")
        del xc
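# (Editor's annotation, not part of the original xend source.)  In the
# memory-pool path above, dom0 is only ballooned for the part of the request
# the pool cannot cover.  A worked sketch of that arithmetic with
# hypothetical figures, all in KiB:
left_memory_pool = 512 * 1024        # 512 MiB still assigned to the pool
untouched_memory_pool = 128 * 1024   # 128 MiB of it never handed out
need_mem = 448 * 1024                # the guest asks for 448 MiB

if (left_memory_pool - untouched_memory_pool) > need_mem:
    mem_need_balloon = 0             # the pool alone covers the request
else:
    # Balloon only the shortfall; the rest comes from the pool.
    # 448*1024 - 512*1024 + 128*1024 = 64*1024, i.e. 64 MiB from dom0.
    mem_need_balloon = need_mem - left_memory_pool + untouched_memory_pool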
Example #36
0
def free(need_mem):
    """Balloon out memory from the privileged domain so that there is the
    specified required amount (in KiB) free.
    """

    # We check whether there is enough free memory, and if not, instruct dom0
    # to balloon out to free some up.  Memory freed by a destroyed domain may
    # not appear in the free_memory field immediately, because it needs to be
    # scrubbed before it can be released to the free list, which is done
    # asynchronously by Xen; ballooning is asynchronous also.  Such memory
    # does, however, need to be accounted for when calculating how much dom0
    # needs to balloon.  No matter where we expect the free memory to come
    # from, we need to wait for it to become available.
    #
    # We are not allowed to balloon below dom0_min_mem, or if dom0_min_mem
    # is 0, we cannot balloon at all.  Memory can still become available
    # through a rebooting domain, however.
    #
    # Eventually, we time out (presumably because there really isn't enough
    # free memory).
    #
    # We don't want to set the memory target (triggering a watch) when that
    # has already been done, but we do want to respond to changing memory
    # usage, so we recheck the required alloc each time around the loop, but
    # track the last used value so that we don't trigger too many watches.

    xoptions = XendOptions.instance()
    dom0 = XendDomain.instance().privilegedDomain()
    xc = xen.lowlevel.xc.xc()
    dom0_start_alloc_mb = get_dom0_current_alloc() / 1024

    try:
        dom0_min_mem = xoptions.get_dom0_min_mem() * 1024
        dom0_alloc = get_dom0_current_alloc()

        retries = 0
        sleep_time = SLEEP_TIME_GROWTH
        new_alloc = 0
        last_new_alloc = None
        last_free = None
        rlimit = RETRY_LIMIT

        # If an unreasonable amount of memory is required, we give up waiting
        # for ballooning or scrubbing, as if we had already retried.
        physinfo = xc.physinfo()
        free_mem = physinfo['free_memory']
        scrub_mem = physinfo['scrub_memory']
        total_mem = physinfo['total_memory']
        if dom0_min_mem > 0:
            max_free_mem = total_mem - dom0_min_mem
        else:
            max_free_mem = total_mem - dom0_alloc
        if need_mem >= max_free_mem:
            retries = rlimit

        while retries < rlimit:
            physinfo = xc.physinfo()
            free_mem = physinfo['free_memory']
            scrub_mem = physinfo['scrub_memory']

            if free_mem >= need_mem:
                log.debug("Balloon: %d KiB free; need %d; done.",
                          free_mem, need_mem)
                return

            if retries == 0:
                rlimit += ((need_mem - free_mem)/1024/1024) * RETRY_LIMIT_INCR
                log.debug("Balloon: %d KiB free; %d to scrub; need %d; retries: %d.",
                          free_mem, scrub_mem, need_mem, rlimit)

            if dom0_min_mem > 0:
                dom0_alloc = get_dom0_current_alloc()
                new_alloc = dom0_alloc - (need_mem - free_mem - scrub_mem)

                if free_mem + scrub_mem >= need_mem:
                    if last_new_alloc == None:
                        log.debug("Balloon: waiting on scrubbing")
                        last_new_alloc = dom0_alloc
                else:
                    if (new_alloc >= dom0_min_mem and
                        new_alloc != last_new_alloc):
                        new_alloc_mb = new_alloc / 1024  # Round down
                        log.debug("Balloon: setting dom0 target to %d MiB.",
                                  new_alloc_mb)
                        dom0.setMemoryTarget(new_alloc_mb)
                        last_new_alloc = new_alloc
                # Continue to retry, waiting for ballooning or scrubbing.

            time.sleep(sleep_time)
            if retries < 2 * RETRY_LIMIT:
                sleep_time += SLEEP_TIME_GROWTH
            if last_free != None and last_free >= free_mem + scrub_mem:
                retries += 1
            last_free = free_mem + scrub_mem

        # Not enough memory; diagnose the problem.
        if dom0_min_mem == 0:
            raise VmError(('Not enough free memory and dom0_min_mem is 0, so '
                           'I cannot release any more.  I need %d KiB but '
                           'only have %d.') %
                          (need_mem, free_mem))
        elif new_alloc < dom0_min_mem:
            raise VmError(
                ('I need %d KiB, but dom0_min_mem is %d and shrinking to '
                 '%d KiB would leave only %d KiB free.') %
                (need_mem, dom0_min_mem, dom0_min_mem,
                 free_mem + scrub_mem + dom0_alloc - dom0_min_mem))
        else:
            dom0.setMemoryTarget(dom0_start_alloc_mb)
            raise VmError(
                ('Not enough memory is available, and dom0 cannot'
                 ' be shrunk any further'))

    finally:
        del xc
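# (Editor's annotation, not part of the original xend source.)  The retry
# budget above grows with the size of the shortfall: RETRY_LIMIT_INCR extra
# retries per whole GiB still missing (all figures are KiB, so dividing by
# 1024*1024 rounds down to GiB).  A worked sketch, with hypothetical values
# for the two constants:
RETRY_LIMIT = 20
RETRY_LIMIT_INCR = 5

need_mem = 6 * 1024 * 1024           # want 6 GiB free
free_mem = 2 * 1024 * 1024           # only 2 GiB free right now
rlimit = RETRY_LIMIT
rlimit += ((need_mem - free_mem) // (1024 * 1024)) * RETRY_LIMIT_INCR
# The shortfall is 4 GiB, so rlimit becomes 20 + 4 * 5 == 40.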
Example #37
0
def free(need_mem, dominfo):
    """Balloon out memory from the privileged domain so that there is the
    specified required amount (in KiB) free.
    """

    # We check whether there is enough free memory, and if not, instruct dom0
    # to balloon out to free some up.  Memory freed by a destroyed domain may
    # not appear in the free_memory field immediately, because it needs to be
    # scrubbed before it can be released to the free list, which is done
    # asynchronously by Xen; ballooning is asynchronous also.  Such memory
    # does, however, need to be accounted for when calculating how much dom0
    # needs to balloon.  No matter where we expect the free memory to come
    # from, we need to wait for it to become available.
    #
    # We are not allowed to balloon below dom0_min_mem, or if dom0_ballooning
    # is False, we cannot balloon at all.  Memory can still become available
    # through a rebooting domain, however.
    #
    # Eventually, we time out (presumably because there really isn't enough
    # free memory).
    #
    # We don't want to set the memory target (triggering a watch) when that
    # has already been done, but we do want to respond to changing memory
    # usage, so we recheck the required alloc each time around the loop, but
    # track the last used value so that we don't trigger too many watches.

    xoptions = XendOptions.instance()
    dom0 = XendDomain.instance().privilegedDomain()
    xc = xen.lowlevel.xc.xc()
    memory_pool = MemoryPool.instance()
    try:
        dom0_min_mem = xoptions.get_dom0_min_mem() * 1024
        dom0_ballooning = xoptions.get_enable_dom0_ballooning()
        guest_size = 0
        hvm = dominfo.info.is_hvm()
        if memory_pool.is_enabled() and dominfo.domid:
            if not hvm:
                if need_mem <= 4 * 1024:
                    guest_size = 32
                else:
                    guest_size = dominfo.image.getBitSize()
            if guest_size == 32:
                dom0_ballooning = 0
        else:
            # memory pool not in use for this domain; keep the configured
            # dom0 ballooning setting
            dom0_ballooning = xoptions.get_enable_dom0_ballooning()
        dom0_alloc = get_dom0_current_alloc()

        retries = 0
        sleep_time = SLEEP_TIME_GROWTH
        new_alloc = 0
        last_new_alloc = None
        last_free = None
        rlimit = RETRY_LIMIT
        mem_need_balloon = 0
        left_memory_pool = 0
        mem_target = 0
        untouched_memory_pool = 0
        real_need_mem = need_mem

        # stop tmem from absorbing any more memory (must THAW when done!)
        xc.tmem_control(0, TMEMC_FREEZE, -1, 0, 0, 0, "")

        # If an unreasonable amount of memory is required, we give up waiting
        # for ballooning or scrubbing, as if we had already retried.
        physinfo = xc.physinfo()
        free_mem = physinfo['free_memory']
        scrub_mem = physinfo['scrub_memory']
        total_mem = physinfo['total_memory']
        if memory_pool.is_enabled() and dominfo.domid:
            if guest_size != 32 or hvm:
                if need_mem > 4 * 1024:
                    dominfo.alloc_mem = need_mem
                left_memory_pool = memory_pool.get_left_memory()
                if need_mem > left_memory_pool:
                    dominfo.alloc_mem = 0
                    raise VmError(
                        ('Not enough free memory,'
                         ' so I cannot release any more.  '
                         'I need %d KiB but only have %d in the pool.') %
                        (need_mem, memory_pool.get_left_memory()))
                else:
                    untouched_memory_pool = memory_pool.get_untouched_memory()
                    if (left_memory_pool - untouched_memory_pool) > need_mem:
                        dom0_ballooning = 0
                    else:
                        mem_need_balloon = need_mem - left_memory_pool + untouched_memory_pool
                        need_mem = free_mem + scrub_mem + mem_need_balloon

        if dom0_ballooning:
            max_free_mem = total_mem - dom0_min_mem
        else:
            max_free_mem = total_mem - dom0_alloc
        if need_mem >= max_free_mem:
            retries = rlimit

        freeable_mem = free_mem + scrub_mem
        if freeable_mem < need_mem and need_mem < max_free_mem:
            # flush memory from tmem to scrub_mem and reobtain physinfo
            need_tmem_kb = need_mem - freeable_mem
            tmem_kb = xc.tmem_control(0, TMEMC_FLUSH, -1, need_tmem_kb, 0, 0,
                                      "")
            log.debug("Balloon: tmem relinquished %d KiB of %d KiB requested.",
                      tmem_kb, need_tmem_kb)
            physinfo = xc.physinfo()
            free_mem = physinfo['free_memory']
            scrub_mem = physinfo['scrub_memory']

        while retries < rlimit:
            physinfo = xc.physinfo()
            free_mem = physinfo['free_memory']
            scrub_mem = physinfo['scrub_memory']
            if free_mem >= need_mem:
                if (guest_size != 32 or hvm) and dominfo.domid:
                    memory_pool.decrease_untouched_memory(mem_need_balloon)
                    memory_pool.decrease_memory(real_need_mem)
                else:
                    log.debug("Balloon: %d KiB free; need %d; done.", free_mem,
                              need_mem)
                return

            if retries == 0:
                rlimit += (
                    (need_mem - free_mem) / 1024 / 1024) * RETRY_LIMIT_INCR
                log.debug(
                    "Balloon: %d KiB free; %d to scrub; need %d; retries: %d.",
                    free_mem, scrub_mem, need_mem, rlimit)

            if dom0_ballooning:
                dom0_alloc = get_dom0_current_alloc()
                new_alloc = dom0_alloc - (need_mem - free_mem - scrub_mem)
                if free_mem + scrub_mem >= need_mem:
                    if last_new_alloc == None:
                        log.debug("Balloon: waiting on scrubbing")
                        last_new_alloc = dom0_alloc
                else:
                    if (new_alloc >= dom0_min_mem
                            and new_alloc != last_new_alloc):
                        new_alloc_mb = new_alloc / 1024  # Round down
                        log.debug("Balloon: setting dom0 target to %d MiB.",
                                  new_alloc_mb)
                        dom0.setMemoryTarget(new_alloc_mb)
                        last_new_alloc = new_alloc
                # Continue to retry, waiting for ballooning or scrubbing.

            time.sleep(sleep_time)
            if retries < 2 * RETRY_LIMIT:
                sleep_time += SLEEP_TIME_GROWTH
            if last_free != None and last_free >= free_mem + scrub_mem:
                retries += 1
            last_free = free_mem + scrub_mem

        # Not enough memory; diagnose the problem.
        if not dom0_ballooning:
            dominfo.alloc_mem = 0
            raise VmError(
                ('Not enough free memory and enable-dom0-ballooning '
                 'is False, so I cannot release any more.  '
                 'I need %d KiB but only have %d.') % (need_mem, free_mem))
        elif new_alloc < dom0_min_mem:
            dominfo.alloc_mem = 0
            raise VmError(
                ('I need %d KiB, but dom0_min_mem is %d and shrinking to '
                 '%d KiB would leave only %d KiB free.') %
                (need_mem, dom0_min_mem, dom0_min_mem,
                 free_mem + scrub_mem + dom0_alloc - dom0_min_mem))
        else:
            dom0_start_alloc_mb = get_dom0_current_alloc() / 1024
            dom0.setMemoryTarget(dom0_start_alloc_mb)
            dominfo.alloc_mem = 0
            raise VmError(('Not enough memory is available, and dom0 cannot'
                           ' be shrunk any further'))

    finally:
        # allow tmem to accept pages again
        xc.tmem_control(0, TMEMC_THAW, -1, 0, 0, 0, "")
        del xc
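# (Editor's annotation, not part of the original xend source.)  Both
# pool-aware variants of free() bracket the balloon loop with tmem control
# calls: FREEZE stops tmem from absorbing further pages, FLUSH pushes pages
# back out for scrubbing, and THAW in the finally clause re-enables tmem even
# when a VmError is raised.  A minimal sketch of that freeze/thaw discipline;
# the constant values here are placeholders, not the real ones:
TMEMC_FREEZE = 1
TMEMC_THAW = 2

def with_tmem_frozen(xc, work):
    xc.tmem_control(0, TMEMC_FREEZE, -1, 0, 0, 0, "")
    try:
        return work()                # e.g. the balloon/scrub retry loop
    finally:
        # Always thaw, even if work() raised.
        xc.tmem_control(0, TMEMC_THAW, -1, 0, 0, 0, "")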