def save_cpu_pools(self):
    # Snapshot every managed CPU pool's record, keyed by UUID, and
    # persist the mapping through the state store.
    cpu_pool_records = dict([
        (cpu_pool_uuid,
         XendAPIStore.get(cpu_pool_uuid,
                          XendCPUPool.getClass()).get_record())
        for cpu_pool_uuid in XendCPUPool.get_all_managed()
    ])
    self.state_store.save_state(XendCPUPool.getClass(), cpu_pool_records)
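The state store consumed here only needs save_state/load_state keyed by a
class name. A minimal in-memory stand-in for that interface (purely
illustrative; the real XendStateStore persists to disk, and the class name
below is hypothetical):

class DictStateStore(object):
    def __init__(self):
        self._state = {}

    def save_state(self, cls, records):
        # records is a {uuid: record-dict} mapping, as built above.
        self._state[cls] = records

    def load_state(self, cls):
        # Returns None when nothing has been saved for this class.
        return self._state.get(cls)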
Example #2
    def physinfo(self, show_numa):
        info = self.xc.physinfo()
        tinfo = self.xc.topologyinfo()
        ninfo = self.xc.numainfo()

        info['cpu_mhz'] = info['cpu_khz'] / 1000
        
        # physinfo is in KiB, need it in MiB
        info['total_memory'] = info['total_memory'] / 1024
        info['free_memory']  = info['free_memory'] / 1024
        info['free_cpus'] = len(XendCPUPool.unbound_cpus())

        ITEM_ORDER = [
            'nr_cpus',
            'nr_nodes',
            'cores_per_socket',
            'threads_per_core',
            'cpu_mhz',
            'hw_caps',
            'virt_caps',
            'total_memory',
            'free_memory',
            'free_cpus',
        ]

        if show_numa != 0:
            info['cpu_topology'] = self.format_cpu_to_core_socket_node(tinfo)
            info['numa_info'] = self.format_numa_info(ninfo)
            ITEM_ORDER += ['cpu_topology', 'numa_info']

        return [[k, info[k]] for k in ITEM_ORDER]
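physinfo() returns an ordered list of [key, value] pairs rather than a
dict, so a caller can render the fields in a stable order. A minimal
sketch of such a caller; the sample values are invented for illustration:

# Hypothetical caller: print physinfo()'s [key, value] pairs in order.
pairs = [
    ['nr_cpus', 8],
    ['cpu_mhz', 2400],
    ['total_memory', 16384],  # MiB
    ['free_memory', 12288],   # MiB
]
for key, value in pairs:
    print '%-16s: %s' % (key, value)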
Example #4
def _init_cpu_pools(self):
    # Initialise cpu_pools from the saved state, then pick up the pools
    # that are already active in the hypervisor.
    saved_cpu_pools = self.state_store.load_state(XendCPUPool.getClass())
    if saved_cpu_pools:
        for cpu_pool_uuid, cpu_pool in saved_cpu_pools.items():
            try:
                XendCPUPool.recreate(cpu_pool, cpu_pool_uuid)
            except CreateUnspecifiedAttributeError:
                log.warn("Error recreating %s %s",
                         XendCPUPool.getClass(), cpu_pool_uuid)
    XendCPUPool.recreate_active_pools()
Example #7
    def start(self, status):
        # Running the network script will spawn another process, which takes
        # the status fd with it unless we set FD_CLOEXEC.  Failing to do this
        # causes the read in SrvDaemon to hang even when we have written here.
        if status:
            fcntl.fcntl(status, fcntl.F_SETFD, fcntl.FD_CLOEXEC)

        # Prepare to catch SIGTERM (received when 'xend stop' is executed)
        # and call each server's cleanup if possible
        signal.signal(signal.SIGTERM, self.cleanup)
        signal.signal(signal.SIGHUP, self.reloadConfig)

        while True:
            threads = []
            for server in self.servers:
                if server.ready:
                    continue

                thread = Thread(target=server.run,
                                name=server.__class__.__name__)
                thread.setDaemon(True)
                thread.start()
                threads.append(thread)

            # Wait until every server thread has initialised itself, then
            # close the status pipe.

            retryCount = 0
            threads_left = True
            while threads_left:
                threads_left = False

                for server in self.servers:
                    if not server.ready:
                        threads_left = True
                        break

                if threads_left:
                    time.sleep(0.5)
                    retryCount += 1
                    if retryCount > 60:  # give up after ~30 seconds
                        for server in self.servers:
                            if not server.ready:
                                log.error("Server " +
                                          server.__class__.__name__ +
                                          " did not initialise!")
                        break

            if status:
                status.write('0')
                status.close()
                status = None

            # Auto-start pools before any domains are started.
            try:
                XendCPUPool.autostart_pools()
            except Exception, e:
                log.exception("Failed while autostarting pools")

            # Reaching this point means we can auto-start domains.
            try:
                xenddomain().autostart_domains()
            except Exception, e:
                log.exception("Failed while autostarting domains")
Example #9
    def physinfo(self, show_numa):
        info = self.xc.physinfo()
        tinfo = self.xc.topologyinfo()
        ninfo = self.xc.numainfo()

        info['cpu_mhz'] = info['cpu_khz'] / 1000

        # The effective dom0 memory floor is the larger of the configured
        # minimum (MiB, converted to KiB here) and the kernel-reported floor.
        configured_floor = xendoptions().get_dom0_min_mem() * 1024
        from xen.xend import balloon
        try:
            kernel_floor = balloon.get_dom0_min_target()
        except:
            kernel_floor = 0
        dom0_min_mem = max(configured_floor, kernel_floor)
        dom0_mem = balloon.get_dom0_current_alloc()
        # extra_mem is what dom0 could release by ballooning down to the floor.
        extra_mem = 0
        if dom0_min_mem > 0 and dom0_mem > dom0_min_mem:
            extra_mem = dom0_mem - dom0_min_mem
        # Scrub memory becomes usable once scrubbed, so count it as free.
        info['free_memory'] = info['free_memory'] + info['scrub_memory']
        info['max_free_memory'] = info['free_memory'] + extra_mem
        info['free_cpus'] = len(XendCPUPool.unbound_cpus())

        # Convert KiB to MiB, rounding down to be conservative
        info['total_memory'] = info['total_memory'] / 1024
        info['free_memory'] = info['free_memory'] / 1024
        info['max_free_memory'] = info['max_free_memory'] / 1024

        # FIXME:  These are hard-coded to be the inverse of the getXenMemory
        #         functions in image.py.  Find a cleaner way.
        info['max_para_memory'] = info['max_free_memory'] - 4
        if info['max_para_memory'] < 0:
            info['max_para_memory'] = 0
        info['max_hvm_memory'] = int(
            (info['max_free_memory'] - 12) * (1 - 2.4 / 1024))
        if info['max_hvm_memory'] < 0:
            info['max_hvm_memory'] = 0

        ITEM_ORDER = [
            'nr_cpus',
            'nr_nodes',
            'cores_per_socket',
            'threads_per_core',
            'cpu_mhz',
            'hw_caps',
            'virt_caps',
            'total_memory',
            'free_memory',
            'free_cpus',
            'max_free_memory',
            'max_para_memory',
            'max_hvm_memory',
        ]

        if show_numa != 0:
            info['cpu_topology'] = self.format_cpu_to_core_socket_node(tinfo)
            info['numa_info'] = self.format_numa_info(ninfo)
            ITEM_ORDER += ['cpu_topology', 'numa_info']

        return [[k, info[k]] for k in ITEM_ORDER]
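The hard-coded guest-memory overheads above are easier to sanity-check
with concrete numbers. A worked example, assuming 1024 MiB of maximum
free memory (the input value is made up):

max_free_memory = 1024  # MiB, assumed for illustration

# PV guests: flat 4 MiB overhead, clamped at zero.
max_para_memory = max(max_free_memory - 4, 0)  # -> 1020

# HVM guests: 12 MiB flat plus a 2.4/1024 (~0.23%) proportional overhead.
max_hvm_memory = int((max_free_memory - 12) * (1 - 2.4 / 1024))  # -> 1009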