Example #1
    def __fetch_nodes(self):
        new_nodes = []
        hosts = self.rpc.hostpool_info()
        hostnames = set([n.hostname for n in self.nodes.values()])
        for host in hosts:
            # CPU
            # OpenNebula reports each CPU as "100"
            # (so, a 4-core machine is reported as "400")
            # We need to convert this to a multi-instance
            # resource type in Haizea
            cpu = host.max_cpu
            ncpu = cpu / 100
            enact_id = host.id
            hostname = host.name

            # We want to skip nodes we're already aware of ...
            if hostname in hostnames:
                continue

            # ... and those in an error or disabled state ...
            if host.state in (OpenNebulaHost.STATE_ERROR,
                              OpenNebulaHost.STATE_DISABLED):
                continue

            # ... and those where monitoring information is not yet available.
            if cpu == 0:
                self.logger.debug(
                    "Skipping node '%s' (monitoring information not yet available)"
                    % hostname)
                continue

            self.max_nod_id += 1

            nod_id = self.max_nod_id
            capacity = Capacity(
                [constants.RES_CPU, constants.RES_MEM, constants.RES_DISK])

            capacity.set_ninstances(constants.RES_CPU, ncpu)
            for i in range(ncpu):
                capacity.set_quantity_instance(constants.RES_CPU, i + 1, 100)

            # Memory. OpenNebula reports it in KB; divide by 1024 to obtain MB
            capacity.set_quantity(constants.RES_MEM, host.max_mem / 1024.0)

            # Disk
            # OpenNebula doesn't report this correctly yet.
            # We set it to an arbitrarily high value.
            capacity.set_quantity(constants.RES_DISK, 80000)

            node = ResourcePoolNode(nod_id, hostname, capacity)
            node.enactment_info = enact_id
            self.nodes[nod_id] = node
            new_nodes.append(node)
            self.logger.debug("Fetched node %i %s %s" %
                              (node.id, node.hostname, node.capacity))
        return new_nodes
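
A minimal sketch of the CPU-unit conversion performed above, using plain Python lists instead of Haizea's Capacity class (the helper name is invented for illustration): OpenNebula reports 100 per core, so a max_cpu of 400 becomes four CPU instances of 100 each.

def cpu_instances_from_max_cpu(max_cpu):
    # OpenNebula reports each core as "100"; integer-divide to get the core count.
    ncpu = max_cpu // 100
    # Model each core as a full (100%) multi-instance CPU slot.
    return [100] * ncpu

assert cpu_instances_from_max_cpu(400) == [100, 100, 100, 100]
assert cpu_instances_from_max_cpu(0) == []  # host with no monitoring data yet
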
Example #3
    def test_resource_tuple(self):
        
        multiinst = [(constants.RES_CPU, ResourceTuple.MULTI_INSTANCE),
                     (constants.RES_MEM, ResourceTuple.SINGLE_INSTANCE)]
        
        self.slottable = SlotTable(multiinst)
                
        c1_100 = Capacity([constants.RES_CPU,constants.RES_MEM])
        c1_100.set_quantity(constants.RES_CPU, 100)
        c1_100.set_quantity(constants.RES_MEM, 1024)
        c1_100 = self.slottable.create_resource_tuple_from_capacity(c1_100)

        c2_100 = Capacity([constants.RES_CPU,constants.RES_MEM])
        c2_100.set_ninstances(constants.RES_CPU, 2)
        c2_100.set_quantity_instance(constants.RES_CPU, 1, 100)
        c2_100.set_quantity_instance(constants.RES_CPU, 2, 100)
        c2_100.set_quantity(constants.RES_MEM, 1024)
        c2_100 = self.slottable.create_resource_tuple_from_capacity(c2_100)

        c1_50 = Capacity([constants.RES_CPU,constants.RES_MEM])
        c1_50.set_quantity(constants.RES_CPU, 50)
        c1_50.set_quantity(constants.RES_MEM, 1024)
        c1_50 = self.slottable.create_resource_tuple_from_capacity(c1_50)

        c2_50 = Capacity([constants.RES_CPU,constants.RES_MEM])
        c2_50.set_ninstances(constants.RES_CPU, 2)
        c2_50.set_quantity_instance(constants.RES_CPU, 1, 50)
        c2_50.set_quantity_instance(constants.RES_CPU, 2, 50)
        c2_50.set_quantity(constants.RES_MEM, 1024)
        c2_50 = self.slottable.create_resource_tuple_from_capacity(c2_50)

        assert c1_100.fits_in(c2_100)
        assert not c1_100.fits_in(c1_50)
        assert not c1_100.fits_in(c2_50)

        assert not c2_100.fits_in(c1_100)
        assert not c2_100.fits_in(c1_50)
        assert not c2_100.fits_in(c2_50)

        assert c1_50.fits_in(c1_100)
        assert c1_50.fits_in(c2_100)
        assert c1_50.fits_in(c2_50)

        assert c2_50.fits_in(c1_100)
        assert c2_50.fits_in(c2_100)
        assert not c2_50.fits_in(c1_50)

        empty = self.slottable.create_empty_resource_tuple()
        empty.incr(c2_100)
        assert empty._single_instance[0] == 1024
        assert empty._multi_instance[1] == [100,100]
        
        empty = self.slottable.create_empty_resource_tuple()
        empty.incr(c1_100)
        assert empty._single_instance[0] == 1024
        assert empty._multi_instance[1] == [100]
        empty.incr(c1_100)
        assert empty._single_instance[0] == 2048
        assert empty._multi_instance[1] == [100,100]

        empty = self.slottable.create_empty_resource_tuple()
        empty.incr(c1_100)
        assert empty._single_instance[0] == 1024
        assert empty._multi_instance[1] == [100]
        empty.incr(c1_50)
        assert empty._single_instance[0] == 2048
        assert empty._multi_instance[1] == [100,50]
   
        c1_100a = ResourceTuple.copy(c1_100)
        c1_100a.decr(c1_50)
        assert c1_100a._single_instance[0] == 0
        assert c1_100a._multi_instance[1] == [50]

        c2_100a = ResourceTuple.copy(c2_100)
        c2_100a._single_instance[0] = 2048
        c2_100a.decr(c1_50)
        
        assert c2_100a._single_instance[0] == 1024
        assert c2_100a._multi_instance[1] == [50,100]
        c2_100a.decr(c1_50)
        assert c2_100a._single_instance[0] == 0
        assert c2_100a._multi_instance[1] == [0,100]

        c2_100a = ResourceTuple.copy(c2_100)
        c2_100a._single_instance[0] = 2048
        c2_100a.decr(c2_50)
        assert c2_100a._single_instance[0] == 1024
        assert c2_100a._multi_instance[1] == [0,100]
        c2_100a.decr(c2_50)
        assert c2_100a._single_instance[0] == 0
        assert c2_100a._multi_instance[1] == [0,0]
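
The asserts above exercise how fits_in treats single-instance and multi-instance resources differently. The following is only an illustrative model of that behavior, not Haizea's ResourceTuple implementation: single-instance quantities compare directly, while every requested multi-instance value must be matched against a distinct available instance large enough to hold it.

def fits_in_model(req_single, req_multi, avail_single, avail_multi):
    # Single-instance resources (e.g. memory): a simple quantity comparison.
    if req_single > avail_single:
        return False
    # Multi-instance resources (e.g. CPUs): pair each request with its own instance.
    remaining = sorted(avail_multi)
    for quantity in sorted(req_multi, reverse=True):
        candidates = [a for a in remaining if a >= quantity]
        if not candidates:
            return False
        remaining.remove(min(candidates))  # use the smallest instance that still fits
    return True

# c1_50 (one 50% CPU, 1024 MB) fits in c2_100 (two full CPUs, 1024 MB) ...
assert fits_in_model(1024, [50], 1024, [100, 100])
# ... but c2_100 does not fit in c1_50, since neither CPU request can be satisfied.
assert not fits_in_model(1024, [100, 100], 1024, [50])
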
Example #4
class OpenNebulaHaizeaVM(object):
    HAIZEA_PARAM = "HAIZEA"
    HAIZEA_START = "START"
    HAIZEA_START_NOW = "now"
    HAIZEA_START_BESTEFFORT = "best_effort"
    HAIZEA_DURATION = "DURATION"
    HAIZEA_DURATION_UNLIMITED = "unlimited"
    HAIZEA_PREEMPTIBLE = "PREEMPTIBLE"
    HAIZEA_PREEMPTIBLE_YES = "yes"
    HAIZEA_PREEMPTIBLE_NO = "no"
    HAIZEA_GROUP = "GROUP"
  
    
    def __init__(self, opennebula_vm):                        
        # If there is no HAIZEA parameter, the default is to treat the
        # request as an immediate request with unlimited duration
        if not opennebula_vm.template.has_key(OpenNebulaHaizeaVM.HAIZEA_PARAM):
            self.start = OpenNebulaHaizeaVM.HAIZEA_START_NOW
            self.duration = OpenNebulaHaizeaVM.HAIZEA_DURATION_UNLIMITED
            self.preemptible = OpenNebulaHaizeaVM.HAIZEA_PREEMPTIBLE_NO
            self.group = None
        else:
            haizea = opennebula_vm.template[OpenNebulaHaizeaVM.HAIZEA_PARAM]
            self.start = haizea[OpenNebulaHaizeaVM.HAIZEA_START]
            self.duration = haizea[OpenNebulaHaizeaVM.HAIZEA_DURATION]
            self.preemptible = haizea[OpenNebulaHaizeaVM.HAIZEA_PREEMPTIBLE]
            if haizea.has_key(OpenNebulaHaizeaVM.HAIZEA_GROUP):
                self.group = haizea[OpenNebulaHaizeaVM.HAIZEA_GROUP]
            else:
                self.group = None
                
        self.submit_time = UNIX2DateTime(opennebula_vm.stime)
                
        # Create Timestamp object
        if self.start == OpenNebulaHaizeaVM.HAIZEA_START_NOW:
            self.start = Timestamp(Timestamp.NOW)
        elif self.start == OpenNebulaHaizeaVM.HAIZEA_START_BESTEFFORT:
            self.start = Timestamp(Timestamp.UNSPECIFIED)
        elif self.start[0] == "+":
            # Relative time
            self.start = Timestamp(round_datetime(self.submit_time + ISO.ParseTime(self.start[1:])))
        else:
            self.start = Timestamp(ISO.ParseDateTime(self.start))
            
        # Create Duration object
        if self.duration == OpenNebulaHaizeaVM.HAIZEA_DURATION_UNLIMITED:
            # This is an interim solution (make it run for a century).
            # TODO: Integrate concept of unlimited duration in the lease datastruct
            self.duration = Duration(DateTimeDelta(36500))
        else:
            self.duration = Duration(ISO.ParseTimeDelta(self.duration))
            

        self.preemptible = (self.preemptible == OpenNebulaHaizeaVM.HAIZEA_PREEMPTIBLE_YES)

    
        self.capacity = Capacity([constants.RES_CPU, constants.RES_MEM, constants.RES_DISK])
        
        # CPU
        # CPUs in VMs are not reported the same as in hosts.
        # There are two template values: CPU and VCPU.
        # CPU reports the percentage of the CPU needed by the VM.
        # VCPU, which is optional, reports how many CPUs are needed.
        cpu = int(float(opennebula_vm.template["CPU"]) * 100)
        if opennebula_vm.template.has_key("VCPU"):
            ncpu = int(opennebula_vm.template["VCPU"])
        else:
            ncpu = 1
        self.capacity.set_ninstances(constants.RES_CPU, ncpu)
        for i in range(ncpu):
            self.capacity.set_quantity_instance(constants.RES_CPU, i+1, cpu)            
        
        # Memory. Unlike hosts, memory is reported directly in MBs
        self.capacity.set_quantity(constants.RES_MEM, int(opennebula_vm.template["MEMORY"]))

        self.one_id = opennebula_vm.id
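
A hypothetical template (all values invented for illustration) showing the nested dict shape this constructor expects from the OpenNebula bindings, and what it would derive from it:

example_template = {
    "HAIZEA": {
        "START": "+00:15:00",    # relative start: 15 minutes after submission
        "DURATION": "01:00:00",  # run for one hour
        "PREEMPTIBLE": "no",
    },
    "CPU": "0.5",    # CPU share; the constructor assigns int(0.5 * 100) == 50 to each CPU instance
    "VCPU": "2",     # two virtual CPUs (optional; defaults to 1)
    "MEMORY": "512", # MB, used as-is
}
# Given this template, __init__ would produce a Timestamp 15 minutes after
# submit_time, a one-hour Duration, preemptible == False, and a Capacity with
# two CPU instances of 50 each plus 512 MB of memory.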