Example #1
    def __fetch_nodes(self):
        new_nodes = []
        hosts = self.rpc.hostpool_info()
        hostnames = set([n.hostname for n in self.nodes.values()])
        for host in hosts:
            # CPU
            # OpenNebula reports each CPU as "100"
            # (so, a 4-core machine is reported as "400")
            # We need to convert this to a multi-instance
            # resource type in Haizea
            cpu = host.max_cpu
            ncpu = cpu // 100
            enact_id = host.id
            hostname = host.name

            # We want to skip nodes we're already aware of ...
            if hostname in hostnames:
                continue

            # ... and those in an error or disabled state ...
            if host.state in (OpenNebulaHost.STATE_ERROR,
                              OpenNebulaHost.STATE_DISABLED):
                continue

            # ... and those where monitoring information is not yet available.
            if cpu == 0:
                self.logger.debug(
                    "Skipping node '%s' (monitoring information not yet available)"
                    % hostname)
                continue

            self.max_nod_id += 1

            nod_id = self.max_nod_id
            capacity = Capacity(
                [constants.RES_CPU, constants.RES_MEM, constants.RES_DISK])

            capacity.set_ninstances(constants.RES_CPU, ncpu)
            for i in range(ncpu):
                capacity.set_quantity_instance(constants.RES_CPU, i + 1, 100)

            # Memory. Must divide by 1024 to obtain quantity in MB
            capacity.set_quantity(constants.RES_MEM, host.max_mem / 1024.0)

            # Disk
            # OpenNebula doesn't report this correctly yet.
            # We set it to an arbitrarily high value.
            capacity.set_quantity(constants.RES_DISK, 80000)

            node = ResourcePoolNode(nod_id, hostname, capacity)
            node.enactment_info = enact_id
            self.nodes[nod_id] = node
            new_nodes.append(node)
            self.logger.debug("Fetched node %i %s %s" %
                              (node.id, node.hostname, node.capacity))
        return new_nodes
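
A minimal, self-contained sketch of the CPU conversion this example performs. The import paths are assumptions (the examples never show them), and the figures are illustrative:

    from haizea.common import constants      # assumed import path
    from haizea.core.leases import Capacity  # assumed import path

    max_cpu = 400                 # OpenNebula's figure for a 4-core host
    ncpu = max_cpu // 100         # -> 4 multi-instance CPU slots
    capacity = Capacity([constants.RES_CPU, constants.RES_MEM, constants.RES_DISK])
    capacity.set_ninstances(constants.RES_CPU, ncpu)
    for i in range(ncpu):
        capacity.set_quantity_instance(constants.RES_CPU, i + 1, 100)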
Example #2
    def __init__(self, bandwidth):
        DeploymentEnactment.__init__(self)
        self.logger = logging.getLogger("ENACT.SIMUL.INFO")
                
        self.bandwidth = bandwidth
        
        imgcapacity = Capacity([constants.RES_NETOUT])
        imgcapacity.set_quantity(constants.RES_NETOUT, self.bandwidth)

        # TODO: Determine node number based on site
        self.imagenode = ResourcePoolNode(1000, "image_node", imgcapacity)
Example #3
    def __schedule_imagetransfer_edf(self, lease, musttransfer, earliest, nexttime):
        # Estimate image transfer time 
        bandwidth = self.deployment_enact.get_bandwidth()
        config = get_config()
        mechanism = config.get("transfer-mechanism")
        transfer_duration = self.__estimate_image_transfer_time(lease, bandwidth)
        if mechanism == constants.TRANSFER_UNICAST:
            transfer_duration *= len(musttransfer)

        # Determine start time
        start = self.__get_last_transfer_slot(lease.start.requested, transfer_duration)
        if start is None or start < nexttime:
            raise NotSchedulableException("Could not schedule the file transfer to complete in time.")
        
        res = {}
        resimgnode = Capacity([constants.RES_NETOUT])
        resimgnode.set_quantity(constants.RES_NETOUT, bandwidth)
        resnode = Capacity([constants.RES_NETIN])
        resnode.set_quantity(constants.RES_NETIN, bandwidth)
        res[self.imagenode.id] = self.slottable.create_resource_tuple_from_capacity(resimgnode)
        for pnode in musttransfer.values():
            res[pnode] = self.slottable.create_resource_tuple_from_capacity(resnode)
        
        newtransfer = FileTransferResourceReservation(lease, res)
        newtransfer.deadline = lease.start.requested
        newtransfer.state = ResourceReservation.STATE_SCHEDULED
        newtransfer.file = lease.software.image_id
        newtransfer.start = start
        newtransfer.end = start + transfer_duration
        for vnode, pnode in musttransfer.items():
            newtransfer.piggyback(lease, vnode, pnode)
        
        bisect.insort(self.transfers, newtransfer)
        
        return [newtransfer]
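
Two timing details here are easy to miss: under unicast every destination needs its own copy, so the single-copy estimate is multiplied by len(musttransfer), and the transfer is scheduled backwards from the lease's requested start, which acts as its deadline. A simplified sketch with illustrative numbers, using plain subtraction in place of __get_last_transfer_slot:

    single_copy = 120                       # estimated seconds per image copy
    musttransfer = {1: 'pnode_a', 2: 'pnode_b', 3: 'pnode_c'}
    transfer_duration = single_copy * len(musttransfer)   # unicast: 360 s

    deadline = 1000                         # lease.start.requested, in seconds
    start = deadline - transfer_duration    # latest feasible start: 640
    nexttime = 500                          # earliest the scheduler may act
    if start < nexttime:                    # mirrors the NotSchedulableException check
        raise Exception("Could not schedule the file transfer to complete in time.")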
Example #4
    def __schedule_imagetransfer_fifo(self, lease, musttransfer, earliest):
        # Estimate image transfer time
        bandwidth = self.imagenode_bandwidth
        config = get_config()
        mechanism = config.get("transfer-mechanism")

        # The starting time is the first available slot, which was
        # included in the "earliest" dictionary.
        pnodes = musttransfer.values()
        start = earliest[pnodes[0]].transfer_start
        transfer_duration = self.__estimate_image_transfer_time(
            lease, bandwidth)

        res = {}
        resimgnode = Capacity([constants.RES_NETOUT])
        resimgnode.set_quantity(constants.RES_NETOUT, bandwidth)
        resnode = Capacity([constants.RES_NETIN])
        resnode.set_quantity(constants.RES_NETIN, bandwidth)
        res[self.imagenode.id] = self.slottable.create_resource_tuple_from_capacity(resimgnode)
        for n in musttransfer.values():
            res[n] = self.slottable.create_resource_tuple_from_capacity(resnode)

        newtransfer = FileTransferResourceReservation(lease, res)
        newtransfer.start = start
        if mechanism == constants.TRANSFER_UNICAST:
            newtransfer.end = start + (len(musttransfer) * transfer_duration)
        if mechanism == constants.TRANSFER_MULTICAST:
            newtransfer.end = start + transfer_duration

        newtransfer.deadline = None
        newtransfer.state = ResourceReservation.STATE_SCHEDULED
        newtransfer.file = lease.software.image_id
        for vnode, pnode in musttransfer.items():
            newtransfer.piggyback(lease.id, vnode, pnode)

        bisect.insort(self.transfers, newtransfer)

        return [newtransfer]
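
The FIFO variant anchors the reservation at the first available slot and differs from the EDF version in Example #3 only in how the end time is derived: unicast serializes one copy per destination, while multicast serves all destinations with a single stream. A toy calculation with illustrative numbers:

    start = 0
    single_copy = 120        # seconds per image copy
    ndest = 3                # len(musttransfer)
    end_unicast = start + ndest * single_copy   # 360: one copy per node
    end_multicast = start + single_copy         # 120: one stream for all nodes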
Example #5
    def __schedule_imagetransfer_edf(self, lease, musttransfer, earliest):
        # Estimate image transfer time
        bandwidth = self.deployment_enact.get_bandwidth()
        config = get_config()
        mechanism = config.get("transfer-mechanism")
        transfer_duration = self.__estimate_image_transfer_time(
            lease, bandwidth)
        if mechanism == constants.TRANSFER_UNICAST:
            transfer_duration *= len(musttransfer)

        # Determine start time
        start = self.__get_last_transfer_slot(lease.start.requested,
                                              transfer_duration)

        res = {}
        resimgnode = Capacity([constants.RES_NETOUT])
        resimgnode.set_quantity(constants.RES_NETOUT, bandwidth)
        resnode = Capacity([constants.RES_NETIN])
        resnode.set_quantity(constants.RES_NETIN, bandwidth)
        res[self.imagenode.id] = self.slottable.create_resource_tuple_from_capacity(resimgnode)
        for pnode in musttransfer.values():
            res[pnode] = self.slottable.create_resource_tuple_from_capacity(resnode)

        newtransfer = FileTransferResourceReservation(lease, res)
        newtransfer.deadline = lease.start.requested
        newtransfer.state = ResourceReservation.STATE_SCHEDULED
        newtransfer.file = lease.software.image_id
        newtransfer.start = start
        newtransfer.end = start + transfer_duration
        for vnode, pnode in musttransfer.items():
            newtransfer.piggyback(lease.id, vnode, pnode)

        bisect.insort(self.transfers, newtransfer)

        return [newtransfer]
Example #6
 def __gen_lease(self):
     submit_time = None
     user_id = None
     
     res = self.config.get(LWFGenerator.NODES_SEC, LWFGenerator.RESOURCES_OPT)
     res = Capacity.from_resources_string(res)        
     numnodes = self._get_numnodes(None)
     requested_resources = dict([(i+1,res) for i in xrange(numnodes)])
     
     start, delta = self._get_start(self.start_type, None)
     start = Timestamp(TimeDelta(seconds=start))
     
     duration = self._get_duration()
     duration = Duration(TimeDelta(seconds=duration))
     deadline = None
     preemptible = False
     software = self._get_software()
     
     l = Lease.create_new(submit_time, user_id, requested_resources, 
                          start, duration, deadline, preemptible, software)
     
     return l
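
The requested_resources dictionary built here maps 1-based virtual node numbers to the same Capacity object, one entry per requested node. A sketch with a placeholder standing in for the parsed Capacity:

    numnodes = 4
    res = object()    # stands in for Capacity.from_resources_string(...)
    requested_resources = dict([(i + 1, res) for i in xrange(numnodes)])
    assert sorted(requested_resources.keys()) == [1, 2, 3, 4]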
Example #7
 def __schedule_imagetransfer_fifo(self, lease, musttransfer, earliest):
     # Estimate image transfer time 
     bandwidth = self.imagenode_bandwidth
     config = get_config()
     mechanism = config.get("transfer-mechanism")
     
     # The starting time is the first available slot, which was
     # included in the "earliest" dictionary.
     pnodes = musttransfer.values()
     start = earliest[pnodes[0]].transfer_start
     transfer_duration = self.__estimate_image_transfer_time(lease, bandwidth)
     
     res = {}
     resimgnode = Capacity([constants.RES_NETOUT])
     resimgnode.set_quantity(constants.RES_NETOUT, bandwidth)
     resnode = Capacity([constants.RES_NETIN])
     resnode.set_quantity(constants.RES_NETIN, bandwidth)
     res[self.imagenode.id] = self.slottable.create_resource_tuple_from_capacity(resimgnode)
     for n in musttransfer.values():
         res[n] = self.slottable.create_resource_tuple_from_capacity(resnode)
      
     newtransfer = FileTransferResourceReservation(lease, res)
     newtransfer.start = start
     if mechanism == constants.TRANSFER_UNICAST:
         newtransfer.end = start + (len(musttransfer) * transfer_duration)
     if mechanism == constants.TRANSFER_MULTICAST:
         newtransfer.end = start + transfer_duration
     
     newtransfer.deadline = None
     newtransfer.state = ResourceReservation.STATE_SCHEDULED
     newtransfer.file = lease.software.image_id
     for vnode, pnode in musttransfer.items():
         newtransfer.piggyback(lease, vnode, pnode)
         
     bisect.insort(self.transfers, newtransfer)
     
     return [newtransfer]
Example #8
    def schedule_migration(self, lease, vmrr, nexttime):
        if type(lease.software) == UnmanagedSoftwareEnvironment:
            return []

        # This code is the same as the one in vm_scheduler
        # Should be factored out
        last_vmrr = lease.get_last_vmrr()
        vnode_migrations = dict([(vnode, (last_vmrr.nodes[vnode],
                                          vmrr.nodes[vnode]))
                                 for vnode in vmrr.nodes])

        mustmigrate = False
        for vnode in vnode_migrations:
            if vnode_migrations[vnode][0] != vnode_migrations[vnode][1]:
                mustmigrate = True
                break

        if not mustmigrate:
            return []

        if get_config().get("migration") == constants.MIGRATE_YES_NOTRANSFER:
            start = nexttime
            end = nexttime
            res = {}
            migr_rr = DiskImageMigrationResourceReservation(
                lease, start, end, res, vmrr, vnode_migrations)
            migr_rr.state = ResourceReservation.STATE_SCHEDULED
            return [migr_rr]

        # Figure out what migrations can be done simultaneously
        migrations = []
        while len(vnode_migrations) > 0:
            pnodes = set()
            migration = {}
            for vnode in vnode_migrations:
                origin = vnode_migrations[vnode][0]
                dest = vnode_migrations[vnode][1]
                if origin not in pnodes and dest not in pnodes:
                    migration[vnode] = vnode_migrations[vnode]
                    pnodes.add(origin)
                    pnodes.add(dest)
            for vnode in migration:
                del vnode_migrations[vnode]
            migrations.append(migration)

        # Create migration RRs
        start = max(last_vmrr.post_rrs[-1].end, nexttime)
        bandwidth = self.resourcepool.info.get_migration_bandwidth()
        migr_rrs = []
        for m in migrations:
            mb_to_migrate = lease.software.image_size * len(m)
            migr_time = estimate_transfer_time(mb_to_migrate, bandwidth)
            end = start + migr_time
            res = {}
            for (origin, dest) in m.values():
                resorigin = Capacity([constants.RES_NETOUT])
                resorigin.set_quantity(constants.RES_NETOUT, bandwidth)
                resdest = Capacity([constants.RES_NETIN])
                resdest.set_quantity(constants.RES_NETIN, bandwidth)
                res[origin] = self.slottable.create_resource_tuple_from_capacity(
                    resorigin)
                res[dest] = self.slottable.create_resource_tuple_from_capacity(
                    resdest)
            migr_rr = DiskImageMigrationResourceReservation(
                lease, start, start + migr_time, res, vmrr, m)
            migr_rr.state = ResourceReservation.STATE_SCHEDULED
            migr_rrs.append(migr_rr)
            start = end

        return migr_rrs
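
The grouping loop is worth isolating: a physical node can take part in at most one migration at a time, so migrations are packed greedily into rounds in which no origin or destination node repeats. A standalone sketch with hypothetical node names:

    vnode_migrations = {1: ('p1', 'p2'), 2: ('p2', 'p3'), 3: ('p4', 'p5')}
    rounds = []
    while len(vnode_migrations) > 0:
        pnodes = set()
        batch = {}
        for vnode, (origin, dest) in vnode_migrations.items():
            if origin not in pnodes and dest not in pnodes:
                batch[vnode] = (origin, dest)
                pnodes.add(origin)
                pnodes.add(dest)
        for vnode in batch:
            del vnode_migrations[vnode]
        rounds.append(batch)
    # e.g. rounds == [{1: ('p1', 'p2'), 3: ('p4', 'p5')}, {2: ('p2', 'p3')}]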
Example #9
    def add_physical_node_clicked_callback(self, *args):
        try:
            count = int(self.builder.get_object('num_nodes').get_text())
            cpu = int(self.builder.get_object('nodeset_cpu').get_text())
            mem = int(self.builder.get_object('nodeset_memory').get_text())
            disk = int(self.builder.get_object('nodeset_disk').get_text())
            net_in = int(self.builder.get_object('nodeset_net_in').get_text())
            net_out = int(
                self.builder.get_object('nodeset_net_out').get_text())

            capacity = Capacity(['Net-in', 'Net-out', 'Disk', 'CPU', 'Memory'])
            capacity.set_quantity('Net-in', net_in)
            capacity.set_quantity('Net-out', net_out)
            capacity.set_quantity('Disk', disk)
            capacity.set_quantity('CPU', cpu)
            capacity.set_quantity('Memory', mem)
            self.site.nodes.node_sets.append((count, capacity))
            self.reload_physical_nodes()
        except ValueError:
            # A field did not contain a valid integer; ignore the click.
            pass
Example #10
    def run(self):
        self.parse_options()

        if self.opt.file is not None:
            lease_elem = ET.parse(self.opt.file).getroot()
            # If a relative starting time is used, replace for an
            # absolute starting time.
            exact = lease_elem.find("start/exact")
            if exact is not None:
                exact_time = exact.get("time")
                exact.set("time", str(self.__absolute_time(exact_time)))
            lease_xml_str = ET.tostring(lease_elem)
        else:
            if self.opt.preemptible is None:
                preemptible = False
            else:
                preemptible = self.opt.preemptible

            capacity = Capacity([constants.RES_CPU, constants.RES_MEM])
            capacity.set_quantity(constants.RES_CPU, int(self.opt.cpu) * 100)
            capacity.set_quantity(constants.RES_MEM, int(self.opt.mem))
            requested_resources = dict([(i + 1, capacity)
                                        for i in range(self.opt.numnodes)])
            if self.opt.duration == haizea_request_lease.DURATION_UNLIMITED:
                # This is an interim solution (make it run for a century).
                # TODO: Integrate concept of unlimited duration in the lease datastruct
                duration = DateTimeDelta(36500)
            else:
                duration = ISO.ParseTimeDelta(self.opt.duration)

            if self.opt.start == haizea_request_lease.START_NOW:
                lease = Lease(lease_id=None,
                              submit_time=None,
                              requested_resources=requested_resources,
                              start=Timestamp(Timestamp.NOW),
                              duration=Duration(duration),
                              deadline=None,
                              preemptible=preemptible,
                              software=DiskImageSoftwareEnvironment(
                                  self.opt.vmimage, self.opt.vmimagesize),
                              state=None)
            elif self.opt.start == haizea_request_lease.START_BESTEFFORT:
                lease = Lease(lease_id=None,
                              submit_time=None,
                              requested_resources=requested_resources,
                              start=Timestamp(Timestamp.UNSPECIFIED),
                              duration=Duration(duration),
                              deadline=None,
                              preemptible=preemptible,
                              software=DiskImageSoftwareEnvironment(
                                  self.opt.vmimage, self.opt.vmimagesize),
                              state=None)
            else:
                start = self.__absolute_time(self.opt.start)
                lease = Lease(lease_id=None,
                              submit_time=None,
                              requested_resources=requested_resources,
                              start=Timestamp(start),
                              duration=Duration(duration),
                              deadline=None,
                              preemptible=preemptible,
                              software=DiskImageSoftwareEnvironment(
                                  self.opt.vmimage, self.opt.vmimagesize),
                              state=None)

            lease_xml_str = ET.tostring(lease.to_xml())

        server = self.create_rpc_proxy(self.opt.server)

        try:
            lease_id = server.create_lease(lease_xml_str)
            print "Lease submitted correctly."
            print "Lease ID: %i" % lease_id
        except xmlrpclib.Fault, err:
            print >> sys.stderr, "XMLRPC fault: %s" % err.faultString
            if self.opt.debug:
                raise
Example #11
    def test_resource_tuple(self):
        
        multiinst = [(constants.RES_CPU, ResourceTuple.MULTI_INSTANCE),
                     (constants.RES_MEM, ResourceTuple.SINGLE_INSTANCE)]
        
        self.slottable = SlotTable(multiinst)
                
        c1_100 = Capacity([constants.RES_CPU,constants.RES_MEM])
        c1_100.set_quantity(constants.RES_CPU, 100)
        c1_100.set_quantity(constants.RES_MEM, 1024)
        c1_100 = self.slottable.create_resource_tuple_from_capacity(c1_100)

        c2_100 = Capacity([constants.RES_CPU,constants.RES_MEM])
        c2_100.set_ninstances(constants.RES_CPU, 2)
        c2_100.set_quantity_instance(constants.RES_CPU, 1, 100)
        c2_100.set_quantity_instance(constants.RES_CPU, 2, 100)
        c2_100.set_quantity(constants.RES_MEM, 1024)
        c2_100 = self.slottable.create_resource_tuple_from_capacity(c2_100)

        c1_50 = Capacity([constants.RES_CPU,constants.RES_MEM])
        c1_50.set_quantity(constants.RES_CPU, 50)
        c1_50.set_quantity(constants.RES_MEM, 1024)
        c1_50 = self.slottable.create_resource_tuple_from_capacity(c1_50)

        c2_50 = Capacity([constants.RES_CPU,constants.RES_MEM])
        c2_50.set_ninstances(constants.RES_CPU, 2)
        c2_50.set_quantity_instance(constants.RES_CPU, 1, 50)
        c2_50.set_quantity_instance(constants.RES_CPU, 2, 50)
        c2_50.set_quantity(constants.RES_MEM, 1024)
        c2_50 = self.slottable.create_resource_tuple_from_capacity(c2_50)

        assert c1_100.fits_in(c2_100)
        assert not c1_100.fits_in(c1_50)
        assert not c1_100.fits_in(c2_50)

        assert not c2_100.fits_in(c1_100)
        assert not c2_100.fits_in(c1_50)
        assert not c2_100.fits_in(c2_50)

        assert c1_50.fits_in(c1_100)
        assert c1_50.fits_in(c2_100)
        assert c1_50.fits_in(c2_50)

        assert c2_50.fits_in(c1_100)
        assert c2_50.fits_in(c2_100)
        assert not c2_50.fits_in(c1_50)

        empty = self.slottable.create_empty_resource_tuple()
        empty.incr(c2_100)
        assert empty._single_instance[0] == 1024
        assert empty._multi_instance[1] == [100,100]
        
        empty = self.slottable.create_empty_resource_tuple()
        empty.incr(c1_100)
        assert empty._single_instance[0] == 1024
        assert empty._multi_instance[1] == [100]
        empty.incr(c1_100)
        assert empty._single_instance[0] == 2048
        assert empty._multi_instance[1] == [100,100]

        empty = self.slottable.create_empty_resource_tuple()
        empty.incr(c1_100)
        assert empty._single_instance[0] == 1024
        assert empty._multi_instance[1] == [100]
        empty.incr(c1_50)
        assert empty._single_instance[0] == 2048
        assert empty._multi_instance[1] == [100,50]
   
        c1_100a = ResourceTuple.copy(c1_100)
        c1_100a.decr(c1_50)
        assert c1_100a._single_instance[0] == 0
        assert c1_100a._multi_instance[1] == [50]

        c2_100a = ResourceTuple.copy(c2_100)
        c2_100a._single_instance[0] = 2048
        c2_100a.decr(c1_50)
        
        assert c2_100a._single_instance[0] == 1024
        assert c2_100a._multi_instance[1] == [50,100]
        c2_100a.decr(c1_50)
        assert c2_100a._single_instance[0] == 0
        assert c2_100a._multi_instance[1] == [0,100]

        c2_100a = ResourceTuple.copy(c2_100)
        c2_100a._single_instance[0] = 2048
        c2_100a.decr(c2_50)
        assert c2_100a._single_instance[0] == 1024
        assert c2_100a._multi_instance[1] == [0,100]
        c2_100a.decr(c2_50)
        assert c2_100a._single_instance[0] == 0
        assert c2_100a._multi_instance[1] == [0,0]
Example #12
def create_capacities(slottable):
    FULL_NODE = Capacity([constants.RES_CPU,constants.RES_MEM])
    FULL_NODE.set_quantity(constants.RES_CPU, 100)
    FULL_NODE.set_quantity(constants.RES_MEM, 1024)
    FULL_NODE = slottable.create_resource_tuple_from_capacity(FULL_NODE)
    
    HALF_NODE = Capacity([constants.RES_CPU,constants.RES_MEM])
    HALF_NODE.set_quantity(constants.RES_CPU, 50)
    HALF_NODE.set_quantity(constants.RES_MEM, 512)
    HALF_NODE = slottable.create_resource_tuple_from_capacity(HALF_NODE)

    QRTR_NODE = Capacity([constants.RES_CPU,constants.RES_MEM])
    QRTR_NODE.set_quantity(constants.RES_CPU, 25)
    QRTR_NODE.set_quantity(constants.RES_MEM, 256)
    QRTR_NODE = slottable.create_resource_tuple_from_capacity(QRTR_NODE)

    EMPT_NODE = slottable.create_empty_resource_tuple()
    
    return FULL_NODE, HALF_NODE, QRTR_NODE, EMPT_NODE
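
Assuming the same imports and SlotTable configuration as the test in Example #11, the helper's outputs compose directly with fits_in():

    multiinst = [(constants.RES_CPU, ResourceTuple.MULTI_INSTANCE),
                 (constants.RES_MEM, ResourceTuple.SINGLE_INSTANCE)]
    slottable = SlotTable(multiinst)
    FULL, HALF, QRTR, EMPTY = create_capacities(slottable)
    assert QRTR.fits_in(HALF) and HALF.fits_in(FULL)
    assert not FULL.fits_in(HALF)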
Example #13
    def __init__(self, opennebula_vm):                        
        # If there is no HAIZEA parameter, the default is to treat the
        # request as an immediate request with unlimited duration
        if not opennebula_vm.template.has_key(OpenNebulaHaizeaVM.HAIZEA_PARAM):
            self.start = OpenNebulaHaizeaVM.HAIZEA_START_NOW
            self.duration = OpenNebulaHaizeaVM.HAIZEA_DURATION_UNLIMITED
            self.preemptible = OpenNebulaHaizeaVM.HAIZEA_PREEMPTIBLE_NO
            self.group = None
        else:
            haizea_param = opennebula_vm.template[OpenNebulaHaizeaVM.HAIZEA_PARAM]
            self.start = haizea_param[OpenNebulaHaizeaVM.HAIZEA_START]
            self.duration = haizea_param[OpenNebulaHaizeaVM.HAIZEA_DURATION]
            self.preemptible = haizea_param[OpenNebulaHaizeaVM.HAIZEA_PREEMPTIBLE]
            if haizea_param.has_key(OpenNebulaHaizeaVM.HAIZEA_GROUP):
                self.group = haizea_param[OpenNebulaHaizeaVM.HAIZEA_GROUP]
            else:
                self.group = None
                
        self.submit_time = UNIX2DateTime(opennebula_vm.stime)
                
        # Create Timestamp object
        if self.start == OpenNebulaHaizeaVM.HAIZEA_START_NOW:
            self.start = Timestamp(Timestamp.NOW)
        elif self.start == OpenNebulaHaizeaVM.HAIZEA_START_BESTEFFORT:
            self.start = Timestamp(Timestamp.UNSPECIFIED)
        elif self.start[0] == "+":
            # Relative time
            self.start = Timestamp(round_datetime(self.submit_time + ISO.ParseTime(self.start[1:])))
        else:
            self.start = Timestamp(ISO.ParseDateTime(self.start))
            
        # Create Duration object
        if self.duration == OpenNebulaHaizeaVM.HAIZEA_DURATION_UNLIMITED:
            # This is an interim solution (make it run for a century).
            # TODO: Integrate concept of unlimited duration in the lease datastruct
            self.duration = Duration(DateTimeDelta(36500))
        else:
            self.duration = Duration(ISO.ParseTimeDelta(self.duration))

        self.preemptible = (self.preemptible == OpenNebulaHaizeaVM.HAIZEA_PREEMPTIBLE_YES)

        self.capacity = Capacity([constants.RES_CPU, constants.RES_MEM, constants.RES_DISK])
        
        # CPU
        # CPUs in VMs are not reported the same as in hosts.
        # There are two template values: CPU and VCPU.
        # CPU reports the percentage of the CPU needed by the VM.
        # VCPU, which is optional, reports how many CPUs are needed.
        cpu = int(float(opennebula_vm.template["CPU"]) * 100)
        if opennebula_vm.template.has_key("VCPU"):
            ncpu = int(opennebula_vm.template["VCPU"])
        else:
            ncpu = 1
        self.capacity.set_ninstances(constants.RES_CPU, ncpu)
        for i in range(ncpu):
            self.capacity.set_quantity_instance(constants.RES_CPU, i+1, cpu)            
        
        # Memory. Unlike hosts, memory is reported directly in MBs
        self.capacity.set_quantity(constants.RES_MEM, int(opennebula_vm.template["MEMORY"]))

        self.one_id = opennebula_vm.id
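
The CPU/VCPU split deserves a worked example: the CPU template value is a fraction of a core, VCPU is the (optional) number of virtual CPUs, and each virtual CPU becomes one instance of the multi-instance RES_CPU resource. Illustrative values:

    template = {"CPU": "0.5", "VCPU": "2", "MEMORY": "512"}
    cpu = int(float(template["CPU"]) * 100)   # 50 (percent of a core)
    ncpu = int(template.get("VCPU", 1))       # 2 virtual CPUs
    # -> a RES_CPU capacity with 2 instances of 50 each, set via
    #    set_quantity_instance(RES_CPU, i + 1, cpu), plus 512 MB of RES_MEM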
Example #14
    def schedule_migration(self, lease, vmrr, nexttime):
        if type(lease.software) == UnmanagedSoftwareEnvironment:
            return []
        
        # This code is the same as the one in vm_scheduler
        # Should be factored out
 
        last_vmrr = lease.get_last_vmrr()

        vnode_mappings = self.resourcepool.get_disk_image_mappings(lease)
        vnode_migrations = dict([(vnode, (vnode_mappings[vnode], vmrr.nodes[vnode]))
                                 for vnode in vmrr.nodes
                                 if vnode_mappings[vnode] != vmrr.nodes[vnode]])
        
        mustmigrate = False
        for vnode in vnode_migrations:
            if vnode_migrations[vnode][0] != vnode_migrations[vnode][1]:
                mustmigrate = True
                break
            
        if not mustmigrate:
            return []

        if get_config().get("migration") == constants.MIGRATE_YES_NOTRANSFER:
            start = nexttime
            end = nexttime
            res = {}
            migr_rr = DiskImageMigrationResourceReservation(lease, start, end, res, vmrr, vnode_migrations)
            migr_rr.state = ResourceReservation.STATE_SCHEDULED
            return [migr_rr]

        # Figure out what migrations can be done simultaneously
        migrations = []
        while len(vnode_migrations) > 0:
            pnodes = set()
            migration = {}
            for vnode in vnode_migrations:
                origin = vnode_migrations[vnode][0]
                dest = vnode_migrations[vnode][1]
                if origin not in pnodes and dest not in pnodes:
                    migration[vnode] = vnode_migrations[vnode]
                    pnodes.add(origin)
                    pnodes.add(dest)
            for vnode in migration:
                del vnode_migrations[vnode]
            migrations.append(migration)
        
        # Create migration RRs
        start = max(last_vmrr.post_rrs[-1].end, nexttime)
        bandwidth = self.resourcepool.info.get_migration_bandwidth()
        migr_rrs = []
        for m in migrations:
            mb_per_physnode = {}
            for vnode, (pnode_from, pnode_to) in m.items():
                mb_per_physnode[pnode_from] = mb_per_physnode.setdefault(pnode_from, 0) + lease.software.image_size
            max_mb_to_migrate = max(mb_per_physnode.values())
            migr_time = estimate_transfer_time(max_mb_to_migrate, bandwidth)
            end = start + migr_time
            res = {}
            for (origin,dest) in m.values():
                resorigin = Capacity([constants.RES_NETOUT])
                resorigin.set_quantity(constants.RES_NETOUT, bandwidth)
                resdest = Capacity([constants.RES_NETIN])
                resdest.set_quantity(constants.RES_NETIN, bandwidth)
                res[origin] = self.slottable.create_resource_tuple_from_capacity(resorigin)
                res[dest] = self.slottable.create_resource_tuple_from_capacity(resdest)                
            migr_rr = DiskImageMigrationResourceReservation(lease, start, start + migr_time, res, vmrr, m)
            migr_rr.state = ResourceReservation.STATE_SCHEDULED
            migr_rrs.append(migr_rr)
            start = end
        
        return migr_rrs
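
The refinement over Example #8 is the per-origin accounting: migrations in one round run in parallel, so the round's duration is bounded by the busiest origin node rather than by the total data moved. A sketch isolating just that accounting step, on a hypothetical migration set with illustrative sizes:

    image_size = 1024    # MB per virtual node
    m = {1: ('p1', 'p2'), 2: ('p1', 'p3'), 3: ('p4', 'p5')}
    mb_per_physnode = {}
    for vnode, (pnode_from, pnode_to) in m.items():
        mb_per_physnode[pnode_from] = mb_per_physnode.setdefault(pnode_from, 0) + image_size
    max_mb_to_migrate = max(mb_per_physnode.values())   # p1 sends 2048 MB and dominates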