Code example #1
File: pricing.py  Project: queba/haizea
 def __init__(self, slottable):
     """Constructor
     
     Argument
     slottable -- A fully constructed SlotTable
     """        
     RatePricePolicy.__init__(self, slottable)
     random.seed(get_config().config.getint("pricing", "seed"))
     self.min_rate = get_config().config.getfloat("pricing", "min-rate")
     self.max_rate = get_config().config.getfloat("pricing", "max-rate")
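The constructor above reads its parameters from a "pricing" section of the Haizea configuration file through get_config(). A minimal, self-contained sketch of what the getint/getfloat calls imply (Python 2 ConfigParser; the option values are illustrative assumptions, not project defaults):

import ConfigParser

config = ConfigParser.ConfigParser()
config.add_section("pricing")
config.set("pricing", "seed", "42")        # assumed value
config.set("pricing", "min-rate", "0.05")  # assumed value
config.set("pricing", "max-rate", "0.50")  # assumed value

print config.getint("pricing", "seed")        # 42
print config.getfloat("pricing", "min-rate")  # 0.05

In the real policy, the same options would live under a [pricing] section of an INI-style configuration file.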
Code example #2
File: imagetransfer.py  Project: queba/haizea
    def __schedule_deadline(self, lease, vmrr, earliest, nexttime):
        config = get_config()
        reusealg = config.get("diskimage-reuse")
        avoidredundant = config.get("avoid-redundant-transfers")
        is_ready = False
            
        musttransfer = {}
        mustpool = {}
        nodeassignment = vmrr.nodes
        start = lease.start.requested
        end = lease.start.requested + lease.duration.requested
        for (vnode, pnode) in nodeassignment.items():
            self.logger.debug("Scheduling image transfer of '%s' for vnode %i to physnode %i" % (lease.software.image_id, vnode, pnode))

            if reusealg == constants.REUSE_IMAGECACHES:
                if self.resourcepool.exists_reusable_image(pnode, lease.software.image_id, start):
                    self.logger.debug("No need to schedule an image transfer (reusing an image in pool)")
                    mustpool[vnode] = pnode                            
                else:
                    self.logger.debug("Need to schedule a transfer.")
                    musttransfer[vnode] = pnode
            else:
                self.logger.debug("Need to schedule a transfer.")
                musttransfer[vnode] = pnode

        if len(musttransfer) == 0:
            is_ready = True
            transfer_rrs = []
        else:
            try:
                transfer_rrs = self.__schedule_imagetransfer_edf(lease, musttransfer, earliest, nexttime)
            except NotSchedulableException, exc:
                raise
Code example #3
File: imagetransfer.py  Project: queba/haizea
    def __schedule_asap(self, lease, vmrr, earliest, nexttime):
        config = get_config()
        reusealg = config.get("diskimage-reuse")
        avoidredundant = config.get("avoid-redundant-transfers")

        is_ready = False

        transfer_rrs = []
        musttransfer = {}
        piggybacking = []
        for (vnode, pnode) in vmrr.nodes.items():
            earliest_type = earliest[pnode].type
            if earliest_type == ImageTransferEarliestStartingTime.EARLIEST_REUSE:
                # Add to pool
                self.logger.debug("Reusing image for V%i->P%i." % (vnode, pnode))
                self.resourcepool.add_mapping_to_existing_reusable_image(pnode, lease.software.image_id, lease, vnode, vmrr.end)
                self.resourcepool.add_diskimage(pnode, lease.software.image_id, lease.software.image_size, lease, vnode)
            elif earliest_type == ImageTransferEarliestStartingTime.EARLIEST_PIGGYBACK:
                # We can piggyback on an existing transfer
                transfer_rr = earliest[pnode].piggybacking_on
                transfer_rr.piggyback(lease, vnode, pnode)
                self.logger.debug("Piggybacking transfer for V%i->P%i on existing transfer in lease %i." % (vnode, pnode, transfer_rr.lease.id))
                piggybacking.append(transfer_rr)
            else:
                # Transfer
                musttransfer[vnode] = pnode
                self.logger.debug("Must transfer V%i->P%i." % (vnode, pnode))

        if len(musttransfer)>0:
            transfer_rrs = self.__schedule_imagetransfer_fifo(lease, musttransfer, earliest)
            
        if len(musttransfer)==0 and len(piggybacking)==0:
            is_ready = True
            
        return transfer_rrs, is_ready
Code example #4
File: imagetransfer.py  Project: queba/haizea
    def __schedule_imagetransfer_edf(self, lease, musttransfer, earliest, nexttime):
        # Estimate image transfer time 
        bandwidth = self.deployment_enact.get_bandwidth()
        config = get_config()
        mechanism = config.get("transfer-mechanism")
        transfer_duration = self.__estimate_image_transfer_time(lease, bandwidth)
        if mechanism == constants.TRANSFER_UNICAST:
            transfer_duration *= len(musttransfer)

        # Determine start time
        start = self.__get_last_transfer_slot(lease.start.requested, transfer_duration)
        if start == None or start < nexttime:
            raise NotSchedulableException("Could not schedule the file transfer to complete in time.")
        
        res = {}
        resimgnode = Capacity([constants.RES_NETOUT])
        resimgnode.set_quantity(constants.RES_NETOUT, bandwidth)
        resnode = Capacity([constants.RES_NETIN])
        resnode.set_quantity(constants.RES_NETIN, bandwidth)
        res[self.imagenode.id] = self.slottable.create_resource_tuple_from_capacity(resimgnode)
        for pnode in musttransfer.values():
            res[pnode] = self.slottable.create_resource_tuple_from_capacity(resnode)
        
        newtransfer = FileTransferResourceReservation(lease, res)
        newtransfer.deadline = lease.start.requested
        newtransfer.state = ResourceReservation.STATE_SCHEDULED
        newtransfer.file = lease.software.image_id
        newtransfer.start = start
        newtransfer.end = start + transfer_duration
        for vnode, pnode in musttransfer.items():
            newtransfer.piggyback(lease, vnode, pnode)
        
        bisect.insort(self.transfers, newtransfer)
        
        return [newtransfer]
Code example #5
    def find_earliest_starting_times(self, lease, nexttime):
        node_ids = [node.id for node in self.resourcepool.get_nodes()]
        config = get_config()
        mechanism = config.get("transfer-mechanism")
        reusealg = config.get("diskimage-reuse")
        avoidredundant = config.get("avoid-redundant-transfers")

        if type(lease.software) == UnmanagedSoftwareEnvironment:
            earliest = {}
            for node in node_ids:
                earliest[node] = EarliestStartingTime(
                    nexttime, EarliestStartingTime.EARLIEST_NOPREPARATION)
            return earliest

        # Figure out earliest times assuming we have to transfer the images
        transfer_duration = self.__estimate_image_transfer_time(
            lease, self.imagenode_bandwidth)
        if mechanism == constants.TRANSFER_UNICAST:
            transfer_duration *= lease.numnodes
        start = self.__get_next_transfer_slot(nexttime, transfer_duration)
        earliest = {}
        for node in node_ids:
            earliest[node] = ImageTransferEarliestStartingTime(
                start + transfer_duration,
                ImageTransferEarliestStartingTime.EARLIEST_IMAGETRANSFER)
            earliest[node].transfer_start = start

        # Check if we can reuse images
        if reusealg == constants.REUSE_IMAGECACHES:
            nodeswithimg = self.resourcepool.get_nodes_with_reusable_image(
                lease.software.image_id)
            for node in nodeswithimg:
                earliest[node].time = nexttime
                earliest[node].type = ImageTransferEarliestStartingTime.EARLIEST_REUSE

        # Check if we can avoid redundant transfers
        if avoidredundant:
            if mechanism == constants.TRANSFER_UNICAST:
                # Piggybacking not supported if unicasting
                # each individual image
                pass
            if mechanism == constants.TRANSFER_MULTICAST:
                # We can only piggyback on transfers that haven't started yet
                transfers = [
                    t for t in self.transfers
                    if t.state == ResourceReservation.STATE_SCHEDULED
                ]
                for t in transfers:
                    if t.file == lease.software.image_id:
                        start = t.end
                        if start > nexttime:
                            for n in earliest:
                                if start < earliest[n].time:
                                    earliest[n].time = start
                                    earliest[n].type = ImageTransferEarliestStartingTime.EARLIEST_PIGGYBACK
                                    earliest[n].piggybacking_on = t

        return earliest
Code example #6
File: imagetransfer.py  Project: queba/haizea
 def __estimate_image_transfer_time(self, lease, bandwidth):
     config = get_config()
     force_transfer_time = config.get("force-imagetransfer-time")
     if force_transfer_time != None:
         return force_transfer_time
     else:      
         return estimate_transfer_time(lease.software.image_size, bandwidth)    
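estimate_transfer_time itself is not shown in these excerpts. The following is only a plausible sketch of what such a helper could compute, assuming image_size is in MB, bandwidth is in MB/s, and a time delta is expected back; it is an assumption, not Haizea's actual implementation:

from datetime import timedelta

def estimate_transfer_time_sketch(size_mb, bandwidth_mb_per_s):
    # size / bandwidth, returned as a duration
    return timedelta(seconds=float(size_mb) / bandwidth_mb_per_s)

print estimate_transfer_time_sketch(1024, 50)   # 0:00:20.480000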
Code example #7
 def Cognitive(self, lease, nexttime):
     lease.print_contents()
     try:
         self.__schedule_lease(lease, nexttime=nexttime)
         self.logger.info("Cognitive lease lease #%i has been scheduled." %
                          lease.id)
         lease.print_contents()
     except NotSchedulableException, exc:
         # Exception parameter set to save the node we tried to
         self.logger.info(
             "Cognitive lease lease request #%i cannot be scheduled now a time, it will be queued on node"
             % (lease.id))
         lease.start = Timestamp(Timestamp.UNSPECIFIED)
         lease.preemptible = True
         lease.cognitive = False
         lease.set_state(Lease.STATE_QUEUED)
         self.logger.info(
             "Queued Cognitive lease request #%i, %i nodes for %s." %
             (lease.id, lease.numnodes, lease.duration.requested))
         get_persistence().persist_lease(lease)
         try:
             self.logger.info(
                 "Next request in the queue is lease %i. Attempting to schedule..."
                 % lease.id)
             lease.print_contents()
             self.__schedule_lease(lease, nexttime)
         except NotSchedulableException, msg:
             # Put back on queue: TO THE NEXT PROCESS
             lease.queue_starttime = int(round(time.time() * 1000))
             self.queue.enqueue(lease)
             self.logger.info(
                 "Lease %i could not be scheduled at this time." % lease.id)
             if get_config().get(
                     "backfilling") == constants.BACKFILLING_OFF:
                 done = True
Code example #8
File: lease_scheduler.py  Project: Hamdy/haizea
 def __process_queue(self, nexttime):
     """ Traverses the queue in search of leases that can be scheduled.
     
     This method processes the queue in order, but takes into account that
     it may be possible to schedule leases in the future (using a 
     backfilling algorithm)
     
     Arguments:
     nexttime -- The next time at which the scheduler can allocate resources.
     """        
     
     done = False
     newqueue = Queue()
     while not done and not self.is_queue_empty():
         if not self.vm_scheduler.can_schedule_in_future() and self.slottable.is_full(nexttime, restype = constants.RES_CPU):
             self.logger.debug("Used up all future reservations and slot table is full. Skipping rest of queue.")
             done = True
         else:
             lease = self.queue.dequeue()
             try:
                 self.logger.info("Next request in the queue is lease %i. Attempting to schedule..." % lease.id)
                 lease.print_contents()
                 self.__schedule_lease(lease, nexttime)
             except NotSchedulableException, msg:
                 # Put back on queue
                 newqueue.enqueue(lease)
                 self.logger.info("Lease %i could not be scheduled at this time." % lease.id)
                 if get_config().get("backfilling") == constants.BACKFILLING_OFF:
                     done = True
Code example #9
File: lease_scheduler.py  Project: queba/haizea
    def _handle_end_lease(self, l):
        """Performs actions that have to be done each time a lease ends.
        
        Arguments:
        lease -- Lease that has ended
        """
        l.set_state(Lease.STATE_DONE)
        l.duration.actual = l.duration.accumulated
        l.end = round_datetime(get_clock().get_time())
        if get_config().get("sanity-check"):
            if l.duration.known != None and l.duration.known < l.duration.requested:
                duration = l.duration.known
            else:
                duration = l.duration.requested
                
            assert duration == l.duration.actual

            if l.start.is_requested_exact():
                assert l.vm_rrs[0].start >= l.start.requested
            if l.deadline != None:
                assert l.end <= l.deadline

        self.preparation_scheduler.cleanup(l)
        self.completed_leases.add(l)
        self.leases.remove(l)
        self.accounting.at_lease_done(l)
Code example #10
File: lease_scheduler.py  Project: queba/haizea
    def __preempt_lease(self, lease, preemption_time):
        """ Preempts a lease.
        
        This method preempts a lease such that any resources allocated
        to that lease after a given time are freed up. This may require
        scheduling the lease to suspend before that time, or cancelling
        the lease altogether.
        
        Arguments:
        lease -- Lease to schedule.
        preemption_time -- Time at which lease must be preempted
        """       
        
        self.logger.info("Preempting lease #%i..." % (lease.id))
        self.logger.vdebug("Lease before preemption:")
        lease.print_contents()
        vmrr = lease.get_last_vmrr()
        
        if vmrr.state == ResourceReservation.STATE_SCHEDULED and vmrr.start >= preemption_time:
            self.logger.debug("Lease was set to start in the middle of the preempting lease.")
            must_cancel_and_requeue = True
        else:
            susptype = get_config().get("suspension")
            if susptype == constants.SUSPENSION_NONE:
                must_cancel_and_requeue = True
            else:
                can_suspend = self.vm_scheduler.can_suspend_at(lease, preemption_time)
                if not can_suspend:
                    self.logger.debug("Suspending the lease does not meet scheduling threshold.")
                    must_cancel_and_requeue = True
                else:
                    if lease.numnodes > 1 and susptype == constants.SUSPENSION_SERIAL:
                        self.logger.debug("Can't suspend lease because only suspension of single-node leases is allowed.")
                        must_cancel_and_requeue = True
                    else:
                        self.logger.debug("Lease can be suspended")
                        must_cancel_and_requeue = False
                    
        if must_cancel_and_requeue:
            self.logger.info("... lease #%i has been cancelled and requeued." % lease.id)
            if lease.get_state() == Lease.STATE_SUSPENDED_SCHEDULED:
                self.preparation_scheduler.cancel_preparation(lease, remove_files = False)
            else:
                self.preparation_scheduler.cancel_preparation(lease)
            self.vm_scheduler.cancel_vm(vmrr)
            lease.remove_vmrr(vmrr)
            # TODO: Take into account other states
            if lease.get_state() == Lease.STATE_SUSPENDED_SCHEDULED:
                lease.set_state(Lease.STATE_SUSPENDED_QUEUED)
            else:
                lease.set_state(Lease.STATE_QUEUED)
            self.__enqueue_in_order(lease)
        else:
            self.logger.info("... lease #%i will be suspended at %s." % (lease.id, preemption_time))
            self.vm_scheduler.preempt_vm(vmrr, preemption_time)            
            
        get_persistence().persist_lease(lease)

        self.logger.vdebug("Lease after preemption:")
        lease.print_contents()
Code example #11
 def __estimate_image_transfer_time(self, lease, bandwidth):
     config = get_config()
     force_transfer_time = config.get("force-imagetransfer-time")
     if force_transfer_time != None:
         return force_transfer_time
     else:
         return estimate_transfer_time(lease.software.image_size, bandwidth)
Code example #12
    def Cognitive(self, lease, nexttime):
        lease.print_contents()
        try:
            self.__schedule_lease(lease, nexttime=nexttime)
            self.logger.info("Cognitive lease lease #%i has been scheduled." % lease.id)
            lease.print_contents()
        except NotSchedulableException, exc:
            # Exception parameter set to save the node we tried to
            self.logger.info("Cognitive lease lease request #%i cannot be scheduled now a time, it will be queued on node" % (lease.id))
            lease.start = Timestamp(Timestamp.UNSPECIFIED)
            lease.preemptible = True
            lease.cognitive = False
            lease.set_state(Lease.STATE_QUEUED)
            self.logger.info("Queued Cognitive lease request #%i, %i nodes for %s." % (lease.id, lease.numnodes, lease.duration.requested))
            get_persistence().persist_lease(lease)
            try:
                self.logger.info("Next request in the queue is lease %i. Attempting to schedule..." % lease.id)
                lease.print_contents()
                self.__schedule_lease(lease, nexttime)
            except NotSchedulableException, msg:
                # Put back on queue: TO THE NEXT PROCESS
                lease.queue_starttime = int(round(time.time() * 1000))
                self.queue.enqueue(lease)
                self.logger.info("Lease %i could not be scheduled at this time." % lease.id)
                if get_config().get("backfilling") == constants.BACKFILLING_OFF:
                    done = True
Code example #13
File: pricing.py  Project: queba/haizea
 def __init__(self, slottable):
     """Constructor
     
     Argument
     slottable -- A fully constructed SlotTable
     """        
     PricingPolicy.__init__(self, slottable)
     self.rate = get_config().config.getfloat("pricing", "rate")
Code example #14
File: pricing.py  Project: queba/haizea
    def get_surcharge(self, preempted_leases):
        if get_config().get("suspension") == constants.SUSPENSION_NONE:
            return 0
        surcharge = 0
        for l in preempted_leases:
            suspend_time = l.estimate_suspend_time()
            resume_time = l.estimate_resume_time()
            surcharge += ((suspend_time + resume_time).seconds / 3600.0) * l.numnodes * self.rate 

        return surcharge    
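To make the surcharge formula concrete, here is the same arithmetic with made-up numbers (not taken from Haizea): a 4-node preempted lease, 10 minutes of combined suspend and resume time, and a rate of 0.05 per node-hour.

suspend_plus_resume_seconds = 600   # 10 minutes of suspend + resume (assumed)
numnodes = 4                        # assumed
rate = 0.05                         # price per node-hour (assumed)

surcharge = (suspend_plus_resume_seconds / 3600.0) * numnodes * rate
print surcharge                     # 0.0333...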
Code example #15
File: imagetransfer.py  Project: queba/haizea
    def _add_diskimages(self, pnode_id, diskimage_id, diskimage_size, vnodes, timeout):
        self.logger.debug("Adding image for leases=%s in nod_id=%i" % (vnodes, pnode_id))

        config = get_config()
        reusealg = config.get("diskimage-reuse")
        if reusealg == constants.REUSE_IMAGECACHES:
            maxcachesize = config.get("diskimage-cache-size")
        else:
            maxcachesize = None
            
        pnode = self.resourcepool.get_node(pnode_id)

        if reusealg == constants.REUSE_NONE:
            for (lease, vnode) in vnodes:
                self.resourcepool.add_diskimage(pnode_id, diskimage_id, diskimage_size, lease, vnode)
        elif reusealg == constants.REUSE_IMAGECACHES:
            # Sometimes we might find that the image is already deployed
            # (although unused). In that case, don't add another copy to
            # the pool. Just "reactivate" it.
            if pnode.exists_reusable_image(diskimage_id):
                for (lease, vnode) in vnodes:
                    pnode.add_mapping_to_existing_reusable_image(diskimage_id, lease, vnode, timeout)
            else:
                if maxcachesize == constants.CACHESIZE_UNLIMITED:
                    can_add_to_cache = True
                else:
                    # We may have to remove images from the cache
                    cachesize = pnode.get_reusable_images_size()
                    reqsize = cachesize + diskimage_size
                    if reqsize > maxcachesize:
                        # Have to shrink cache
                        desiredsize = maxcachesize - diskimage_size
                        self.logger.debug("Adding the image would make the size of pool in node %i = %iMB. Will try to bring it down to %i" % (pnode_id, reqsize, desiredsize))
                        pnode.print_files()
                        success = pnode.purge_downto(maxcachesize)
                        if not success:
                            can_add_to_cache = False
                        else:
                            can_add_to_cache = True
                    else:
                        can_add_to_cache = True
                        
                if can_add_to_cache:
                    self.resourcepool.add_reusable_image(pnode_id, diskimage_id, diskimage_size, vnodes, timeout)
                else:
                    # This just means we couldn't add the image
                    # to the pool. We will have to make do with just adding the tainted images.
                    self.logger.debug("Unable to add to pool. Must create individual disk images directly instead.")
                    
            # Besides adding the image to the cache, we need to create a separate image for
            # this specific lease
            for (lease, vnode) in vnodes:
                self.resourcepool.add_diskimage(pnode_id, diskimage_id, diskimage_size, lease, vnode)
                    
        pnode.print_files()
Code example #16
    def __process_queue(self, nexttime):
        """ Traverses the queue in search of leases that can be scheduled.
        
        This method processes the queue in order, but takes into account that
        it may be possible to schedule leases in the future (using a 
        backfilling algorithm)
        
        Arguments:
        nexttime -- The next time at which the scheduler can allocate resources.
        """
        done = False
        newqueue = Queue()

        while not done and not self.is_queue_empty():
            if not self.vm_scheduler.can_schedule_in_future() and self.slottable.is_full(nexttime, restype=constants.RES_CPU):
                self.logger.debug("Used up all future reservations and slot table is full. Skipping rest of queue.")
                done = True
            else:
                lease = self.queue.dequeue()
                #############################################################################################################################
                #############################################################################################################################
                #			Third change (2): Calculate queueing time (Scheduling overhead)					    #
                #############################################################################################################################
                #############################################################################################################################
                lease.queue_endtime = int(round(time.time() * 1000))
                lease.queuingtime = lease.queuingtime + (lease.queue_endtime -
                                                         lease.queue_starttime)
                nexttime = self.add_overhead(nexttime, lease.queuingtime)
                #############################################################################################################################
                try:
                    self.logger.info(
                        "Next request in the queue is lease %i. Attempting to schedule..."
                        % lease.id)
                    lease.print_contents()
                    self.__schedule_lease(lease, nexttime)
                except NotSchedulableException, msg:
                    # Put back on queue
                    #############################################################################################################################
                    #############################################################################################################################
                    #			Third change (1): Calculate queueing time (Scheduling overhead)					    #
                    #############################################################################################################################
                    #############################################################################################################################
                    lease.queue_starttime = int(round(time.time() * 1000))
                    #############################################################################################################################
                    newqueue.enqueue(lease)
                    self.logger.info(
                        "Lease %i could not be scheduled at this time." %
                        lease.id)
                    if get_config().get(
                            "backfilling") == constants.BACKFILLING_OFF:
                        done = True
Code example #17
 def estimate_migration_time(self, lease):
     migration = get_config().get("migration")
     if migration == constants.MIGRATE_YES:
         vmrr = lease.get_last_vmrr()
         images_in_pnode = dict([(pnode,0) for pnode in set(vmrr.nodes.values())])
         for (vnode,pnode) in vmrr.nodes.items():
             images_in_pnode[pnode] += lease.software.image_size
         max_to_transfer = max(images_in_pnode.values())
         bandwidth = self.resourcepool.info.get_migration_bandwidth()
         return estimate_transfer_time(max_to_transfer, bandwidth)
     elif migration == constants.MIGRATE_YES_NOTRANSFER:
         return TimeDelta(seconds=0)
Code example #18
 def estimate_migration_time(self, lease):
     migration = get_config().get("migration")
     if migration == constants.MIGRATE_YES:
         vmrr = lease.get_last_vmrr()
         images_in_pnode = dict([(pnode, 0)
                                 for pnode in set(vmrr.nodes.values())])
         for (vnode, pnode) in vmrr.nodes.items():
             images_in_pnode[pnode] += lease.software.image_size
         max_to_transfer = max(images_in_pnode.values())
         bandwidth = self.resourcepool.info.get_migration_bandwidth()
         return estimate_transfer_time(max_to_transfer, bandwidth)
     elif migration == constants.MIGRATE_YES_NOTRANSFER:
         return TimeDelta(seconds=0)
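The two estimate_migration_time excerpts above charge one copy of the disk image to each physical node per virtual node mapped there, then size the migration by the largest per-node total. A tiny standalone illustration of that aggregation (the mapping and image size are made up):

nodes = {1: "pnodeA", 2: "pnodeA", 3: "pnodeB"}   # vnode -> pnode (assumed)
image_size = 1024                                  # MB (assumed)

images_in_pnode = dict([(pnode, 0) for pnode in set(nodes.values())])
for (vnode, pnode) in nodes.items():
    images_in_pnode[pnode] += image_size

print images_in_pnode                  # {'pnodeA': 2048, 'pnodeB': 1024}
print max(images_in_pnode.values())    # 2048 MB is the worst-case transfer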
Code example #19
File: imagetransfer.py  Project: queba/haizea
    def find_earliest_starting_times(self, lease, nexttime):
        node_ids = [node.id for node in self.resourcepool.get_nodes()]  
        config = get_config()
        mechanism = config.get("transfer-mechanism")
        reusealg = config.get("diskimage-reuse")
        avoidredundant = config.get("avoid-redundant-transfers")
        
        if type(lease.software) == UnmanagedSoftwareEnvironment:
            earliest = {}
            for node in node_ids:
                earliest[node] = EarliestStartingTime(nexttime, EarliestStartingTime.EARLIEST_NOPREPARATION)
            return earliest
        
        # Figure out earliest times assuming we have to transfer the images
        transfer_duration = self.__estimate_image_transfer_time(lease, self.imagenode_bandwidth)
        if mechanism == constants.TRANSFER_UNICAST:
            transfer_duration *= lease.numnodes
        start = self.__get_next_transfer_slot(nexttime, transfer_duration)
        earliest = {}
        for node in node_ids:
            earliest[node] = ImageTransferEarliestStartingTime(start + transfer_duration, ImageTransferEarliestStartingTime.EARLIEST_IMAGETRANSFER)
            earliest[node].transfer_start = start
                
        # Check if we can reuse images
        if reusealg == constants.REUSE_IMAGECACHES:
            nodeswithimg = self.resourcepool.get_nodes_with_reusable_image(lease.software.image_id)
            for node in nodeswithimg:
                earliest[node].time = nexttime
                earliest[node].type = ImageTransferEarliestStartingTime.EARLIEST_REUSE
        
                
        # Check if we can avoid redundant transfers
        if avoidredundant:
            if mechanism == constants.TRANSFER_UNICAST:
                # Piggybacking not supported if unicasting 
                # each individual image
                pass
            if mechanism == constants.TRANSFER_MULTICAST:                
                # We can only piggyback on transfers that haven't started yet
                transfers = [t for t in self.transfers if t.state == ResourceReservation.STATE_SCHEDULED]
                for t in transfers:
                    if t.file == lease.software.image_id:
                        start = t.end
                        if start > nexttime:
                            for n in earliest:
                                if start < earliest[n].time:
                                    earliest[n].time = start
                                    earliest[n].type = ImageTransferEarliestStartingTime.EARLIEST_PIGGYBACK
                                    earliest[n].piggybacking_on = t

        return earliest
Code example #20
    def resume(self, action):
        for vnode in action.vnodes:
            # Unpack action
            vid = action.vnodes[vnode].enactment_info
            
            self.logger.debug("Sending request to resume VM for L%iV%i (ONE: vid=%i)"
                         % (action.lease_haizea_id, vnode, vid))

            try:
                self.rpc.vm_resume(vid)
                self.logger.debug("Request succesful.")
            except Exception, msg:
                raise OpenNebulaEnactmentError("vm.resume", msg)
            
            # Space out commands to avoid OpenNebula from getting saturated
            # TODO: We should spawn out a thread to do this, so Haizea isn't
            # blocking until all these commands end
            interval = get_config().get("enactment-overhead").seconds
            sleep(interval)
Code example #21
    def __schedule_asap(self, lease, vmrr, earliest):
        config = get_config()
        reusealg = config.get("diskimage-reuse")
        avoidredundant = config.get("avoid-redundant-transfers")

        is_ready = False

        transfer_rrs = []
        musttransfer = {}
        piggybacking = []
        for (vnode, pnode) in vmrr.nodes.items():
            earliest_type = earliest[pnode].type
            if earliest_type == ImageTransferEarliestStartingTime.EARLIEST_REUSE:
                # Add to pool
                self.logger.debug("Reusing image for V%i->P%i." %
                                  (vnode, pnode))
                self.resourcepool.add_mapping_to_existing_reusable_image(
                    pnode, lease.software.image_id, lease.id, vnode, vmrr.end)
                self.resourcepool.add_diskimage(pnode, lease.software.image_id,
                                                lease.software.image_size,
                                                lease.id, vnode)
            elif earliest_type == ImageTransferEarliestStartingTime.EARLIEST_PIGGYBACK:
                # We can piggyback on an existing transfer
                transfer_rr = earliest[pnode].piggybacking_on
                transfer_rr.piggyback(lease.id, vnode, pnode)
                self.logger.debug(
                    "Piggybacking transfer for V%i->P%i on existing transfer in lease %i."
                    % (vnode, pnode, transfer_rr.lease.id))
                piggybacking.append(transfer_rr)
            else:
                # Transfer
                musttransfer[vnode] = pnode
                self.logger.debug("Must transfer V%i->P%i." % (vnode, pnode))

        if len(musttransfer) > 0:
            transfer_rrs = self.__schedule_imagetransfer_fifo(
                lease, musttransfer, earliest)

        if len(musttransfer) == 0 and len(piggybacking) == 0:
            is_ready = True

        return transfer_rrs, is_ready
Code example #22
    def resume(self, action):
        for vnode in action.vnodes:
            # Unpack action
            vid = action.vnodes[vnode].enactment_info

            self.logger.debug(
                "Sending request to resume VM for L%iV%i (ONE: vid=%i)" %
                (action.lease_haizea_id, vnode, vid))

            try:
                self.rpc.vm_resume(vid)
                self.logger.debug("Request succesful.")
            except Exception, msg:
                raise OpenNebulaEnactmentError("vm.resume", msg)

            # Space out commands to avoid OpenNebula from getting saturated
            # TODO: We should spawn out a thread to do this, so Haizea isn't
            # blocking until all these commands end
            interval = get_config().get("enactment-overhead").seconds
            sleep(interval)
Code example #23
    def __schedule_imagetransfer_fifo(self, lease, musttransfer, earliest):
        # Estimate image transfer time
        bandwidth = self.imagenode_bandwidth
        config = get_config()
        mechanism = config.get("transfer-mechanism")

        # The starting time is the first available slot, which was
        # included in the "earliest" dictionary.
        pnodes = musttransfer.values()
        start = earliest[pnodes[0]].transfer_start
        transfer_duration = self.__estimate_image_transfer_time(
            lease, bandwidth)

        res = {}
        resimgnode = Capacity([constants.RES_NETOUT])
        resimgnode.set_quantity(constants.RES_NETOUT, bandwidth)
        resnode = Capacity([constants.RES_NETIN])
        resnode.set_quantity(constants.RES_NETIN, bandwidth)
        res[self.imagenode.id] = self.slottable.create_resource_tuple_from_capacity(resimgnode)
        for n in musttransfer.values():
            res[n] = self.slottable.create_resource_tuple_from_capacity(
                resnode)

        newtransfer = FileTransferResourceReservation(lease, res)
        newtransfer.start = start
        if mechanism == constants.TRANSFER_UNICAST:
            newtransfer.end = start + (len(musttransfer) * transfer_duration)
        if mechanism == constants.TRANSFER_MULTICAST:
            newtransfer.end = start + transfer_duration

        newtransfer.deadline = None
        newtransfer.state = ResourceReservation.STATE_SCHEDULED
        newtransfer.file = lease.software.image_id
        for vnode, pnode in musttransfer.items():
            newtransfer.piggyback(lease.id, vnode, pnode)

        bisect.insort(self.transfers, newtransfer)

        return [newtransfer]
Code example #24
File: preemption.py  Project: queba/haizea
    def get_lease_preemptability_score(self, preemptor, preemptee, time):
        """Computes the lease preemptability score
        
        See class documentation for details on what policy is implemented here.
        See documentation of PreemptabilityPolicy.get_lease_preemptability_score
        for more details on this function.
        
        Arguments:
        preemptor -- Preemptor lease
        preemptee -- Preemptee lease
        time -- Time at which preemption would take place
        """
        susptype = get_config().get("suspension")

        if preemptee.get_type() == Lease.DEADLINE:
            # We can only preempt leases in these states
            if not preemptee.get_state() in (
                Lease.STATE_SCHEDULED,
                Lease.STATE_READY,
                Lease.STATE_ACTIVE,
                Lease.STATE_SUSPENDING,
                Lease.STATE_SUSPENDED_PENDING,
                Lease.STATE_SUSPENDED_SCHEDULED,
            ):
                return -1

            deadline = preemptee.deadline
            if susptype == constants.SUSPENSION_NONE:
                remaining_duration = preemptee.duration.requested
                delay = preemptor.duration.requested
            else:
                remaining_duration = preemptee.get_remaining_duration_at(time)
                delay = (
                    preemptee.estimate_suspend_time() + preemptor.duration.requested + preemptee.estimate_resume_time()
                )
            if time + delay + remaining_duration < deadline:
                slack = (deadline - (time + delay)) / remaining_duration
                return slack
            else:
                return -1
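A quick numeric check of the slack formula, with invented values: if preemption happens at time t, the delay (suspend time + preemptor duration + resume time) is 2 hours, the preemptee still needs 3 hours of remaining duration, and its deadline is 8 hours after t, then t + 2 + 3 < t + 8, so the lease fits and the score is slack = (8 - 2) / 3 = 2.0. If the deadline were only 4 hours away, 2 + 3 > 4, so the method would return -1 and the lease would not be considered preemptable.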
Code example #25
    def __schedule_deadline(self, lease, vmrr, earliest):
        config = get_config()
        reusealg = config.get("diskimage-reuse")
        avoidredundant = config.get("avoid-redundant-transfers")
        is_ready = False

        musttransfer = {}
        mustpool = {}
        nodeassignment = vmrr.nodes
        start = lease.start.requested
        end = lease.start.requested + lease.duration.requested
        for (vnode, pnode) in nodeassignment.items():
            lease_id = lease.id
            self.logger.debug(
                "Scheduling image transfer of '%s' for vnode %i to physnode %i"
                % (lease.software.image_id, vnode, pnode))

            if reusealg == constants.REUSE_IMAGECACHES:
                if self.resourcepool.exists_reusable_image(
                        pnode, lease.software.image_id, start):
                    self.logger.debug(
                        "No need to schedule an image transfer (reusing an image in pool)"
                    )
                    mustpool[vnode] = pnode
                else:
                    self.logger.debug("Need to schedule a transfer.")
                    musttransfer[vnode] = pnode
            else:
                self.logger.debug("Need to schedule a transfer.")
                musttransfer[vnode] = pnode

        if len(musttransfer) == 0:
            is_ready = True
        else:
            try:
                transfer_rrs = self.__schedule_imagetransfer_edf(
                    lease, musttransfer, earliest)
            except NotSchedulableException, exc:
                raise
Code example #26
    def __schedule_imagetransfer_edf(self, lease, musttransfer, earliest):
        # Estimate image transfer time
        bandwidth = self.deployment_enact.get_bandwidth()
        config = get_config()
        mechanism = config.get("transfer-mechanism")
        transfer_duration = self.__estimate_image_transfer_time(
            lease, bandwidth)
        if mechanism == constants.TRANSFER_UNICAST:
            transfer_duration *= len(musttransfer)

        # Determine start time
        start = self.__get_last_transfer_slot(lease.start.requested,
                                              transfer_duration)

        res = {}
        resimgnode = Capacity([constants.RES_NETOUT])
        resimgnode.set_quantity(constants.RES_NETOUT, bandwidth)
        resnode = Capacity([constants.RES_NETIN])
        resnode.set_quantity(constants.RES_NETIN, bandwidth)
        res[self.imagenode.id] = self.slottable.create_resource_tuple_from_capacity(resimgnode)
        for pnode in musttransfer.values():
            res[pnode] = self.slottable.create_resource_tuple_from_capacity(
                resnode)

        newtransfer = FileTransferResourceReservation(lease, res)
        newtransfer.deadline = lease.start.requested
        newtransfer.state = ResourceReservation.STATE_SCHEDULED
        newtransfer.file = lease.software.image_id
        newtransfer.start = start
        newtransfer.end = start + transfer_duration
        for vnode, pnode in musttransfer.items():
            newtransfer.piggyback(lease.id, vnode, pnode)

        bisect.insort(self.transfers, newtransfer)

        return [newtransfer]
Code example #27
File: lease_scheduler.py  Project: Hamdy/haizea
 def fail_lease(self, lease, exc=None):
     """Transitions a lease to a failed state, and does any necessary cleaning up
     
     Arguments:
     lease -- Lease to fail
     exc -- The exception that made the lease fail
     """
     treatment = get_config().get("lease-failure-handling")
     
     if treatment == constants.ONFAILURE_CANCEL:
         # In this case, a lease failure is handled by cancelling the lease,
         # but allowing Haizea to continue to run normally.
         rrs = lease.get_scheduled_reservations()
         for r in rrs:
             self.slottable.remove_reservation(r)
         lease.set_state(Lease.STATE_FAIL)
         self.completed_leases.add(lease)
         self.leases.remove(lease)
         get_persistence().persist_lease(lease)
     elif treatment == constants.ONFAILURE_EXIT or treatment == constants.ONFAILURE_EXIT_RAISE:
         # In this case, a lease failure makes Haizea exit. This is useful when debugging,
         # so we can immediately know about any errors.
         raise UnrecoverableError(exc)
Code example #28
    def fail_lease(self, lease, exc=None):
        """Transitions a lease to a failed state, and does any necessary cleaning up
        
        Arguments:
        lease -- Lease to fail
        exc -- The exception that made the lease fail
        """
        treatment = get_config().get("lease-failure-handling")

        if treatment == constants.ONFAILURE_CANCEL:
            # In this case, a lease failure is handled by cancelling the lease,
            # but allowing Haizea to continue to run normally.
            rrs = lease.get_scheduled_reservations()
            for r in rrs:
                self.slottable.remove_reservation(r)
            lease.set_state(Lease.STATE_FAIL)
            self.completed_leases.add(lease)
            self.leases.remove(lease)
            get_persistence().persist_lease(lease)
        elif treatment == constants.ONFAILURE_EXIT or treatment == constants.ONFAILURE_EXIT_RAISE:
            # In this case, a lease failure makes Haizea exit. This is useful when debugging,
            # so we can immediately know about any errors.
            raise UnrecoverableError(exc)
Code example #29
File: imagetransfer.py  Project: queba/haizea
 def __schedule_imagetransfer_fifo(self, lease, musttransfer, earliest):
     # Estimate image transfer time 
     bandwidth = self.imagenode_bandwidth
     config = get_config()
     mechanism = config.get("transfer-mechanism")
     
     # The starting time is the first available slot, which was
     # included in the "earliest" dictionary.
     pnodes = musttransfer.values()
     start = earliest[pnodes[0]].transfer_start
     transfer_duration = self.__estimate_image_transfer_time(lease, bandwidth)
     
     res = {}
     resimgnode = Capacity([constants.RES_NETOUT])
     resimgnode.set_quantity(constants.RES_NETOUT, bandwidth)
     resnode = Capacity([constants.RES_NETIN])
     resnode.set_quantity(constants.RES_NETIN, bandwidth)
     res[self.imagenode.id] = self.slottable.create_resource_tuple_from_capacity(resimgnode)
     for n in musttransfer.values():
         res[n] = self.slottable.create_resource_tuple_from_capacity(resnode)
      
     newtransfer = FileTransferResourceReservation(lease, res)
     newtransfer.start = start
     if mechanism == constants.TRANSFER_UNICAST:
         newtransfer.end = start + (len(musttransfer) * transfer_duration)
     if mechanism == constants.TRANSFER_MULTICAST:
         newtransfer.end = start + transfer_duration
     
     newtransfer.deadline = None
     newtransfer.state = ResourceReservation.STATE_SCHEDULED
     newtransfer.file = lease.software.image_id
     for vnode, pnode in musttransfer.items():
         newtransfer.piggyback(lease, vnode, pnode)
         
     bisect.insort(self.transfers, newtransfer)
     
     return [newtransfer]
Code example #30
File: imagetransfer.py  Project: queba/haizea
    def schedule_migration(self, lease, vmrr, nexttime):
        if type(lease.software) == UnmanagedSoftwareEnvironment:
            return []
        
        # This code is the same as the one in vm_scheduler
        # Should be factored out
 
        last_vmrr = lease.get_last_vmrr()

        vnode_mappings = self.resourcepool.get_disk_image_mappings(lease)
        vnode_migrations = dict([(vnode, (vnode_mappings[vnode], vmrr.nodes[vnode])) for vnode in vmrr.nodes if vnode_mappings[vnode] != vmrr.nodes[vnode]])
        
        mustmigrate = False
        for vnode in vnode_migrations:
            if vnode_migrations[vnode][0] != vnode_migrations[vnode][1]:
                mustmigrate = True
                break
            
        if not mustmigrate:
            return []

        if get_config().get("migration") == constants.MIGRATE_YES_NOTRANSFER:
            start = nexttime
            end = nexttime
            res = {}
            migr_rr = DiskImageMigrationResourceReservation(lease, start, end, res, vmrr, vnode_migrations)
            migr_rr.state = ResourceReservation.STATE_SCHEDULED
            return [migr_rr]

        # Figure out what migrations can be done simultaneously
        migrations = []
        while len(vnode_migrations) > 0:
            pnodes = set()
            migration = {}
            for vnode in vnode_migrations:
                origin = vnode_migrations[vnode][0]
                dest = vnode_migrations[vnode][1]
                if not origin in pnodes and not dest in pnodes:
                    migration[vnode] = vnode_migrations[vnode]
                    pnodes.add(origin)
                    pnodes.add(dest)
            for vnode in migration:
                del vnode_migrations[vnode]
            migrations.append(migration)
        
        # Create migration RRs
        start = max(last_vmrr.post_rrs[-1].end, nexttime)
        bandwidth = self.resourcepool.info.get_migration_bandwidth()
        migr_rrs = []
        for m in migrations:
            mb_per_physnode = {}
            for vnode, (pnode_from, pnode_to) in m.items():
                mb_per_physnode[pnode_from] = mb_per_physnode.setdefault(pnode_from, 0) + lease.software.image_size
            max_mb_to_migrate = max(mb_per_physnode.values())
            migr_time = estimate_transfer_time(max_mb_to_migrate, bandwidth)
            end = start + migr_time
            res = {}
            for (origin,dest) in m.values():
                resorigin = Capacity([constants.RES_NETOUT])
                resorigin.set_quantity(constants.RES_NETOUT, bandwidth)
                resdest = Capacity([constants.RES_NETIN])
                resdest.set_quantity(constants.RES_NETIN, bandwidth)
                res[origin] = self.slottable.create_resource_tuple_from_capacity(resorigin)
                res[dest] = self.slottable.create_resource_tuple_from_capacity(resdest)                
            migr_rr = DiskImageMigrationResourceReservation(lease, start, start + migr_time, res, vmrr, m)
            migr_rr.state = ResourceReservation.STATE_SCHEDULED
            migr_rrs.append(migr_rr)
            start = end
        
        return migr_rrs
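The while-loop in the middle of schedule_migration greedily packs migrations into rounds so that no physical node acts as origin or destination twice in the same round. A small standalone illustration of just that grouping step (the vnode-to-(origin, destination) mapping is made up):

vnode_migrations = {1: ("A", "B"), 2: ("B", "C"), 3: ("D", "E")}

migrations = []
while len(vnode_migrations) > 0:
    pnodes = set()
    migration = {}
    for vnode in vnode_migrations:
        origin, dest = vnode_migrations[vnode]
        if not origin in pnodes and not dest in pnodes:
            migration[vnode] = vnode_migrations[vnode]
            pnodes.add(origin)
            pnodes.add(dest)
    for vnode in migration:
        del vnode_migrations[vnode]
    migrations.append(migration)

# Two rounds: vnode 3 always fits into the first round, while vnodes 1 and 2
# share pnode "B", so one of them is deferred to the second round.
print migrations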
Code example #31
    def schedule_migration(self, lease, vmrr, nexttime):
        if type(lease.software) == UnmanagedSoftwareEnvironment:
            return []

        # This code is the same as the one in vm_scheduler
        # Should be factored out
        last_vmrr = lease.get_last_vmrr()
        vnode_migrations = dict([(vnode, (last_vmrr.nodes[vnode],
                                          vmrr.nodes[vnode]))
                                 for vnode in vmrr.nodes])

        mustmigrate = False
        for vnode in vnode_migrations:
            if vnode_migrations[vnode][0] != vnode_migrations[vnode][1]:
                mustmigrate = True
                break

        if not mustmigrate:
            return []

        if get_config().get("migration") == constants.MIGRATE_YES_NOTRANSFER:
            start = nexttime
            end = nexttime
            res = {}
            migr_rr = DiskImageMigrationResourceReservation(
                lease, start, end, res, vmrr, vnode_migrations)
            migr_rr.state = ResourceReservation.STATE_SCHEDULED
            return [migr_rr]

        # Figure out what migrations can be done simultaneously
        migrations = []
        while len(vnode_migrations) > 0:
            pnodes = set()
            migration = {}
            for vnode in vnode_migrations:
                origin = vnode_migrations[vnode][0]
                dest = vnode_migrations[vnode][1]
                if not origin in pnodes and not dest in pnodes:
                    migration[vnode] = vnode_migrations[vnode]
                    pnodes.add(origin)
                    pnodes.add(dest)
            for vnode in migration:
                del vnode_migrations[vnode]
            migrations.append(migration)

        # Create migration RRs
        start = max(last_vmrr.post_rrs[-1].end, nexttime)
        bandwidth = self.resourcepool.info.get_migration_bandwidth()
        migr_rrs = []
        for m in migrations:
            mb_to_migrate = lease.software.image_size * len(m.keys())
            migr_time = estimate_transfer_time(mb_to_migrate, bandwidth)
            end = start + migr_time
            res = {}
            for (origin, dest) in m.values():
                resorigin = Capacity([constants.RES_NETOUT])
                resorigin.set_quantity(constants.RES_NETOUT, bandwidth)
                resdest = Capacity([constants.RES_NETIN])
                resdest.set_quantity(constants.RES_NETIN, bandwidth)
                res[origin] = self.slottable.create_resource_tuple_from_capacity(
                    resorigin)
                res[dest] = self.slottable.create_resource_tuple_from_capacity(
                    resdest)
            migr_rr = DiskImageMigrationResourceReservation(
                lease, start, start + migr_time, res, vmrr, m)
            migr_rr.state = ResourceReservation.STATE_SCHEDULED
            migr_rrs.append(migr_rr)
            start = end

        return migr_rrs
Code example #32
File: mapper.py  Project: queba/haizea
    def __preempt_lease_deadline(self, lease_to_preempt, preemption_start_time, preemption_end_time, nexttime):
        self.logger.debug("Attempting to preempt lease %i" % lease_to_preempt.id)
        self.slottable.push_state([lease_to_preempt])  
         
        feasible = True
        cancelled = []
        new_state = {}
        durs = {}

        preempt_vmrr = lease_to_preempt.get_vmrr_at(preemption_start_time)
        
        susptype = get_config().get("suspension")
        
        cancel = False
        
        if susptype == constants.SUSPENSION_NONE:
            self.logger.debug("Lease %i will be cancelled because suspension is not supported." % lease_to_preempt.id)
            cancel = True
        else:
            if preempt_vmrr == None:
                self.logger.debug("Lease %i was set to start in the middle of the preempting lease." % lease_to_preempt.id)
                cancel = True
            else:
                can_suspend = self.vm_scheduler.can_suspend_at(lease_to_preempt, preemption_start_time, nexttime)
                
                if not can_suspend:
                    self.logger.debug("Suspending lease %i does not meet scheduling threshold." % lease_to_preempt.id)
                    cancel = True
                else:
                    self.logger.debug("Lease %i will be suspended." % lease_to_preempt.id)
                    
        after_vmrrs = lease_to_preempt.get_vmrr_after(preemption_start_time)

        if not cancel:
            # Preempting
            durs[lease_to_preempt] = lease_to_preempt.get_remaining_duration_at(preemption_start_time)             
            self.vm_scheduler.preempt_vm(preempt_vmrr, min(preemption_start_time,preempt_vmrr.end))
            susp_time = preempt_vmrr.post_rrs[-1].end - preempt_vmrr.post_rrs[0].start
            durs[lease_to_preempt] += susp_time
                                    
        else:                                
            cancelled.append(lease_to_preempt.id)

            if preempt_vmrr != None:
                durs[lease_to_preempt] = lease_to_preempt.get_remaining_duration_at(preempt_vmrr.start)             
                
                lease_to_preempt.remove_vmrr(preempt_vmrr)
                self.vm_scheduler.cancel_vm(preempt_vmrr)

                # Cancel future VMs
                for after_vmrr in after_vmrrs:
                    lease_to_preempt.remove_vmrr(after_vmrr)
                    self.vm_scheduler.cancel_vm(after_vmrr)                   
                after_vmrrs=[]
                if preempt_vmrr.state == ResourceReservation.STATE_ACTIVE:
                    last_vmrr = lease_to_preempt.get_last_vmrr()
                    if last_vmrr != None and last_vmrr.is_suspending():
                        new_state[lease_to_preempt] = Lease.STATE_SUSPENDED_SCHEDULED
                    else:
                        # The VMRR we're preempting is the active one
                        new_state[lease_to_preempt] = Lease.STATE_READY
            else:
                durs[lease_to_preempt] = lease_to_preempt.get_remaining_duration_at(preemption_start_time)             
                lease_state = lease_to_preempt.get_state()
                if lease_state == Lease.STATE_ACTIVE:
                    # Don't do anything. The lease is active, but not in the VMs
                    # we're preempting.
                    new_state[lease_to_preempt] = None
                elif lease_state in (Lease.STATE_SUSPENDING, Lease.STATE_SUSPENDED_PENDING, Lease.STATE_SUSPENDED_SCHEDULED):
                    # Don't do anything. The lease is suspending or suspended. 
                    # Must stay that way.
                    new_state[lease_to_preempt] = None
                elif lease_state != Lease.STATE_READY:
                    new_state[lease_to_preempt] = Lease.STATE_READY   
                    
        # Cancel future VMs
        for after_vmrr in after_vmrrs:
            lease_to_preempt.remove_vmrr(after_vmrr)
            self.vm_scheduler.cancel_vm(after_vmrr)                   

        dur = durs[lease_to_preempt]
        node_ids = self.slottable.nodes.keys()
        earliest = {}
   
        try:
            if lease_to_preempt.id in cancelled:
                last_vmrr = lease_to_preempt.get_last_vmrr()
                if last_vmrr != None and last_vmrr.is_suspending():
                    override_state = Lease.STATE_SUSPENDED_PENDING
                else:
                    override_state = None
                for node in node_ids:
                    earliest[node] = EarliestStartingTime(preemption_end_time, EarliestStartingTime.EARLIEST_NOPREPARATION)                
                (new_vmrr, preemptions) = self.vm_scheduler.reschedule_deadline(lease_to_preempt, dur, nexttime, earliest, override_state)
            else:
                for node in node_ids:
                    earliest[node] = EarliestStartingTime(preemption_end_time, EarliestStartingTime.EARLIEST_NOPREPARATION)                
                (new_vmrr, preemptions) = self.vm_scheduler.reschedule_deadline(lease_to_preempt, dur, nexttime, earliest, override_state = Lease.STATE_SUSPENDED_PENDING)

            # Add VMRR to lease
            lease_to_preempt.append_vmrr(new_vmrr)
            
    
            # Add resource reservations to slottable
            
            # Pre-VM RRs (if any)
            for rr in new_vmrr.pre_rrs:
                self.slottable.add_reservation(rr)
                
            # VM
            self.slottable.add_reservation(new_vmrr)
            
            # Post-VM RRs (if any)
            for rr in new_vmrr.post_rrs:
                self.slottable.add_reservation(rr)                    
        except NotSchedulableException:
            feasible = False

        if not feasible:
            self.logger.debug("Unable to preempt lease %i" % lease_to_preempt.id)
            self.slottable.pop_state()
            raise NotSchedulableException, "Unable to preempt leases to make room for lease."
        else:
            self.logger.debug("Was able to preempt lease %i" % lease_to_preempt.id)
            self.slottable.pop_state(discard = True)

            for l in new_state:
                if new_state[l] != None:
                    l.state_machine.state = new_state[l]

            self.logger.vdebug("Lease %i after preemption:" % lease_to_preempt.id)
            lease_to_preempt.print_contents()                         
Code example #33
File: simulated.py  Project: queba/haizea
 def get_migration_bandwidth(self):
     return get_config().get("imagetransfer-bandwidth")
Code example #34
    def __preempt_lease(self, lease, preemption_time):
        """ Preempts a lease.
        
        This method preempts a lease such that any resources allocated
        to that lease after a given time are freed up. This may require
        scheduling the lease to suspend before that time, or cancelling
        the lease altogether.
        
        Arguments:
        lease -- Lease to schedule.
        preemption_time -- Time at which lease must be preempted
        """

        self.logger.info("Preempting lease #%i..." % (lease.id))
        self.logger.vdebug("Lease before preemption:")
        lease.print_contents()
        vmrr = lease.get_last_vmrr()

        if vmrr.state == ResourceReservation.STATE_SCHEDULED and vmrr.start >= preemption_time:
            self.logger.debug(
                "Lease was set to start in the middle of the preempting lease."
            )
            must_cancel_and_requeue = True
        else:
            susptype = get_config().get("suspension")
            if susptype == constants.SUSPENSION_NONE:
                must_cancel_and_requeue = True
            else:
                can_suspend = self.vm_scheduler.can_suspend_at(
                    lease, preemption_time)
                if not can_suspend:
                    self.logger.debug(
                        "Suspending the lease does not meet scheduling threshold."
                    )
                    must_cancel_and_requeue = True
                else:
                    if lease.numnodes > 1 and susptype == constants.SUSPENSION_SERIAL:
                        self.logger.debug(
                            "Can't suspend lease because only suspension of single-node leases is allowed."
                        )
                        must_cancel_and_requeue = True
                    else:
                        self.logger.debug("Lease can be suspended")
                        must_cancel_and_requeue = False

        if must_cancel_and_requeue:
            self.logger.info("... lease #%i has been cancelled and requeued." %
                             lease.id)
            self.preparation_scheduler.cancel_preparation(lease)
            self.vm_scheduler.cancel_vm(vmrr)
            lease.remove_vmrr(vmrr)
            # TODO: Take into account other states
            if lease.get_state() == Lease.STATE_SUSPENDED_SCHEDULED:
                lease.set_state(Lease.STATE_SUSPENDED_QUEUED)
            else:
                lease.set_state(Lease.STATE_QUEUED)
            self.__enqueue_in_order(lease)
        else:
            self.logger.info("... lease #%i will be suspended at %s." %
                             (lease.id, preemption_time))
            self.vm_scheduler.preempt_vm(vmrr, preemption_time)

        get_persistence().persist_lease(lease)

        self.logger.vdebug("Lease after preemption:")
        lease.print_contents()
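As a compact, hedged restatement of the decision logic above: whether a preempted lease is suspended or cancelled and requeued depends on whether its VMs have started, on the "suspension" configuration, on the scheduling-threshold check, and on the lease size. The helper below is hypothetical; its boolean parameters stand for the config values and checks the method actually performs.

    # Sketch of the suspend-vs-requeue decision in __preempt_lease (illustration only).
    def must_cancel_and_requeue(starts_after_preemption, suspension_disabled,
                                can_suspend, serial_suspension_only, numnodes):
        if starts_after_preemption:
            return True     # VMs have not started yet; just requeue the lease
        if suspension_disabled:
            return True     # the "suspension" option is set to none
        if not can_suspend:
            return True     # suspending would not meet the scheduling threshold
        if numnodes > 1 and serial_suspension_only:
            return True     # only single-node leases may be suspended
        return False        # the lease can be suspended at preemption_time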
Code example #35
    def __schedule_lease(self, lease, nexttime):
        """ Schedules a lease.
        
        This method orchestrates the preparation and VM scheduler to
        schedule a lease.
        
        Arguments:
        lease -- Lease to schedule.
        nexttime -- The next time at which the scheduler can allocate resources.
        """

        lease_state = lease.get_state()
        migration = get_config().get("migration")

        # Determine earliest start time in each node
        if lease_state == Lease.STATE_PENDING or lease_state == Lease.STATE_QUEUED:
            # This lease might require preparation. Ask the preparation
            # scheduler for the earliest starting time.
            earliest = self.preparation_scheduler.find_earliest_starting_times(
                lease, nexttime)
        elif lease_state == Lease.STATE_SUSPENDED_PENDING or lease_state == Lease.STATE_SUSPENDED_QUEUED:
            # This lease may have to be migrated.
            # We have to ask both the preparation scheduler and the VM
            # scheduler what would be the earliest possible starting time
            # on each node, assuming we have to transfer files between
            # nodes.

            node_ids = self.slottable.nodes.keys()
            earliest = {}
            if migration == constants.MIGRATE_NO:
                # If migration is disabled, the earliest starting time
                # is simply nexttime.
                for node in node_ids:
                    earliest[node] = EarliestStartingTime(
                        nexttime, EarliestStartingTime.EARLIEST_NOPREPARATION)
            else:
                # Otherwise, we ask the preparation scheduler and the VM
                # scheduler how long it would take them to migrate the
                # lease state.
                prep_migr_time = self.preparation_scheduler.estimate_migration_time(
                    lease)
                vm_migr_time = self.vm_scheduler.estimate_migration_time(lease)
                for node in node_ids:
                    earliest[node] = EarliestStartingTime(
                        nexttime + prep_migr_time + vm_migr_time,
                        EarliestStartingTime.EARLIEST_MIGRATION)
        else:
            raise InconsistentLeaseStateError(
                lease, doing="scheduling a best-effort lease")

        # Now, we give the lease to the VM scheduler, along with the
        # earliest possible starting times. If the VM scheduler can
        # schedule VMs for this lease, it will return a resource reservation
        # that we can add to the slot table, along with a list of
        # leases that have to be preempted.
        # If the VM scheduler can't schedule the VMs, it will throw an
        # exception (we don't catch it here, and it is just thrown up
        # to the calling method).
        (vmrr,
         preemptions) = self.vm_scheduler.schedule(lease, nexttime, earliest)
        if vmrr == None and preemptions == None:
            return

        # If scheduling the lease involves preempting other leases,
        # go ahead and preempt them.
        if len(preemptions) > 0:
            self.logger.info(
                "Must preempt leases %s to make room for lease #%i" %
                ([l.id for l in preemptions], lease.id))
            for l in preemptions:
                self.__preempt_lease(l, preemption_time=vmrr.start)
                l.strv_counter = l.strv_counter + 1

        # Schedule lease preparation
        is_ready = False
        preparation_rrs = []
        if lease_state in (Lease.STATE_SUSPENDED_PENDING,
                           Lease.STATE_SUSPENDED_QUEUED
                           ) and migration != constants.MIGRATE_NO:
            # The lease might require migration
            migr_rrs = self.preparation_scheduler.schedule_migration(
                lease, vmrr, nexttime)
            if len(migr_rrs) > 0:
                end_migr = migr_rrs[-1].end
            else:
                end_migr = nexttime
            migr_rrs += self.vm_scheduler.schedule_migration(
                lease, vmrr, end_migr)
            migr_rrs.reverse()
            for migr_rr in migr_rrs:
                vmrr.pre_rrs.insert(0, migr_rr)
            if len(migr_rrs) == 0:
                is_ready = True
        elif lease_state in (Lease.STATE_SUSPENDED_PENDING,
                             Lease.STATE_SUSPENDED_QUEUED
                             ) and migration == constants.MIGRATE_NO:
            # No migration means the lease is ready
            is_ready = True
        elif lease_state in (Lease.STATE_PENDING, Lease.STATE_QUEUED):
            # The lease might require initial preparation
            preparation_rrs, is_ready = self.preparation_scheduler.schedule(
                lease, vmrr, earliest)

        # At this point, the lease is feasible.
        # Commit changes by adding RRs to lease and to slot table

        # Add preparation RRs (if any) to lease
        for rr in preparation_rrs:
            lease.append_preparationrr(rr)

        # Add VMRR to lease
        lease.append_vmrr(vmrr)

        # Add resource reservations to slottable

        # Preparation RRs (if any)
        for rr in preparation_rrs:
            self.slottable.add_reservation(rr)

        # Pre-VM RRs (if any)
        for rr in vmrr.pre_rrs:
            self.slottable.add_reservation(rr)

        # VM
        self.slottable.add_reservation(vmrr)

        # Post-VM RRs (if any)
        for rr in vmrr.post_rrs:
            self.slottable.add_reservation(rr)

        # Change lease state
        if lease_state == Lease.STATE_PENDING or lease_state == Lease.STATE_QUEUED:
            lease.set_state(Lease.STATE_SCHEDULED)
            if is_ready:
                lease.set_state(Lease.STATE_READY)
        elif lease_state == Lease.STATE_SUSPENDED_PENDING or lease_state == Lease.STATE_SUSPENDED_QUEUED:
            lease.set_state(Lease.STATE_SUSPENDED_SCHEDULED)

        get_persistence().persist_lease(lease)

        lease.print_contents()
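The reverse()/insert(0, ...) sequence above is plain list bookkeeping: the migration RRs are produced in chronological order, and inserting the reversed list one element at a time at position 0 leaves them in chronological order at the front of vmrr.pre_rrs. A standalone illustration:

    # Standalone illustration of the insertion order used above
    # (plain strings instead of resource reservation objects).
    pre_rrs = ["boot-prep"]              # whatever pre-VM RRs already exist
    migr_rrs = ["migr-1", "migr-2"]      # generated in chronological order

    migr_rrs.reverse()                   # ["migr-2", "migr-1"]
    for rr in migr_rrs:
        pre_rrs.insert(0, rr)            # each insert pushes to the front

    assert pre_rrs == ["migr-1", "migr-2", "boot-prep"]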
Code example #36
File: lease_scheduler.py Project: queba/haizea
    def __schedule_lease(self, lease, nexttime):            
        """ Schedules a lease.
        
        This method orchestrates the preparation and VM scheduler to
        schedule a lease.
        
        Arguments:
        lease -- Lease to schedule.
        nexttime -- The next time at which the scheduler can allocate resources.
        """       
                
        lease_state = lease.get_state()
        migration = get_config().get("migration")
        
        # Determine earliest start time in each node
        if lease_state == Lease.STATE_PENDING or lease_state == Lease.STATE_QUEUED:
            # This lease might require preparation. Ask the preparation
            # scheduler for the earliest starting time.
            earliest = self.preparation_scheduler.find_earliest_starting_times(lease, nexttime)
        elif lease_state == Lease.STATE_SUSPENDED_PENDING or lease_state == Lease.STATE_SUSPENDED_QUEUED:
            # This lease may have to be migrated.
            # We have to ask both the preparation scheduler and the VM
            # scheduler what would be the earliest possible starting time
            # on each node, assuming we have to transfer files between
            # nodes.

            node_ids = self.slottable.nodes.keys()
            earliest = {}
            if migration == constants.MIGRATE_NO:
                # If migration is disabled, the earliest starting time
                # is simply nexttime.
                for node in node_ids:
                    earliest[node] = EarliestStartingTime(nexttime, EarliestStartingTime.EARLIEST_NOPREPARATION)
            else:
                # Otherwise, we ask the preparation scheduler and the VM
                # scheduler how long it would take them to migrate the
                # lease state.
                prep_migr_time = self.preparation_scheduler.estimate_migration_time(lease)            
                vm_migr_time = self.vm_scheduler.estimate_migration_time(lease)
                for node in node_ids:
                    earliest[node] = EarliestStartingTime(nexttime + prep_migr_time + vm_migr_time, EarliestStartingTime.EARLIEST_MIGRATION)
        else:
            raise InconsistentLeaseStateError(lease, doing = "scheduling a best-effort lease")

        # Now, we give the lease to the VM scheduler, along with the
        # earliest possible starting times. If the VM scheduler can
        # schedule VMs for this lease, it will return a resource reservation
        # that we can add to the slot table, along with a list of
        # leases that have to be preempted.
        # If the VM scheduler can't schedule the VMs, it will throw an
        # exception (we don't catch it here, and it is just thrown up
        # to the calling method).
        (vmrr, preemptions) = self.vm_scheduler.schedule(lease, lease.duration.get_remaining_duration(), nexttime, earliest)
        
        ## BEGIN NOT-FIT-FOR-PRODUCTION CODE
        ## Pricing shouldn't live here. Instead, it should happen before a lease is accepted
        ## It is being done here in the interest of developing a first prototype
        ## that incorporates pricing in simulations (but not interactively yet)
        
        # Call pricing policy
        lease_price = get_policy().price_lease(lease, preemptions)
        
        # Determine whether to accept price or not (this in particular
        # should happen in the lease admission step)
        if lease.extras.has_key("simul_userrate"):
            user_rate = float(lease.extras["simul_userrate"])
            if get_config().get("policy.pricing") != "free":
                user_price = get_policy().pricing.get_base_price(lease, user_rate)
                # We want to record the rate at which the lease was priced
                lease.extras["rate"] = get_policy().pricing.rate
                if lease_price > user_price:
                    lease.price = -1
                    lease.extras["rejected_price"] = lease_price
                    raise NotSchedulableException, "Lease priced at %.2f. User is only willing to pay %.2f" % (lease_price, user_price)
        
        lease.price = lease_price
        ## END NOT-FIT-FOR-PRODUCTION CODE
                                
        # Schedule lease preparation
        is_ready = False
        preparation_rrs = []
        if lease_state in (Lease.STATE_SUSPENDED_PENDING, Lease.STATE_SUSPENDED_QUEUED) and migration != constants.MIGRATE_NO:
            # The lease might require migration
            migr_rrs = self.preparation_scheduler.schedule_migration(lease, vmrr, nexttime)
            if len(migr_rrs) > 0:
                end_migr = migr_rrs[-1].end
            else:
                end_migr = nexttime
            migr_rrs += self.vm_scheduler.schedule_migration(lease, vmrr, end_migr)
            migr_rrs.reverse()
            for migr_rr in migr_rrs:
                vmrr.pre_rrs.insert(0, migr_rr)
            if len(migr_rrs) == 0:
                is_ready = True
        elif lease_state in (Lease.STATE_SUSPENDED_PENDING, Lease.STATE_SUSPENDED_QUEUED) and migration == constants.MIGRATE_NO:
            # No migration means the lease is ready
            is_ready = True
        elif lease_state in (Lease.STATE_PENDING, Lease.STATE_QUEUED):
            # The lease might require initial preparation
            preparation_rrs, is_ready = self.preparation_scheduler.schedule(lease, vmrr, earliest, nexttime)

        # If scheduling the lease involves preempting other leases,
        # go ahead and preempt them.
        if len(preemptions) > 0:
            self.logger.info("Must preempt leases %s to make room for lease #%i" % ([l.id for l in preemptions], lease.id))
            for l in preemptions:
                self.__preempt_lease(l, preemption_time=vmrr.start)

        # At this point, the lease is feasible.
        # Commit changes by adding RRs to lease and to slot table
        
        # Add preparation RRs (if any) to lease
        for rr in preparation_rrs:
            lease.append_preparationrr(rr)
        
        # Add VMRR to lease
        lease.append_vmrr(vmrr)
        

        # Add resource reservations to slottable
        
        # Preparation RRs (if any)
        for rr in preparation_rrs:
            self.slottable.add_reservation(rr)
        
        # Pre-VM RRs (if any)
        for rr in vmrr.pre_rrs:
            self.slottable.add_reservation(rr)
            
        # VM
        self.slottable.add_reservation(vmrr)
        
        # Post-VM RRs (if any)
        for rr in vmrr.post_rrs:
            self.slottable.add_reservation(rr)
          
        # Change lease state
        if lease_state == Lease.STATE_PENDING or lease_state == Lease.STATE_QUEUED:
            lease.set_state(Lease.STATE_SCHEDULED)
            if is_ready:
                lease.set_state(Lease.STATE_READY)
        elif lease_state == Lease.STATE_SUSPENDED_PENDING or lease_state == Lease.STATE_SUSPENDED_QUEUED:
            lease.set_state(Lease.STATE_SUSPENDED_SCHEDULED)

        get_persistence().persist_lease(lease)

        lease.print_contents()
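The pricing block above only calls into the policy object (price_lease, get_base_price). As a hedged illustration of what a rate-based base price could look like, here is a minimal sketch; the formula (rate times node count times hours) and the helper name are assumptions for the example, not the pricing code the policy actually runs.

    # Hedged sketch: charge a per-node-hour rate for the requested duration.
    def base_price(numnodes, duration_seconds, rate_per_node_hour):
        hours = duration_seconds / 3600.0
        return numnodes * hours * rate_per_node_hour

    # e.g. a 4-node lease for 2 hours at a rate of 0.10 per node-hour
    # would be priced at 4 * 2 * 0.10 = 0.80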
Code example #37
File: lease_scheduler.py Project: Hamdy/haizea
    def __schedule_lease(self, lease, nexttime):            
        """ Schedules a lease.
        
        This method orchestrates the preparation and VM scheduler to
        schedule a lease.
        
        Arguments:
        lease -- Lease to schedule.
        nexttime -- The next time at which the scheduler can allocate resources.
        """       
                
        lease_state = lease.get_state()
        migration = get_config().get("migration")
        
        # Determine earliest start time in each node
        if lease_state == Lease.STATE_PENDING or lease_state == Lease.STATE_QUEUED:
            # This lease might require preparation. Ask the preparation
            # scheduler for the earliest starting time.
            earliest = self.preparation_scheduler.find_earliest_starting_times(lease, nexttime)
        elif lease_state == Lease.STATE_SUSPENDED_PENDING or lease_state == Lease.STATE_SUSPENDED_QUEUED:
            # This lease may have to be migrated.
            # We have to ask both the preparation scheduler and the VM
            # scheduler what would be the earliest possible starting time
            # on each node, assuming we have to transfer files between
            # nodes.

            node_ids = self.slottable.nodes.keys()
            earliest = {}
            if migration == constants.MIGRATE_NO:
                # If migration is disabled, the earliest starting time
                # is simply nexttime.
                for node in node_ids:
                    earliest[node] = EarliestStartingTime(nexttime, EarliestStartingTime.EARLIEST_NOPREPARATION)
            else:
                # Otherwise, we ask the preparation scheduler and the VM
                # scheduler how long it would take them to migrate the
                # lease state.
                prep_migr_time = self.preparation_scheduler.estimate_migration_time(lease)            
                vm_migr_time = self.vm_scheduler.estimate_migration_time(lease)
                for node in node_ids:
                    earliest[node] = EarliestStartingTime(nexttime + prep_migr_time + vm_migr_time, EarliestStartingTime.EARLIEST_MIGRATION)
        else:
            raise InconsistentLeaseStateError(lease, doing = "scheduling a best-effort lease")

        # Now, we give the lease to the VM scheduler, along with the
        # earliest possible starting times. If the VM scheduler can
        # schedule VMs for this lease, it will return a resource reservation
        # that we can add to the slot table, along with a list of
        # leases that have to be preempted.
        # If the VM scheduler can't schedule the VMs, it will throw an
        # exception (we don't catch it here, and it is just thrown up
        # to the calling method).
        (vmrr, preemptions) = self.vm_scheduler.schedule(lease, nexttime, earliest)
                                
        # If scheduling the lease involves preempting other leases,
        # go ahead and preempt them.
        if len(preemptions) > 0:
            self.logger.info("Must preempt leases %s to make room for lease #%i" % ([l.id for l in preemptions], lease.id))
            for l in preemptions:
                self.__preempt_lease(l, preemption_time=vmrr.start)
                
        # Schedule lease preparation
        is_ready = False
        preparation_rrs = []
        if lease_state in (Lease.STATE_SUSPENDED_PENDING, Lease.STATE_SUSPENDED_QUEUED) and migration != constants.MIGRATE_NO:
            # The lease might require migration
            migr_rrs = self.preparation_scheduler.schedule_migration(lease, vmrr, nexttime)
            if len(migr_rrs) > 0:
                end_migr = migr_rrs[-1].end
            else:
                end_migr = nexttime
            migr_rrs += self.vm_scheduler.schedule_migration(lease, vmrr, end_migr)
            migr_rrs.reverse()
            for migr_rr in migr_rrs:
                vmrr.pre_rrs.insert(0, migr_rr)
            if len(migr_rrs) == 0:
                is_ready = True
        elif lease_state in (Lease.STATE_SUSPENDED_PENDING, Lease.STATE_SUSPENDED_QUEUED) and migration == constants.MIGRATE_NO:
            # No migration means the lease is ready
            is_ready = True
        elif lease_state in (Lease.STATE_PENDING, Lease.STATE_QUEUED):
            # The lease might require initial preparation
            preparation_rrs, is_ready = self.preparation_scheduler.schedule(lease, vmrr, earliest)

        # At this point, the lease is feasible.
        # Commit changes by adding RRs to lease and to slot table
        
        # Add preparation RRs (if any) to lease
        for rr in preparation_rrs:
            lease.append_preparationrr(rr)
        
        # Add VMRR to lease
        lease.append_vmrr(vmrr)
        

        # Add resource reservations to slottable
        
        # Preparation RRs (if any)
        for rr in preparation_rrs:
            self.slottable.add_reservation(rr)
        
        # Pre-VM RRs (if any)
        for rr in vmrr.pre_rrs:
            self.slottable.add_reservation(rr)
            
        # VM
        self.slottable.add_reservation(vmrr)
        
        # Post-VM RRs (if any)
        for rr in vmrr.post_rrs:
            self.slottable.add_reservation(rr)
          
        # Change lease state
        if lease_state == Lease.STATE_PENDING or lease_state == Lease.STATE_QUEUED:
            lease.set_state(Lease.STATE_SCHEDULED)
            if is_ready:
                lease.set_state(Lease.STATE_READY)
        elif lease_state == Lease.STATE_SUSPENDED_PENDING or lease_state == Lease.STATE_SUSPENDED_QUEUED:
            lease.set_state(Lease.STATE_SUSPENDED_SCHEDULED)

        get_persistence().persist_lease(lease)

        lease.print_contents()
Code example #38
    def _add_diskimages(self, pnode_id, diskimage_id, diskimage_size, vnodes,
                        timeout):
        self.logger.debug("Adding image for leases=%s in node_id=%i" %
                          (vnodes, pnode_id))

        config = get_config()
        reusealg = config.get("diskimage-reuse")
        if reusealg == constants.REUSE_IMAGECACHES:
            maxcachesize = config.get("diskimage-cache-size")
        else:
            maxcachesize = None

        pnode = self.resourcepool.get_node(pnode_id)

        if reusealg == constants.REUSE_NONE:
            for (lease_id, vnode) in vnodes:
                self.resourcepool.add_diskimage(pnode_id, diskimage_id,
                                                diskimage_size, lease_id,
                                                vnode)
        elif reusealg == constants.REUSE_IMAGECACHES:
            # Sometimes we might find that the image is already deployed
            # (although unused). In that case, don't add another copy to
            # the pool. Just "reactivate" it.
            if pnode.exists_reusable_image(diskimage_id):
                for (lease_id, vnode) in vnodes:
                    pnode.add_mapping_to_existing_reusable_image(
                        diskimage_id, lease_id, vnode, timeout)
            else:
                if maxcachesize == constants.CACHESIZE_UNLIMITED:
                    can_add_to_cache = True
                else:
                    # We may have to remove images from the cache
                    cachesize = pnode.get_reusable_images_size()
                    reqsize = cachesize + diskimage_size
                    if reqsize > maxcachesize:
                        # Have to shrink cache
                        desiredsize = maxcachesize - diskimage_size
                        self.logger.debug(
                            "Adding the image would make the size of pool in node %i = %iMB. Will try to bring it down to %i"
                            % (pnode_id, reqsize, desiredsize))
                        pnode.print_files()
                        success = pnode.purge_downto(maxcachesize)
                        if not success:
                            can_add_to_cache = False
                        else:
                            can_add_to_cache = True
                    else:
                        can_add_to_cache = True

                if can_add_to_cache:
                    self.resourcepool.add_reusable_image(
                        pnode_id, diskimage_id, diskimage_size, vnodes,
                        timeout)
                else:
                    # This just means we couldn't add the image
                    # to the pool. We will have to make do with just adding the tainted images.
                    self.logger.debug(
                        "Unable to add to pool. Must create individual disk images directly instead."
                    )

            # Besides adding the image to the cache, we need to create a separate image for
            # this specific lease
            for (lease_id, vnode) in vnodes:
                self.resourcepool.add_diskimage(pnode_id, diskimage_id,
                                                diskimage_size, lease_id,
                                                vnode)

        pnode.print_files()
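A hedged sketch of the cache-capacity arithmetic performed above: a new image of diskimage_size MB fits directly only if the current pool size plus the image stays under the cache limit; otherwise the pool must first be purged down to leave room for it. Names and the use of None for an unlimited cache are illustrative.

    # Illustrative arithmetic for the image-cache check (sizes in MB).
    # None stands in for constants.CACHESIZE_UNLIMITED.
    def cache_target_after_adding(cachesize, diskimage_size, maxcachesize):
        if maxcachesize is None:
            return None                          # unlimited cache, nothing to purge
        reqsize = cachesize + diskimage_size     # pool size if the image were added now
        if reqsize <= maxcachesize:
            return None                          # the image already fits
        return maxcachesize - diskimage_size     # shrink to this to make room

    # e.g. a 3000 MB pool, a 1024 MB image and a 3500 MB limit:
    # the pool must first be purged down to 3500 - 1024 = 2476 MB.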