def notify_event(self, lease, event):
    """Notify the scheduler of an asynchronous event affecting a lease.

    This is the entry point of asynchronous events into the scheduler.
    Currently the only supported event is the premature end of a VM
    (i.e., before its scheduled end); more events will appear once
    Haizea is integrated with OpenNebula 1.4.

    Arguments:
    lease -- Lease the event refers to
    event -- Event type
    """
    time = get_clock().get_time()
    if event != constants.EVENT_END_VM:
        return
    vmrr = lease.get_last_vmrr()
    self._handle_end_rr(vmrr)
    # TODO: Exception handling
    self.vm_scheduler._handle_unscheduled_end_vm(lease, vmrr)
    self._handle_end_lease(lease)
    get_persistence().persist_lease(lease)

    # The premature end may have freed resources, so reevaluate the
    # schedule: leases scheduled in the future might now start earlier.
    nexttime = get_clock().get_next_schedulable_time()
    self.reevaluate_schedule(nexttime)
Beispiel #2
0
 def notify_event(self, lease, event):
     """Notifies an event that affects a lease.

     Entry point of asynchronous events into the scheduler. The only
     event currently supported is the premature end of a VM (i.e.,
     before its scheduled end).

     Arguments:
     lease -- Lease the event refers to
     event -- Event type
     """
     time = get_clock().get_time()
     if event == constants.EVENT_END_VM:
         last_vmrr = lease.get_last_vmrr()
         self._handle_end_rr(last_vmrr)
         # TODO: Exception handling
         self.vm_scheduler._handle_unscheduled_end_vm(lease, last_vmrr)
         self._handle_end_lease(lease)
         get_persistence().persist_lease(lease)

         # The early end may have freed resources: see whether any
         # future lease can now be pulled forward to start earlier.
         self.reevaluate_schedule(get_clock().get_next_schedulable_time())
Beispiel #3
0
    def at_timestep(self, lease_scheduler):
        """See AccountingProbe.at_timestep

        Samples the completion counter (self.cntr) and, when a
        measurement interval has elapsed, records it as throughput.
        """
        now = get_clock().get_time()
        # diff1/diff2 presumably signal that their measurement interval
        # has elapsed -- TODO(review): confirm against their definitions.
        hour1 = self.diff1(now)
        # NOTE(review): debug print (Python 2 statement syntax).
        print "NOW cntr =", self.cntr
        if (hour1):
            print "Calculated"
            self.throughput1 = self.cntr

        now = get_clock().get_time()
        hour2 = self.diff2(now)
        print "NOW cntr =", self.cntr
        if (hour2):
            print "Calculated"
            self.throughput2 = self.cntr
    def at_timestep(self, lease_scheduler):
        """See AccountingProbe.at_timestep"""  
	now = get_clock().get_time()
	hour1 = self.diff1 (now)
	print "NOW cntr =", self.cntr
	if ( hour1 ):
		print "Calculated"
		self.throughput1 =self.cntr 

	now = get_clock().get_time()
	hour2 = self.diff2 (now)
	print "NOW cntr =", self.cntr 
	if ( hour2 ):
		print "Calculated"
		self.throughput2 =self.cntr 
Beispiel #5
0
    def _handle_end_lease(self, l):
        """Performs actions that have to be done each time a lease ends.
        
        Marks the lease as done, records its actual duration and end
        time, optionally runs sanity checks, and moves the lease to the
        completed-leases table.
        
        Arguments:
        l -- Lease that has ended
        """
        l.set_state(Lease.STATE_DONE)
        l.duration.actual = l.duration.accumulated
        l.end = round_datetime(get_clock().get_time())
        if get_config().get("sanity-check"):
            # Fixed: identity comparisons with None ("!= None") replaced
            # by the idiomatic "is not None" / "is None".
            if l.duration.known is not None and l.duration.known < l.duration.requested:
                duration = l.duration.known
            else:
                duration = l.duration.requested

            # The lease must have run for exactly its expected duration.
            assert duration == l.duration.actual

            # Exact-start leases may not begin before their requested
            # start; deadline leases must have finished by their deadline.
            if l.start.is_requested_exact():
                assert l.vm_rrs[0].start >= l.start.requested
            if l.deadline is not None:
                assert l.end <= l.deadline

        self.preparation_scheduler.cleanup(l)
        self.completed_leases.add(l)
        self.leases.remove(l)
        self.accounting.at_lease_done(l)
    def stop(self):
        """Stop collecting data.

        Freezes every counter at its final value, converts each counter
        to its configured average form, records an Experiment row in the
        database, and lets every probe finalize its accounting.
        """
        time = get_clock().get_time()

        counters = self.__data.counters

        # Freeze every counter at its last recorded value.
        for counter_id in counters:
            counters[counter_id]
            self.append_to_counter(counter_id, counters[counter_id][-1][2])

        # Post-process each counter according to its average type.
        for counter_id in counters:
            normalized = self.__normalize_times(counters[counter_id])
            avgtype = self.__data.counter_avg_type[counter_id]
            if avgtype == AccountingDataCollection.AVERAGE_NONE:
                counters[counter_id] = self.__add_no_average(normalized)
            elif avgtype == AccountingDataCollection.AVERAGE_NORMAL:
                counters[counter_id] = self.__add_average(normalized)
            elif avgtype == AccountingDataCollection.AVERAGE_TIMEWEIGHTED:
                counters[counter_id] = self.__add_timeweighted_average(normalized)

        # Record this run; the first commit assigns e.id, which the
        # description depends on, hence the second commit.
        e = Experiment()
        self.__db.add(e)
        self.__db.commit()
        e.description = "Experiment %s" % str(e.id)
        self.__db.commit()

        for probe in self.__probes:
            probe.finalize_accounting(self.__db)
Beispiel #7
0
 def at_timestep(self, lease_scheduler):
     """See AccountingProbe.at_timestep"""
     # Total utilization right now, ignoring the unassigned (None) bucket.
     util = lease_scheduler.vm_scheduler.get_utilization(get_clock().get_time())
     current = 0
     for key, value in util.items():
         if key != None:
             current += value
     self.utilization = current
     self.total_utilization += self.utilization
     self.count += 1
     self.util_list.append(self.utilization)
    def stop(self):
        """Stop collecting data.

        Freezes every counter at its final value, converts each counter
        to its configured average form, records an Experiment row in the
        database, and lets every probe finalize its accounting.
        """
        # NOTE(review): `time` is unused here; get_time() appears
        # side-effect free -- confirm before removing.
        time = get_clock().get_time()

        # Stop the counters
        for counter_id in self.__data.counters:
            self.append_to_counter(counter_id, self.__data.counters[counter_id][-1][2])

        # Add the averages
        for counter_id in self.__data.counters:
            l = self.__normalize_times(self.__data.counters[counter_id])
            avgtype = self.__data.counter_avg_type[counter_id]
            if avgtype == AccountingDataCollection.AVERAGE_NONE:
                self.__data.counters[counter_id] = self.__add_no_average(l)
            elif avgtype == AccountingDataCollection.AVERAGE_NORMAL:
                self.__data.counters[counter_id] = self.__add_average(l)
            elif avgtype == AccountingDataCollection.AVERAGE_TIMEWEIGHTED:
                self.__data.counters[counter_id] = self.__add_timeweighted_average(l)

        # First commit assigns e.id; the description depends on it, so
        # a second commit is needed.
        e = Experiment()
        self.__db.add(e)
        self.__db.commit()
        e.description = "Experiment %s" % str(e.id)
        self.__db.commit()

        for probe in self.__probes:
            probe.finalize_accounting(self.__db)
Beispiel #9
0
 def makeRecord(self,
                name,
                lvl,
                fn,
                lno,
                msg,
                args,
                exc_info,
                func=None,
                extra=None):
     """Create a LogRecord carrying the Haizea clock time.

     Overrides logging.Logger.makeRecord so every record gets a
     "haizeatime" attribute (via the "extra" mechanism, or embedded in
     the logger name on logging libraries too old to support "extra").
     """
     try:
         haizeatime = get_clock().get_time()
     except Exception:
         # This is a kludge. Basically, calling get_clock will
         # fail if Manager is not yet fully constructed (since it's
         # a singleton). The more correct solution is to more cleanly
         # separate the initialization code in the Manager from the
         # initialization that actually involves interacting with
         # other components (which may want to use the logger)
         # (Fixed: narrowed from a bare "except:" so KeyboardInterrupt
         # and SystemExit are no longer swallowed.)
         haizeatime = "                      "
     extra = {"haizeatime": haizeatime}
     # Fixed: compare the full (major, minor) tuple; the original
     # "sys.version_info[1] <= 4" looked only at the minor version and
     # misfired on Python 3.0-3.4. logging before Python 2.5 does not
     # accept the func/extra parameters.
     if sys.version_info[:2] <= (2, 4):
         name = "[%s] %s" % (haizeatime, name)
         return logging.Logger.makeRecord(self, name, lvl, fn, lno, msg,
                                          args, exc_info)
     else:
         return logging.Logger.makeRecord(self, name, lvl, fn, lno, msg,
                                          args, exc_info, func, extra)
Beispiel #10
0
    def cancel_lease(self, lease):
        """Cancels a lease.
        
        Arguments:
        lease -- Lease to cancel
        """
        # Current time; used to look up the lease's active reservation.
        time = get_clock().get_time()
        
        self.logger.info("Cancelling lease %i..." % lease.id)
            
        lease_state = lease.get_state()
        
        if lease_state == Lease.STATE_PENDING:
            # If a lease is pending, we just need to change its state and
            # remove it from the lease table. Since this is done at the
            # end of this method, we do nothing here.
            pass

        elif lease_state == Lease.STATE_ACTIVE:
            # If a lease is active, that means we have to shut down its VMs to cancel it.
            self.logger.info("Lease %i is active. Stopping active reservation..." % lease.id)
            # NOTE(review): assumes at least one active VM reservation
            # exists -- confirm get_active_vmrrs(time) cannot return [].
            vmrr = lease.get_active_vmrrs(time)[0]
            self._handle_end_rr(vmrr)
            self.vm_scheduler._handle_unscheduled_end_vm(lease, vmrr)
            
            # Force machines to shut down
            try:
                self.vm_scheduler.resourcepool.stop_vms(lease, vmrr)
            except EnactmentError, exc:
                self.logger.error("Enactment error when shutting down VMs.")
                # Right now, this is a non-recoverable error, so we just
                # propagate it upwards.
                # In the future, it may be possible to react to these
                # kind of errors.
                raise            
Beispiel #11
0
 def makeRecord(self, name, lvl, fn, lno, msg, args, exc_info, func=None, extra=None):
     """Build a LogRecord that carries the Haizea clock time in "extra"."""
     haizeatime = get_clock().get_time()
     extra = { "haizeatime" : haizeatime}
     if sys.version_info[1] <= 4:
         # Old logging API: no func/extra support, so embed the time in
         # the logger name instead.
         name = "[%s] %s" % (haizeatime, name)
         return logging.Logger.makeRecord(self, name, lvl, fn, lno, msg, args, exc_info)
     return logging.Logger.makeRecord(self, name, lvl, fn, lno, msg, args, exc_info, func, extra)
 def get_accumulated_requests(self):
     # When reading from a trace file there are no "accumulated
     # requests": we simply hand over every trace entry whose submit
     # time has already passed (per the resource manager's clock) and
     # keep the rest pending.
     time = get_clock().get_time()
     due = []
     pending = []
     for request in self.requests:
         if request.submit_time <= time:
             due.append(request)
         else:
             pending.append(request)
     self.requests = pending
     return due
 def at_lease_done(self, lease):
     """See AccountingProbe.at_lease_done"""
     now = get_clock().get_time()
     # Time from the lease's actual start to its completion.
     self.IMTT = now - lease.get_start_actual()
     if lease.get_type() != Lease.IMMEDIATE:
         return
     if lease.get_state() == Lease.STATE_REJECTED:
         self.accounting.incr_counter(IMProbe.COUNTER_REJECTED, lease.id)
     else:
         self.AVG_IM_TT += self.IMTT  # accumulate immediate-lease turnaround
 def get_accumulated_requests(self):
     """Return trace requests whose submit time has passed, removing
     them from the pending list."""
     # When reading from a trace file, there are no
     # "accumulated requests". Rather, we just take whatever
     # requests are in the trace up to the current time
     # reported by the resource manager
     time = get_clock().get_time()
     nowreq = [r for r in self.requests if r.submit_time <= time]
     self.requests = [r for r in self.requests if r.submit_time > time]
     return nowreq
 def at_lease_done(self, lease):
     """See AccountingProbe.at_lease_done"""
     now = get_clock().get_time()
     # Time from the lease's actual start to its completion.
     self.ARTT =  now - lease.get_start_actual()
     if lease.get_type() == Lease.ADVANCE_RESERVATION:
         if lease.get_state() == Lease.STATE_REJECTED:
             self.accounting.incr_counter(ARProbe.COUNTER_REJECTED, lease.id)
         else:
             self.AVG_AR_TT += self.ARTT  # Accumulating AR turnaround time
Beispiel #16
0
 def at_lease_done(self, lease):
     """See AccountingProbe.at_lease_done"""
     now = get_clock().get_time()
     # Time from the lease's actual start to its completion.
     self.IMTT = now - lease.get_start_actual()
     if lease.get_type() == Lease.IMMEDIATE:
         if lease.get_state() == Lease.STATE_REJECTED:
             self.accounting.incr_counter(IMProbe.COUNTER_REJECTED,
                                          lease.id)
         else:
             self.AVG_IM_TT += self.IMTT  # Accumulating immediate-lease turnaround
Beispiel #17
0
 def at_lease_done(self, lease):
     """See AccountingProbe.at_lease_done"""
     now = get_clock().get_time()
     # Time from the lease's actual start to its completion.
     self.ARTT = now - lease.get_start_actual()
     if lease.get_type() != Lease.ADVANCE_RESERVATION:
         return
     if lease.get_state() == Lease.STATE_REJECTED:
         self.accounting.incr_counter(ARProbe.COUNTER_REJECTED, lease.id)
     else:
         self.AVG_AR_TT += self.ARTT  # accumulate AR turnaround time
Beispiel #18
0
 def decr_counter(self, counter_id, lease_id = None, amount = 1):
     """Decrement a counter
     
     @param counter_id: Name of the counter
     @type counter_id: C{str}
     @param lease_id: Optionally, the ID of the lease that caused this decrement.
     @type lease_id: C{int}
     @param amount: How much to subtract from the counter.
     @type amount: C{int}
     """
     time = get_clock().get_time()
     # Take the latest recorded value and append it minus "amount".
     current = self.__data.counters[counter_id][-1][2]
     self.append_to_counter(counter_id, current - amount, lease_id)
    def at_lease_done(self, lease):
        """See AccountingProbe.at_lease_done

        Records the total (submit-to-completion) time of a finished
        best-effort lease and accumulates waiting-time statistics.
        """
        # Fixed: the original mixed tabs and spaces (a TabError under
        # Python 3) and placed the docstring after executable code,
        # where it was a no-op string expression. Behavior is unchanged.
        now = get_clock().get_time()
        completion_ = now - lease.submit_time
        self.accounting.incr_counter(TotalTimeprob.COUNTER_BESTEFFORTCOMPLETED, lease.id)
        self.accounting.set_lease_stat(TotalTimeprob.LEASE_STAT_WAITINGTIME, lease.id, completion_)
        self.completion.append(completion_)
        self.total_waiting = self.total_waiting + lease.get_waiting_time().seconds
        self.cntr = self.cntr + 1
 def decr_counter(self, counter_id, lease_id=None, amount=1):
     """Decrement a counter
     
     @param counter_id: Name of the counter
     @type counter_id: C{str}
     @param lease_id: Optionally, the ID of the lease that caused this decrement.
     @type lease_id: C{int}
     @param amount: Value to subtract; defaults to 1, preserving the
     previous behavior. (Added for consistency with the sibling
     decr_counter variant that already supports it.)
     @type amount: C{int}
     """
     time = get_clock().get_time()
     self.append_to_counter(counter_id,
                            self.__data.counters[counter_id][-1][2] - amount,
                            lease_id)
Beispiel #21
0
 def at_timestep(self, lease_scheduler):
     """See AccountingProbe.at_timestep

     Samples the CPU load contributed by the reservations currently in
     the slot table, per physical node, and records each node's total
     CPU capacity the first time it is seen.
     """
     util = {}
     reservations = lease_scheduler.vm_scheduler.slottable.get_reservations_at(get_clock().get_time())
     for r in reservations:
         # Sum the CPU each reservation uses on every physical node.
         for node in r.resources_in_pnode:
             use = r.resources_in_pnode[node].get_by_type(constants.RES_CPU)
             util[node] = use + util.get(node, 0.0)
     self.accounting.append_to_counter(CpuLoadOnPhysicalNodes.COUNTER_CPU_LOAD_ON_PNODE, util)

     # Fixed: variable typo "cpacity" -> "capacity", "not pnode in" ->
     # "pnode not in", and the stray extra indentation of the loop body.
     for pnode, capacity in lease_scheduler.vm_scheduler.slottable.nodes.iteritems():
         if pnode not in self.all_pnode_capacities:
             self.all_pnode_capacities[pnode] = capacity.capacity.get_by_type(constants.RES_CPU)
 def _handle_end_lease(self, l):
     """Performs actions that have to be done each time a lease ends.
     
     Arguments:
     l -- Lease that has ended
     """
     # Mark the lease done and record its real duration and end time.
     l.set_state(Lease.STATE_DONE)
     l.duration.actual = l.duration.accumulated
     l.end = round_datetime(get_clock().get_time())
     # Release preparation resources, then move the lease from the
     # active table to the completed table and notify accounting.
     self.preparation_scheduler.cleanup(l)
     self.completed_leases.add(l)
     self.leases.remove(l)
     self.accounting.at_lease_done(l)
Beispiel #23
0
 def _handle_end_lease(self, l):
     """Performs actions that have to be done each time a lease ends.
     
     Arguments:
     l -- Lease that has ended
     """
     # Mark the lease done and record its real duration and end time.
     l.set_state(Lease.STATE_DONE)
     l.duration.actual = l.duration.accumulated
     l.end = round_datetime(get_clock().get_time())
     # Release preparation resources, then move the lease from the
     # active table to the completed table and notify accounting.
     self.preparation_scheduler.cleanup(l)
     self.completed_leases.add(l)
     self.leases.remove(l)
     self.accounting.at_lease_done(l)
Beispiel #24
0
 def at_lease_done(self, lease):
     """See AccountingProbe.at_lease_done

     Records the total (submit-to-completion) time of a finished lease
     and accumulates waiting-time statistics.
     """
     # Fixed: the original placed the docstring after executable code
     # (where it was a no-op string expression) and split
     # "lease.get_waiting_time().seconds" across a parenthesis break.
     now = get_clock().get_time()
     completion_ = now - lease.submit_time
     self.accounting.incr_counter(TotalTimeprob.COUNTER_BESTEFFORTCOMPLETED,
                                  lease.id)
     self.accounting.set_lease_stat(TotalTimeprob.LEASE_STAT_WAITINGTIME,
                                    lease.id, completion_)
     self.completion.append(completion_)
     self.total_waiting = self.total_waiting + lease.get_waiting_time().seconds
     self.cntr = self.cntr + 1
    def at_lease_done(self, lease):
        """See AccountingProbe.at_lease_done"""
        if lease.get_type() != Lease.BEST_EFFORT:
            return
        wait = lease.get_waiting_time().seconds
        now = get_clock().get_time()
        BBTT = self.turnaround_time(lease, now).seconds
        self.AVG_BE_TT += BBTT
        # Per-lease statistics: preemptions suffered, finish time,
        # waiting time, turnaround, and slowdown.
        for stat, value in ((BEProbe.LEASE_STAT_PREEMPTION, lease.strv_counter),
                            (BEProbe.LEASE_STAT_FINISHED, now),
                            (BEProbe.LEASE_STAT_WAITINGTIME, wait),
                            (BEProbe.LEASE_STAT_TTBE, BBTT),
                            (BEProbe.LEASE_STAT_SLOWDOWN, lease.get_slowdown())):
            self.accounting.set_lease_stat(stat, lease.id, value)
        self.accounting.incr_counter(BEProbe.COUNTER_BESTEFFORTCOMPLETED, lease.id)
Beispiel #26
0
 def makeRecord(self, name, lvl, fn, lno, msg, args, exc_info, func=None, extra=None):
     """Create a LogRecord carrying the Haizea clock time.

     Overrides logging.Logger.makeRecord so every record gets a
     "haizeatime" attribute (via the "extra" mechanism, or embedded in
     the logger name on logging libraries too old to support "extra").
     """
     try:
         haizeatime = get_clock().get_time()
     except Exception:
         # This is a kludge. Basically, calling get_clock will
         # fail if Manager is not yet fully constructed (since it's
         # a singleton). The more correct solution is to more cleanly
         # separate the initialization code in the Manager from the
         # initialization that actually involves interacting with
         # other components (which may want to use the logger)
         # (Fixed: narrowed from a bare "except:" so KeyboardInterrupt
         # and SystemExit are no longer swallowed.)
         haizeatime = "                      "
     extra = { "haizeatime" : haizeatime}
     # Fixed: compare the full (major, minor) tuple; the original
     # "sys.version_info[1] <= 4" looked only at the minor version and
     # misfired on Python 3.0-3.4. logging before Python 2.5 lacks the
     # func/extra parameters.
     if sys.version_info[:2] <= (2, 4):
         name = "[%s] %s" % (haizeatime, name)
         return logging.Logger.makeRecord(self, name, lvl, fn, lno, msg, args, exc_info)
     else:
         return logging.Logger.makeRecord(self, name, lvl, fn, lno, msg, args, exc_info, func, extra)
Beispiel #27
0
    def at_lease_done(self, lease):
        """See AccountingProbe.at_lease_done"""

        if lease.get_type() == Lease.BEST_EFFORT:
            wait = lease.get_waiting_time().seconds
            now = get_clock().get_time()
            # Best-effort turnaround time, in seconds.
            BBTT = self.turnaround_time(lease, now).seconds
            self.AVG_BE_TT += BBTT
            # Per-lease statistics: preemptions suffered, finish time,
            # waiting time, turnaround, and slowdown.
            self.accounting.set_lease_stat(BEProbe.LEASE_STAT_PREEMPTION,
                                           lease.id, lease.strv_counter)
            self.accounting.set_lease_stat(BEProbe.LEASE_STAT_FINISHED,
                                           lease.id, now)
            self.accounting.set_lease_stat(BEProbe.LEASE_STAT_WAITINGTIME,
                                           lease.id, wait)
            self.accounting.set_lease_stat(BEProbe.LEASE_STAT_TTBE, lease.id,
                                           BBTT)
            self.accounting.set_lease_stat(BEProbe.LEASE_STAT_SLOWDOWN,
                                           lease.id, lease.get_slowdown())
            self.accounting.incr_counter(BEProbe.COUNTER_BESTEFFORTCOMPLETED,
                                         lease.id)
Beispiel #28
0
 def append_to_counter(self, counter_id, value, lease_id = None):
     """Append a value to a counter
     
     @param counter_id: Name of the counter
     @type counter_id: C{str}
     @param value: Value to append
     @type value: C{int} or C{float}
     @param lease_id: Optionally, the ID of the lease that caused this increment.
     @type lease_id: C{int}
     """
     time = get_clock().get_time()
     entries = self.__data.counters[counter_id]
     if len(entries) == 0:
         # First sample for this counter.
         entries.append([time, lease_id, value])
         return
     prevtime, prevlease, prevval = entries[-1]
     if time == prevtime:
         # Same timestamp: overwrite the last sample in place.
         entries[-1][2] = value
     elif prevlease != lease_id or prevval != value:
         # Only append when something actually changed.
         entries.append([time, lease_id, value])
 def append_to_counter(self, counter_id, value, lease_id=None):
     """Append a value to a counter
     
     @param counter_id: Name of the counter
     @type counter_id: C{str}
     @param value: Value to append
     @type value: C{int} or C{float}
     @param lease_id: Optionally, the ID of the lease that caused this increment.
     @type lease_id: C{int}
     """
     time = get_clock().get_time()
     if len(self.__data.counters[counter_id]) > 0:
         prevtime = self.__data.counters[counter_id][-1][0]
         prevlease = self.__data.counters[counter_id][-1][1]
         prevval = self.__data.counters[counter_id][-1][2]
         if time == prevtime:
             # Same timestamp: overwrite the last sample in place.
             self.__data.counters[counter_id][-1][2] = value
         else:
             # Only append when something actually changed since the
             # last sample.
             if prevlease != lease_id or prevval != value:
                 self.__data.counters[counter_id].append(
                     [time, lease_id, value])
     else:
         # First sample for this counter.
         self.__data.counters[counter_id].append([time, lease_id, value])
    def request_lease(self, lease):
        """Request a lease. This is the entry point of leases into the scheduler.
        
        The decision on whether to accept or reject a
        lease is deferred to the policy manager (through its admission
        control policy). 
        
        If the policy determines the lease can be
        accepted, it is marked as "Pending". This still doesn't
        guarantee that the lease will be scheduled (e.g., an AR lease
        could still be rejected if the scheduler determines there are no
        resources for it; but that is a *scheduling* decision, not an admission
        control policy decision). The ultimate fate of the lease is determined
        the next time the scheduling function is called.
        
        If the policy determines the lease cannot be accepted, it is marked
        as rejected.

        Arguments:
        lease -- Lease object. Its state must be STATE_NEW.
        """
        self.logger.info("Lease #%i has been requested." % lease.id)
        # Fixed: identity check "is None" instead of "== None".
        if lease.submit_time is None:
            lease.submit_time = round_datetime(get_clock().get_time())
        lease.print_contents()
        lease.set_state(Lease.STATE_PENDING)
        if get_policy().accept_lease(lease):
            self.logger.info("Lease #%i has been marked as pending." % lease.id)
            self.leases.add(lease)
        else:
            self.logger.info("Lease #%i has not been accepted" % lease.id)
            lease.set_state(Lease.STATE_REJECTED)
            self.completed_leases.add(lease)

        # Accounting and persistence happen regardless of the decision.
        self.accounting.at_lease_request(lease)
        get_persistence().persist_lease(lease)
    def cancel_lease(self, lease):
        """Cancels a lease.
        
        Arguments:
        lease -- Lease to cancel
        """
        # Current time; used to look up the lease's active reservation.
        time = get_clock().get_time()

        self.logger.info("Cancelling lease %i..." % lease.id)

        lease_state = lease.get_state()

        if lease_state == Lease.STATE_PENDING:
            # If a lease is pending, we just need to change its state and
            # remove it from the lease table. Since this is done at the
            # end of this method, we do nothing here.
            pass

        elif lease_state == Lease.STATE_ACTIVE:
            # If a lease is active, that means we have to shut down its VMs to cancel it.
            self.logger.info(
                "Lease %i is active. Stopping active reservation..." %
                lease.id)
            # NOTE(review): assumes at least one active VM reservation
            # exists -- confirm get_active_vmrrs(time) cannot return [].
            vmrr = lease.get_active_vmrrs(time)[0]
            self._handle_end_rr(vmrr)
            self.vm_scheduler._handle_unscheduled_end_vm(lease, vmrr)

            # Force machines to shut down
            try:
                self.vm_scheduler.resourcepool.stop_vms(lease, vmrr)
            except EnactmentError, exc:
                self.logger.error("Enactment error when shutting down VMs.")
                # Right now, this is a non-recoverable error, so we just
                # propagate it upwards.
                # In the future, it may be possible to react to these
                # kind of errors.
                raise
Beispiel #32
0
    def request_lease(self, lease):
        """Request a lease. This is the entry point of leases into the scheduler.
        
        The decision on whether to accept or reject a
        lease is deferred to the policy manager (through its admission
        control policy). 
        
        If the policy determines the lease can be
        accepted, it is marked as "Pending". This still doesn't
        guarantee that the lease will be scheduled (e.g., an AR lease
        could still be rejected if the scheduler determines there are no
        resources for it; but that is a *scheduling* decision, not an admission
        control policy decision). The ultimate fate of the lease is determined
        the next time the scheduling function is called.
        
        If the policy determines the lease cannot be accepted, it is marked
        as rejected.

        Arguments:
        lease -- Lease object. Its state must be STATE_NEW.
        """
        self.logger.info("Lease #%i has been requested." % lease.id)
        # NOTE(review): "is None" would be the idiomatic check here.
        if lease.submit_time == None:
            lease.submit_time = round_datetime(get_clock().get_time())
        lease.print_contents()
        lease.set_state(Lease.STATE_PENDING)
        if get_policy().accept_lease(lease):
            self.logger.info("Lease #%i has been marked as pending." % lease.id)
            self.leases.add(lease)
        else:
            self.logger.info("Lease #%i has not been accepted" % lease.id)
            lease.set_state(Lease.STATE_REJECTED)
            self.completed_leases.add(lease)
        
        # Accounting and persistence happen regardless of the decision.
        self.accounting.at_lease_request(lease)
        get_persistence().persist_lease(lease)
Beispiel #33
0
    def map(self, lease, requested_resources, start, end, strictend, allow_preemption=False, onlynodes=None):
        """The mapping function
        
        Maps a lease's virtual nodes onto physical nodes in the interval
        [start, end], optionally preempting other leases to make room.
        Returns (mapping, maxend, preempting), or (None, None, None) if
        no mapping could be found.
        
        See documentation in Mapper for more details
        """
        # Fixed: "!= None" / "== None" comparisons replaced by the
        # idiomatic "is not None" / "is None"; logic is unchanged.
        
        # Generate an availability window at time "start"
        aw = self.slottable.get_availability_window(start)

        nodes = aw.get_nodes_at(start)
        if onlynodes is not None:
            # Restrict the candidate physical nodes to the given set.
            nodes = list(set(nodes) & onlynodes)

        # Get an ordered list of physical nodes
        pnodes = self.policy.sort_hosts(nodes, start, lease)
        
        # Get an ordered list of lease nodes
        vnodes = self.__sort_vnodes(requested_resources)
        
        if allow_preemption:
            # Get the leases that intersect with the requested interval.
            leases = aw.get_leases_between(start, end)
            # Ask the policy engine to sort the leases based on their
            # preemptability
            leases = self.policy.sort_leases(lease, leases, start)
            
            preemptable_leases = leases
        else:
            preemptable_leases = []

        if allow_preemption:
            # Save the slot table state so preemptions can be undone.
            self.slottable.push_state(preemptable_leases)

        preempting = []
        nexttime = get_clock().get_next_schedulable_time()
        
        # Try to find a mapping. Each iteration of this loop goes through
        # all the lease nodes and tries to find a mapping. The first
        # iteration assumes no leases can be preempted, and each successive
        # iteration assumes one more lease can be preempted.
        mapping = {}
        done = False
        while not done:
            # Start at the first lease node
            vnodes_pos = 0
            cur_vnode = vnodes[vnodes_pos]
            cur_vnode_capacity = requested_resources[cur_vnode]
            maxend = end
            
            # Go through all the physical nodes.
            # In each iteration, we try to map as many lease nodes
            # as possible into the physical nodes.
            # "cur_vnode_capacity" holds the capacity of the vnode we are currently
            # trying to map. "need_to_map" is the amount of resources we are
            # trying to map into the current physical node (which might be
            # more than one lease node).
            for pnode in pnodes:
                # need_to_map is initialized to the capacity of whatever
                # lease node we are trying to map now.
                need_to_map = self.slottable.create_empty_resource_tuple()
                need_to_map.incr(cur_vnode_capacity)
                avail = aw.get_ongoing_availability(start, pnode, preempted_leases = preempting)
                
                # Try to fit as many lease nodes as we can into this physical node
                pnode_done = False
                while not pnode_done:
                    if avail.fits(need_to_map, until = maxend):
                        # In this case, we can fit "need_to_map" into the
                        # physical node.
                        mapping[cur_vnode] = pnode
                        vnodes_pos += 1
                        if vnodes_pos >= len(vnodes):
                            # No more lease nodes to map, we're done.
                            done = True
                            break
                        else:
                            # Advance to the next lease node, and add its
                            # capacity to need_to_map
                            cur_vnode = vnodes[vnodes_pos]
                            cur_vnode_capacity = requested_resources[cur_vnode]
                            need_to_map.incr(cur_vnode_capacity)
                    else:
                        # We couldn't fit the lease node. If we need to
                        # find a mapping that spans the entire requested
                        # interval, then we're done checking this physical node.
                        if strictend:
                            pnode_done = True
                        else:
                            # Otherwise, check what the longest interval
                            # we could fit in this physical node
                            latest = avail.latest_fit(need_to_map)
                            if latest is None:
                                pnode_done = True
                            else:
                                maxend = latest
                    
                if done:
                    break

            # If there's no more leases that we could preempt,
            # we're done.
            if len(preemptable_leases) == 0:
                done = True
            elif not done:
                # Otherwise, add another lease to the list of
                # leases we are preempting
                added = False
                while not added:
                    preemptee = preemptable_leases.pop()
                    try:
                        self.__preempt_lease_deadline(preemptee, start, end, nexttime)
                        preempting.append(preemptee)
                        added = True
                    except NotSchedulableException:
                        # This candidate cannot be preempted; try the
                        # next one (or give up if none remain).
                        if len(preemptable_leases) == 0:
                            done = True
                            break
                    

        if len(mapping) != len(requested_resources):
            # No mapping found
            if allow_preemption:
                # Undo any preemptions made while searching.
                self.slottable.pop_state()
            return None, None, None
        else:
            if allow_preemption:
                # Keep the preemptions: discard the saved state.
                self.slottable.pop_state(discard = True)
            return mapping, maxend, preempting
Beispiel #34
0
 def at_timestep(self, lease_scheduler):
     """See AccountingProbe.at_timestep"""
     # Record the current overall utilization, skipping the unassigned
     # (None) bucket.
     now = get_clock().get_time()
     util = lease_scheduler.vm_scheduler.get_utilization(now)
     utilization = sum(v for k, v in util.items() if k != None)
     self.accounting.append_to_counter(CPUUtilizationProbe.COUNTER_UTILIZATION, utilization)