def _handle_end_lease(self, l):
    """Performs actions that have to be done each time a lease ends.

    Arguments:
    lease -- Lease that has ended
    """
    l.set_state(Lease.STATE_DONE)
    l.duration.actual = l.duration.accumulated
    l.end = round_datetime(get_clock().get_time())

    if get_config().get("sanity-check"):
        if l.duration.known != None and l.duration.known < l.duration.requested:
            duration = l.duration.known
        else:
            duration = l.duration.requested

        assert duration == l.duration.actual

        if l.start.is_requested_exact():
            assert l.vm_rrs[0].start >= l.start.requested
        if l.deadline != None:
            assert l.end <= l.deadline

    self.preparation_scheduler.cleanup(l)
    self.completed_leases.add(l)
    self.leases.remove(l)
    self.accounting.at_lease_done(l)
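# A minimal, self-contained sketch of the duration sanity check above.
# _FakeDuration is a hypothetical stand-in for the lease's duration
# datastructure (not Haizea's real class); it only illustrates the expected
# relationship between the known, requested and actual durations.
from datetime import timedelta

class _FakeDuration(object):
    def __init__(self, requested, known, actual):
        self.requested = requested
        self.known = known
        self.actual = actual

def _expected_duration(d):
    # Mirrors the sanity check: the effective duration is the known
    # duration when it is shorter than what was requested.
    if d.known is not None and d.known < d.requested:
        return d.known
    else:
        return d.requested

d = _FakeDuration(requested=timedelta(hours=2),
                  known=timedelta(hours=1),
                  actual=timedelta(hours=1))
assert _expected_duration(d) == d.actual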
def __absolute_time(self, time_str):
    if time_str[0] == "+":
        # Relative time
        time = round_datetime(now() + ISO.ParseTime(time_str[1:]))
    else:
        time = Parser.ParseDateTime(time_str)
    return time
def __absolute_time(self, time_str):
    if time_str[0] == "+":
        # Relative time
        time = round_datetime(now() + Parser.TimeDeltaFromString(time_str[1:]))
    else:
        time = Parser.DateTimeFromString(time_str)
    return time
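# Hedged usage sketch for the two __absolute_time variants above: a leading
# "+" means a time relative to "now", anything else is parsed as an absolute
# timestamp. The helpers below use the standard datetime module as a
# stand-in for the mx.DateTime ISO/Parser functions used by Haizea, and
# _absolute_time_sketch is a hypothetical name, not part of the codebase.
from datetime import datetime, timedelta

def _parse_timedelta(s):
    # Assumes an "HH:MM:SS" string, one of the formats the real
    # ISO.ParseTime / Parser.TimeDeltaFromString calls accept.
    h, m, sec = [int(x) for x in s.split(":")]
    return timedelta(hours=h, minutes=m, seconds=sec)

def _absolute_time_sketch(time_str, now):
    if time_str[0] == "+":
        # Relative time: offset from the current time
        return now + _parse_timedelta(time_str[1:])
    else:
        return datetime.strptime(time_str, "%Y-%m-%d %H:%M:%S")

now = datetime(2010, 1, 1, 12, 0, 0)
assert _absolute_time_sketch("+01:30:00", now) == datetime(2010, 1, 1, 13, 30, 0)
assert _absolute_time_sketch("2010-01-02 00:00:00", now) == datetime(2010, 1, 2, 0, 0, 0)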
def _handle_end_lease(self, l):
    """Performs actions that have to be done each time a lease ends.

    Arguments:
    lease -- Lease that has ended
    """
    l.set_state(Lease.STATE_DONE)
    l.duration.actual = l.duration.accumulated
    l.end = round_datetime(get_clock().get_time())
    self.preparation_scheduler.cleanup(l)
    self.completed_leases.add(l)
    self.leases.remove(l)
    self.accounting.at_lease_done(l)
def request_lease(self, lease):
    """Requests a lease. This is the entry point of leases into the scheduler.

    The decision on whether to accept or reject a lease is deferred to the
    policy manager (through its admission control policy).

    If the policy determines the lease can be accepted, it is marked as
    "Pending". This still doesn't guarantee that the lease will be scheduled
    (e.g., an AR lease could still be rejected if the scheduler determines
    there are no resources for it; but that is a *scheduling* decision, not
    an admission control policy decision). The ultimate fate of the lease is
    determined the next time the scheduling function is called.

    If the policy determines the lease cannot be accepted, it is marked as
    rejected.

    Arguments:
    lease -- Lease object. Its state must be STATE_NEW.
    """
    self.logger.info("Lease #%i has been requested." % lease.id)
    if lease.submit_time == None:
        lease.submit_time = round_datetime(get_clock().get_time())
    lease.print_contents()
    lease.set_state(Lease.STATE_PENDING)
    if get_policy().accept_lease(lease):
        self.logger.info("Lease #%i has been marked as pending." % lease.id)
        self.leases.add(lease)
    else:
        self.logger.info("Lease #%i has not been accepted" % lease.id)
        lease.set_state(Lease.STATE_REJECTED)
        self.completed_leases.add(lease)

    self.accounting.at_lease_request(lease)
    get_persistence().persist_lease(lease)
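# Illustrative sketch of the admission-control branch in request_lease above.
# _AlwaysAccept and _RejectLarge are hypothetical policy stubs (the real
# decision is made by Haizea's policy manager via accept_lease); they only
# show how the pending/rejected outcome depends on the policy, not on the
# scheduler itself.
class _AlwaysAccept(object):
    def accept_lease(self, lease):
        return True

class _RejectLarge(object):
    def __init__(self, max_nodes):
        self.max_nodes = max_nodes
    def accept_lease(self, lease):
        return lease["numnodes"] <= self.max_nodes

def _admission_outcome(policy, lease):
    # Mirrors the branch above: accepted leases become "Pending",
    # everything else is marked "Rejected" and considered completed.
    return "Pending" if policy.accept_lease(lease) else "Rejected"

lease = {"numnodes": 4}
assert _admission_outcome(_AlwaysAccept(), lease) == "Pending"
assert _admission_outcome(_RejectLarge(max_nodes=2), lease) == "Rejected"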
def __init__(self, opennebula_vm):
    # If there is no HAIZEA parameter, the default is to treat the
    # request as an immediate request with unlimited duration
    if not opennebula_vm.template.has_key(OpenNebulaHaizeaVM.HAIZEA_PARAM):
        self.start = OpenNebulaHaizeaVM.HAIZEA_START_NOW
        self.duration = OpenNebulaHaizeaVM.HAIZEA_DURATION_UNLIMITED
        self.preemptible = OpenNebulaHaizeaVM.HAIZEA_PREEMPTIBLE_NO
        self.group = None
    else:
        self.start = opennebula_vm.template[OpenNebulaHaizeaVM.HAIZEA_PARAM][OpenNebulaHaizeaVM.HAIZEA_START]
        self.duration = opennebula_vm.template[OpenNebulaHaizeaVM.HAIZEA_PARAM][OpenNebulaHaizeaVM.HAIZEA_DURATION]
        self.preemptible = opennebula_vm.template[OpenNebulaHaizeaVM.HAIZEA_PARAM][OpenNebulaHaizeaVM.HAIZEA_PREEMPTIBLE]
        if opennebula_vm.template[OpenNebulaHaizeaVM.HAIZEA_PARAM].has_key(OpenNebulaHaizeaVM.HAIZEA_GROUP):
            self.group = opennebula_vm.template[OpenNebulaHaizeaVM.HAIZEA_PARAM][OpenNebulaHaizeaVM.HAIZEA_GROUP]
        else:
            self.group = None

    self.submit_time = UNIX2DateTime(opennebula_vm.stime)

    # Create Timestamp object
    if self.start == OpenNebulaHaizeaVM.HAIZEA_START_NOW:
        self.start = Timestamp(Timestamp.NOW)
    elif self.start == OpenNebulaHaizeaVM.HAIZEA_START_BESTEFFORT:
        self.start = Timestamp(Timestamp.UNSPECIFIED)
    elif self.start[0] == "+":
        # Relative time
        self.start = Timestamp(round_datetime(self.submit_time + ISO.ParseTime(self.start[1:])))
    else:
        self.start = Timestamp(ISO.ParseDateTime(self.start))

    # Create Duration object
    if self.duration == OpenNebulaHaizeaVM.HAIZEA_DURATION_UNLIMITED:
        # This is an interim solution (make it run for a century).
        # TODO: Integrate concept of unlimited duration in the lease datastruct
        self.duration = Duration(DateTimeDelta(36500))
    else:
        self.duration = Duration(ISO.ParseTimeDelta(self.duration))

    self.preemptible = (self.preemptible == OpenNebulaHaizeaVM.HAIZEA_PREEMPTIBLE_YES)

    self.capacity = Capacity([constants.RES_CPU, constants.RES_MEM, constants.RES_DISK])

    # CPU
    # CPUs in VMs are not reported the same as in hosts.
    # There are two template values: CPU and VCPU.
    # CPU reports the percentage of the CPU needed by the VM.
    # VCPU, which is optional, reports how many CPUs are needed.
    cpu = int(float(opennebula_vm.template["CPU"]) * 100)
    if opennebula_vm.template.has_key("VCPU"):
        ncpu = int(opennebula_vm.template["VCPU"])
    else:
        ncpu = 1
    self.capacity.set_ninstances(constants.RES_CPU, ncpu)
    for i in range(ncpu):
        self.capacity.set_quantity_instance(constants.RES_CPU, i+1, cpu)

    # Memory. Unlike hosts, memory is reported directly in MBs
    self.capacity.set_quantity(constants.RES_MEM, int(opennebula_vm.template["MEMORY"]))

    self.one_id = opennebula_vm.id
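# Sketch of the CPU/VCPU translation performed above. OpenNebula reports CPU
# as a fraction of a physical CPU and VCPU as an (optional) count of virtual
# CPUs; the constructor turns this into ncpu CPU instances of (CPU * 100)
# each. _cpu_capacity_sketch is a hypothetical helper, not part of the
# codebase, returning a plain list instead of a Capacity object.
def _cpu_capacity_sketch(template):
    cpu = int(float(template["CPU"]) * 100)
    ncpu = int(template.get("VCPU", 1))
    # One entry per virtual CPU, each needing `cpu` percent of a core
    return [cpu] * ncpu

# e.g. a VM asking for two virtual CPUs, each at 50% of a core
assert _cpu_capacity_sketch({"CPU": "0.5", "VCPU": "2"}) == [50, 50]
# VCPU is optional; a single virtual CPU is assumed when it is missing
assert _cpu_capacity_sketch({"CPU": "1.0"}) == [100]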
class RealClock(Clock):
    """A realtime clock.

    The real clock wakes up periodically to, in turn, tell the resource manager
    to wake up. The real clock can also be run in a "fastforward" mode for
    debugging purposes (however, unlike the simulated clock, the clock will
    always skip a fixed amount of time into the future).
    """
    def __init__(self, manager, quantum, non_sched, fastforward = False):
        """Initializes the real clock.

        Arguments:
        manager -- the resource manager
        quantum -- interval between clock wakeups
        fastforward -- if True, the clock won't actually sleep
                       for the duration of the quantum."""
        Clock.__init__(self, manager)
        self.fastforward = fastforward
        if not self.fastforward:
            self.lastwakeup = None
        else:
            self.lastwakeup = round_datetime(now())
        self.logger = logging.getLogger("CLOCK")
        self.starttime = self.get_time()
        self.nextschedulable = None
        self.nextperiodicwakeup = None
        self.quantum = TimeDelta(seconds=quantum)
        self.non_sched = TimeDelta(seconds=non_sched)

    def get_time(self):
        """See docstring in base Clock class."""
        if not self.fastforward:
            return now()
        else:
            return self.lastwakeup

    def get_start_time(self):
        """See docstring in base Clock class."""
        return self.starttime

    def get_next_schedulable_time(self):
        """See docstring in base Clock class."""
        return self.nextschedulable

    def run(self):
        """Runs the real clock through time.

        The clock starts when run() is called. In each iteration of the main
        loop it will do the following:
        - Wake up the resource manager
        - Determine if there will be anything to do before the next time the
          clock will wake up (after the quantum has passed). Note that this
          information is readily available on the slot table. If so, set
          next-wakeup-time to (now + time until slot table event). Otherwise,
          set it to (now + quantum)
        - Sleep until next-wake-up-time

        The clock keeps on tickin' until a SIGINT signal (Ctrl-C if running in
        the foreground) or a SIGTERM signal is received.
        """
        self.logger.status("Starting clock")
        self.manager.accounting.start(self.get_start_time())

        try:
            signal.signal(signal.SIGINT, self.signalhandler_gracefulstop)
            signal.signal(signal.SIGTERM, self.signalhandler_gracefulstop)
        except ValueError, exc:
            # This means Haizea is not the main thread, which will happen
            # when running it as part of a py.test. We simply ignore this
            # to allow the test to continue.
            pass

        # Main loop
        while not self.done:
            self.logger.status("Waking up to manage resources")

            # Save the waking time. We want to use a consistent time in the
            # resource manager operations (if we use now(), we'll get a
            # different time every time)
            if not self.fastforward:
                self.lastwakeup = round_datetime(self.get_time())
            self.logger.status("Wake-up time recorded as %s" % self.lastwakeup)

            # Next schedulable time
            self.nextschedulable = round_datetime(self.lastwakeup + self.non_sched)

            # Check if there are any changes in the resource pool
            new_nodes = self.manager.scheduler.vm_scheduler.resourcepool.refresh_nodes()
            for n in new_nodes:
                rt = self.manager.scheduler.slottable.create_resource_tuple_from_capacity(n.capacity)
                self.manager.scheduler.slottable.add_node(n.id, rt)

            # Wake up the resource manager
            self.manager.process_ending_reservations(self.lastwakeup)
            self.manager.process_starting_reservations(self.lastwakeup)
            # TODO: Compute nextschedulable here, before processing requests
            self.manager.process_requests(self.nextschedulable)

            self.manager.accounting.at_timestep(self.manager.scheduler)

            # Next wakeup time
            time_now = now()
            if self.lastwakeup + self.quantum <= time_now:
                quantums = (time_now - self.lastwakeup) / self.quantum
                quantums = int(ceil(quantums)) * self.quantum
                self.nextperiodicwakeup = round_datetime(self.lastwakeup + quantums)
            else:
                self.nextperiodicwakeup = round_datetime(self.lastwakeup + self.quantum)

            # Determine if there's anything to do before the next wakeup time
            nextchangepoint = self.manager.get_next_changepoint()
            if nextchangepoint != None and nextchangepoint <= self.nextperiodicwakeup:
                # We need to wake up earlier to handle a slot table event
                nextwakeup = nextchangepoint
                self.logger.status("Going back to sleep. Waking up at %s to handle slot table event." % nextwakeup)
            else:
                # Nothing to do before waking up
                nextwakeup = self.nextperiodicwakeup
                self.logger.status("Going back to sleep. Waking up at %s to see if something interesting has happened by then." % nextwakeup)

            # The only exit condition from the real clock is if the stop_when_no_more_leases
            # is set to True, and there's no more work left to do.
            # TODO: This first if is a kludge. Other options should only interact with
            # options through the configfile's get method. The "stop-when-no-more-leases"
            # option is currently OpenNebula-specific (while the real clock isn't; it can
            # be used by both the simulator and the OpenNebula mode). This has to be
            # fixed.
            if self.manager.config._options.has_key("stop-when-no-more-leases"):
                stop_when_no_more_leases = self.manager.config.get("stop-when-no-more-leases")
                if stop_when_no_more_leases and not self.manager.exists_more_leases():
                    self.done = True

            # Sleep
            if not self.done:
                if not self.fastforward:
                    sleep((nextwakeup - now()).seconds)
                else:
                    self.lastwakeup = nextwakeup

        self.logger.status("Real clock has stopped")

        # Stop the resource manager
        self.manager.graceful_stop()
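# Sketch of the periodic-wakeup arithmetic in run() above: if an iteration
# overruns the quantum, the next wakeup is aligned to the next multiple of
# the quantum after the last wakeup, rather than drifting. Uses the standard
# datetime module as a stand-in for mx.DateTime; _next_periodic_wakeup is a
# hypothetical helper mirroring the logic, not Haizea API.
from datetime import datetime, timedelta
from math import ceil

def _next_periodic_wakeup(lastwakeup, quantum, time_now):
    if lastwakeup + quantum <= time_now:
        # We overslept: skip ahead an integer number of quantums
        quantums = (time_now - lastwakeup).total_seconds() / quantum.total_seconds()
        return lastwakeup + int(ceil(quantums)) * quantum
    else:
        return lastwakeup + quantum

last = datetime(2010, 1, 1, 12, 0, 0)
q = timedelta(seconds=10)
# Processing took 25s, so the next wakeup lands on the 30s boundary
assert _next_periodic_wakeup(last, q, last + timedelta(seconds=25)) == last + timedelta(seconds=30)
# Processing finished within the quantum: wake up one quantum later
assert _next_periodic_wakeup(last, q, last + timedelta(seconds=3)) == last + timedelta(seconds=10)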