def qos_share_limit_policy(self, host: Host):
    """Redistribute host memory limits among running containers per QoS level.

    'BEST': give each RUNNING container its current usage plus an equal
    slice of the host's effectively-unused memory; the leftover slice
    (reserved for inactive containers) is stored back into ``self.NAHM``.
    'FAIR': cap every RUNNING container at an equal share of the host's
    total usable memory (never below the container's own minimum) and
    reclaim the freed difference into ``self.NAHM``.

    :param host: Host whose active containers are adjusted in place.
    """
    total_containers = host.active_list_counter() + host.inactive_list_counter()
    # Guard: with no containers at all there is nothing to share and the
    # per-container division below would raise ZeroDivisionError.
    if total_containers == 0:
        return
    total_max_memory = host.get_max_usable_memoryPG()
    if self.level == 'BEST':
        if host.active_list_counter() > 0:
            total_used = host.get_container_total_usedPG()
            local_NAHM = total_max_memory - total_used
            log_basic.info('Effective Not Used NAHM: %d', local_NAHM)
            # Split over ALL containers (active + inactive) so some NAHM
            # remains available to start queued containers later.
            shared_local_NAHM = round(local_NAHM / total_containers)
            for container in host.container_active_list:
                if (container.getContainerState() == 'RUNNING') and (shared_local_NAHM > 0):
                    mem_used = container.getUsedMemoryPG()
                    mem_limit = container.getMemoryLimitPG()
                    log_basic.info('C: %s, CMU: %d, CML: %d',
                                   container.name, mem_used, mem_limit)
                    new_limit = mem_used + shared_local_NAHM
                    local_NAHM -= shared_local_NAHM
                    container.setMemLimit2(new_limit)
                    log_basic.info('Best Effort Adjusts Container: %s, new CML: %d',
                                   container.name, container.getMemoryLimitPG())
            self.NAHM = local_NAHM
            log_basic.info('Remain NAHM to start new containers: %d', self.NAHM)
    elif self.level == 'FAIR':
        # Base fair share of the whole host, split over all containers.
        fair_share = round(total_max_memory / total_containers)
        if host.active_list_counter() > 0:
            for container in host.container_active_list:
                if container.getContainerState() == 'RUNNING':
                    mem_limit = container.getMemoryLimitPG()
                    log_basic.info('C: %s, CML: %d', container.name, mem_limit)
                    # BUGFIX: compute the limit per container. The original
                    # reused a single variable across iterations, so one
                    # container's raised minimum leaked into the limit of
                    # every container processed after it.
                    new_limit = max(fair_share, container.getMinMemoryLimitPG())
                    delta = mem_limit - new_limit
                    self.NAHM += delta
                    container.setMemLimit2(new_limit)
                    log_basic.info(
                        'Fair Share Stolen Container: %s, Delta: %d, new CML T1\u25BC: %d, new NAHM\u25B2: %d',
                        container.name, delta, container.getMemoryLimitPG(), self.NAHM)
def qos_start_policy(self, host: Host):
    """Start QUEUED containers from the inactive list, longest-waiting first.

    'GUARANTEED': start a container only if its full maximum limit fits in
    the available NAHM.
    'BEST': start containers whose minimum fits an equal NAHM split and
    give them exactly that split.
    'FAIR': same admission test as BEST, but the granted limit is capped
    at the container's own maximum.

    In every case the container must also fit the host's free CPU cores.
    ``self.NAHM`` is decremented by each started container's final limit.

    :param host: Host whose inactive containers may be started.
    """
    sorted_list = sorted(host.container_inactive_list,
                         key=lambda container: container.getInactiveTime(),
                         reverse=True)
    index = 0
    log_basic.info('Available NAHM: %d', self.NAHM)
    if self.level == 'GUARANTEED':
        while (self.NAHM > 0) and (index < len(sorted_list)):
            container = sorted_list[index]
            if container.getContainerState() == 'QUEUED':
                if (container.getMaxMemoryLimitPG() <= self.NAHM) and \
                        (host.has_free_cores() >= container.request_cpus):
                    self._activate(host, container, container.getMaxMemoryLimitPG())
            index += 1
    elif self.level in ('BEST', 'FAIR'):
        inactive_count = host.inactive_list_counter()
        # Guard: avoids ZeroDivisionError when nothing is queued.
        if inactive_count == 0:
            return
        limit_division = round(self.NAHM / inactive_count)
        while (self.NAHM > 0) and (index < len(sorted_list)):
            container = sorted_list[index]
            if container.getContainerState() == 'QUEUED':
                if (container.getMinMemoryLimitPG() <= limit_division) and \
                        (host.has_free_cores() >= container.request_cpus):
                    if self.level == 'BEST':
                        new_limit = limit_division
                    else:
                        # FAIR never grants more than the container's maximum.
                        new_limit = min(limit_division, container.getMaxMemoryLimitPG())
                    self._activate(host, container, new_limit)
            index += 1

def _activate(self, host: Host, container, mem_limit):
    """Start *container* with *mem_limit*, move it Inactive -> Active and
    deduct its final memory limit from ``self.NAHM``.

    Factored out of qos_start_policy, where the identical sequence was
    repeated once per QoS level.
    """
    cpu_allocation = host.get_available_cores(container.request_cpus)
    if parser['Container']['type'] == 'LXC':
        container.startContainer()
        container.setMemLimit2(mem_limit)
        container.setCPUCores(cpu_allocation)
    host.container_active_list.append(container)
    host.container_inactive_list.remove(container)
    log_basic.info(
        'Container %s moved during Start from Inactive -> Active with status %s.',
        container.name, container.state)
    container.inactive_time = 0
    self.NAHM -= container.getMemoryLimitPG()
    log_basic.info('C: %s, CML: %d, new NAHM\u2193: %d',
                   container.name, container.getMemoryLimitPG(), self.NAHM)
def no_manager(shared_list: list, entry_queue: mp.Queue):
    """Standalone 1 Hz scheduling loop for the 'no manager' mode.

    Each tick: drains newly created containers from *entry_queue* into the
    host's inactive (QUEUED) list, refreshes host/container state, runs the
    QoS limit-redistribution and start policies (or the NAHM recovery policy
    when nothing is queued), publishes the active/inactive lists through
    *shared_list*, then sleeps out the remainder of the 1-second period.

    :param shared_list: two-slot shared proxy list; slot 0 receives the
        active container list, slot 1 the inactive list.
    :param entry_queue: multiprocessing queue delivering new containers.

    Runs forever; never returns.
    """
    logNM = logging.getLogger('Container_Manager')
    logNM.setLevel(logging.INFO)
    # Renamed from ``format``: the original shadowed the builtin.
    log_format = logging.Formatter(fmt='%(asctime)s %(levelname)s %(message)s',
                                   datefmt='%d/%m/%Y %H:%M:%S')
    file_handler = logging.FileHandler(filename='./log/no-manager.log', mode='a')
    file_handler.setFormatter(log_format)
    file_handler.setLevel(logging.DEBUG)
    logNM.addHandler(file_handler)
    sched = Basic()
    config = ConfigParser()
    config.read('./config/local-config.txt')
    sched.setLevel(config['QoS']['level'])
    host = Host()
    sched_counter = 1
    while True:
        start_time = datetime.now()
        logNM.info('========================================================')
        logNM.info('Sched counter: %d', sched_counter)
        logNM.info('Sched init timestamp: %s', start_time)
        print(sched_counter, datetime.now())
        # Drain newly created containers into the inactive (queued) list.
        while not entry_queue.empty():
            container = entry_queue.get()
            logNM.info('New Container: %s', container.name)
            container.inactive_time = datetime.now()
            container.setContainerState('QUEUED')
            host.container_inactive_list.append(container)
        host.update()
        host.update_containers2()
        TCML, NAHM, HAM = host.get_host_memory_info()
        sched.setNAHM(NAHM)
        logNM.info('NAHM: %d, HAM: %d, TCML: %d', sched.getNAHM(), HAM, TCML)
        logNM.info('Active List: %s', host.container_active_list)
        logNM.info('Inactive List: %s', host.container_inactive_list)
        logNM.info('QoS Test: %s', sched.getLevel())
        if host.inactive_list_counter() != 0:
            logNM.info('---------------------------------------------------------')
            logNM.info('Executing Limit Redistribution Policy:')
            sched.qos_share_limit_policy(host)
            logNM.info('---------------------------------------------------------')
            logNM.info('Executing Start Inactive Containers:')
            sched.qos_start_policy(host)
        elif (sched.getNAHM() > 0) and (sched.getLevel() in ['BEST', 'FAIR']) \
                and (host.active_list_counter() > 0):
            # Nothing queued: hand unused NAHM back to running containers.
            logNM.info('---------------------------------------------------------')
            logNM.info('Executing NAHM Redistribution:')
            sched.qos_recovery_limit_policy(host)
        host.update()
        host.update_containers2()
        # Publish current state to the consumer process.
        shared_list[0] = host.container_active_list
        shared_list[1] = host.container_inactive_list
        stop_time = datetime.now()
        logNM.info('Sched end timestamp: %s', stop_time)
        latency = (stop_time - start_time).total_seconds()
        logNM.info('New Sched Latency: %f', latency)
        logNM.info('Sleep time: %f seconds', 1 - latency)
        logNM.info('========================================================')
        sched_counter += 1
        # Keep the loop at ~1 tick per second; skip sleeping when a tick
        # overran its budget.
        if latency < 1:
            time.sleep(1 - latency)