def allocate_vm(self, decision_event: DecisionPayload, env: Env) -> AllocateAction:
    # Use the first valid PM, based on its order.
    chosen_idx: int = decision_event.valid_pms[0]
    # Take action to allocate on the chosen PM.
    action: AllocateAction = AllocateAction(
        vm_id=decision_event.vm_id,
        pm_id=chosen_idx
    )
    return action
def allocate_vm(self, decision_event: DecisionPayload, env: Env) -> AllocateAction:
    # Use a rule to choose a valid PM.
    chosen_idx: int = self._pick_pm_func(decision_event, env)
    # Take action to allocate on the chosen PM.
    action: AllocateAction = AllocateAction(
        vm_id=decision_event.vm_id,
        pm_id=decision_event.valid_pms[chosen_idx]
    )
    return action
def allocate_vm(self, decision_event: DecisionPayload, env: Env) -> AllocateAction:
    valid_pm_num: int = len(decision_event.valid_pms)
    # Randomly choose a valid PM.
    chosen_idx: int = random.randint(0, valid_pm_num - 1)
    # Take action to allocate on the chosen PM.
    action: AllocateAction = AllocateAction(
        vm_id=decision_event.vm_id,
        pm_id=decision_event.valid_pms[chosen_idx]
    )
    return action
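For context, here is a minimal sketch of the loop that drives these `allocate_vm` rules. It assumes the same `Env.step` / `PostponeAction` API used in the runnable examples later in this section; `agent` is a hypothetical instance of one of the rule classes above, not a name from the source.

# Minimal driver sketch (assumption: `env` is a constructed vm_scheduling Env,
# `agent` is a hypothetical instance of one of the rule classes above).
metrics, decision_event, is_done = env.step(None)
while not is_done:
    if len(decision_event.valid_pms) > 0:
        action = agent.allocate_vm(decision_event, env)
    else:
        # No valid PM now, postpone.
        action = PostponeAction(vm_id=decision_event.vm_id, postpone_step=1)
    metrics, decision_event, is_done = env.step(action)
print(metrics)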
def setUp(self):
    env = Env(
        scenario="vm_scheduling",
        topology="tests/data/vm_scheduling/azure.2019.toy",
        start_tick=0,
        durations=5,
        snapshot_resolution=1
    )
    metrics, decision_event, is_done = env.step(None)
    while not is_done:
        action = AllocateAction(
            vm_id=decision_event.vm_id,
            pm_id=decision_event.valid_pms[0]
        )
        self.metrics, decision_event, is_done = env.step(action)
def allocate_vm(self, decision_event: DecisionPayload, env: Env) -> AllocateAction:
    # Initialize the bins.
    self._init_bin()
    # Get the CPU capacity and allocated cores of each PM from the snapshot.
    total_pm_info = env.snapshot_list["pms"][
        env.frame_index::["cpu_cores_capacity", "cpu_cores_allocated"]
    ].reshape(-1, 2)
    # Calculate the remaining CPU cores, accounting for the max CPU oversubscription rate.
    cpu_cores_remaining = total_pm_info[:, 0] * self._max_cpu_oversubscription_rate - total_pm_info[:, 1]
    # Put each PM into the bin indexed by its remaining cores.
    for i, cpu_core in enumerate(cpu_cores_remaining):
        self._bins[int(cpu_core)].append(i)
        self._bin_size[int(cpu_core)] += 1
    # Choose a PM that minimizes the variance of the PM count in each bin.
    minimal_var = np.inf
    cores_need = decision_event.vm_cpu_cores_requirement
    chosen_idx = 0
    for remaining_cores in range(cores_need, len(self._bins)):
        if self._bin_size[remaining_cores] != 0:
            # Tentatively move one PM to the bin it would land in after allocation.
            self._bin_size[remaining_cores] -= 1
            self._bin_size[remaining_cores - cores_need] += 1
            var = np.var(np.array(self._bin_size))
            if minimal_var > var:
                minimal_var = var
                chosen_idx = random.choice(self._bins[remaining_cores])
            # Undo the tentative move.
            self._bin_size[remaining_cores] += 1
            self._bin_size[remaining_cores - cores_need] -= 1
    # Take action to allocate on the chosen PM.
    action: AllocateAction = AllocateAction(
        vm_id=decision_event.vm_id,
        pm_id=chosen_idx
    )
    return action
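To make the variance criterion concrete, here is a toy walk-through with hypothetical numbers (not part of the agent): with per-bin PM counts `[2, 0, 3, 1]` and a request needing 1 core, moving a PM from bin 2 to bin 1 yields `[2, 1, 2, 1]` (variance 0.25), while moving one from bin 3 to bin 2 yields `[2, 0, 4, 0]` (variance 2.75), so the rule allocates from bin 2.

import numpy as np

# Toy illustration of the variance criterion (hypothetical numbers).
bin_size = [2, 0, 3, 1]   # number of PMs with 0, 1, 2, 3 remaining cores
cores_need = 1
for remaining_cores in range(cores_need, len(bin_size)):
    if bin_size[remaining_cores] != 0:
        # Tentatively move one PM to its post-allocation bin.
        bin_size[remaining_cores] -= 1
        bin_size[remaining_cores - cores_need] += 1
        print(remaining_cores, np.var(np.array(bin_size)))
        # Undo the tentative move.
        bin_size[remaining_cores] += 1
        bin_size[remaining_cores - cores_need] -= 1
# Prints: 2 0.25, then 3 2.75 -> allocating from bin 2 keeps the bins most balanced.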
        action: PostponeAction = PostponeAction(vm_id=decision_event.vm_id, postpone_step=1)
    else:
        # Get the capacity and allocated cores from the snapshot.
        valid_pm_info = env.snapshot_list["pms"][
            env.frame_index:decision_event.valid_pms:["cpu_cores_capacity", "cpu_cores_allocated"]
        ].reshape(-1, 2)
        # Calculate the remaining CPU cores.
        cpu_cores_remaining = valid_pm_info[:, 0] - valid_pm_info[:, 1]
        # Choose the valid PM with the fewest remaining CPU cores (best fit).
        chosen_idx = 0
        minimum_remaining_cpu_cores = cpu_cores_remaining[0]
        for i, remaining in enumerate(cpu_cores_remaining):
            if remaining < minimum_remaining_cpu_cores:
                chosen_idx = i
                minimum_remaining_cpu_cores = remaining
        # Take action to allocate on the best-fit PM.
        action: AllocateAction = AllocateAction(
            vm_id=decision_event.vm_id,
            pm_id=decision_event.valid_pms[chosen_idx]
        )
    metrics, decision_event, is_done = env.step(action)

end_time = timeit.default_timer()

print(
    f"[Best fit] Topology: {config.env.topology}. Total ticks: {config.env.durations}."
    f" Start tick: {config.env.start_tick}."
)
print(f"[Timer] {end_time - start_time:.2f} seconds to finish the simulation.")
print(metrics)
env.set_seed(config.env.seed)
random.seed(config.env.seed)

metrics: object = None
decision_event: DecisionPayload = None
is_done: bool = False
action: AllocateAction = None

metrics, decision_event, is_done = env.step(None)
while not is_done:
    valid_pm_num: int = len(decision_event.valid_pms)
    if valid_pm_num <= 0:
        # No valid PM now, postpone.
        action: PostponeAction = PostponeAction(vm_id=decision_event.vm_id, postpone_step=1)
    else:
        # Randomly choose an available PM.
        random_idx = random.randint(0, valid_pm_num - 1)
        pm_id = decision_event.valid_pms[random_idx]
        action: AllocateAction = AllocateAction(vm_id=decision_event.vm_id, pm_id=pm_id)
    metrics, decision_event, is_done = env.step(action)

end_time = timeit.default_timer()

print(
    f"[Random] Topology: {config.env.topology}. Total ticks: {config.env.durations}."
    f" Start tick: {config.env.start_tick}."
)
print(f"[Timer] {end_time - start_time:.2f} seconds to finish the simulation.")
print(metrics)
def choose_action(self, env_tick: int, cur_vm_id: int, live_vm_set_list: List[Set[int]]) -> Action:
    # Formulate and solve only when the new request goes beyond the apply buffer size of the last ILP solution.
    if self.last_solution_env_tick < 0 or env_tick >= self.last_solution_env_tick + self.ilp_apply_buffer_size:
        self.last_solution_env_tick = env_tick
        self._vm_id_to_idx = {}
        self.future_vm_req.clear()
        self.allocated_vm.clear()

        # Clear the outdated vm_req_dict data.
        pop_num = 0
        for tick in self.env_tick_in_vm_req_dict:
            if tick < env_tick:
                self.vm_req_dict.pop(tick)
                pop_num += 1
            else:
                break
        self.env_tick_in_vm_req_dict = self.env_tick_in_vm_req_dict[pop_num:]

        # Read VM data from file.
        for tick in range(env_tick, env_tick + self.ilp_plan_window_size + 1):
            if tick not in self.vm_req_dict:
                self.env_tick_in_vm_req_dict.append(tick)
                self.vm_req_dict[tick] = [item for item in self.vm_item_picker.items(tick)]

        # Build the future_vm_req list for ILP.
        for tick in range(env_tick, env_tick + self.ilp_plan_window_size + 1):
            for vm in self.vm_req_dict[tick]:
                vmInfo = IlpVmInfo(
                    id=vm.vm_id,
                    core=vm.vm_cpu_cores,
                    mem=vm.vm_memory,
                    lifetime=vm.vm_lifetime,
                    arrival_env_tick=tick
                )
                if tick < env_tick + self.ilp_apply_buffer_size:
                    self.refreshed_allocated_vm_dict[vm.vm_id] = vmInfo
                self._vm_id_to_idx[vm.vm_id] = len(self.future_vm_req)
                self.future_vm_req.append(vmInfo)

        # Build the allocated_vm list for ILP.
        for pm_idx in range(len(live_vm_set_list)):
            for vm_id in live_vm_set_list[pm_idx]:
                assert vm_id in self.allocated_vm_dict, f"ILP agent: vm_id {vm_id} not in allocated_vm_dict"
                vm = self.allocated_vm_dict[vm_id]
                vm.pm_idx = pm_idx
                self.refreshed_allocated_vm_dict[vm_id] = vm
                self.allocated_vm.append(vm)

        self.allocated_vm_dict.clear()
        self.allocated_vm_dict = self.refreshed_allocated_vm_dict
        self.refreshed_allocated_vm_dict = {}

    # Choose action by ILP. This may trigger a new formulation and solution, or directly
    # return the decision if cur_vm_id is still within the apply buffer of the last solution.
    chosen_pm_idx = self.ilp.choose_pm(env_tick, cur_vm_id, self.allocated_vm, self.future_vm_req, self._vm_id_to_idx)
    self._simulation_logger.info(f"tick: {env_tick}, vm: {cur_vm_id} -> pm: {chosen_pm_idx}")

    if chosen_pm_idx == NOT_ALLOCATE_NOW:
        return PostponeAction(vm_id=cur_vm_id, postpone_step=1)
    else:
        self._allocation_counter[self.future_vm_req[self._vm_id_to_idx[cur_vm_id]].core] += 1
        return AllocateAction(vm_id=cur_vm_id, pm_id=chosen_pm_idx)
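A note on the design choice here: the apply buffer amortizes solver cost. A fresh ILP formulation over the next `ilp_plan_window_size` ticks of VM requests is only triggered when a request arrives more than `ilp_apply_buffer_size` ticks after the last solution; requests inside that window reuse the previous solution's placement decisions instead of re-solving at every decision event.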