def loadbalance(self):
    """
    Run the loadbalancer on the cluster and migrate VMs accordingly.
    See the cxm.loadbalancer module for details about the algorithm.
    """
    if not core.cfg["QUIET"]:
        print "Recording metrics..."

    current_state = {}
    vm_metrics = {}
    node_metrics = {}
    for node in self.get_nodes():
        node.metrics.init_cache()  # Early call to increase the timeslice used to compute rates
        vms = node.get_vms()

        # Get current cluster state
        current_state[node.get_hostname()] = [vm.name for vm in vms]

        # Get node's metrics
        node_metrics[node.get_hostname()] = {"ram": node.metrics.get_available_ram()}

        # Get VMs' metrics
        cpu = node.metrics.get_vms_cpu_usage()
        io = node.metrics.get_vms_disk_io_rate()
        for vm in vms:
            vm_metrics[vm.name] = {}
            vm_metrics[vm.name]["ram"] = vm.get_ram()
            vm_metrics[vm.name]["cpu"] = cpu[vm.name]
            vm_metrics[vm.name]["io"] = io[vm.name]["Read"] + io[vm.name]["Write"]

    # Initialize the loadbalancer
    lb = loadbalancer.LoadBalancer(current_state)
    lb.set_metrics(vm_metrics, node_metrics)
    solution = lb.get_solution()

    if not solution:
        print "No better solution found with a minimal gain of %s%%." % core.cfg["LB_MIN_GAIN"]
    else:
        # Ask the user for confirmation (skipped in quiet mode)
        if not core.cfg["QUIET"]:
            print "Here is the proposed migration plan:"
            for path in solution.get_path():
                print " -> Migrate", path["vm"], "from", path["src"], "to", path["dst"]

            if raw_input("Proceed ? [y/N]:").upper() != "Y":
                print "Aborted by user."
                return

        # Do the migrations to put the cluster in the selected state
        for path in solution.get_path():
            if not core.cfg["QUIET"]:
                print "Migrating", path["vm"], "from", path["src"], "to", path["dst"], "..."
            self.migrate(path["vm"], path["src"], path["dst"])
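
# NOTE: a minimal sketch of the inputs loadbalance() assembles for the load
# balancer. Hostnames, VM names and numbers are hypothetical, for illustration
# only; the dict keys ("ram", "cpu", "io") and the LoadBalancer calls match
# those used above. Never called by the code itself.
def _loadbalance_input_example():
    current_state = {
        "node1": ["vm1", "vm2"],   # hostname -> names of the VMs running there
        "node2": ["vm3"],
    }
    node_metrics = {
        "node1": {"ram": 2048},    # available RAM, from get_available_ram()
        "node2": {"ram": 4096},
    }
    vm_metrics = {
        "vm1": {"ram": 512, "cpu": 35.0, "io": 120},  # io = read rate + write rate
        "vm2": {"ram": 1024, "cpu": 5.2, "io": 10},
        "vm3": {"ram": 256, "cpu": 60.1, "io": 300},
    }
    lb = loadbalancer.LoadBalancer(current_state)
    lb.set_metrics(vm_metrics, node_metrics)
    return lb.get_solution()       # None when no plan beats LB_MIN_GAIN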
def emergency_eject(self, ejected_node):
    """Migrate all running VMs on ejected_node to the other nodes.

    Use the best-fit decreasing algorithm to solve the bin packing problem.
    Needs further optimization when the cluster is nearly full.
    """
    # Get the other nodes
    pool = self.get_nodes()
    pool.remove(ejected_node)

    # Sort VMs to be ejected by used RAM, biggest first
    vms = ejected_node.get_vms()
    vms.sort(key=lambda x: x.get_ram(), reverse=True)

    failed = list()
    for vm in vms:
        selected_node = None

        # Sort nodes by free RAM
        pool.sort(key=lambda x: x.metrics.get_free_ram())
        for node in pool:
            if node.metrics.get_free_ram() >= vm.get_ram():
                selected_node = node
                break  # Select the first node with enough space

        if selected_node is None:
            failed.append(vm)  # Not enough room for this one
            continue  # Next !

        if not core.cfg["QUIET"]:
            print "Migrating", vm.name, "to", selected_node.get_hostname()
        self.migrate(vm.name, ejected_node.get_hostname(), selected_node.get_hostname())

    if len(failed) > 0:
        raise ClusterNodeError(
            ejected_node.get_hostname(),
            ClusterNodeError.NOT_ENOUGH_RAM,
            "Cannot migrate " + ", ".join([vm.name for vm in failed]))
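
# NOTE: the placement loop in emergency_eject() is a best-fit decreasing
# heuristic: take the biggest VM first and put it on the node with the least
# free RAM that can still hold it. Below is a self-contained sketch of the
# same idea on plain numbers (no cxm types), for illustration only.
def _best_fit_decreasing_example(sizes, free):
    """Place each size into the bin with the least remaining capacity that
    still fits it. 'sizes' is a list of item sizes, 'free' maps a bin name
    to its free capacity. Returns (placement, unplaced)."""
    placement = {}
    unplaced = []
    for size in sorted(sizes, reverse=True):      # biggest items first
        # Bins that still fit, tightest first (least free capacity)
        candidates = sorted((cap, name) for name, cap in free.items()
                            if cap >= size)
        if not candidates:
            unplaced.append(size)                 # no room anywhere
            continue
        cap, name = candidates[0]
        free[name] = cap - size                   # consume the capacity
        placement.setdefault(name, []).append(size)
    return placement, unplaced
# e.g. _best_fit_decreasing_example([1024, 512, 256], {"node1": 2048, "node2": 600})
# -> ({'node1': [1024, 256], 'node2': [512]}, [])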