def load_balance():
    node_list = ['node1', 'node2', 'node3', 'node4']
    node_cpu_list = [0, 0, 0, 0]
    host_vm_dict = getHostVMDict()
    vm_obj_list = []
    used_cpu_count = 0

    # Collect every VM object and the total CPU currently in use across all hosts
    for vm_dict in host_vm_dict.itervalues():
        for value in vm_dict.itervalues():
            used_cpu_count += value.current_cpu
            vm_obj_list.append(value)

    # Target load per node; force float division so the ceiling is not lost to
    # Python 2 integer division
    avg_cpu = int(math.ceil(float(used_cpu_count) / len(node_list)))

    pickle_dict = {}
    lock = LockFile("/var/lib/virtdc/framework/host_vm_dict.pkl")
    with lock:
        while len(vm_obj_list) > 0:
            # Pick the VM with the largest CPU count
            max_cpu_vm = None
            for vm in vm_obj_list:
                if max_cpu_vm is None or vm.current_cpu > max_cpu_vm.current_cpu:
                    max_cpu_vm = vm

            # Migrate the max-cpu VM to the first available node (tail of the list)
            target_node = node_list[-1]
            if vm_migrate_guest(get_host_name(max_cpu_vm.vmid), target_node, max_cpu_vm.vmid) is False:
                return False
            node_cpu_list[-1] += max_cpu_vm.current_cpu

            # Record the VM under the node it was actually migrated to,
            # before the node can be retired below
            pickle_dict.setdefault(target_node, {})
            pickle_dict[target_node][max_cpu_vm.vmid] = max_cpu_vm
            vm_obj_list.remove(max_cpu_vm)

            # The node is saturated once it has absorbed its share of the load;
            # always keep at least one node so the list never runs empty
            if node_cpu_list[-1] >= avg_cpu and len(node_list) > 1:
                node_list.pop()
                node_cpu_list.pop()

        # Re-populate the vm pickle file
        with open('/var/lib/virtdc/framework/host_vm_dict.pkl', 'w') as pickle_out:
            pickle.dump(pickle_dict, pickle_out)
        #pickleNodeVMDictionary(pickle_dict)
    return True
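# --- Illustration only; not part of the framework ---
# load_balance() above is a greedy largest-first packing: the heaviest remaining
# VM is placed on the tail node until that node has absorbed roughly its share
# (avg_cpu) of the total CPU, then the node is retired from the candidate list.
# The standalone sketch below reproduces just that policy on plain tuples so it
# can be exercised without getHostVMDict() or vm_migrate_guest(); the function
# and variable names here are hypothetical, not framework APIs. It relies on the
# math module already imported for load_balance().
def plan_placement(vms, nodes):
    """vms: list of (vmid, cpu) tuples; nodes: list of node names. Returns {node: [vmid, ...]}."""
    avg_cpu = math.ceil(float(sum(cpu for _, cpu in vms)) / len(nodes))
    plan = dict((node, []) for node in nodes)
    remaining = list(nodes)
    load = 0
    for vmid, cpu in sorted(vms, key=lambda v: v[1], reverse=True):
        plan[remaining[-1]].append(vmid)
        load += cpu
        if load >= avg_cpu and len(remaining) > 1:
            remaining.pop()
            load = 0
    return plan

# Example:
#   plan_placement([('vm1', 4), ('vm2', 2), ('vm3', 2)], ['node1', 'node2'])
#   -> {'node2': ['vm1'], 'node1': ['vm2', 'vm3']}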
def force_migrate(vmid, source_host, dest_host):
    # Thin wrapper that migrates a single guest regardless of its current usage
    return vm_migrate_guest(source_host, dest_host, vmid)
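# Example use (hypothetical host names and guest id): drain a guest off a node
# that is going down for maintenance; the return value is whatever
# vm_migrate_guest() reports.
#
#     if not force_migrate('kvm-101', 'node2', 'node4'):
#         print 'manual migration of kvm-101 failed'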
def process_action_on_current_usage(host, vmid, value, cpu_usage, mem_usage, io_usage):
    node_dict = GetNodeDict()
    #for key, value in node_dict.iteritems():
    #    print key, value.hostname, value.ip_address, value.max_cpu, value.max_memory, value.max_io, value.avail_cpu, value.avail_memory, value.avail_io

    # Log current allotment versus observed usage
    manager_activity_log = open('/var/lib/virtdc/logs/activity_logs/manager.log', 'a+')
    manager_activity_log.write(str(datetime.datetime.now()) + '::PLACEMENT MANAGER::MEMORY::' + host + ' :: ' + vmid +
                               ' :: Allotted Memory ' + str(value.current_memory) + ' :: Current Memory ' + str(mem_usage) + '\n')
    manager_activity_log.write(str(datetime.datetime.now()) + '::PLACEMENT MANAGER::CPU::' + host + ' :: ' + vmid +
                               ' :: Allotted CPU ' + str(value.current_cpu) + ' :: Current CPU ' + str(cpu_usage) + '\n')

    obj = NodeFinder()
    max_cpu = value.max_cpu
    #print 'Max CPU ' + str(max_cpu)
    allotted_cpu = float(value.current_cpu)
    #print 'Allotted CPU : ' + str(allotted_cpu)

    # Account for the base OS footprint so the guest is never shrunk below its minimum memory
    mem_usage = float(mem_usage) + float(_base_mem_size)
    allotted_memory = float(value.current_memory)
    max_memory = float(value.max_memory)

    # Check CPU usage; a guest within 0.1 of its allotment is eligible to scale up
    if (cpu_usage + 0.1 > allotted_cpu) and (cpu_usage < max_cpu):
        if obj.is_cpu_available_on_host(host, 1):
            new_cpu_value = value.current_cpu + 1
            print "New CPU: %s" % new_cpu_value
            vm_cpu_scaling(host, vmid, value.vmip, new_cpu_value)
            manager_activity_log.write(str(datetime.datetime.now()) + '::PLACEMENT MANAGER::CPU::Scaling ::' + host + ' :: ' + vmid +
                                       ' :: CPU scaled from ' + str(value.current_cpu) + ' to ' + str(new_cpu_value) + '\n')
        else:
            # No spare CPU on the current host; look for a node that can take the whole guest
            new_host = obj.is_space_available_for_vm(cpu_usage, mem_usage, io_usage)
            if new_host is None:
                print "Cannot migrate guest"
            else:
                print 'Dest Node : ' + new_host
                # Initiate vm migration
                migrate_flag = vm_migrate_guest(host, new_host, vmid)
                if migrate_flag:
                    manager_activity_log.write(str(datetime.datetime.now()) + '::PLACEMENT MANAGER::CPU::Migration ::' + host + ' :: ' + vmid +
                                               ' :: Domain migrated from ' + str(host) + ' to ' + str(new_host) +
                                               ' for CPU scaling from ' + str(value.current_cpu) + ' to ' + str(cpu_usage) + '\n')
                else:
                    manager_activity_log.write(str(datetime.datetime.now()) + '::PLACEMENT MANAGER::CPU::Migration ::' + host + ' :: ' + vmid +
                                               ' :: Cannot migrate from ' + str(host) + ' to ' + str(new_host) +
                                               ' for CPU scaling from ' + str(value.current_cpu) + ' to ' + str(cpu_usage) + '\n')
    #if (cpu_usage > current_cpu) and (cpu_usage < max_cpu):  -- CPU scaling down is not implemented

    # Check memory usage - scale up when usage exceeds the allotment by more than mem_scale_up_threshold
    #print "MEM USAGE: " + str(mem_usage)
    #print "Alloc mem: " + str(allotted_memory)
    #print "Threshold: " + str(float(mem_scale_up_threshold))
    #print "Max mem: " + str(max_memory)
    if (mem_usage > (allotted_memory + float(mem_scale_up_threshold))) and (mem_usage < max_memory):
        required_extra_memory = mem_usage - allotted_memory
        print "Required Mem: " + str(required_extra_memory)
        if obj.is_mem_available_on_host(host, required_extra_memory):
            vm_memory_scaling(host, vmid, float(mem_usage))
            manager_activity_log.write(str(datetime.datetime.now()) + '::PLACEMENT MANAGER::MEMORY::Scaling ::' + str(host) + ' :: ' + str(vmid) +
                                       ' :: Memory scaled from ' + str(allotted_memory) + ' to ' + str(mem_usage) + '\n')
        else:
            # Not enough free memory locally; look for a node that can take the whole guest
            new_host = obj.is_space_available_for_vm(cpu_usage, mem_usage, io_usage)
            if new_host is None:
                print "Cannot migrate guest"
            else:
                # Initiate vm migration
                migrate_flag = vm_migrate_guest(host, new_host, vmid)
                if migrate_flag:
                    manager_activity_log.write(str(datetime.datetime.now()) + '::PLACEMENT MANAGER::MEMORY::Migration ::' + host + ' :: ' + vmid +
                                               ' :: Domain migrated from ' + str(host) + ' to ' + str(new_host) +
                                               ' for memory scaling from ' + str(value.current_memory) + ' to ' + str(mem_usage) + '\n')
                else:
                    manager_activity_log.write(str(datetime.datetime.now()) + '::PLACEMENT MANAGER::MEMORY::Migration ::' + host + ' :: ' + vmid +
                                               ' :: Cannot migrate from ' + str(host) + ' to ' + str(new_host) +
                                               ' for memory scaling from ' + str(value.current_memory) + ' to ' + str(mem_usage) + '\n')
    # Scale memory down when usage drops below the allotment by more than mem_scale_down_threshold
    elif (mem_usage < (allotted_memory - float(mem_scale_down_threshold))) and (mem_usage < max_memory):
        vm_memory_scaling(host, vmid, float(mem_usage))
        manager_activity_log.write(str(datetime.datetime.now()) + '::PLACEMENT MANAGER::MEMORY::Scaling ::' + str(host) + ' :: ' + str(vmid) +
                                   ' :: Memory scaled from ' + str(allotted_memory) + ' to ' + str(mem_usage) + '\n')

    manager_activity_log.close()