def test_nodes(self, active_nodes, new_domain):
        # Go over all hosts
        for node in active_nodes:
            # Get actual CPU measurements
            curr_cpu_demand = np.percentile(node.get_readings(), 95)
            
            # Memory demand is calculated by summing up all VM reservations
            curr_mem_demand = 0

            # Add up the demands of all domains running on the host              
            for old_domain in node.domains.values():
                # Domain size specification 
                spec = old_domain.domain_configuration.get_domain_spec()
                
                # Sum up mem load 
                curr_mem_demand += spec.total_memory()
            
            # Calculate metrics
            new_domain_spec = conf_domainsize.get_domain_spec(new_domain.size)
            mem_delta = conf_nodes.NODE_MEM - curr_mem_demand - new_domain_spec.total_memory()
            
            # Estimated node-level CPU demand if the new VM runs at close to full load (95th percentile)
            vm_cpu_demand = conf_nodes.to_node_load(95, new_domain.size)
            cpu_delta = conf_nodes.UTIL - curr_cpu_demand - vm_cpu_demand
            
            # If metric is positive, the node can host the domain
            if cpu_delta >= 0 and mem_delta >= 0: 
                return node.name 
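A toy walk-through of the feasibility check above, with made-up capacities and demands (UTIL and NODE_MEM stand in for the conf_nodes values; none of the numbers are taken from the example):

# Assumed toy capacities: 100% CPU target utilization, 8192 MB memory per node
UTIL, NODE_MEM = 100, 8192

curr_cpu_demand = 55        # 95th percentile of the node's CPU readings
curr_mem_demand = 4096      # sum of the memory reservations of the running VMs
vm_cpu_demand = 30          # estimated node-level CPU demand of the new VM
vm_mem_demand = 2048        # memory reservation of the new VM

cpu_delta = UTIL - curr_cpu_demand - vm_cpu_demand      # 100 - 55 - 30 = 15
mem_delta = NODE_MEM - curr_mem_demand - vm_mem_demand  # 8192 - 4096 - 2048 = 2048

# Both deltas are non-negative, so this node can host the new domain
fits = cpu_delta >= 0 and mem_delta >= 0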
Example #2
    def run(self):
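        # One simulation step: update the load of every domain, aggregate it per node,
        # report the values, and schedule the next run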
        # Update slot count in scoreboard
        self.scoreboard.increment_slot_count()

        # Status logging
        self.count += 1
        if self.count % 100 == 0:
            sys.stdout.write("[pid %i]" % os.getpid())

        # Current simulation time
        sim_time = self.pump.sim_time()

        # For all nodes update their domains and aggregate the load for the node
        for host in self.model.get_hosts(model.types.NODE):
            # Reset aggregated node load
            aggregated_load = 0

            # Go over all domains and update their load by their TS
            for domain in host.domains.values():
                # Calculate domain time
                dom_time = sim_time - domain.creation_time

                # Get domain load
                load = self.__dom_load(dom_time, domain)
                if load is None:
                    return

                # Report the load to the metric_handler (like Sonar would do)
                self.__notify(sim_time, domain.name, "psutilcpu", load)

                # Update aggregated cpu load
                self.scoreboard.add_cpu_load(load)

                # Load aggregation for the node
                node_load = conf_nodes.to_node_load(load, domain.domain_configuration.size)
                aggregated_load += node_load

            # Add hypervisor load to the aggregated load
            # For the SSAPv this causes service level violations
            aggregated_load += BASE_LOAD

            # Add Migration overheads
            if host.active_migrations_out:
                aggregated_load += host.active_migrations_out * MIGRATION_SOURCE
            if host.active_migrations_in:
                aggregated_load += host.active_migrations_in * MIGRATION_TARGET

            # Notify aggregated load for the server (like Sonar would do) (cap @ 100)
            self.__notify(sim_time, host.name, "psutilcpu", min(aggregated_load, 100))

            # Update overload counter
            if aggregated_load > 100:
                self.scoreboard.add_cpu_violations(1)

            # log the current host-specific load with its time
            self.scoreboard.add_load(host.name, sim_time, aggregated_load, len(host.domains) > 0)

        # Schedule next call for run
        self.pump.callLater(self.report_rate, self.run)
    def test_nodes(self, new_domain, nodelist):
        results = []
        for node in nodelist: 
            # Aggregate the memory reservations of all domains on the host
            mem_load = 0
              
            for dom in node.domains.values():
                spec = dom.domain_configuration.get_domain_spec() 
                mem_load += spec.total_memory()
            
            # Get actual CPU measurements
            curr_cpu_demand = np.percentile(node.get_readings(), 95)
            resd_cpu = conf_nodes.UTIL - curr_cpu_demand
                
            # Calculate residual vector
            resd_mem = conf_nodes.NODE_MEM - mem_load
            
            # VM resource demand
            spec = conf_domainsize.get_domain_spec(new_domain.size)
            
            # Estimated node-level CPU demand if the new VM runs at close to full load (95th percentile)
            domain_cpu_demand = conf_nodes.to_node_load(95, new_domain.size)
            domain_mem_demand = spec.total_memory()
            
            # Calculate the dot product
            w_cpu = w_mem = 1
            abs_res = math.sqrt(math.pow(resd_cpu, 2) + math.pow(resd_mem, 2))
            abs_vm = math.sqrt(math.pow(domain_cpu_demand, 2) + math.pow(domain_mem_demand, 2))
            dot_product = w_cpu * resd_cpu * domain_cpu_demand + w_mem * resd_mem * domain_mem_demand
            cosine = dot_product / (abs_res * abs_vm)
             
            # Check if this host is able to handle the new domain
            cpu_delta = resd_cpu - domain_cpu_demand
            mem_delta = resd_mem - domain_mem_demand
            if cpu_delta >= 0 and mem_delta >= 0:
                results.append((node, dot_product, cosine))
            else:
                print 'failed for node %s - status: %i mem %i cpu' % (node.name, mem_delta, cpu_delta)
            

        if results:
            # Get the node with the best (greatest) dot product
            results.sort(key=lambda x: x[1])
            results.reverse()
            best_dot_product = results[0][0]
                        
            # Get the node with the best (smallest) cosine
            results.sort(key=lambda x: x[2])
            best_cosine = results[0][0]
            
            return (best_dot_product.name, best_cosine.name)
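A minimal, self-contained sketch of the scoring above: the node's residual capacity and the new VM's demand are treated as 2-D (CPU, memory) vectors, and the example picks the node with the greatest dot product or, alternatively, the smallest cosine. The input values below are made up:

import math

def score(resd_cpu, resd_mem, dom_cpu, dom_mem):
    # Dot product of the residual-capacity and demand vectors (weights w_cpu = w_mem = 1)
    dot = resd_cpu * dom_cpu + resd_mem * dom_mem
    # Cosine of the angle between the two vectors
    abs_res = math.sqrt(resd_cpu ** 2 + resd_mem ** 2)
    abs_vm = math.sqrt(dom_cpu ** 2 + dom_mem ** 2)
    return dot, dot / (abs_res * abs_vm)

# Toy node with 40% residual CPU and 2048 MB residual memory;
# the new VM is expected to need 25% CPU and 1024 MB memory
print score(40, 2048, 25, 1024)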
Example #4
 def __run_optimized_migrations(self, bucket_index):
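     # Plan the migrations needed to move from the previous bucket's assignment
     # to the current one and push them onto the migration queue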
     # Create allocation lists for GOAP 
     # Assignment
     curr_assignment = self.placement.assignment_list[bucket_index]
     
     # Previous assignment
     prev_assignment = self.placement.assignment_list[(bucket_index - 1) % NUM_BUCKETS]
     
     as_current = [0 for _ in xrange(conf_domains.count())]
     as_next = [0 for _ in xrange(conf_domains.count())]
     
     for index_domain in xrange(conf_domains.count()):
         as_current[index_domain] = prev_assignment[index_domain]
         as_next[index_domain] = curr_assignment[index_domain]
                 
     # Get current domain loads
     domain_load = []
     for mapping in conf_domains.initial_domains:
         domain_name = mapping.domain
         load = self.model.get_host(domain_name).mean_load(20)
         domain_load.append(conf_nodes.to_node_load(load, conf_domainsize.DEFAULT))
                 
     # Schedule migrations
     from ai import astar
     migrations = astar.plan(conf_nodes.NODE_COUNT, as_current, as_next, domain_load)
     
     # Trigger migrations
     dep = None
     for migration in migrations:
         domain_name = conf_domains.initial_domains[migration[0]].domain
         source_node = conf_nodes.get_node_name(migration[1])
         target_node = conf_nodes.get_node_name(migration[2])
         
         print 'domain %s - source %s - target %s' % (domain_name, source_node, target_node) 
         
         model_domain = self.model.get_host(domain_name)
         model_source = self.model.get_host(source_node)
         model_target = self.model.get_host(target_node)
         
         # dep = self.migration_queue.add(model_domain, model_source, model_target, dep)
         dep = self.migration_queue.add(model_domain, model_source, model_target)
     return 
 def test_nodes(self, new_domain, node_list):
     host_choice = []
     for node in node_list:
         # Get actual CPU measurements
         curr_cpu_demand = np.percentile(node.get_readings(), 95)
         
         # Memory demand is calculated by summing up all VM reservations
         mem_load = 0
           
         # Calculate the node utilization by accumulating all domain loads
         for dom in node.domains.values():
             spec = dom.domain_configuration.get_domain_spec()
             mem_load += spec.total_memory()
         
         # Calculate metric
         spec = conf_domainsize.get_domain_spec(new_domain.size)
         mem_delta = conf_nodes.NODE_MEM - (mem_load + spec.total_memory())

         # Estimated node-level CPU demand if the new VM runs at close to full load (95th percentile)
         vm_cpu_demand = conf_nodes.to_node_load(95, new_domain.size)
         cpu_delta = conf_nodes.UTIL - curr_cpu_demand - vm_cpu_demand
         
         # Calculate fit metric
         metric = cpu_delta * mem_delta
         
         # Server is not able to handle the domain
         if cpu_delta < 0 or mem_delta < 0:
             continue
         
         # Add metric to the choice list
         host_choice.append((node.name, metric))
           
     # Check if we found at least one host
     if not host_choice:
         return None
       
     # Sort host choice list
     host_choice = self.sort(host_choice, lambda x: x[1])
     
     # Pick the one with the lowest metric (best fit)
     return host_choice[0][0] 
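A toy ranking with the metric above (cpu_delta * mem_delta, where the smallest value is the tightest fit), using made-up deltas for two feasible nodes and assuming self.sort orders ascending:

candidates = [
    ('node_a', 15 * 2048),   # cpu_delta = 15, mem_delta = 2048 MB
    ('node_b', 40 * 4096),   # cpu_delta = 40, mem_delta = 4096 MB
]

# Sort ascending by the metric and pick the tightest fit -> 'node_a'
candidates.sort(key=lambda x: x[1])
print candidates[0][0]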
    def test_nodes(self, new_domain, nodelist):
        norms = []
        for node in nodelist: 
            # Aggregate the memory reservations of all domains on the host
            mem_load = 0
              
            for dom in node.domains.values():
                spec = dom.domain_configuration.get_domain_spec()
                mem_load += spec.total_memory()
            
            # Get actual CPU measurements
            curr_cpu_demand = np.percentile(node.get_readings(), 95)
            resd_cpu = conf_nodes.UTIL - curr_cpu_demand
            
            # Calculate residual vector
            resd_mem = conf_nodes.NODE_MEM - mem_load
            
            # VM resource demand
            spec = conf_domainsize.get_domain_spec(new_domain.size)

            # Estimated node-level CPU demand if the new VM runs at close to full load (95th percentile)
            domain_cpu_demand = conf_nodes.to_node_load(95, new_domain.size)
            domain_mem_demand = spec.total_memory()
            
            # Calculate the norm
            norm = 1 * math.pow(domain_cpu_demand - resd_cpu, 2) + 1 * math.pow(domain_mem_demand - resd_mem, 2)

            # Check if this host is able to handle the new domain
            cpu_delta = resd_cpu - domain_cpu_demand
            mem_delta = resd_mem - domain_mem_demand
            if cpu_delta >= 0 and mem_delta >= 0:
                norms.append((node, norm, curr_cpu_demand, mem_load))
            else:
                print 'failed for node %s - status: %i mem %i cpu' % (node.name, mem_delta, cpu_delta)
            
        # Find the node with the lowest norm that is able to host the domain
        norms.sort(key=lambda x: x[1])
        if norms:
            # Node found
            return norms[0].name
Example #7
def __calculate_demand_based_lower_bounds(capacity_cpu, capacity_mem, events):
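    # Sweep over the VM start/stop events and derive a lower bound on the number of
    # servers needed: the maximum over all event slots and a duration-weighted average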
    # Stack is used to keep track of active VMs
    active_stack = []
     
    # Demand duration list
    demand_duration_list = []
     
    # Offset of the previous event, used to compute the duration of each event slot
    last_event_offset = 0
    
    # Go through all events and calculate lower bound values
    for event in events:
        # Update active VM stack
        if event.is_start:
            active_stack.append(event.entry)
        else:
            active_stack.remove(event.entry)
            
        # Calculate total infrastructure demand
        sum_cpu = 0
        sum_mem = 0
        for entry in active_stack:
            # calculate current offset
            delta_time = event.offset - entry.offset 
            delta_index = delta_time / entry.freq
            
            # Include 10 minutes of load
            delta_index_past = max(delta_index - 10, 0)
            
            # Measurements
            loads = entry.ts[delta_index_past:delta_index]
            
            # Get minimum load
            if len(loads) < 2:
                load = 0
            else:
                load = min(loads)
                
            size = conf_domainsize.get_domain_spec(entry.domain_size)
            sum_cpu += conf_nodes.to_node_load(load, entry.domain_size)
            sum_mem += size.total_memory()
           
        # Calculate server demands 
        lb_cpu = math.ceil(sum_cpu / float(conf_nodes.UTIL))
        lb_mem = math.ceil(sum_mem / float(capacity_mem))
        
        # Take bigger server demand
        lb = max(lb_cpu, lb_mem)
        
        # Duration of the time slot since the previous event
        duration = event.offset - last_event_offset
        last_event_offset = event.offset
        
        # Add server demand to list
        demand_duration_list.append((lb, duration))
        
    # Calculate overall lower bound on server demand over all event slots
    lb_dem_total = max([e[0] for e in demand_duration_list])
    lb_dem_avg = sum([e[0] * e[1] for e in demand_duration_list]) / sum([e[1] for e in demand_duration_list])
    
    return lb_dem_total, lb_dem_avg
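A toy evaluation of the per-slot bound computed above, with made-up aggregate demands and capacities:

import math

# Assumed toy capacities: 100% CPU (UTIL) and 8192 MB memory per server
UTIL, CAPACITY_MEM = 100, 8192

# Aggregated demand of all VMs active in one event slot (toy values)
sum_cpu, sum_mem = 340.0, 20480.0

lb_cpu = math.ceil(sum_cpu / UTIL)          # 4 servers needed for CPU
lb_mem = math.ceil(sum_mem / CAPACITY_MEM)  # 3 servers needed for memory
lb = max(lb_cpu, lb_mem)                    # lower bound for this slot: 4 servers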
Example #8
 def __run_migrations(self):
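     # Recompute the domain-to-node assignment with the dsapp solver and trigger
     # the migrations required to reach the new assignment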
     
     # Running index -> domain name for every running domain
     conversion_table = {}
     # Running index -> index of the node the domain currently runs on
     prev_assignment = {}
     i = 0
     for node in self.model.get_hosts(types.NODE):
         for domain_name in node.domains.keys():
             conversion_table[i] = domain_name
             prev_assignment[i] = conf_nodes.index_of(node.name)
             i += 1

     # Inverse mapping: domain name -> index
     inverse_conversion_table = {domain_name: i for i, domain_name in conversion_table.iteritems()}

     # Current CPU and memory demand of each domain, keyed by domain index
     demand_cpu = {}
     demand_mem = {}
     
     for domain in self.model.get_hosts(types.DOMAIN):
         # Look up the configured size of the domain
         if domain in conf_domains.initial_domains:
             domain_size = conf_domains.initial_domains[conf_domains.initial_domain_index(domain.name)].size
         else:
             domain_size = conf_domains.available_domains[conf_domains.available_domain_index(domain.name)].size

         # Index of the domain in the assignment dictionaries
         domain_index = inverse_conversion_table[domain.name]

         # Recent CPU readings of the domain
         cpu_readings = domain.get_readings()

         # Average the latest readings and convert them to a node-level load
         domain_load = conf_nodes.to_node_load(np.mean(cpu_readings[-NUM_CPU_READINGS:]), domain_size)

         demand_cpu[domain_index] = domain_load
         demand_mem[domain_index] = domain.domain_configuration.get_domain_spec().total_memory()

         print 'domain : %d, demand_cpu : %d, demand_memory : %d' % (domain_index, domain_load, domain.domain_configuration.get_domain_spec().total_memory())
         
     # Solve for a new domain-to-node assignment
     try:
         _, curr_assignment = dsapp.solve(conf_nodes.NODE_COUNT,
                                          conf_nodes.UTIL,
                                          conf_nodes.NODE_MEM,
                                          demand_cpu,
                                          demand_mem,
                                          prev_assignment,
                                          MIG_OVERHEAD_SOURCE,
                                          MIG_OVERHEAD_TARGET)
     except:
         print 'invalid solution #######################'
         # don't change anything and just return in case the model was infeasible
         return
     
     assignment_changed = dsapp.AssignmentChanged(prev_assignment, curr_assignment)
     print 'CHANGE in the Assignment : %s' % assignment_changed
     
     if not assignment_changed:
         # there is no change in the assignment, we can just return
         logger.info("Returning because the previous assignment was optimal...")
         return
         
     
     # Translate the new assignment back to domain/node names and enqueue the migrations
     for index_domain in curr_assignment.keys():
         domain_name = conversion_table[index_domain]
         source_node = conf_nodes.get_node_name(prev_assignment[index_domain])
         target_node = conf_nodes.get_node_name(curr_assignment[index_domain])
         
         # Find current node for domain
         source_node = self.model.get_host_for_domain(domain_name).name
         
         # Trigger migration
         model_domain = self.model.get_host(domain_name)
         model_source = self.model.get_host(source_node)
         model_target = self.model.get_host(target_node)
         self.migration_queue.add(model_domain, model_source, model_target)