Example #1
 def start(self):
     print 'Connecting with Times'
     connection = times_client.connect()
     
     self.min_ts_length = sys.maxint  # Minimum length across all TS
     ts_freq = 0  # Frequency of the TS from Times
     
     # Iterate over all domains and assign them a TS
     for domain in self.model.get_hosts(model.types.DOMAIN):
         # Select and load TS (based on the configuration)
         index = domains.index_of(domain.name)
         mapping = domains.domain_profile_mapping[index]
         load = profiles.get_cpu_profile_for_initial_placement(mapping.profileId)
         
         ts = connection.load(load)
         
         # Convert TS to a numpy array (keep the value column, not the time index)
         ts_freq = ts.frequency
         ts = wutil.to_array(ts)[1]
         
         # Add noise to the time series
         if NOISE:
             # random = np.random.lognormal(mean=NOISE_MEAN, sigma=NOISE_SIGMA, size=len(ts))
             random = np.random.normal(loc=NOISE_MEAN, scale=NOISE_SIGMA, size=len(ts))
             ts += random
             ts[ts > 100] = 100
             ts[ts < 0] = 0
         
         # Attach TS to domain 
         domain.ts = ts
         
         # Track the minimum length across all TS
         self.min_ts_length = min(self.min_ts_length, len(ts))
     
     # Close times connection
     times_client.close()
     
     # Scale the playback frequency so the shortest TS spans the experiment duration (6 hours)
     # The ts_freq factors cancel: freq = EXPERIMENT_DURATION / min_ts_length
     self.freq = (ts_freq * profiles.EXPERIMENT_DURATION) / (self.min_ts_length * ts_freq)
     
     # Set ramp-up and ramp-down times
     self.ramp_up = profiles.RAMP_UP
     self.ramp_down = profiles.RAMP_DOWN
     
     # Schedule message pump
     self.pump.callLater(0, self.run)
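
The helper wutil.to_array is used throughout these examples; judging from the call sites, it splits a Times series into parallel (time, values) arrays. A minimal sketch under that assumption (the .timestamp and .value element attributes are hypothetical; only ts.elements actually appears in the examples below):

import numpy as np

def to_array(ts):
    # Assumed behaviour, inferred from the call sites: return parallel numpy
    # arrays (timestamps, values). The attribute names .timestamp and .value
    # are guesses about the Times element structure.
    time = np.array([e.timestamp for e in ts.elements])
    values = np.array([e.value for e in ts.elements], dtype=float)
    return time, values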
Example #2
def __calc_stats(con):
    stats = []
    names = con.find('RAW_SIS_.*_cpu')
    for i, name in enumerate(names): 
        print 'processing %s... (%i%%)' % (name, (i*100/len(names)))
        timeSeries = con.load(name)
        _, values = util.to_array(timeSeries)
        
        stat = TsStat(name)
        stats.append(stat)
        
        stat.mean = np.mean(values)
        stat.median = np.percentile(values, 50)
        stat.min = np.min(values)
        stat.max = np.max(values)
        stat.std = np.std(values)
        
    return stats
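
The TsStat container is not shown; a minimal sketch, assuming it only carries a name plus the statistics attached in the loop:

class TsStat(object):
    # Minimal assumed container for per-series statistics; the mean, median,
    # min, max and std fields are filled in by __calc_stats.
    def __init__(self, name):
        self.name = name
        self.mean = self.median = self.min = self.max = self.std = None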
Example #3
def main():
    from service import times_client
    from workload import util
    import numpy as np
    import matplotlib.pyplot as plt
    
    con = times_client.connect()
    data = con.load('SIS_222_cpu_profile_trace')
    _, demand = util.to_array(data)
    times_client.close()
    
    random = np.random.lognormal(mean=0.0, sigma=1.5, size=len(demand))
    demand += random

    # Run smoother
    _, s0, _ = single_exponential_smoother(demand)
    _, s1, _ = double_exponential_smoother(demand)
    
    # Plot original data with forecasted
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(demand)
    ax.plot(s0)
    ax.plot(s1)
    plt.show()
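
single_exponential_smoother and double_exponential_smoother are not shown. A minimal sketch of the single variant using the standard recurrence s_t = alpha*x_t + (1-alpha)*s_{t-1}; the double variant additionally carries a trend term b_t = beta*(s_t - s_{t-1}) + (1-beta)*b_{t-1}. The (parameter, smoothed, error) return triple is an assumption made to match the unpacking in main():

def single_exponential_smoother(data, alpha=0.5):
    # Classic single exponential smoothing; alpha closer to 1 tracks the
    # signal faster, closer to 0 smooths harder.
    s = np.empty(len(data))
    s[0] = data[0]
    for t in xrange(1, len(data)):
        s[t] = alpha * data[t] + (1 - alpha) * s[t - 1]
    error = np.sum((data[1:] - s[:-1]) ** 2)  # one-step-ahead squared error
    return alpha, s, error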
Example #4
def main():
    # Solve allocation problem
    nodecount = len(nodes.NODES)
    model = placement.SSAPvPlacement(nodecount, nodes.NODE_CPU, nodes.NODE_MEM, nodes.DOMAIN_MEM)
    model.execute()
    
    assignment = model.assignment
    if assignment is not None:
        
        # Invert the domain -> node assignment into node -> [domains]
        node_assignment = {}
        for domain, node in assignment.items():
            node_assignment.setdefault(node, []).append(domain)
            
        print node_assignment 
                
        # Load time series used by the drivers
        # Connect with Times
        print 'Connecting with Times'
        tsdata = []
        connection = times_client.connect()
        
        # Load the services to combine with the domain_profile_mapping
        service_count = len(domains.domain_profile_mapping)
        import sys
        ts_length = sys.maxint
        for i_service in xrange(service_count):
            name = profiles.get_cpu_current_profile(i_service)
            tsd = connection.load(name)
            tsd = wutil.to_array(tsd)[1]
            tsdata.append(tsd)
            ts_length = min(ts_length, len(tsd)) 
            
        times_client.close()
        
        
        # Run simulation and report overload situations
        acc_load = [[] for _ in xrange(len(nodes.NODES))]
        for t in xrange(ts_length):
            print '-- t -- %i' % t
            for node in node_assignment.keys():
                sum_load = 0
                for domain in node_assignment[node]: 
                    sum_load += tsdata[domain][t]
                    
                print node
                acc_load[node].append(sum_load)

        # Plot accumulated loads
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.set_title("Workload %s Overload %s" % (profiles.selected_name, profiles.modified))
        ax.axis([0.0, ts_length, 0, 500])
        for load in acc_load:
            ax.plot(range(0, len(load)), load)
    
        plt.savefig('C:/temp/convolution/tesoverload_%s_%s.png' % (profiles.selected_name, profiles.modified))
        
             
    else:
        print 'Could not check overload - no feasible assignment found'
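
The comment in main() promises to "report overload situations", but the code only plots the accumulated loads. A small follow-up sketch that counts them explicitly, assuming nodes.NODE_CPU is the per-node CPU capacity used by the placement model:

# Count timesteps in which a node's accumulated load exceeds its capacity
overloads = 0
for node_load in acc_load:
    overloads += sum(1 for value in node_load if value > nodes.NODE_CPU)
print 'Overload situations: %i' % overloads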
Example #5
 def execute(self):
     from workload import util
     
     # Execute super code
     super(FirstFitPlacement, self).execute()
     
     print 'Using First Fit for domain placement ...'
         
     # Logging
     logger.info('Placement strategy: First Fit')
     logger.info('Required servers: %i' % len(nodes.NODES))
     
     # Connect with Times
     print 'Connecting with Times'
     connection = times_client.connect()
     
     # Load the services to combine with the domain_profile_mapping
     service_count = len(domains.domain_profile_mapping)
     
     # For each node there is one bucket
     buckets = []
     buckets_mem = []
     
     migrations = []
     assignment = {}
     for _ in xrange(len(nodes.NODES)):
         buckets.append([0, nodes.NODE_CPU, []])
         buckets_mem.append([0, nodes.NODE_MEM, []])
     
     # Assign each service to a node
     for service_index in xrange(service_count):
         # Maps the service to a service profile
         mapping = domains.domain_profile_mapping[service_index]
         
         # Important: Load the trace of the workload profile
         service = profiles.get_cpu_profile_for_initial_placement(mapping.profileId)
         
         print 'loading service: %s' % (service)
         ts = connection.load(service)
         _, demand = util.to_array(ts)
     
         # Determine max demand value of this service
         max_value = np.percentile(demand, 95)  # np.max(demand)
         
         # Find the first node with enough residual CPU and memory capacity
         for node_index in xrange(len(buckets)):
             bucket = buckets[node_index]
             bucket_mem = buckets_mem[node_index]
             if (bucket[0] + max_value) < bucket[1] and (bucket_mem[0] + nodes.DOMAIN_MEM) < bucket_mem[1]:
                 bucket[2].append(service)
                 
                 bucket[0] = bucket[0] + max_value
                 bucket_mem[0] = bucket_mem[0] + nodes.DOMAIN_MEM
                 
                 migrations.append((mapping.domain, node_index))
                 assignment[service_index] = node_index
                 break
         else:
             # for/else: the else branch runs only if no node could fit the domain
             print 'WARN: Could not assign domain to a node!'
               
               
     # Close Times connection
     times_client.close()
     
           
     for bucket in buckets:
         print 'bucket length: %i' % len(bucket[2])
               
     print 'Assignment: %s' % assignment
     logger.info('Assignment: %s' % assignment)
     print 'Migrations: %s' % migrations
     logger.info('Migrations: %s' % migrations)
        
     return migrations, self._count_active_servers(assignment)
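
Because the bucket list is sized to len(nodes.NODES) up front, this is first fit over a fixed set of bins. For reference, a generic first-fit sketch that opens bins on demand; sorting demands in descending order beforehand (first-fit decreasing) usually packs tighter:

def first_fit(demands, capacity):
    # Place each demand into the first bin with enough residual capacity,
    # opening a new bin when none fits. Returns (assignment, bin count).
    residual = []  # free capacity per open bin
    assignment = []
    for demand in demands:
        for i, free in enumerate(residual):
            if demand <= free:
                residual[i] -= demand
                assignment.append(i)
                break
        else:
            residual.append(capacity - demand)
            assignment.append(len(residual) - 1)
    return assignment, len(residual)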
Example #6
 def execute(self, num_buckets, aggregation, migration_limit):
     from workload import util
             
     # Execute super code
     super(DSAPPlacement, self).execute()
     
     # Connect with Times
     print 'Connecting with Times'
     connection = times_client.connect()
     
     # Load the services to combine with the domain_profile_mapping
     domain_count = len(domains.domain_profile_mapping)
     domain_matrix = np.zeros((domain_count, num_buckets), dtype=float)
     
     domain_log = ''
     for domain_index in xrange(domain_count):
         mapping = domains.domain_profile_mapping[domain_index]
         
         # Important: Load the trace of the workload profile
         domain = profiles.get_cpu_profile_for_initial_placement(mapping.profileId)
         
         print 'loading domain: %s' % (domain)
         domain_log += domain + '; '
         
         ts = connection.load(domain)
         ts_len = len(ts.elements)
     
         # put TS into domain matrix
         _time, data = util.to_array(ts)
         
         data = data[0:profiles.PROFILE_INTERVAL_COUNT]
         ts_len = len(data)  # keep the bucketing consistent with the truncated TS
         
         # Downsampling TS (domain_matrix)
         self.experiment_length = ts_len * ts.frequency  # length of the experiment measured in seconds
         bucket_width = self.experiment_length / num_buckets  # in sec
         
         # elements = bucket_width / ts.frequency
         elements = ts_len / num_buckets
         buckets = []
         for i in xrange(num_buckets):
             start = i * elements
             end = min(ts_len, (i + 1) * elements) 
             tmp = data[start : end]
             buckets.append(aggregation(tmp))
 
         domain_matrix[domain_index] = buckets
         # print data
 
     # Log services
     logger.info('Selected profile: %s' % profiles.selected_name)
     logger.info('Loading services: %s' % domain_log)
 
     # Dump domain matrix
     print 'Logging domain matrix...'
     np.set_printoptions(linewidth=200, threshold=99999999)
     logger.info('Service matrix: %s' % domain_matrix)
 
     # Close Times connection
     times_client.close()
     
     print "Downsampling-Ratio:", ts_len, "elements TO", num_buckets, "buckets (freq=", ts.frequency, ", placement.experiment_length=", self.experiment_length, ", profiles.experiment_duration", profiles.EXPERIMENT_DURATION, ")"
     
     print 'Solving model...'
     logger.info('Placement strategy: DSAP')
     server_list, assignment_list = dsap.solve(self.nodecount, self.node_capacity_cpu, self.node_capacity_mem, domain_matrix, self.domain_demand_mem, migration_limit)
             
     # Return values for the initial placement only, A(0): server count and assignment at t=0
     self.assignment_list = assignment_list
     initial_placement = assignment_list[0]
     
     self.server_list = server_list
     initial_server_count = server_list[0]
     
     # Set initial_placement for getter functions 
     if initial_placement is not None:
         print 'Required servers: %i' % (initial_server_count)
         logger.info('Required servers: %i' % initial_server_count)
         print initial_placement
         logger.info('Assignment: %s' % initial_placement)
         
         print 'Assigning domains to servers'
         migrations = []
         for key in initial_placement.keys():
             mapping = domains.domain_profile_mapping[key]
             migration = (mapping.domain, initial_placement[key])
             migrations.append(migration)
         
         print 'Migrations: %s' % migrations
         logger.info('Migrations: %s' % migrations)
         return migrations, self._count_active_servers(initial_placement)
 
     else:
         print 'model infeasible'
         return None, None
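
The bucket loop above is a plain bucket-mean downsampler. An equivalent vectorized sketch (trailing samples that do not fill a whole bucket are dropped, exactly as in the loop):

import numpy as np

def downsample(data, num_buckets, aggregation=np.mean):
    # Split the series into num_buckets equal slices and aggregate each slice.
    elements = len(data) // num_buckets
    trimmed = np.asarray(data[:elements * num_buckets])
    return aggregation(trimmed.reshape(num_buckets, elements), axis=1)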
##########################

connection = times_client.connect()

mean_deltas = []
mean_sds = []
mean_50til = []
mean_90til = []
mean_corr = []

print '%s \t %s \t %s \t %s \t %s' % ('name', 'mean', 'sd', '50th', '90th')
for desc in selected:
    # Load normal CPU trace
    default = desc.name + profiles.POSTFIX_NORM
    ts_default = connection.load(default)
    ar_default = wutil.to_array(ts_default)[1]
    
    # Load modified CPU trace 
    modified = default + profiles.POSTFIX_MODIFIED
    ts_modified = connection.load(modified)
    ar_modified = wutil.to_array(ts_modified)[1]

    # Calculate mean    
    mean_default = np.mean(ar_default)
    mean_modified = np.mean(ar_modified)
    mean_deltas.append(mean_modified - mean_default) 
    
    # Calculate SDs
    stdev_default = np.std(ar_default)
    stdev_modified = np.std(ar_modified)
    mean_sds.append(stdev_modified - stdev_default)
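    
    # The percentile and correlation lists declared above are not filled in
    # the fragment as shown; a plausible continuation (sketch) following the
    # mean/SD pattern:
    mean_50til.append(np.percentile(ar_modified, 50) - np.percentile(ar_default, 50))
    mean_90til.append(np.percentile(ar_modified, 90) - np.percentile(ar_default, 90))
    length = min(len(ar_default), len(ar_modified))
    mean_corr.append(np.corrcoef(ar_default[:length], ar_modified[:length])[0, 1])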