Example #1
0
 def start(self):
     print 'Connecting with Times'
     connection = times_client.connect()
     
     self.min_ts_length = sys.maxint  # Minimum length across all TS
     ts_freq = 0  # Frequency of the TS from Times
     
     # Iterate over all domains and assign them a TS
     for domain in self.model.get_hosts(model.types.DOMAIN):
         # Select and load TS (based on the configuration)
         index = domains.index_of(domain.name)
         mapping = domains.domain_profile_mapping[index]
         # load = profiles.get_cpu_profile_for_initial_placement(mapping.profileId)
         load = profiles.get_cpu_profile_for_initial_placement(mapping.profileId)
         
         ts = connection.load(load)
         
         # Convert TS to a numpy array
         # select TS not time index
         ts_freq = ts.frequency
         ts = wutil.to_array(ts)[1]
         
         # Add noise to the time series
         if NOISE:
             # random = np.random.lognormal(mean=NOISE_MEAN, sigma=NOISE_SIGMA, size=len(ts))
             random = np.random.normal(loc=NOISE_MEAN, scale=NOISE_SIGMA, size=len(ts))
             ts += random
             ts[ts > 100] = 100
             ts[ts < 0] = 0
         
         # Attach TS to domain 
         domain.ts = ts
         
         # Update max length
         self.min_ts_length = min(self.min_ts_length, len(ts))
     
     # Close times connection
     times_client.close()
     
     # Reduce length of time series to 6 hours
     # Calculation: Adjust frequency by (new duration / current TS duration)
     self.freq = (ts_freq * profiles.EXPERIMENT_DURATION) / (self.min_ts_length * ts_freq)
     
     # Calculate ramp up delete time
     self.ramp_up = profiles.RAMP_UP
     self.ramp_down = profiles.RAMP_DOWN
     
     # Schedule message pump
     self.pump.callLater(0, self.run)
Example #2
0
 def execute(self):
     # Execute super code
     super(FirstFitPlacement, self).execute()
     
     print 'Using First Fit for domain placmement ...'
         
     # Logging
     logger.info('Placement strategy: First Fit')
     logger.info('Required servers: %i' % len(nodes.NODES))
     
     # Connect with Times
     print 'Connecting with Times'
     connection = times_client.connect()
     
     # Loading services to combine the dmain_service_mapping with    
     service_count = len(domains.domain_profile_mapping)
     
     # For each node there is one bucket
     buckets = []
     buckets_mem = []
     
     migrations = []
     assignment = {}
     for _ in xrange(len(nodes.NODES)):
         buckets.append([0, nodes.NODE_CPU, []])
         buckets_mem.append([0, nodes.NODE_MEM, []])
     
     # Service which gets mapped
     for service_index in xrange(service_count):
         # Maps the service to a service profile
         mapping = domains.domain_profile_mapping[service_index]
         
         # Important: Load the trace of the workload profile
         service = profiles.get_cpu_profile_for_initial_placement(service_index)
         
         print 'loading service: %s' % (service)
         ts = connection.load(service)
         from workload import util
         _, demand = util.to_array(ts)
     
         # Determine max demand value of this service
         max_value = np.percentile(demand, 95)  # np.max(demand)
         
         bin_found = False
         try:
             for node_index in xrange(len(buckets)):
                 bucket = buckets[node_index]
                 bucket_mem = buckets_mem[node_index]
                 if (bucket[0] + max_value) < bucket[1] and (bucket_mem[0] + nodes.DOMAIN_MEM) < bucket_mem[1]:
                     bin_found = True
                     
                     bucket[2].append(service)
                     
                     bucket[0] = bucket[0] + max_value
                     bucket_mem[0] = bucket_mem[0] + nodes.DOMAIN_MEM
                     
                     migrations.append((mapping.domain, node_index))
                     assignment[service_index] = node_index
                     
                     raise StopIteration()
             print 'Error no target!'
         except StopIteration:
             if bin_found == False:
                 print 'WARN: Could not assign domain to a node!'
               
               
     # Close Times connection
     times_client.close()
     
           
     for bucket in buckets:
         print 'bucket length: %i' % len(bucket[2])
               
     print 'Assignment: %s' % assignment
     logger.info('Assignment: %s' % assignment)
     print 'Migrations: %s' % migrations
     logger.info('Migrations: %s' % migrations)
        
     return migrations, self._count_active_servers(assignment)
Example #3
0
 def execute(self, num_buckets, aggregation, migration_limit):
     from workload import util
             
     # Execute super code
     super(DSAPPlacement, self).execute()
     
     # Connect with Times
     print 'Connecting with Times'
     connection = times_client.connect()
     
     # Loading services to combine the dmain_service_mapping with    
     domain_count = len(domains.domain_profile_mapping)
     domain_matrix = np.zeros((domain_count, num_buckets), dtype=float)
     
     domain_log = ''
     for domain_index in xrange(domain_count):
         mapping = domains.domain_profile_mapping[domain_index]
         
         # Important: Load the trace of the workload profile
         domain = profiles.get_cpu_profile_for_initial_placement(mapping.profileId)
         
         print 'loading domain: %s' % (domain)
         domain_log += domain + '; '
         
         ts = connection.load(domain)
         ts_len = len(ts.elements)
     
         # put TS into domain matrix
         _time, data = util.to_array(ts)
         
         data = data[0:profiles.PROFILE_INTERVAL_COUNT]
         
         # Downsampling TS (domain_matrix)
         self.experiment_length = ts_len * ts.frequency  # length of the experiment measured in seconds
         bucket_width = self.experiment_length / num_buckets  # in sec
         
         # elements = bucket_width / ts.frequency
         elements = ts_len / num_buckets
         buckets = []
         for i in xrange(num_buckets):
             start = i * elements
             end = min(ts_len, (i + 1) * elements) 
             tmp = data[start : end]
             buckets.append(aggregation(tmp))
 
         domain_matrix[domain_index] = buckets
         # print data
 
     # Log services
     logger.info('Selected profile: %s' % profiles.selected_name)
     logger.info('Loading services: %s' % domain_log)
 
     # Dumpservice_matrix
     print 'Logging domain matrix...'
     np.set_printoptions(linewidth=200, threshold=99999999)
     logger.info('Service matrix: %s' % domain_matrix)
 
     # Close Times connection
     times_client.close()
     
     print "Downsampling-Ratio:", ts_len, "elements TO", num_buckets, "buckets (freq=", ts.frequency, ", placement.experiment_length=", self.experiment_length, ", profiles.experiment_duration", profiles.EXPERIMENT_DURATION, ")"
     
     print 'Solving model...'
     logger.info('Placement strategy: DSAP')
     server_list, assignment_list = dsap.solve(self.nodecount, self.node_capacity_cpu, self.node_capacity_mem, domain_matrix, self.domain_demand_mem, migration_limit)
             
     # return values for initial placement only > A(0) <   (#servers + assignment(t=0))
     self.assignment_list = assignment_list
     initial_placement = assignment_list[0]
     
     self.server_list = server_list
     initial_server_count = server_list[0]
     
     # Set initial_placement for getter functions 
     if initial_placement != None:
         print 'Required servers: %i' % (initial_server_count)
         logger.info('Required servers: %i' % initial_server_count)
         print initial_placement
         logger.info('Assignment: %s' % initial_placement)
         
         print 'Assigning domains to servers'
         migrations = []
         for key in initial_placement.keys():
             mapping = domains.domain_profile_mapping[key]
             migration = (mapping.domain, initial_placement[key])
             migrations.append(migration)
         
         print 'Migrations: %s' % migrations
         logger.info('Migrations: %s' % migrations)
         return migrations, self._count_active_servers(initial_placement)
 
     else:
         print 'model infeasible'
         return None, None
Example #4
0
 def execute(self, aggregation=False, bucketCount=24):
     # Execute super code
     super(SSAPvPlacement, self).execute()
     
     # Connect with Times
     print 'Connecting with Times'
     connection = times_client.connect()
     
     # Loading services to combine the dmain_service_mapping with    
     service_count = len(domains.domain_profile_mapping)
     
     if aggregation:
         llen = bucketCount
     else: 
         llen = profiles.PROFILE_INTERVAL_COUNT
     service_matrix = np.zeros((service_count, llen), dtype=float)
     
     service_log = ''
     for service_index in xrange(service_count):
         mapping = domains.domain_profile_mapping[service_index]
         service = profiles.get_cpu_profile_for_initial_placement(mapping.profileId)
             
         print 'loading service: %s' % (service)
         service_log += service + '; '
         
         ts = connection.load(service)
         ts_len = len(ts.elements)
     
         # put TS into service matrix
         data = np.empty((ts_len), dtype=float)
         for i in xrange(ts_len):
             data[i] = ts.elements[i].value
             
         
         data = data[0:profiles.PROFILE_INTERVAL_COUNT]
 
         # Downsample TS
         if aggregation:
             elements = ts_len / bucketCount
             bucket_data = []
             for i in xrange(bucketCount):
                 start = i * elements
                 end = min(ts_len, (i + 1) * elements)
                 tmp = data[start : end]
                 bucket_data.append(np.max(tmp))
             service_matrix[service_index] = bucket_data
         else:
             service_matrix[service_index] = data
 
     # Log services
     logger.info('Selected profile: %s' % profiles.selected_name)
     logger.info('Loading services: %s' % service_log)
 
     # Dumpservice_matrix
     print 'Logging service matrix...'
     np.set_printoptions(linewidth=200, threshold=99999999)
     logger.info('Service matrix: %s' % service_matrix)
 
     # Close Times connection
     times_client.close()
     
     print 'Solving model...'
     logger.info('Placement strategy: SSAPv')
     server, assignment = ssapv.solve(self.nodecount, self.node_capacity_cpu, self.node_capacity_mem, service_matrix, self.domain_demand_mem)
     
     # Set assignment for getter functions 
     self.assignment = assignment
     
     if assignment != None:
         print 'Required servers: %i' % (server)
         logger.info('Required servers: %i' % server)
         print assignment
         logger.info('Assignment: %s' % assignment)
         
         print 'Assigning domains to servers'
         migrations = []
         for key in assignment.keys():
             mapping = domains.domain_profile_mapping[key]
             migration = (mapping.domain, assignment[key])
             migrations.append(migration)
         
         
         print 'Migrations: %s' % migrations
         logger.info('Migrations: %s' % migrations)
         return migrations, self._count_active_servers(assignment)
 
     else:
         print 'model infeasible'
         return None, None