def __dom_load(self, dom_time, domain):
        """Return the load level for *domain* at elapsed time *dom_time*.

        Three phases: a fixed step pattern during setup, a linear ramp-up,
        and finally the recorded load profile.
        """
        # Setup phase: fixed step pattern of load levels.
        if dom_time < conf_load.SETUP:
            setup_steps = (
                (tutil.minu(1), 100),
                (tutil.minu(1.3), 10),
                (tutil.minu(2), 50),
                (tutil.minu(2.5), 0),
                (tutil.minu(3.5), 75),
            )
            for threshold, level in setup_steps:
                if dom_time < threshold:
                    return level
            return 0

        # NOTE(review): dom_time is NOT reduced by conf_load.SETUP before the
        # ramp-up check below (the correction was commented out upstream) -
        # preserved as-is.

        # Ramp-up phase: scale the first profile value linearly.
        if dom_time < conf_load.RAMP_UP_DOWN:
            initial = self.__get_load(0, domain)
            return (float(initial) / float(conf_load.RAMP_UP_DOWN)) * float(dom_time)

        # Steady state: look up the profile value for the corrected time.
        dom_time -= conf_load.RAMP_UP_DOWN
        tindex = self.__calc_index(dom_time, domain)
        if tindex is None:
            return None
        return self.__get_load(tindex, domain)
 def get_parallelity(self, entry=None):
     """Return the maximum number of simultaneously active entries.

     If *entry* is given it is considered in addition to self.entries.
     Each entry is active from one minute before its offset until one
     minute after offset + duration + both ramp phases (safety buffer).
     """
     # Collect (time, is_start) events with a one-minute safety buffer.
     events = []

     def push_events(e):
         events.append((e.offset - minu(1), True))
         events.append((e.offset + e.duration + e.rampDown + e.rampUp + minu(1), False))

     if entry is not None:
         push_events(entry)
     for existing in self.entries:
         push_events(existing)

     # Sweep chronologically, tracking the concurrency depth.
     depth = 0
     max_depth = 0
     for _, is_start in sorted(events):
         depth += 1 if is_start else -1
         max_depth = max(max_depth, depth)

     return max_depth
 def __inter_arrival(self, entry_index):
     """Return the arrival delay before the entry at *entry_index*.

     The very first entry starts immediately (delay 0); all later
     entries draw their inter-arrival time from the CDF-based random
     generator (CDF from Peng et al.).
     """
     if entry_index:
         delta = self.inter_arrival.get_random()
         return minu(delta)
     return 0
 def build(self, schedule_id):
     """Build a static schedule: one workload launch per minute.

     Each domain's ramp-up window shrinks by its start delay so all
     domains reach steady state at the same time.
     """
     schedule = Schedule(schedule_id, 500, None, 0)

     for index in xrange(self.launches):
         # Launches are staggered by one minute each.
         start_time = index * minu(1)
         ramp_up = (self.launches - index) * minu(1) + SCH_RAMP_UP

         added = schedule.add(start_time, conf_domainsize.DEFAULT,
                              EXPERIMENT_DURATION, index, 0,
                              conf_domains.initial_domains[index].name,
                              ramp_up)
         if not added:
             print 'ERROR: Could not add static schedule entry'

     return schedule
 def __lifetime(self):
     """Draw a random VM lifetime.

     self.random_lifetime is a list of (probability, cdf) pairs; one CDF
     is picked with probability proportional to its weight, and the
     selected CDF yields the lifetime in minutes (converted via minu()).

     Returns None only if self.random_lifetime is empty.
     """
     # Uniform draw in [0, 1) used to select a CDF.
     rand = random.random()

     # Walk the cumulative probability mass until rand falls inside it.
     sum_prob = 0
     for prob, cdf in self.random_lifetime:
         sum_prob += prob
         if rand <= sum_prob:
             return minu(cdf.get_random())

     # BUG FIX: if the weights sum to slightly less than 1.0 due to
     # floating-point error, the loop used to fall through and return
     # None, crashing the caller (lifetime /= scale_duration).
     # Fall back to the last CDF instead.
     if self.random_lifetime:
         _, cdf = self.random_lifetime[-1]
         return minu(cdf.get_random())
     return None
 def validate_production(self):
     """Return the maximum number of simultaneously active entries.

     Like get_parallelity(), but keeps the actual entry objects on a
     stack while sweeping so overlaps are simulated with real entries.
     Each entry is padded by a one-minute safety buffer on both sides.
     """
     # Heap that holds (time, is_start, entry) start and stop events.
     active_heap = []
     for entry in self.entries:
         heapq.heappush(active_heap, (entry.offset - minu(1), True, entry))
         # BUG FIX: the end event was computed as
         # 'entry.offset - + entry.duration + ...', which SUBTRACTS the
         # duration. The end time must mirror get_parallelity():
         # offset + duration + rampUp + rampDown + buffer.
         heapq.heappush(active_heap, (entry.offset + entry.duration + entry.rampUp + entry.rampDown + minu(1), False, entry))

     # Simulate the set of active entries; record the peak size.
     stack = []
     max_stack = 0
     while active_heap:
         event = heapq.heappop(active_heap)
         if event[1]:
             stack.append(event[2])
         else:
             # Same semantics as the original del stack[stack.index(...)]:
             # remove the first equal entry.
             stack.remove(event[2])
         max_stack = max(len(stack), max_stack)

     # Return max stack depth
     return max_stack
    def build(self, schedule_id):
        """Build a randomized production schedule with id *schedule_id*.

        Draws domain sizes, lifetimes and inter-arrival times from the
        configured random distributions until self.launches entries were
        added, then validates the total duration and the parallelity.

        Returns the Schedule, or None if a validation limit is exceeded.
        """
        # Available domain launch counter (one slot per configured domain size)
        self.available_launches = [conf_domains.AVAILABLE_DOMAINS for _ in xrange(len(conf_domainsize.size_set[self.conf_domain_size]))]
        
        # Create a new schedule
        schedule = Schedule(schedule_id, self.max_parallelity, None, self.conf_domain_size)
        
        # Count number of added domains
        count_domains_added = 0
        
        # Hold offset of last added domain (inter-arrival time calculation)
        last_domain_offset = 0
                
        # Create domains until required number of domains is in schedule
        while count_domains_added < self.launches:
            # Next domain size
            domain_size = self.__domain_size()
            
            # Are there enough domains of this size available?
            # NOTE(review): available_launches is never decremented within this
            # method - if a counter ever reaches zero this loop would spin
            # forever on that size. Confirm who updates available_launches.
            if self.available_launches[domain_size] <= 0:
                print 'WARN: insufficient domains for size %i' % domain_size 
                continue
            
            # Determine duration and offset for this domain 
            lifetime = self.__lifetime()
            lifetime /= self.scale_duration
            
            # Reject lifetimes above the experiment limit
            if lifetime > self.max_duration:
                continue
            
            # Reject lifetimes that cannot fit both ramp phases plus 2 minutes.
            # NOTE(review): the else branch uses max(), which can only grow
            # lifetime_min - verify this is intended (a minimum tracker would
            # use min()).
            if self.lifetime_min and lifetime < (SCH_RAMP_DOWN + SCH_RAMP_UP + minu(2)):
                continue
            else:
                self.lifetime_min = max(lifetime, SCH_RAMP_DOWN + SCH_RAMP_UP + minu(2))
            
            offset = self.__inter_arrival(count_domains_added)
            offset /= self.scale_arrival

            # Determine profile index and profile offset 
            workload_profile_index = self.__workoad(domain_size, lifetime)
            workload_profile_offset = 0

            # Steady-state duration = lifetime minus both ramp phases
            duration = lifetime - SCH_RAMP_DOWN - SCH_RAMP_UP
            duration = max(duration, 0)

            # Try adding entry to schedule
            success = schedule.add(last_domain_offset + offset, domain_size, duration,
                                   workload_profile_index, workload_profile_offset)
            
            # If adding entry was successful
            if success:
                # Update internal counters
                count_domains_added += 1
                last_domain_offset += offset

        # Check total schedule duration against the configured maximum
        
        time_hours = (schedule.get_end_time())
        if time_hours > self.max_duration:
            sys.stdout.write('.')
            return None
    
        # Check parallelity against the configured maximum
        parallelity = schedule.get_parallelity()
        if parallelity > self.max_parallelity:
            sys.stdout.write('.')
            return None
    
        # Return schedule
        return schedule
import bisect
import cdf_random
import clparams
import conf_domains
import conf_domainsize
import conf_load
import configuration
import heapq
import itertools
import json
import numpy as np
import random
import sys

# Constants
SCH_RAMP_UP = minu(10)  # Ramp up duration of the experiment
SCH_RAMP_DOWN = minu(10)  # Ramp down duration of the experiment
ID_START_STATIC = 10000  # Base schedule id for static schedules (per name)
ID_START_DYNAMIC_PRODUCTION = 20000  # Base schedule id for dynamic production schedules (per name)
ID_START_DYNAMIC_SENSITIVITY = 30000  # Base schedule id for sensitivity schedules (per name)
EXPERIMENT_DURATION = hour(6)  # 6 hours steady-state duration of a static experiment

'''
Difference between Lifetime and Duration:

- Lifetime = how long a VM is running; it includes ramp-up, steady-state
  duration, and ramp-down (Lifetime = ramp-up + duration + ramp-down)
- Duration = length of the steady-state load generation only
'''

'''
import clparams
import sys
from workload import wtimes_meta
from workload import timeutil

LOAD_SOURCE = 'times' # options: sonar, times, times_MKI
SCRAMBLER = 0 # 0 = default 1:1 scrambling
TIMES_SELECTED_MIX = wtimes_meta.mixmkII # selecht workload in times
RAMP_UP_DOWN = timeutil.minu(10)
SETUP = timeutil.minu(10)

# Override settings by command line
clparams.load_parameters(sys.modules[__name__])
from balancer.model import types
from ipmodels import cbc_dsapp as dsapp
from logs import sonarlog
from migration_queue import MigrationQueue
from workload.timeutil import minu
import conf_nodes
import conf_domains
import json
import numpy as np
import strategy

# Fixed values
START_WAIT = 0  # Data aggregation phase (ALWAYS 0 FOR THIS CONTROLLER)
INTERVAL = minu(60)  # Control frequency
NUM_CPU_READINGS = 120  # presumably CPU readings per control window - confirm in controller
MIG_OVERHEAD_SOURCE = 0.00  # migration CPU overhead factor on the source (disabled)
MIG_OVERHEAD_TARGET = 0.00  # migration CPU overhead factor on the target (disabled)

# Setup logging
logger = sonarlog.getLogger('controller')

class Strategy(strategy.StrategyBase):
    
    def __init__(self, scoreboard, pump, model):
        """Initialize the strategy and set up its migration queue."""
        super(Strategy, self).__init__(scoreboard, pump, model, INTERVAL, START_WAIT)

        # Use the simple (non-optimizing) migration queue variant.
        use_simple = True
        self.migration_queue = MigrationQueue(self, use_simple, not use_simple, True)
       
    def start(self):