Example #1
def process_sonar_trace(name, trace_ts, timestamps, save=False):
    # Number of readings to aggregate to downsample the
    # trace to width PROFILE_INTERVAL_COUNT
    readings_to_aggregate = len(trace_ts) / PROFILE_INTERVAL_COUNT
    
    # Create a new array to hold the profile
    profile = np.zeros(PROFILE_INTERVAL_COUNT, dtype=float)
    
    # Downsample the trace to the profile array
    for i in xrange(PROFILE_INTERVAL_COUNT):
        start = readings_to_aggregate * i
        end = min(readings_to_aggregate * (i + 1), len(trace_ts))
        
        # ATTENTION: Using mean here
        profile[i] = np.mean(trace_ts[start:end])
        
    # Calculate interval
    interval = EXPERIMENT_DURATION / PROFILE_INTERVAL_COUNT
        
    # Save the profile
    if save:
        connection = times_client.connect()
        # Special case - no prefix is added as this TS is treated as a RAW TS
        __write_profile(connection, __times_name(False, name, POSTFIX_TRACE), profile, interval, noprefix=True)
        times_client.close()
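
A minimal usage sketch (hedged: PROFILE_INTERVAL_COUNT, EXPERIMENT_DURATION and np are assumed to be defined in the surrounding module; the trace values here are synthetic):

import numpy as np
trace = np.random.rand(2880) * 100  # synthetic CPU utilization trace (percent)
process_sonar_trace('SIS_222_cpu', trace, None, save=False)  # aggregate only, no write to Times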
Example #2
def __plot_complete_mix():
    '''
    Plots all TS of a mix in a single image using multiple axes
    '''
    
    # Connect with times
    connection = times_client.connect()
    
    plot_mix = pdata.mix_2
    cols = 5
    rows = len(plot_mix) / cols + 1 
    index = 0
    print rows
    
    fig = plt.figure()
    fig.set_figheight(20)
    fig.set_figwidth(40)
    
    for desc in plot_mix:  
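        # Load the USER-postfixed TS for this mix element and draw it into the next grid cell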
        name = desc.name
        timeSeries = connection.load(__times_name(True, name, POSTFIX_USER))
        _, demand = util.to_array(timeSeries)
        
        index += 1
        ax = fig.add_subplot(rows, cols, index)
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax.plot(range(0, len(demand)), demand)

    plt.savefig(configuration.path('mix_overlay', 'png'))
    
    # Close times connection
    times_client.close()
Example #3
def __dump_to_csv():
    connection = times_client.connect()
    
    demands = []
    for desc in selected:
        timeSeries = connection.load(__times_name(True, desc.name, POSTFIX_NORM))
        _, demand = util.to_array(timeSeries)
        demands.append((desc.name, demand))
        
    import csv
    with open(configuration.path('traces', 'csv'), 'wb') as csvfile:
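        # Layout: one header row with the trace names, then one row per time index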
        spamwriter = csv.writer(csvfile, delimiter='\t')
        
        row = []
        for demand in demands:
            row.append(demand[0])
        spamwriter.writerow(row)
        
        l = len(demands[0][1])
        for i in xrange(0, l):
            row = []
            for demand in demands:
                if i < len(demand[1]): 
                    row.append(demand[1][i])
                else:
                    print 'WARN: trace shorter than expected - padding with 0'
                    row.append(0)
                    
            spamwriter.writerow(row)
        
    times_client.close()
Example #4
def __build_profiles(mix, save):
    '''
    Build the profiles for all TS in mix. 
    
    Keyword arguments:
    mix -- set of TS to generate profiles for
    save -- save profiles to Times 
    '''
    
    # Times connection
    connection = times_client.connect()

    # Calculate profiles
    import convolution
    for desc in mix:
        print 'processing convolution: %s' % (desc.name)
        profile = convolution.process_trace(connection, __times_name(False, desc.name),
                                            desc.sample_frequency, CYCLE_TIME)
        
        # Add profile to mix
        desc.profile = profile
        
    # Get maximum for each set in mix
    set_max = __get_and_apply_set_max(mix)

    # Store profiles
    for desc in mix:
        profile, frequency = desc.profile
        __store_profile(connection, desc, set_max, profile, frequency, save)
        
    # Close Times connection
    times_client.close()
Example #5
def __build_modified_profiles(mix, save):
    connection = times_client.connect()
    
    for mi_element in mix:
        # Operates on pre-processed data. Hence, a prefix is required
        ts_name = __times_name(True, mi_element.name, POSTFIX_NORM)

        # Modify CPU normal profile     
        modified_profile, interval = modifier.process_trace(connection, ts_name,
                                                            mi_element.modifier, mi_element.additive,
                                                            mi_element.scale, mi_element.shift)
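        # modified_profile appears to be a percent-load curve (note the /100 * MAX_USERS scaling below)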
        if save:
            name = __times_name(True, mi_element.name, POSTFIX_NORM, POSTFIX_MODIFIED)
            __write_profile(connection, name, modified_profile, interval)
            
        # Store USER profiles (-> feed into Rain)
        # Adapt frequency for the benchmark duration
        # Add padding for ramp up and ramp down
        modified_profile /= 100.0
        modified_profile *= MAX_USERS
        interval = interval / (CYCLE_TIME / EXPERIMENT_DURATION)
        user_profile = np.array(modified_profile)
        user_profile = __padprofile(user_profile, interval)
        if save:
            # NOTE: assumed fix - use the USER postfix here so the NORM+MODIFIED profile saved above is not overwritten
            name = __times_name(True, mi_element.name, POSTFIX_USER)
            __write_profile(connection, name, user_profile, interval)

    times_client.close()
Example #6
def process_trace(name):
    print 'Downloading...'
    connection = times_client.connect()
    timeSeries = connection.load(name)
    times_client.close()
    print 'complete'
    
    # 24 hours
    periodicity = 24.0 * 3600.0 # day
    frequency = 3600 # hour
    smoothening = 30
    return process_file(name, timeSeries.elements, periodicity, frequency, smoothening)
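
A hedged invocation sketch (the series name is borrowed from the other examples; process_file is assumed to be defined in the same module):

profile = process_trace('SIS_163_cpu')  # download the TS, then build its daily profile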
Example #7
def __dump_user_profile_maxes():
    '''
    Used to verify that all user profiles have user values below MAX_USERS
    '''
    # Connect with times
    connection = times_client.connect()
    
    for name in connection.find('.*%s$' % (POSTFIX_USER)):
        result = util.to_array(connection.load(name))[1]
        if np.max(result) > MAX_USERS: 
            print '%s - %i' % (name, np.max(result))
    
    times_client.close()
Example #8
 def start(self):
     print 'Connecting with Times'
     connection = times_client.connect()
     
     self.min_ts_length = sys.maxint  # Minimum length across all TS
     ts_freq = 0  # Frequency of the TS from Times
     
     # Iterate over all domains and assign them a TS
     for domain in self.model.get_hosts(model.types.DOMAIN):
         # Select and load TS (based on the configuration)
         index = domains.index_of(domain.name)
         mapping = domains.domain_profile_mapping[index]
         load = profiles.get_cpu_profile_for_initial_placement(mapping.profileId)
         
         ts = connection.load(load)
         
         # Convert TS to a numpy array
         # select TS not time index
         ts_freq = ts.frequency
         ts = wutil.to_array(ts)[1]
         
         # Add noise to the time series
         if NOISE:
             # random = np.random.lognormal(mean=NOISE_MEAN, sigma=NOISE_SIGMA, size=len(ts))
             random = np.random.normal(loc=NOISE_MEAN, scale=NOISE_SIGMA, size=len(ts))
             ts += random
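             # Clamp utilization to the valid percent range [0, 100]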
             ts[ts > 100] = 100
             ts[ts < 0] = 0
         
         # Attach TS to domain 
         domain.ts = ts
         
         # Update min length
         self.min_ts_length = min(self.min_ts_length, len(ts))
     
     # Close times connection
     times_client.close()
     
     # Reduce the effective time series duration to 6 hours (the experiment duration)
     # Calculation: adjust frequency by (new duration / current TS duration)
     self.freq = (ts_freq * profiles.EXPERIMENT_DURATION) / (self.min_ts_length * ts_freq)
     
     # Ramp up and ramp down times
     self.ramp_up = profiles.RAMP_UP
     self.ramp_down = profiles.RAMP_DOWN
     
     # Schedule message pump
     self.pump.callLater(0, self.run)
Example #9
    def __call__(self, parser, namespace, values, option_string=None):
        try:
            query = values
            if namespace.version == 1:
                port = times_client.port_MKI
            else:
                port = times_client.port_MKII

            connection = times_client.connect(port)
            results = connection.find(query)
            print 'name'
            for result in results:
                print '%s' % result
        finally:
            times_client.close()
Example #10
def __plot_overlay_mix():
    '''
    Plots all TS of a mix in a single-axes graph
    '''
    # Connect with times
    connection = times_client.connect()
    
    # Plot selected
    selected_mix0 = ['O2_retail_ADDORDER', 'SIS_163_cpu', 'SIS_393_cpu']
    selected_mix1 = ['SIS_222_cpu', 'SIS_213_cpu', 'SIS_387_cpu']
    plot_mix = selected_mix1
    
    # Plot all from a set
#    plot_mix = []
#    for i in xrange(a, a + 100):
#        print selected[i].name
#        plot_mix.append(selected[i].name)
    
    fig = plt.figure()
    
    ax = fig.add_subplot(111)
    ax.set_xlim([0, 300])
    
    for name in plot_mix:
        timeSeries = connection.load(__times_name(True, name, POSTFIX_USER))
        print timeSeries
        _, demand = util.to_array(timeSeries)
        demand = demand[7:289 + 7]
        ax.plot(range(0, len(demand)), demand, linewidth=0.7)

    
    xt = [(t * 60 / 5) for t in xrange(0, 25)]
    xl = [t for t in xrange(0, 25)]
    
    ax.set_xticks(xt)
    ax.set_xticklabels(xl)
    
    ax.set_xlabel('Time in hours')
    ax.set_ylabel('Load in number of users')
    
#    plt.show()
    plt.savefig(configuration.path('overlay', 'png'))
    plt.savefig(configuration.path('mix1', 'pdf'))
    
    # Close times connection
    times_client.close()
Example #11
def __build_sample_day(mix, save):
    connection = times_client.connect()

    # Calculate profiles
    import sampleday
    for desc in mix:
        print 'processing sample day %s' % (desc.name)
        profile = sampleday.process_trace(connection, __times_name(False, desc.name),
                                          desc.sample_frequency, CYCLE_TIME, desc.profile_set.day)
        desc.profile = profile
        
    # Max value in each set of TS
    set_max = __get_and_apply_set_max(mix)

    # Store profiles
    for desc in mix:
        profile, frequency = desc.profile
        __store_profile(connection, desc, set_max, profile, frequency, save)
        
    times_client.close()
Example #12
def allocate_domains(name):
    print 'Downloading...'
    connection = times_client.connect()
    
    timeSeries = connection.load(name)
    print 'done'
    
    times_client.close()
    print 'complete'
    
    # Generate (x,y) signal
    signal = []
    for element in timeSeries.elements:
        signal.append(element.value)
    
    # Buckets (DSAP paper): 99th percentile over 48 equal-width windows
    blen = len(signal) / 48
    bsignal = []
    for b in xrange(48):
        values = signal[b * blen:(b + 1) * blen]
        bsignal.append(np.percentile(values, 99))
        
    signal = []
    signal.append(bsignal[0])
    signal.extend(bsignal)
    signal.append(bsignal[-1])
    
    # Setup the new plot
    fig = plt.figure()
    fig.set_size_inches((10, 10))
    plt.xlabel('Time')
    plt.ylabel('Load')
    
    # Create a new subplot
    ax = fig.add_subplot(1, 1, 1)
    ax.set_title('TimeSeries %s' % (name))
    ax.plot(range(0, len(signal)), signal)   

    # Display the plot        
    plt.show()
Example #13
def plot(profile=None):
    # Setup the new plot
    fig = plt.figure()
     
    # Create a new subplot
    ax = fig.add_subplot(1, 1, 1)
    if profile is None:
        # Connect with Times
        connection = times_client.connect()
        # Download TS
        timeSeries = connection.load('PUSER_MIX0_O2_business_ADDORDER')
        
        # Disconnect from Times      
        times_client.close()
        # Generate (x,y) signal
        signal = []
        for element in timeSeries.elements:
            signal.append(element.value)
        ax.plot(range(0, len(signal)), signal)  
    else:
        ax.plot(range(0, len(profile)), profile)
    # Display the plot        
    plt.show()
Example #14
 def __call__(self, parser, namespace, values, option_string=None):
     try:
         name = values
         if namespace.version == 1:
             port = times_client.port_MKI
         else:
             port = times_client.port_MKII
         
         connection = times_client.connect(port)
         result = connection.find(name)
         if len(result) == 0:
             print '0,0'
             sys.exit(1)
         
         ts = connection.load(name)
         print 'timestamp,value'
         for element in ts.elements:
             print '%i,%i' % (element.timestamp, element.value)
         
     finally:
         times_client.close()
Example #15
def main():
    from service import times_client
    from workload import util
    
    con = times_client.connect()
    data = con.load('SIS_222_cpu_profile_trace')
    _ , demand = util.to_array(data)
    times_client.close()
    
    random = np.random.lognormal(mean=0.0, sigma=1.5, size=len(demand))
    demand += random

    # Run smoother
    _, s0, _ = single_exponential_smoother(demand)
    _, s1, _ = double_exponential_smoother(demand)
    
    # Plot original data with forecasted
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.plot(demand)
    ax.plot(s0)
    ax.plot(s1)
    plt.show()
Example #16
    # Store profiles
    for handle in handles:
        print 'storing convolution: %s ...' % (handle.name)
        __store_profile_yarns(mix, handle, normalizing_value, handle.profile, handle.profile_frequency)
        

def __build_profiles(mix):
    sampleday = []
    convolution = []
    for handle in mix.handles: 
        if handle.htype.day is not None:
            sampleday.append(handle)
        else:
            convolution.append(handle)
            
    print 'Build sample days %i' % (len(sampleday))
    __build_sampleday(mix, sampleday)
    
    print 'Build profiles %i' % (len(convolution))
    __build_convolutions(mix, convolution)

    if mix.modified: 
        print 'Building modified %i' % (len(mix.handles))
        __build_modified_profiles(mix) 
    
if __name__ == '__main__':
    connection = tc.connect()
    __build_profiles(conf_load.TIMES_SELECTED_MIX)
    tc.close()
Example #17
def main():
    # Solve allocation problem
    nodecount = len(nodes.NODES)
    model = placement.SSAPvPlacement(nodecount, nodes.NODE_CPU, nodes.NODE_MEM, nodes.DOMAIN_MEM)
    model.execute()
    
    assignment = model.assignment
    if assignment is not None:
        
        node_assignment = {}
        for domain in assignment.keys():
            node = assignment[domain]
            if node not in node_assignment:
                node_assignment[node] = []
            node_assignment[node].append(domain)
            
        print node_assignment 
                
        # Load time series used by the drivers
        # Connect with Times
        print 'Connecting with Times'
        tsdata = []
        connection = times_client.connect()
        
        # Load the service profiles referenced by domain_profile_mapping
        service_count = len(domains.domain_profile_mapping)
        import sys
        ts_length = sys.maxint
        for i_service in xrange(service_count):
            name = profiles.get_cpu_current_profile(i_service)
            tsd = connection.load(name)
            tsd = wutil.to_array(tsd)[1]
            tsdata.append(tsd)
            ts_length = min(ts_length, len(tsd)) 
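            # ts_length tracks the shortest TS so the simulation never indexes past any trace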
            
        times_client.close()
        
        print node_assignment
        
        # Run simulation and report overload situations
        acc_load = [[] for _ in xrange(len(nodes.NODES))]
        for t in xrange(ts_length):
            print '-- t -- %i' % t
            for node in node_assignment.keys():
                sum_load = 0
                for domain in node_assignment[node]: 
                    sum_load += tsdata[domain][t]
                    
                print node
                acc_load[node].append(sum_load)

        # Plot accumulated loads
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.set_title("Workload %s Overload %s" % (profiles.selected_name, profiles.modified))
        ax.axis([0.0, ts_length, 0, 500])
        for load in acc_load:
            ax.plot(range(0, len(load)), load)
    
        plt.savefig('C:/temp/convolution/tesoverload_%s_%s.png' % (profiles.selected_name, profiles.modified))
        
             
    else:
        print 'Could not check overload - no feasible assignment found'
Example #18
 def execute(self):
     # Execute super code
     super(FirstFitPlacement, self).execute()
     
     print 'Using First Fit for domain placement ...'
         
     # Logging
     logger.info('Placement strategy: First Fit')
     logger.info('Required servers: %i' % len(nodes.NODES))
     
     # Connect with Times
     print 'Connecting with Times'
     connection = times_client.connect()
     
     # Load the service profiles referenced by domain_profile_mapping
     service_count = len(domains.domain_profile_mapping)
     
     # For each node there is one bucket
     buckets = []
     buckets_mem = []
     
     migrations = []
     assignment = {}
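     # Each bucket is [allocated capacity, total capacity, assigned services]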
     for _ in xrange(len(nodes.NODES)):
         buckets.append([0, nodes.NODE_CPU, []])
         buckets_mem.append([0, nodes.NODE_MEM, []])
     
     # Service which gets mapped
     for service_index in xrange(service_count):
         # Maps the service to a service profile
         mapping = domains.domain_profile_mapping[service_index]
         
         # Important: Load the trace of the workload profile
         service = profiles.get_cpu_profile_for_initial_placement(service_index)
         
         print 'loading service: %s' % (service)
         ts = connection.load(service)
         from workload import util
         _, demand = util.to_array(ts)
     
         # Determine max demand value of this service
         max_value = np.percentile(demand, 95)  # np.max(demand)
         
         bin_found = False
         for node_index in xrange(len(buckets)):
             bucket = buckets[node_index]
             bucket_mem = buckets_mem[node_index]
             if (bucket[0] + max_value) < bucket[1] and (bucket_mem[0] + nodes.DOMAIN_MEM) < bucket_mem[1]:
                 bin_found = True
                 bucket[2].append(service)
                 bucket[0] = bucket[0] + max_value
                 bucket_mem[0] = bucket_mem[0] + nodes.DOMAIN_MEM
                 migrations.append((mapping.domain, node_index))
                 assignment[service_index] = node_index
                 break
         if not bin_found:
             print 'WARN: Could not assign domain to a node!'
               
               
     # Close Times connection
     times_client.close()
     
           
     for bucket in buckets:
         print 'bucket length: %i' % len(bucket[2])
               
     print 'Assignment: %s' % assignment
     logger.info('Assignment: %s' % assignment)
     print 'Migrations: %s' % migrations
     logger.info('Migrations: %s' % migrations)
        
     return migrations, self._count_active_servers(assignment)
Example #19
 def execute(self, num_buckets, aggregation, migration_limit):
     from workload import util
             
     # Execute super code
     super(DSAPPlacement, self).execute()
     
     # Connect with Times
     print 'Connecting with Times'
     connection = times_client.connect()
     
     # Load the domain profiles referenced by domain_profile_mapping
     domain_count = len(domains.domain_profile_mapping)
     domain_matrix = np.zeros((domain_count, num_buckets), dtype=float)
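     # domain_matrix: one row per domain, one column per bucket of the downsampled TS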
     
     domain_log = ''
     for domain_index in xrange(domain_count):
         mapping = domains.domain_profile_mapping[domain_index]
         
         # Important: Load the trace of the workload profile
         domain = profiles.get_cpu_profile_for_initial_placement(mapping.profileId)
         
         print 'loading domain: %s' % (domain)
         domain_log += domain + '; '
         
         ts = connection.load(domain)
         ts_len = len(ts.elements)
     
         # put TS into domain matrix
         _time, data = util.to_array(ts)
         
         data = data[0:profiles.PROFILE_INTERVAL_COUNT]
         
         # Downsampling TS (domain_matrix)
         self.experiment_length = ts_len * ts.frequency  # length of the experiment measured in seconds
         bucket_width = self.experiment_length / num_buckets  # in sec
         
         # elements = bucket_width / ts.frequency
         elements = ts_len / num_buckets
         buckets = []
         for i in xrange(num_buckets):
             start = i * elements
             end = min(ts_len, (i + 1) * elements) 
             tmp = data[start : end]
             buckets.append(aggregation(tmp))
 
         domain_matrix[domain_index] = buckets
         # print data
 
     # Log services
     logger.info('Selected profile: %s' % profiles.selected_name)
     logger.info('Loading services: %s' % domain_log)
 
     # Dump the domain matrix
     print 'Logging domain matrix...'
     np.set_printoptions(linewidth=200, threshold=99999999)
     logger.info('Service matrix: %s' % domain_matrix)
 
     # Close Times connection
     times_client.close()
     
     print "Downsampling-Ratio:", ts_len, "elements TO", num_buckets, "buckets (freq=", ts.frequency, ", placement.experiment_length=", self.experiment_length, ", profiles.experiment_duration", profiles.EXPERIMENT_DURATION, ")"
     
     print 'Solving model...'
     logger.info('Placement strategy: DSAP')
     server_list, assignment_list = dsap.solve(self.nodecount, self.node_capacity_cpu, self.node_capacity_mem, domain_matrix, self.domain_demand_mem, migration_limit)
             
     # Return values for the initial placement only: A(0) = (#servers, assignment at t=0)
     self.assignment_list = assignment_list
     initial_placement = assignment_list[0]
     
     self.server_list = server_list
     initial_server_count = server_list[0]
     
     # Set initial_placement for getter functions 
     if initial_placement is not None:
         print 'Required servers: %i' % (initial_server_count)
         logger.info('Required servers: %i' % initial_server_count)
         print initial_placement
         logger.info('Assignment: %s' % initial_placement)
         
         print 'Assigning domains to servers'
         migrations = []
         for key in initial_placement.keys():
             mapping = domains.domain_profile_mapping[key]
             migration = (mapping.domain, initial_placement[key])
             migrations.append(migration)
         
         print 'Migrations: %s' % migrations
         logger.info('Migrations: %s' % migrations)
         return migrations, self._count_active_servers(initial_placement)
 
     else:
         print 'model infeasible'
         return None, None
Example #20
 def execute(self, aggregation=False, bucketCount=24):
     # Execute super code
     super(SSAPvPlacement, self).execute()
     
     # Connect with Times
     print 'Connecting with Times'
     connection = times_client.connect()
     
     # Load the service profiles referenced by domain_profile_mapping
     service_count = len(domains.domain_profile_mapping)
     
     if aggregation:
         llen = bucketCount
     else: 
         llen = profiles.PROFILE_INTERVAL_COUNT
     service_matrix = np.zeros((service_count, llen), dtype=float)
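     # service_matrix: one row per service (raw profile or bucketCount aggregated maxima)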
     
     service_log = ''
     for service_index in xrange(service_count):
         mapping = domains.domain_profile_mapping[service_index]
         service = profiles.get_cpu_profile_for_initial_placement(mapping.profileId)
             
         print 'loading service: %s' % (service)
         service_log += service + '; '
         
         ts = connection.load(service)
         ts_len = len(ts.elements)
     
         # put TS into service matrix
         data = np.empty((ts_len), dtype=float)
         for i in xrange(ts_len):
             data[i] = ts.elements[i].value
             
         
         data = data[0:profiles.PROFILE_INTERVAL_COUNT]
 
         # Downsample TS
         if aggregation:
             elements = ts_len / bucketCount
             bucket_data = []
             for i in xrange(bucketCount):
                 start = i * elements
                 end = min(ts_len, (i + 1) * elements)
                 tmp = data[start : end]
                 bucket_data.append(np.max(tmp))
             service_matrix[service_index] = bucket_data
         else:
             service_matrix[service_index] = data
 
     # Log services
     logger.info('Selected profile: %s' % profiles.selected_name)
     logger.info('Loading services: %s' % service_log)
 
     # Dump the service matrix
     print 'Logging service matrix...'
     np.set_printoptions(linewidth=200, threshold=99999999)
     logger.info('Service matrix: %s' % service_matrix)
 
     # Close Times connection
     times_client.close()
     
     print 'Solving model...'
     logger.info('Placement strategy: SSAPv')
     server, assignment = ssapv.solve(self.nodecount, self.node_capacity_cpu, self.node_capacity_mem, service_matrix, self.domain_demand_mem)
     
     # Set assignment for getter functions 
     self.assignment = assignment
     
     if assignment is not None:
         print 'Required servers: %i' % (server)
         logger.info('Required servers: %i' % server)
         print assignment
         logger.info('Assignment: %s' % assignment)
         
         print 'Assigning domains to servers'
         migrations = []
         for key in assignment.keys():
             mapping = domains.domain_profile_mapping[key]
             migration = (mapping.domain, assignment[key])
             migrations.append(migration)
         
         
         print 'Migrations: %s' % migrations
         logger.info('Migrations: %s' % migrations)
         return migrations, self._count_active_servers(assignment)
 
     else:
         print 'model infeasible'
         return None, None
Example #21
    for name in ll:
        timeSeries = connection.load(name)
        time, demand = util.to_array(timeSeries)

        period, acf, ped = __period_calc(timeSeries.frequency, demand, time)
        
        # Calculate profile
        freq, profile = __extract_profile(time, demand, timeSeries.frequency, period, hour(1), lambda x: np.mean(x))
        freq, profile_upper = __extract_profile(time, demand, timeSeries.frequency, period, hour(1), lambda x: np.percentile(x, 95)) 
        freq, profile_lower = __extract_profile(time, demand, timeSeries.frequency, period, hour(1), lambda x: np.percentile(x, 15))

        # Plotting
        fig = plt.figure()
        folder = 'C:/temp/'
        ax = fig.add_subplot(111)
        ax.fill_between(xrange(len(profile)), profile_lower, profile_upper, interpolate=True, facecolor='lightgray', lw=0)
        ax.plot(range(len(profile)), profile)
        plt.savefig('%s/%s_profile.png' % (folder, name), dpi=30)
        plt.close()
        
        plt.plot(xrange(len(acf)), acf)
        plt.savefig('%s/%s_acf.png' % (folder, name), dpi=30)
        plt.close()
        
        plt.plot(xrange(len(demand)), demand)
        plt.savefig('%s/%s_times.png' % (folder, name), dpi=30)
        plt.close()
        
    # Close times
    times_client.close()    
def main():
    tc.connect()
    createBenchmarkProfile('PUSER_BENCHMARK_SMALL', 100, sec(3), hour(6))
    createBenchmarkProfile('PUSER_BENCHMARK_MEDIUM', 150, sec(3), hour(6))
    createBenchmarkProfile('PUSER_BENCHMARK_LARGE', 200, sec(3), hour(6))
    tc.close()