Example 1
def __hist_migration_cpu_effect():
    indexmap, table = __read_table('migration-data') 
            
    deltas_source = []
    deltas_target = []
    for row in table:
        source_before = float(row[indexmap['source-before']])
        source_during = float(row[indexmap['source-during']])
        deltas_source.append(source_during - source_before)
        
        target_before = float(row[indexmap['target-before']])
        target_during = float(row[indexmap['target-during']])
        deltas_target.append(target_during - target_before)
            
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.hist(deltas_source, bins=10, color='white')
    ax.set_ylabel('Frequency')
    ax.set_xlabel('CPU change before/during migration')
    fig.savefig(configuration.path('migration_source','pdf'))
    fig.show()
        
    fig = plt.figure()
    ax = fig.add_subplot(111)
    ax.hist(deltas_target, bins=10, color='white')
    ax.set_ylabel('Frequency')
    ax.set_xlabel('CPU change before/during migration')
    fig.savefig(configuration.path('migration_target','pdf'))
    fig.show()
Example 2
 def write(self):
     # Build result table 
     names = 'STRATEGY_PLACEMENT \t STRATEGY_REALLOCATION \t max. servers \t avg. srv. count'
     res_reservation = 'LowerBound \t None \t %f \t %f' % (self.lb_max_srv, self.lb_avg_srv)
     res_demand = 'LowerBoundDemand \t None \t %f \t %f' % (self.lb_demand_max_srv, self.lb_demand_avg_srv)
     
     # Build header and values for this experiment
     conf_head, conf_values = clparams.build_result_log_title()
     names = '%s \t %s' % ('\t'.join(conf_head), names)
     res_reservation = '%s \t %s' % ('\t'.join(conf_values), res_reservation)
     res_demand = '%s \t %s' % ('\t'.join(conf_values), res_demand)
     
     # Append results to file
     filename = configuration.path(clparams.CL_RESULT_FILE, 'csv')
     with FileLock(filename):
         # Add header only if file is new
         try:
             # Check if file already exists
             with open(filename):
                 pass
         except IOError:
             # Create a new file and append header row
             f = open(filename, 'w')
             f.write(names)
             f.write('\n')
             f.close()
         
         # Append row information 
         with open(filename, 'a') as f:
             f.write(res_reservation)
             f.write('\n')
             f.write(res_demand)
             f.write('\n')
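
A variant of the header-once pattern above using an explicit existence check; a minimal sketch, assuming FileLock (as used above) already serializes access, with a hypothetical helper name:

import os.path

def append_result_row(filename, header, row):
    # Hypothetical helper mirroring write() above
    with FileLock(filename):
        # Write the header only if the file does not exist yet
        if not os.path.exists(filename):
            with open(filename, 'w') as f:
                f.write(header + '\n')
        # Append the data row
        with open(filename, 'a') as f:
            f.write(row + '\n')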
Example 3
def __dump_to_csv():
    connection = times_client.connect()
    
    demands = []
    for desc in selected:
        timeSeries = connection.load(__times_name(True, desc.name, POSTFIX_NORM))
        _, demand = util.to_array(timeSeries)
        demands.append((desc.name, demand))
        
    import csv
    with open(configuration.path('traces', 'csv'), 'wb') as csvfile:
        spamwriter = csv.writer(csvfile, delimiter='\t')
        
        row = []
        for demand in demands:
            row.append(demand[0])
        spamwriter.writerow(row)
        
        l = len(demands[0][1])
        for i in xrange(0, l):
            row = []
            for demand in demands:
                if i < len(demand[1]): 
                    row.append(demand[1][i])
                else:
                    print 'warn: trace %s is shorter than %i samples, padding with 0' % (demand[0], l)
                    row.append(0)
                    
            spamwriter.writerow(row)
        
    times_client.close()
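
The padding loop above can also be expressed with itertools.izip_longest, which pads shorter traces automatically; a sketch with an illustrative helper name:

import csv
from itertools import izip_longest

def dump_traces(demands, path):
    # demands: list of (name, values) tuples, as built above
    with open(path, 'wb') as csvfile:
        writer = csv.writer(csvfile, delimiter='\t')
        writer.writerow([name for name, _ in demands])
        # izip_longest pads short traces with 0 instead of printing a warning
        for row in izip_longest(*[values for _, values in demands], fillvalue=0):
            writer.writerow(list(row))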
Example 4
def __plot_complete_mix():
    '''
    Plots all TS of a mix in a single image using multiple axes
    '''
    
    # Connect with times
    connection = times_client.connect()
    
    plot_mix = pdata.mix_2
    cols = 5
    rows = len(plot_mix) / cols + 1 
    index = 0
    print rows
    
    fig = plt.figure()
    fig.set_figheight(20)
    fig.set_figwidth(40)
    
    for desc in plot_mix:  
        name = desc.name
        timeSeries = connection.load(__times_name(True, name, POSTFIX_USER))
        _, demand = util.to_array(timeSeries)
        
        index += 1
        ax = fig.add_subplot(rows, cols, index)
        ax.get_xaxis().set_visible(False)
        ax.get_yaxis().set_visible(False)
        ax.plot(range(0, len(demand)), demand)

    plt.savefig(configuration.path('mix_overlay', 'png'))
    
    # Close times connection
    times_client.close()
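
Note that the row computation above (len(plot_mix) / cols + 1) allocates a spare empty row whenever len(plot_mix) is an exact multiple of cols; ceiling division avoids it (illustrative only):

# Ceiling division: 10 profiles with 5 columns -> 2 rows, 11 -> 3 rows
rows = -(-len(plot_mix) // cols)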
Example 5
def __plot_overlay_mix():
    '''
    Plots all TS of a mix in a single-axis graph
    '''
    # Connect with times
    connection = times_client.connect()
    
    # Plot selected
    selected_mix0 = ['O2_retail_ADDORDER', 'SIS_163_cpu', 'SIS_393_cpu']
    selected_mix1 = ['SIS_222_cpu', 'SIS_213_cpu', 'SIS_387_cpu']
    plot_mix = selected_mix1
    
    # Plot all from a set
#    plot_mix = []
#    for i in xrange(a, a + 100):
#        print selected[i].name
#        plot_mix.append(selected[i].name)
    
    fig = plt.figure()
    
    ax = fig.add_subplot(111)
    ax.set_xlim([0, 300])
    
    for name in plot_mix:
        timeSeries = connection.load(__times_name(True, name, POSTFIX_USER))
        print timeSeries
        _, demand = util.to_array(timeSeries)
        # Skip the first 7 samples and keep one day of data (24 * 12 + 1 = 289 samples)
        demand = demand[7:289 + 7]
        ax.plot(range(0, len(demand)), demand, linewidth=0.7)

    
    # Samples are 5 minutes apart, so hour t maps to sample index t * 60 / 5
    xt = [(t * 60 / 5) for t in xrange(0, 25)]
    xl = [t for t in xrange(0, 25)]
    
    ax.set_xticks(xt)
    ax.set_xticklabels(xl)
    
    ax.set_xlabel('Time in hours')
    ax.set_ylabel('Load in number of users')
    
#    plt.show()
    plt.savefig(configuration.path('overlay', 'png'))
    plt.savefig(configuration.path('mix1', 'pdf'))
    
    # Close times connection
    times_client.close()
Example 6
def __fetch_timeseries(host, sensor, timeframe):
    from filelock.fl import FileLock
    cache_file = '%s_%s_%s_%s' % (str(host), str(sensor), str(timeframe[0]), str(timeframe[1]))
    cache_file = configuration.path(cache_file)
    with FileLock(cache_file, timeout=60):
        try:
            # Open the cache file in binary mode; raises IOError if it does not exist
            with open(cache_file, 'rb') as f:
                t = TTransport.TFileObjectTransport(f)
                prot = TBinaryProtocol.TBinaryProtocolAccelerated(t)
                
                # Decode binary stream as Thrift object
                ts = times_types.TimeSeries()
                ts.read(prot)
                
                # Convert to array
                load = [x.value for x in ts.elements]
                
                # Return elements
                return load
            
        except IOError:
            __connect()
            
            query = ttypes.TimeSeriesQuery()
            query.hostname = host
            query.startTime = timeframe[0]
            query.stopTime = timeframe[1]
            query.sensor = sensor
            
            result = connection.query(query)
            _, ts_load = util.to_array_collector(result, timeframe)
            
            # Build file as Thrift object
            ts = times_types.TimeSeries()
            ts.name = 'cache'
            ts.frequency = 3
            ts.elements = []
            
            for i, load in enumerate(ts_load):
                new_el = times_types.Element(i*3, int(load))
                ts.elements.append(new_el)
            
            # Write Thrift object to a cache file
            with open(cache_file, 'wb') as f:
                t = TTransport.TFileObjectTransport(f)
                prot = TBinaryProtocol.TBinaryProtocolAccelerated(t)
                ts.write(prot)
            
            return ts_load
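
A minimal usage sketch of the cache-through fetch above; the epoch timestamps are hypothetical:

# The first call queries Sonar and writes the Thrift cache file;
# repeated calls with the same arguments are served from the cache.
timeframe = (1402092000, 1402134000)
load = __fetch_timeseries('srv0', 'psutilcpu', timeframe)
print 'fetched %i samples' % len(load)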
Example 7
def plot_schedules(schedule_ids, cdf_only=True, out=False, suffix='png'):
    import matplotlib.pyplot as plt
    
    # Plot a gantt chart for each schedule
    if not cdf_only: 
        for schedule_id in schedule_ids:
            schedule = load_schedule(schedule_id)
            schedule.plot_gantt_chart(schedule_id, out, suffix)

    # Setup plot figure
    fig = plt.figure()
    subplot = fig.add_subplot(111)
    subplot.set_ylabel('CDF')
    subplot.set_xlabel('Instance Lifetime (min)')
    
    # Extracts lifetimes for each entry
    lifetimes = lambda x: [(int(entry.rampUp + entry.duration + entry.rampDown) / 60) for entry in x]
    
    # Plot CDF and show the plot
    plt = plot_CDF(schedule_ids, subplot, lifetimes)
    if not out:
        plt.show()
    else:
        plt.savefig(configuration.path('CDF_lifetimes', suffix)) 

    # Plot CDFs of arrival rates
    fig = plt.figure()
    subplot = fig.add_subplot(111)
    subplot.set_ylabel('CDF')
    subplot.set_xlabel('Arrival rate (min)')
    
    # Extracts arrival times for each entry
    arrivalrates = lambda x: [int((x[i].offset - x[i - 1].offset) / 60) for i in xrange(1, len(x), 1)]
    
    # Plot CDFs and show the plot 
    plt = plot_CDF(schedule_ids, subplot, arrivalrates)
    if not out:
        plt.show()
    else:
        plt.savefig(configuration.path('CDF_arrivals', suffix))  
Example 8
def clear_file(filename, extension):
    try:
        filename = configuration.path(filename, extension)
        print 'Clearing output file: %s' % (filename)
        
        with open(filename):
            pass
        
        import shutil
        print 'Creating backup of current results file'
        shutil.copyfile(filename, '%s.bak' % filename)
        
        print 'Removing existing results file'
        import os
        os.remove(filename)
    except (IOError, OSError):
        # File does not exist yet - nothing to clear
        pass
Example 9
def __read_table(name):
    indexmap = {}
    table = []
    header = False
    with open(configuration.path(name,'csv'), 'rb') as db_file:
        dbreader = csv.reader(db_file, delimiter='\t')

        for row in dbreader:
            if not header:
                # The first row holds the column names; map each to its index
                for i, col_name in enumerate(row):
                    print col_name
                    indexmap[col_name] = i
                header = True
                continue
            table.append(row)
            
    return indexmap, table
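
A short usage sketch of __read_table, mirroring Example 1; indexmap translates a column name into its position within each row:

indexmap, table = __read_table('migration-data')
for row in table:
    source_before = float(row[indexmap['source-before']])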
Example 10
def plot_all():
    print 'Total profiles: %i' % load.count()
    
    # For each workload profile available
    for i in xrange(load.count()):
        # For all domain sizes configured
        for s in xrange(conf_domainsize.count_domain_sizes()):
            # Load user profile
            ts = load.get_user_profile(i, s)[1]
            #ts = load.get_cpu_profile(i, s)[1]
            
            # Create figure
            fig = plt.figure()
            subplot = fig.add_subplot(111)
            subplot.set_ylim(ymax=100, ymin=0)
            subplot.plot(range(len(ts)), ts)
            
            # Build the path, save the figure and close it to free memory
            path = configuration.path('%s_%i_%i' % (conf_load.TIMES_SELECTED_MIX.name, i, s),'png') 
            print path
            plt.savefig(path)
            plt.close(fig)
Example 11
 def write_configuration_csv(self, configurations):
     # All CSV content lines
     csv_lines = []
     
     # Create one line for each configuration
     for config in configurations:
         params = [] 
         for factor in self.factors:
             params.append(str(config[factor[0]]))
         line = '\t'.join(params)
         csv_lines.append(line)
     
     # Create CSV header
     csv_header = '\t'.join([factor[0] for factor in self.factors]) + '\n'
     csv_header = '%s \t %s' % ('schedule_id', csv_header)
     
     # Write CSV file
     with open(configuration.path('schedule_configurations', 'csv'), 'w') as f:
         f.write(csv_header)
         for i, line in enumerate(csv_lines):
             f.write('%i \t %s \n' % (self.__get_schedule_id(i, 0), line))
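
The same table could be produced with csv.writer instead of manual tab joining; a standalone sketch with illustrative names:

import csv

def write_configurations(path, factors, configurations, schedule_ids):
    # Hypothetical standalone variant of write_configuration_csv() above
    with open(path, 'w') as f:
        writer = csv.writer(f, delimiter='\t')
        writer.writerow(['schedule_id'] + [factor[0] for factor in factors])
        for sid, config in zip(schedule_ids, configurations):
            writer.writerow([sid] + [str(config[factor[0]]) for factor in factors])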
Example 12
 def plot_gantt_chart(self, schedule_id, out=None, suffix='png'):
     '''
      Plot this schedule as a Gantt chart to visualize it
     '''
     
     import matplotlib.pyplot as plt
     
     # Setup figure
     fig = plt.figure()
     ax = fig.add_subplot(111)
     ax.set_xlabel('Time in Seconds')
     ax.set_ylabel('Virtual Machine')
     
     # Create offset and width information for boxes (= entries)
     offsets = []
     widths = []
     bottoms = [] 
     for i, entry in enumerate(self.entries):
         # Each entry becomes three bars: ramp-up, steady duration, ramp-down
         offsets.append(entry.offset)
         widths.append(entry.rampUp)
         
         offsets.append(entry.offset + entry.rampUp)
         widths.append(entry.duration)
         
         offsets.append(entry.offset + entry.rampUp + entry.duration)
         widths.append(entry.rampDown)
         
         # All three bars of an entry share the same row in the chart
         bottoms.extend([i, i, i])
     
     # Plot boxes; the color tuple cycles per bar, so ramp phases are gray and the duration is black
     ax.barh(bottoms, widths, height=1, color=('gray', 'black', 'gray'), edgecolor='white', left=offsets)
     if out is None:
         plt.show()
     else:
         plt.savefig(configuration.path('gantt_%i' % schedule_id, suffix))
Example 13
def main():
    connection = analytics.__connect()
    start = analytics.__to_timestamp(START)
    stop = analytics.__to_timestamp(END)
    raw_frame = (start, stop)
    
    s0_result, s0_time = analytics.__fetch_timeseries(connection, 'srv0', 'psutilcpu', raw_frame)
    
    ts0, pr0 = __fetch_logs(connection, 'Andreas-PC', 'start_benchmark', raw_frame)
    ts1, pr1 = __fetch_logs(connection, 'load0', 'rain', raw_frame)
    
    analytics.__disconnect()
    
    # Setup the new plot
    fig = plt.figure()
    fig.set_size_inches((10, 6))
    plt.xlabel('Time')
    plt.ylabel('Server Load')
    
    # Create a new subplot
    ax = fig.add_subplot(1, 1, 1)
    ax.plot(s0_time, s0_result, linewidth=0.3)
    ax.set_ylim((0, 150))
    ax.set_xticklabels(
        [__to_date(data) for data in ts0]
        )
    
    # Draw lines for messages
    for i, ts in enumerate(ts0):
        if pr0[i] > 5000:
            # ax.axvline(ts, color='m')
            ax.axvline(x=ts, ymin=0.9, ymax=0.95, linewidth=0.3, color='m')
    for i,ts in enumerate(ts1):
        ax.axvline(x=ts, ymin=0.85, ymax=0.9, linewidth=0.3, color='r')

    # Save the plot
    plt.savefig(configuration.path('sonar_concept', 'pdf'))
Example 14
    
    return frequency, result


if __name__ == '__main__':
    '''
    Tests if all registered TS in sonar meta are of sufficient length
    '''
    import matplotlib.pyplot as plt
    plot_counter = 0
    for segment in meta.raw_segments:
        RAW, host = segment
        START, END = RAW.split('    ')
        start = __to_timestamp(START)
        stop = __to_timestamp(END)
        raw_frame = (start, stop) 
        load = __fetch_timeseries(host, 'psutilcpu', raw_frame)
        
        # Check whether the TS is long enough
        if len(load) < 7600: 
            print "invalid TS: ('%s', '%s')" % (segment[0], segment[1])
            
        # Plot time series for debugging purpose
        fig = plt.figure()
        plot_counter += 1
        ax = fig.add_subplot(111)
        ax.plot(range(len(load)), load)
        plt.savefig(configuration.path('sonar_trace%s' % (plot_counter), 'png'), dpi=30)
        plt.close()
        
            
Example 15
    plt.legend(legends)
    plt.show()


if __name__ == "__main__":
    x_labels = ["Instance Lifetime (min)", "Arrival Intervals (min)", "Instance Lifetime (min)"]
    y_label = "CDF"

    for i, dir_name in enumerate(dir_names):
        root_dir = parent_dir + dir_name
        metafile_lines = [line.rstrip("\n") for line in open(root_dir + meta_filename)]

        meta_info = MetaCDF(root_dir + metafile_lines[0] + ".csv")
        metafile_lines = metafile_lines[1:]

        cdf_functions = {}
        for file_name in metafile_lines:
            print root_dir + file_name + ".csv"
            coordinates = transform_cdf_coordinates(root_dir + file_name + ".csv", meta_info)
            cdf_functions[file_name] = coordinates

            # create map only for JSON
            y_x_dict = {}
            for pair in coordinates:
                y_x_dict[pair.y] = pair.x
            # Dump the dict directly; dumps() followed by dump() would double-encode the JSON
            json_output = "schedules/" + "cdf_" + dir_name[:-1] + "_" + file_name + ".json"
            with open(json_output, "w") as outfile:
                json.dump(y_x_dict, outfile, sort_keys=True)
        plotCDFGraph(x_labels[i], y_label, cdf_functions, configuration.path(x_labels[i], "png"))
Example 16
import json
import numpy as np
import sys
import time
import traceback

##########################
# # Configuration       ##
##########################
COLLECTOR_IP = 'monitor0.dfg'
MANAGEMENT_PORT = 7931
LOGGING_PORT = 7921
DEBUG = False
TRACE_EXTRACT = False
DRIVERS = 2
EXPERIMENT_DB = configuration.path('experiments', 'csv')

CONTROLLER_NODE = 'Andreas-PC'  
DRIVER_NODES = ['load0', 'load1']

# Times
RAW = '6/6/2014 23:58:00    6/7/2014 11:40:00'

# Timestamps of bugfixes
FIX_B1 = int(time.mktime(time.strptime('10/6/2013', '%m/%d/%Y')))    
##########################

# List of warnings that might render the run invalid
warns = []

# Extract timestamps from RAW
Example 17
from control import domains
from logs import sonarlog
from virtual import nodes
import strategy
import configuration
import json

######################
# # CONFIGURATION    ##
######################
ALLOCATION_MATRIX_FILE = configuration.path("andreas_matrix_90_cap200", "csv")
######################

# Setup logging
logger = sonarlog.getLogger("controller")


class Strategy(strategy.StrategyBase):
    def __init__(self, scoreboard, pump, model):
        super(Strategy, self).__init__(scoreboard, pump, model, 10 * 60, 120)
        self.var = []

    def dump(self):
        print "Dump Sandpiper controller configuration..."
        logger.info(
            "Strategy Configuration: %s"
            % json.dumps({"name": "File", "allocation_matrix_file": ALLOCATION_MATRIX_FILE})
        )

    def initial_placement(self):
        nodecount = len(nodes.NODES)
Example 18
if __name__ == "__main__":
    if configuration.PRODUCTION:
        print "Configuration is set to PRODUCTION MODE"
        print "Change configuration.PRODUCTION = False for simulations"
    else:
        try:
            names, res = controller.start()

            # Build header and values for this experiment
            conf_head, conf_values = clparams.build_result_log_title()
            names = "%s \t %s" % ("\t".join(conf_head), names)
            res = "%s \t %s" % ("\t".join(conf_values), res)

            # Append results to file
            filename = configuration.path(clparams.CL_RESULT_FILE, "csv")
            with FileLock(filename):
                # Add header only if file is new
                try:
                    with open(filename):
                        pass
                except IOError:
                    f = open(filename, "w")
                    f.write(names)
                    f.write("\n")
                    f.close()

                # Append row information
                with open(filename, "a") as f:
                    f.write(res)
                    f.write("\n")