Example #1
import os
from os.path import isfile, join
import pandas as pd
import pickle
import sys

sys.path = ['./', '../'] + sys.path

# Local
from GenConfigs import *
from ContactDB import GetActivationRecordsSince
from Logger import ScriptLogger
from PerfMonAnalyzer import *
from TestDataframePlotting import *

logger = ScriptLogger(loggername='workload_analyzer',
                      filename=FAAS_ROOT + '/logs/WA.log')


def GetTestMetadata():
    """
    Returns the test start time from the output log of the
    Synthetic Workload Invoker (SWI).
    """
    test_start_time = None
    with open(FAAS_ROOT +
              "/synthetic-workload-invoker/test_metadata.out") as f:
        lines = f.readlines()
        test_start_time = lines[0]
        config_file = lines[1]
        invoked_actions = int(lines[2][:-1])
        print('Invocations by Workload Invoker: ' + str(invoked_actions))
    try:
Example #2
from os.path import isfile, join
import pandas as pd
import pickle
import sys

# Local
sys.path = ['./', '../'] + sys.path
from GenConfigs import *
sys.path = [
    FAAS_ROOT, FAAS_ROOT + '/commons', FAAS_ROOT + '/workload_analyzer'
] + sys.path
from Logger import ScriptLogger
from PerfMonAnalyzer import *
from TestDataframePlotting import *

logger = ScriptLogger(loggername='workload_analyzer', logfile='WA.log')


def GetTestMetadata(test_metadata_file=FAAS_ROOT +
                    "/synthetic_workload_invoker/test_metadata.out"):
    """
    Returns the test start time from the output log of the
    Synthetic Workload Invoker (SWI).
    """
    test_start_time = None
    with open(test_metadata_file) as f:
        lines = f.readlines()
        test_start_time = lines[0]
        config_file = lines[1]
        invoked_actions = int(lines[2][:-1])
        print('Invocations by Workload Invoker: ' + str(invoked_actions))
    try:
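
Both truncated variants of GetTestMetadata parse the same three-line metadata file written by the Synthetic Workload Invoker: the test start time, the path to the workload config file, and the number of invoked actions. A minimal usage sketch for Example #2's variant; the file path and values below are purely hypothetical:

# Hedged usage sketch; the path and values are illustrative only.
# The real test_metadata.out is produced by the Synthetic Workload Invoker.
with open('/tmp/test_metadata.out', 'w') as f:
    f.write('1617304954000\n')                  # line 1: test start time
    f.write('workloads/example_config.json\n')  # line 2: workload config file
    f.write('250\n')                            # line 3: invoked action count

GetTestMetadata(test_metadata_file='/tmp/test_metadata.out')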
Example #3
import json
import pickle
from datetime import datetime
from optparse import OptionParser

# FAAS_ROOT, ScriptLogger and the analysis helpers (ConstructConfigDataframe,
# ConstructTestDataframe, CreateStatisticalSummary, GetControlGroupsRecords,
# AnalyzePerfMonRecords, TestDataframePlotter, CapacityFactor) are assumed to
# be imported from the project modules, as in Examples #1 and #2.


def main(argv):
    """
    The main function.
    """
    parser = OptionParser()
    parser.add_option("-v",
                      "--verbose",
                      dest="verbose",
                      help="prints the detailed test data",
                      action='store_true')
    parser.add_option("-p",
                      "--plot",
                      dest="plot",
                      help="plots the test results",
                      action='store_true')
    parser.add_option("-s",
                      "--save_plot",
                      dest="save_plot",
                      help="save test result plots",
                      action='store_true')
    parser.add_option("-a",
                      "--archive",
                      dest="archive",
                      help="archive the test results in an pickle file",
                      action='store_true')
    parser.add_option("-c",
                      "--capacity_factor",
                      dest="capacity_factor",
                      help="returns the capacity factor",
                      action='store_true')
    parser.add_option('-o',
                      '--override_testname',
                      dest='override_testname',
                      help='override the JSON test name',
                      metavar='FILE')
    parser.add_option("-r",
                      "--read_results",
                      dest="read_results",
                      help="gather also the results of function invocations",
                      action='store_true')
    parser.add_option("-n",
                      "--test_name",
                      dest="test_name",
                      default="latest_test",
                      help="Name of test for Workload analysis",
                      action='store_true')
    (options, args) = parser.parse_args()

    logger.info("Workload Analyzer started")
    logger = ScriptLogger(loggername='workload_analyzer',
                          filename=FAAS_ROOT + '/logs/' + test_name +
                          '/WA.log')

    print("Log file -> logs/WA.log")

    test_start_time, config_file = GetTestMetadata(options.test_name, logger)
    if FAAS_ROOT in config_file:
        [test_name, config_df] = ConstructConfigDataframe(config_file, logger)
    else:
        [test_name,
         config_df] = ConstructConfigDataframe(FAAS_ROOT + '/' + config_file,
                                               logger)

    read_results = bool(options.read_results)
    test_df = ConstructTestDataframe(since=test_start_time,
                                     limit=100000,
                                     read_results=read_results)
    print('Records read from CouchDB: ' + str(len(test_df['start'])))

    # Inter-arrival times between consecutive invocation start timestamps
    sorted_starts = sorted(test_df['start'])
    invocation_periods = [
        sorted_starts[i + 1] - sorted_starts[i]
        for i in range(len(sorted_starts) - 1)
    ]
    try:
        mean_invocation_period = sum(invocation_periods) / \
            len(invocation_periods)
        print('invocation period: ' + str(mean_invocation_period))
        print('avg invocation rate: ' + str(1000.0 / mean_invocation_period))
    except ZeroDivisionError:
        print('No invocations found!')

    stat_df = CreateStatisticalSummary(test_df, config_df, test_start_time)
    print(stat_df)

    if options.override_testname:
        # override the testname
        test_name = options.override_testname

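    # Normalize timestamps so the earliest invocation starts at t = 0, then
    # derive pure execution time by subtracting container init time from the
    # total duration.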
    ref = test_df['start'].min()
    test_df['start'] -= ref
    test_df['end'] -= ref
    cgroups_df = GetControlGroupsRecords(logger, since=test_start_time)
    perf_mon_records = AnalyzePerfMonRecords(config_file)
    test_df['execution'] = test_df['duration'] - test_df['initTime']

    if options.verbose:
        # Printing the data
        print(config_df)
        print(stat_df)
        print(test_df)
        print(cgroups_df)
        print(perf_mon_records)
    if options.plot:
        # Plotting the data
        save_plot = bool(options.save_plot)
        TestDataframePlotter(save_plot, test_df, cgroups_df)
        # PerfMonPlotter(perf_mon_records, time_window=[5, 10])
    if options.archive:
        # Storing the data
        now = datetime.now()
        file_name = FAAS_ROOT + '/data_archive/' + now.strftime("%Y_%m_%d_%H_%M") + \
            '_' + test_name + '.pkl'
        pickle.dump([test_name, config_df, stat_df, test_df, perf_mon_records],
                    open(file_name, "wb"))
    if options.capacity_factor:
        with open(FAAS_ROOT + '/workload-analyzer/capacity_factors.json',
                  'w') as outfile:
            json.dump(CapacityFactor(test_df), outfile)

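    # Summary statistics over all invocations, then split into warm starts
    # (initTime == 0) and cold starts (initTime != 0).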
    print('Performance Summary')
    for dim in ['initTime', 'execution', 'latency']:
        print('Mean ' + dim + ' (ms): ' + str(test_df[dim].mean()))
        print('Std ' + dim + ' (ms): ' + str(test_df[dim].std()))
        print('***********')
    warm_starts_test_df = test_df[test_df['initTime'] == 0]
    print('Warm Start Performance Summary (count: ' +
          str(len(warm_starts_test_df)) + ')')
    for dim in ['initTime', 'execution', 'latency']:
        print('Mean ' + dim + ' (ms): ' + str(warm_starts_test_df[dim].mean()))
        print('Std ' + dim + ' (ms): ' + str(warm_starts_test_df[dim].std()))
        print('***********')
    cold_starts_test_df = test_df[test_df['initTime'] != 0]
    print('Cold Start Performance Summary (count: ' +
          str(len(cold_starts_test_df)) + ')')
    for dim in ['initTime', 'execution', 'latency']:
        print('Mean ' + dim + ' (ms): ' + str(cold_starts_test_df[dim].mean()))
        print('Std ' + dim + ' (ms): ' + str(cold_starts_test_df[dim].std()))
        print('***********')

    return True
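
Example #3 defines main(argv) but no entry point. A minimal sketch of how the script might be invoked, assuming the standard __main__ guard (not part of the original listing):

# Hypothetical entry point; OptionParser reads sys.argv itself, so the
# argv parameter is effectively unused by main().
if __name__ == '__main__':
    import sys
    main(sys.argv[1:])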