Example #1
from typing import Any, Dict, List, Tuple, cast

from commons.Logger import ScriptLogger

# create_events and EnforceActivityWindow are local helpers assumed in scope.
def generic_event_generator(workload) -> Tuple[Dict[str, List[float]], int]:
    """
    This function returns a list of times and applications calls given a workload description.
    """

    logger_eg = ScriptLogger('event_generator', "SWI.log")

    logger_eg.info("Started Generic Event Generator")
    test_duration_in_seconds = workload['test_duration_in_seconds']

    all_events = {}
    event_count = 0

    random_seed = workload['random_seed']
    logger_eg.info('random_seed: ' + str(random_seed))

    instance: str
    desc: Dict[str, Any]
    for (instance, desc) in workload['instances'].items():
        if 'interarrivals_list' in desc.keys():
            instance_events = cast(List[Any], desc['interarrivals_list'])
            logger_eg.info('Read the invocation time trace for ' + instance)
            # enforcing maximum test duration
            cumulative_time = 0
            cutoff_index = None
            for i, gap in enumerate(instance_events):
                cumulative_time += gap
                if cumulative_time > test_duration_in_seconds:
                    cutoff_index = i
                    break
            if cutoff_index is not None:
                instance_events = instance_events[:cutoff_index]
        else:
            instance_events = create_events(instance=instance,
                                            dist=desc['distribution'],
                                            rate=desc['rate'],
                                            duration=test_duration_in_seconds,
                                            seed=random_seed)
            try:
                start_time = desc['activity_window'][0]
                end_time = desc['activity_window'][1]
                instance_events = EnforceActivityWindow(
                    start_time, end_time, instance_events)
            except (KeyError, IndexError):  # no activity_window given; use the full test duration
                instance_events = EnforceActivityWindow(
                    0, workload['test_duration_in_seconds'], instance_events)
        all_events[instance] = instance_events
        event_count += len(instance_events)

    logger_eg.info("Returning workload event list")

    return (all_events, event_count)
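A minimal workload dict that exercises both branches above (the field names are taken from the code; the values are illustrative only):

workload = {
    'test_duration_in_seconds': 60,
    'random_seed': 42,
    'instances': {
        # trace-driven branch: explicit inter-arrival gaps
        'traced_fn': {'interarrivals_list': [0.5] * 200},
        # synthetic branch: generated events, trimmed to an activity window
        'synthetic_fn': {'distribution': 'Poisson', 'rate': 5,
                         'activity_window': [10, 50]},
    },
}
all_events, event_count = generic_event_generator(workload)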
Example #2
def GenericEventGenerator(workload):
    """
    This function returns a list of times and applications calls given a workload description.
    """
    log_file = workload['log_dir'] + '/SWI.log'
    logger = ScriptLogger('workload_invoker', log_file)

    logger.info("Started Generic Event Generator")
    test_duration_in_seconds = workload['test_duration_in_seconds']

    all_events = {}
    event_count = 0

    random_seed = workload['random_seed']
    logger.info('random_seed: ' + str(random_seed))

    for (instance, desc) in workload['instances'].items():
        if 'interarrivals_list' in desc.keys():
            instance_events = desc['interarrivals_list']
            logger.info('Read the invocation time trace for ' + instance)
        else:
            instance_events = CreateEvents(instance=instance,
                                           dist=desc['distribution'],
                                           rate=desc['rate'],
                                           duration=test_duration_in_seconds,
                                           seed=random_seed)
        try:
            start_time = desc['activity_window'][0]
            end_time = desc['activity_window'][1]
            instance_events = EnforceActivityWindow(
                start_time, end_time, instance_events)
        except (KeyError, IndexError):  # no activity_window given; use the full test duration
            instance_events = EnforceActivityWindow(0,
                                                    workload['test_duration_in_seconds'],
                                                    instance_events)
        all_events[instance] = instance_events
        event_count += len(instance_events)

    logger.info("Returning workload event list")

    return [all_events, event_count]
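EnforceActivityWindow itself is not shown in any of these examples. A minimal sketch of plausible semantics, assuming it keeps only events whose cumulative arrival time falls inside [start_time, end_time] (the real implementation may differ):

def EnforceActivityWindow(start_time, end_time, instance_events):
    # Walk the inter-arrival gaps, keeping arrivals inside the window.
    arrivals, t = [], 0.0
    for gap in instance_events:
        t += gap
        if start_time <= t <= end_time:
            arrivals.append(t)
    # Convert the surviving arrival times back to inter-arrival gaps.
    gaps, prev = [], 0.0
    for a in arrivals:
        gaps.append(a - prev)
        prev = a
    return gaps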
Example #3
import logging
import subprocess
import sys

# Local imports
sys.path = ['./', '../'] + sys.path
from GenConfigs import *
sys.path = [FAAS_ROOT + '/synthetic-workload-invoker'] + sys.path
from EventGenerator import GenericEventGenerator
from commons.JSONConfigHelper import CheckJSONConfig, ReadJSONConfig
from commons.Logger import ScriptLogger
from WorkloadChecker import CheckWorkloadValidity

logging.captureWarnings(True)

# Global variables
supported_distributions = {'Poisson', 'Uniform'}

logger = ScriptLogger('workload_invoker', 'SWI.log')

APIHOST = subprocess.check_output(WSK_PATH + " property get --apihost",
                                  shell=True).split()[3]
APIHOST = 'https://' + APIHOST.decode("utf-8")
AUTH_KEY = subprocess.check_output(WSK_PATH + " property get --auth",
                                   shell=True).split()[2]
AUTH_KEY = AUTH_KEY.decode("utf-8")
user_pass = AUTH_KEY.split(':')
NAMESPACE = subprocess.check_output(WSK_PATH + " property get --namespace",
                                    shell=True).split()[2]
NAMESPACE = NAMESPACE.decode("utf-8")
RESULT = 'false'
base_url = APIHOST + '/api/v1/namespaces/' + NAMESPACE + '/actions/'
base_gust_url = APIHOST + '/api/v1/web/guest/default/'
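The split()[3] and split()[2] indexing above assumes the usual column layout of the `wsk property get` output; a hedged illustration of that assumption:

# Hypothetical `wsk property get --apihost` output line (not verified here):
sample = b"whisk API host\t\topenwhisk.example.com"
print(sample.split()[3])  # b'openwhisk.example.com' -> token at index 3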
Example #4
import yaml
import sys, getopt
from typing import List
import traceback
import asyncio
import json
import logging

from Clusters import BaseDeployment
from Clusters import OpenWhiskDeployment
from Clusters import GoogleDeployment

functions_meta = []
from commons.Logger import ScriptLogger
logging.basicConfig(level=logging.DEBUG)
logger = ScriptLogger(__name__, 'SWI.log')
logger.setLevel(logging.DEBUG)
logging.captureWarnings(True)


async def deploy_to_clusters(configfile: str, provider: str, scenario_name: str,
                             functions_list: list,
                             cluster_obj: BaseDeployment = None,
                             providers_list: list = None,
                             all_clusters: bool = False):
    with open(configfile, 'r') as stream:
        try:
            data = yaml.safe_load(stream)
            if all_clusters:
                for cluster in data['providers'][provider]:
                    curr_cluster = data['providers'][provider][cluster]
                    scenario = data['scenarios'][scenario_name]
Example #5
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes, mark_inset, zoomed_inset_axes, InsetPosition
import os
import pandas as pd
import pickle
import seaborn as sns
import sys
import time
from datetime import datetime  # needed by GetTimeFromDFName below

sys.path = ['./', '../'] + sys.path

# Local
from GenConfigs import *
from commons.Logger import ScriptLogger

logger = ScriptLogger(loggername='comparative_analyzer', logfile='CA.log')
archive_folder = FAAS_ROOT + "/data_archive/"


def GetTimeFromDFName(dfname):
    res = dfname[0:16]
    res = datetime.strptime(res, '%Y_%m_%d_%H_%M')
    return res
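The first 16 characters of the dataframe name are expected to encode a %Y_%m_%d_%H_%M timestamp, e.g. (illustrative filename):

GetTimeFromDFName('2019_06_01_12_30_results.pkl')
# -> datetime.datetime(2019, 6, 1, 12, 30)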


def ComparativePlotting(t_df, p_df_dic):
    """
    Plotting result comparisons.
    """
    dims = {
        's': 'start',
Example #6
# Copyright (c) 2019 Princeton University
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import datetime
import logging
import os
import sys

# Local imports
sys.path = ['./', '../'] + sys.path
from GenConfigs import *
sys.path = [FAAS_ROOT + '/synthetic-workload-invoker'] + sys.path
from commons.Logger import ScriptLogger

logger_wlch = ScriptLogger('workload_checker', 'SWI.log')


def CheckWorkloadValidity(workload, supported_distributions):
    """
    Checks whether a loaded workload is valid.
    """
    logger_wlch.info("Started CheckWorkloadValidity")
    # 1 - Check if the workload has been successfully read in ReadJSONConfig
    if workload is None:
        logger_wlch.info('Workload not valid => Terminating')
        return False
    # 2 - Check for validity of general field
    print(workload)
    fields_to_check = [['test_name', str], ['blocking_cli', bool]]
    for field in fields_to_check:
Example #7
def CheckWorkloadValidity(workload, supported_distributions):
    """
    Checks whether a loaded workload is valid.
    """
    log_file = workload['log_dir'] + '/SWI.log'
    logger = ScriptLogger('workload_invoker', log_file)
    logger.info("Started CheckWorkloadValidity")
    # 1 - Check if the workload has been successfully read in ReadJSONConfig
    if workload is None:
        logger.info('Workload not valid => Terminating')
        return False
    # 2 - Check for validity of general field
    print(workload)
    fields_to_check = [['test_name', str], ['blocking_cli', bool]]
    for field in fields_to_check:
        try:
            print([field, workload[field[0]]])
            if type(workload[field[0]]) is not field[1]:
                logger.error('Input of the ' + field[0] +
                             ' field should be a ' + str(field[1]))
                return False
        except KeyError:  # field missing from the workload
            logger.error('No ' + field[0] + ' field provided!')
            return False
    # 3 - Check if invocation scripts exist for all functions/applications in the workload
    application_set = set()
    distribution_set = set()
    for (instance, specs) in workload['instances'].items():
        application_set.add(specs['application'])
        try:
            distribution_set.add(specs['distribution'])
        except KeyError:
            pass  # trace-driven instances need not specify a distribution

    logger.info('Required applications: ' + str(application_set))
    # all_scripts_available = True

    # for application in application_set:
    #     if not os.path.isfile(FAAS_ROOT + '/invocation-scripts/'+application+'.sh'):
    #         logger.error(
    #             'No invocation script available in invocation-scripts for the following application: '+application)
    #         all_scripts_available = False

    # if not all_scripts_available:
    #     logger.info('Incomplete invocation scripts => Terminating')
    #     return False
    # else:
    #     logger.info('Script files for all applications exist')
    # 4 - Check for supported distributions
    if not distribution_set.issubset(supported_distributions):
        logger.error(
            'At least one specified distribution is not supported. Supported distribution(s): '
            + str(supported_distributions))
        return False
    # 5 - Check for valid test duration
    try:
        test_duration_in_seconds = workload['test_duration_in_seconds']
        if test_duration_in_seconds is None:
            logger.error(
                'Please enter a valid value for test_duration_in_seconds field in the config file.'
            )
            return False
        elif int(test_duration_in_seconds) <= 0:
            logger.error(
                'test_duration_in_seconds should be greater than zero!')
            return False
    except (KeyError, ValueError):
        logger.error(
            'test_duration_in_seconds missing or not a number in the json config file'
        )
        return False
    # 6 - Check that the random_seed field is entered
    if 'random_seed' not in workload:
        logger.error("No random_seed field specified in the config file")
        return False

    return True
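Taken together, the checks above admit a workload of roughly this shape (a hedged illustration; the field names come from the code, the values are made up):

workload = {
    'test_name': 'latest_test',          # must be a str
    'blocking_cli': False,               # must be a bool
    'test_duration_in_seconds': 60,      # must be > 0
    'random_seed': 100,                  # must be present
    'log_dir': 'profiler_results/logs',  # used to place SWI.log
    'instances': {
        'instance1': {
            'application': 'base64_manipulation',
            'distribution': 'Poisson',   # must be a supported distribution
            'rate': 5,
        },
    },
}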
Example #8
# Copyright (c) 2019 Princeton University
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import datetime
import logging
import numpy as np
import random

from commons.Logger import ScriptLogger

logger_eg = ScriptLogger('event_generator', 'SWI.log')


def CreateEvents(instance, dist, rate, duration, seed=None):
    """
    Creates a dictionary of application instances
    """
    inter_arrivals = []

    if rate == 0:
        return inter_arrivals

    if dist == "Uniform":
        # Note: str hashes are salted per process in Python 3 unless
        # PYTHONHASHSEED is set, so this seeding is not reproducible across runs.
        random.seed(seed + hash(instance))
        shift = random.random() / rate
        inter_arrivals = int(duration * rate) * [1.0 / rate]
        inter_arrivals[0] += shift
    elif dist == "Poisson":
        np.random.seed(seed)
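The snippet cuts off inside the Poisson branch. A hedged sketch of how such a branch is commonly completed (an assumption, not the verbatim source): a Poisson arrival process has exponentially distributed inter-arrival gaps with mean 1/rate.

        # Sketch: draw ~duration*rate exponential gaps with mean 1/rate.
        beta = 1.0 / rate
        n = int(duration * rate)
        inter_arrivals = list(np.random.exponential(scale=beta, size=n))

    return inter_arrivals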
Example #9
def main(argv):
    """
    The main function.
    """
    parser = OptionParser()
    parser.add_option("-n",
                      "--test_name",
                      dest="test_name",
                      default="latest_test",
                      help="Name of test",
                      metavar="FILE")
    parser.add_option("-r",
                      "--rate_override",
                      dest="rate_override",
                      help="Override rate of invocation from arguments",
                      metavar="FILE")
    parser.add_option(
        "-c",
        "--config_json",
        dest="config_json",
        help="The input json config file describing the synthetic workload.",
        metavar="FILE")
    parser.add_option("-b", "--benchmark", dest="benchmark", metavar="FILE")
    parser.add_option("-p", "--param_file", dest="param_file", metavar="FILE")
    (options, args) = parser.parse_args()

    log_dir, log_file = createDir(options.test_name)
    logger = ScriptLogger('workload_invoker', log_file)

    logger.info("Workload Invoker started")

    print("Log file -> ", log_file, "\n")

    if not CheckJSONConfig(options.config_json):
        logger.error("Invalid or no JSON config file!")
        return False  # Abort the function if json file not valid

    workload = ReadJSONConfig(options.config_json)
    workload = ApplyJSONOverrides(workload, log_dir, options.rate_override,
                                  options.benchmark, options.param_file)

    if not CheckWorkloadValidity(
            workload=workload,
            supported_distributions=supported_distributions):
        return False  # Abort the function if json file not valid

    [all_events, event_count] = GenericEventGenerator(workload)

    threads = []

    for (instance, instance_times) in all_events.items():
        # Previous method to run processes
        # instance_script = 'bash ' + FAAS_ROOT + '/invocation-scripts/' + \
        #     workload['instances'][instance]['application']+'.sh'
        # threads.append(threading.Thread(target=PROCESSInstanceGenerator, args=[instance, instance_script, instance_times, workload['blocking_cli']]))
        # New method
        action = workload['instances'][instance]['application']
        try:
            param_file = workload['instances'][instance]['param_file']
        except KeyError:  # no param_file for this instance
            param_file = None
        blocking_cli = workload['blocking_cli']
        if 'data_file' in workload['instances'][instance].keys():
            data_file = workload['instances'][instance]['data_file']
            threads.append(
                threading.Thread(
                    target=BinaryDataHTTPInstanceGenerator,
                    args=[action, instance_times, blocking_cli, data_file]))
        else:
            threads.append(
                threading.Thread(target=HTTPInstanceGenerator,
                                 args=[
                                     action, instance_times, blocking_cli,
                                     log_dir, param_file
                                 ]))

    # Dump Test Metadata
    metadata_file = log_dir + "/test_metadata.out"
    os.system("date +%s%N | cut -b1-13 > " + FAAS_ROOT + '/' + metadata_file)
    os.system("echo " + options.config_json + " >> " + FAAS_ROOT + '/' +
              metadata_file)
    os.system("echo " + str(event_count) + " >> " + FAAS_ROOT + '/' +
              metadata_file)

    try:
        if workload['perf_monitoring']['runtime_script']:
            runtime_script = 'bash ' + FAAS_ROOT + '/' + workload['perf_monitoring']['runtime_script'] + \
                ' ' + str(int(workload['test_duration_in_seconds'])) + ' ' + FAAS_ROOT + '/' + log_dir + '/perf-mon.out' + ' &'
            logger.info(runtime_script)
            os.system(runtime_script)
            logger.info("Runtime monitoring script ran")
    except (KeyError, TypeError):  # no perf_monitoring section configured
        pass

    logger.info("Test started")
    for thread in threads:
        thread.start()
    logger.info("Test ended")

    return True
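For reference, a hedged usage sketch (the script name and config path are assumptions; note that OptionParser reads sys.argv directly, so main's argv argument goes unused):

import sys

# Hypothetical command line; the option names come from the parser above.
sys.argv = ['WorkloadInvoker.py',
            '-n', 'my_test',
            '-c', 'workload_configs/example.json']
main(sys.argv)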
Example #10
class WorkloadInvoker:
    supported_distributions = {'Poisson', 'Uniform'}

    @staticmethod
    def gen_invocation_id(test_name, runid):
        out = subprocess.check_output(["git", "rev-parse", "HEAD"],
                                      cwd=FAAS_ROOT)
        commit_hash = out.decode('utf-8').strip()
        test_dir_name = "{}_{}_{}".format(commit_hash[0:10], test_name, runid)
        return (commit_hash, test_dir_name)

    def __init__(self, config_json):
        # Global variables

        self.logger = None

        APIHOST = subprocess.check_output(
            WSK_PATH + " property get --apihost",
            shell=True).split()[3].decode("utf-8")
        APIHOST = APIHOST if APIHOST.lower().startswith(
            "http") else 'https://' + APIHOST
        AUTH_KEY = subprocess.check_output(WSK_PATH + " property get --auth",
                                           shell=True).split()[2]
        AUTH_KEY = AUTH_KEY.decode("utf-8")
        self.user_pass = AUTH_KEY.split(':')
        NAMESPACE = subprocess.check_output(WSK_PATH +
                                            " property get --namespace",
                                            shell=True).split()[2]
        NAMESPACE = NAMESPACE.decode("utf-8")
        self.RESULT = 'false'
        self.base_url = APIHOST + '/api/v1/namespaces/' + NAMESPACE + '/actions/'
        self.base_gust_url = APIHOST + '/api/v1/web/guest/default/'

        # Generate a runid for this instance
        self.runid = util.gen_random_hex_string(8)

        self.param_file_cache = {}  # a cache to keep json of param files
        self.binary_data_cache = {}  # a cache to keep binary data
        # (image files, etc.)

        # Count of function invocations successfully submitted to FaaS queue
        self.invocation_success_tally = 0
        self.invocation_failure_tally = 0
        self.invocation_expected_tally = 0
        self.tally_lock = threading.Lock()

        self.config_json = config_json
        self.workload = WorkloadInvoker.read_json_config(config_json)
        # Set name and commit hash and create destination dir if
        # missing
        self.my_commit_hash, test_result_dir_name =\
           WorkloadInvoker.gen_invocation_id(self.workload["test_name"], self.runid)
        self.test_result_dir_path = util.ensure_directory_exists(
            os.path.join(DATA_DIR, test_result_dir_name))

    @staticmethod
    def read_json_config(config_json: str) -> Dict[Any, Any]:
        if not check_json_config(config_json):
            raise Exception("Invalid or no JSON config file!")

        workload = read_json_config(config_json)
        if not check_workload_validity(workload=workload,
                                       supported_distributions=WorkloadInvoker.
                                       supported_distributions):
            # Abort the function if json file not valid
            raise Exception("Workload JSON is invalid")

        return workload

    def handle_futures(self, futures) -> Tuple[int, int]:
        """
       Wait for all futures in futures to complete. Returnes a tuple
       containing the number of succseful and failed requests.
       """
        failures = 0
        successes = 0
        for future in concurrent.futures.as_completed(futures):
            try:
                res = future.result()
            except Exception as e:
                self.logger.info("Request failed: " + str(e))
                failures += 1
            else:
                if 200 <= res.status_code <= 299:
                    prefix = "Request successful: "
                    successes += 1
                else:
                    prefix = "Request failed:     "
                    failures += 1
                self.logger.info(prefix + str(res.status_code) + " " +
                                 res.url)

        return (successes, failures)

    # @staticmethod
    # def PROCESSInstanceGenerator(instance, instance_script, instance_times, blocking_cli):
    #     if len(instance_times) == 0:
    #         return False
    #     after_time, before_time = 0, 0

    #     if blocking_cli:
    #         pass
    #     else:
    #         instance_script = instance_script + ' &'

    #     for t in instance_times:
    #         time.sleep(max(0, t - (after_time - before_time)))
    #         before_time = time.time()
    #         os.system(instance_script)
    #         after_time = time.time()

    #     return True

    #class InstanceGenerator(

    def http_instance_generator(self,
                                action,
                                instance_times,
                                blocking_cli,
                                query_file=None,
                                param_file=None) -> None:
        if len(instance_times) == 0:
            raise Exception(
                "http_instance_generator called without instance times")
        session = FuturesSession(max_workers=100)
        url = f"{self.base_url}{action}?{self.runid}"
        assert self.runid
        parameters = {'blocking': blocking_cli, 'result': self.RESULT}
        args = {'testid': self.runid, 'body': None}
        authentication = (self.user_pass[0], self.user_pass[1])

        futures = []

        if (param_file is not None) and (query_file is not None):
            raise Exception(
                "Only one of param_file and query_string can be set")

        if param_file:
            try:
                param_file_body = self.param_file_cache[param_file]
            except KeyError:
                with open(param_file, 'r') as f:
                    param_file_body = json.load(f)
                self.param_file_cache[param_file] = param_file_body
            # Attach the body on cache hits as well as misses.
            args['body'] = param_file_body

        if query_file:
            try:
                query_file_body = self.param_file_cache[query_file]
            except KeyError:
                with open(query_file, 'r') as f:
                    query_file_body = json.load(f)
                    self.param_file_cache[query_file] = query_file_body

            print("Updating paramters", str(parameters), str(query_file_body))
            parameters.update(query_file_body)

        print("Final parameters", str(parameters))
        st = 0
        after_time, before_time = 0, 0
        for t in instance_times:
            # st accumulates the remaining schedule offset. Because before_time
            # is taken before the sleep, (after_time - before_time) covers both
            # the sleep and the request submission, so the pacing self-corrects.
            st = st + t - (after_time - before_time)
            before_time = time.time()
            if st > 0:
                time.sleep(st)

            # self.logger.info("Url " + url)
            # self.logger.info("Setting params" + str(parameters))
            future = session.post(url,
                                  params=parameters,
                                  auth=authentication,
                                  json=args,
                                  verify=False)
            futures.append(future)
            #print(future.result())
            after_time = time.time()

        (successes, failures) = self.handle_futures(futures)

        with self.tally_lock:
            self.invocation_success_tally += successes
            self.invocation_failure_tally += failures
            self.invocation_expected_tally += len(instance_times)

    def binary_data_http_instance_generator(self, action, instance_times,
                                            blocking_cli, data_file):
        """
      TODO: Automate content type
      """
        url = f"{self.base_gust_url}{action}?{self.runid}"
        session = FuturesSession(max_workers=100)
        if len(instance_times) == 0:
            return False
        after_time, before_time = 0, 0

        futures = []

        if data_file not in self.binary_data_cache:
            with open(data_file, 'rb') as f:
                data = f.read()
            self.binary_data_cache[data_file] = {}
            self.binary_data_cache[data_file]["body"] = data
            self.binary_data_cache[data_file]["mime"] = MimeTypes().guess_type(
                data_file)[0]

        file_body = self.binary_data_cache[data_file]["body"]
        file_mime = self.binary_data_cache[data_file]["mime"]

        for t in instance_times:
            st = t - (after_time - before_time)
            if st > 0:
                time.sleep(st)
            before_time = time.time()
            #self.logger.info("Url " + url)
            assert self.runid
            future = session.post(url=url,
                                  headers={'Content-Type': file_mime},
                                  params={
                                      'blocking': blocking_cli,
                                      'result': self.RESULT,
                                      'payload': {
                                          'testid': self.runid
                                      }
                                  },
                                  data=file_body,
                                  auth=(self.user_pass[0], self.user_pass[1]),
                                  verify=False)
            futures.append(future)
            after_time = time.time()

        (successes, failures) = self.handle_futures(futures)

        with self.tally_lock:
            self.invocation_success_tally += successes
            self.invocation_failure_tally += failures
            self.invocation_expected_tally += len(instance_times)

    @staticmethod
    def write_test_metadata(metadata, destdir):
        destfile = os.path.join(destdir, "test_metadata.json")
        with open(destfile, 'w') as f:
            f.write(json.dumps(metadata))

    async def maybe_start_runtime_script(self, workload, destdir):
        if workload['perf_monitoring']['runtime_script']:
            runtime_script_cmdline = [
                os.path.join(FAAS_ROOT,
                             workload['perf_monitoring']['runtime_script']),
                str(int(workload['test_duration_in_seconds'])), destdir
            ]
            runtime_script = await asyncio.create_subprocess_exec(
                *runtime_script_cmdline)
            self.logger.info("Invoked runtime monitoring script pid=%s" %
                             runtime_script.pid)
            return runtime_script
        return None

    async def invoke_benchmark_async(self) -> InvocationMetadata:
        """
       The main function.
       """

        workload = self.workload
        #self.logger.info("Workload Invoker started")
        #print("Log file -> ../profiler_results/logs/SWI.log")

        # log_path = os.path.join(
        #                               util.ensure_directory_exists(
        #                                  os.path.join(self.test_result_dir_path, 'log')),
        #                               'SWI.log')
        self.logger = ScriptLogger('workload_invoker', "SWI.log")

        (all_events, event_count) = generic_event_generator(workload)

        threads = []

        for (instance, instance_times) in all_events.items():
            action = workload['instances'][instance]['application']
            if action == "long_run":
                print("Invoking long_run")
            try:
                param_file = os.path.join(
                    FAAS_ROOT, workload['instances'][instance]['param_file'])
            except KeyError:  # no param_file for this instance
                param_file = None

            try:
                query_file = os.path.join(
                    FAAS_ROOT, workload['instances'][instance]['query_string'])
            except KeyError:  # no query_string for this instance
                query_file = None

            blocking_cli = workload['blocking_cli']
            if 'data_file' in workload['instances'][instance].keys():
                data_file = workload['instances'][instance]['data_file']
                threads.append(
                    threading.Thread(
                        target=self.binary_data_http_instance_generator,
                        args=[action, instance_times, blocking_cli,
                              data_file]))
            else:
                threads.append(
                    threading.Thread(target=self.http_instance_generator,
                                     args=[
                                         action, instance_times, blocking_cli,
                                         query_file, param_file
                                     ]))

        # Dump Test Metadata
        test_metadata: InvocationMetadata = {
            'start_time': math.ceil(time.time() * 1000),
            'workload_name': self.workload["test_name"],
            'test_config': self.config_json,
            'event_count': event_count,
            'commit_hash': self.my_commit_hash,
            'runid': self.runid,
            'runtime_script': False,
            "failures": 0,
            "successes": 0,
            "expected": 0
        }

        runtime_script = await self.maybe_start_runtime_script(
            workload, self.test_result_dir_path)

        self.logger.info("Test started")
        for thread in threads:
            thread.start()

        if runtime_script:
            _, _ = await runtime_script.communicate()

            if runtime_script.returncode != 0:
                raise Exception("Runtime script failed")
            else:
                self.logger.info("Runtime script completed successfully")
                test_metadata['runtime_script'] = True

        for thread in threads:
            thread.join()

        # Save post-benchmark stats to metadata
        test_metadata["failures"] = self.invocation_failure_tally
        test_metadata["successes"] = self.invocation_success_tally
        test_metadata["expected"] = self.invocation_expected_tally

        self.write_test_metadata(test_metadata, self.test_result_dir_path)

        self.logger.info("Test ended")

        return test_metadata

    def invoke_benchmark(self) -> InvocationMetadata:
        return asyncio.run(self.invoke_benchmark_async())
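A minimal driver for the class, assuming the surrounding module supplies the imports the snippet relies on (subprocess, threading, asyncio, util, and so on):

# Hedged usage sketch: construct the invoker from a JSON config and run it.
invoker = WorkloadInvoker('workload_configs/example.json')
metadata = invoker.invoke_benchmark()  # wraps invoke_benchmark_async via asyncio.run
print(metadata['successes'], metadata['failures'], metadata['expected'])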