Code Example #1
File: profiler.py Project: CN-UPB/son-cli
    def __init__(self, input_msd_path, output_msd_path, input_commands, configuration_commands, **kwargs):

        # Grafana dashboard title
        defaults = {
            'title':'son-profile',
            'timeout':20,
            'overload_vnf_list':[]
        }
        defaults.update(kwargs)
        self.title = defaults.get('title')
        #class to control son-emu (export/query metrics)
        self.emu = Emu(SON_EMU_API)
        # list of class Metric
        self.input_msd = msd(input_msd_path, self.emu, title=self.title)
        self.input_metrics = self.input_msd.get_metrics_list()
        LOG.info('input metrics:{0}'.format(self.input_metrics))

        self.output_msd = msd(output_msd_path, self.emu, title=self.title)
        self.output_metrics = self.output_msd.get_metrics_list()
        LOG.info('output metrics:{0}'.format(self.output_metrics))


        # each list item is a dict with {vnf_name:"cmd_to_execute", ..}
        self.input_commands = input_commands
        LOG.info('input commands:{0}'.format(self.input_commands))

        # the configuration commands that need to be executed before the load starts
        self.configuration_commands = configuration_commands
        LOG.info("configuration commands:{0}".format(self.configuration_commands))


        self.timeout = int(defaults.get('timeout'))

        # check if prometheus is running
        sonmonitor.monitor.start_containers()

        # export msd's to Grafana
        self.input_msd.start()
        self.output_msd.start(overwrite=False)

        overload_vnf_list = defaults.get('overload_vnf_list')
        self.overload_monitor = Overload_Monitor(vnf_list=overload_vnf_list)
        # host overload flag
        self.overload = self.overload_monitor.overload_flag

        # profiling threaded function
        self.profiling_thread = threading.Thread(target=self.profling_loop)

        # list of dict for profiling results
        self.profiling_results = list()

        # the number of the current profiling run
        self.run_number = 0

        # display option
        self.no_display = defaults.get('no_display', False)
Code Example #2
    def EMU_command(self, args):
        command = args.command
        EMU_class = Emu(SON_EMU_API,
                        ip=SON_EMU_IP,
                        vm=SON_EMU_IN_VM,
                        user=SON_EMU_USER,
                        password=SON_EMU_PASSW)
        # call the EMU class method with the same name as the command arg
        args = vars(args)
        args['monitor'] = self
        ret = getattr(EMU_class, command)(**args)
        logging.debug("cmd: {0} \nreturn: {1}".format(command, ret))
        pp.pprint(ret)
        return 'end of EMU command: {}'.format(command)
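The EMU_command method above dispatches the CLI sub-command to the Emu method of the same name via getattr. A minimal, self-contained sketch of that dispatch pattern follows; the Demo class and its method are purely illustrative and not part of son-cli.

class Demo:
    def list_vnfs(self, **kwargs):
        # stand-in for an Emu method that would query son-emu
        return ['vnf1', 'vnf2']

def dispatch(obj, command, **kwargs):
    # look up the method named after the sub-command and forward the remaining arguments
    return getattr(obj, command)(**kwargs)

print(dispatch(Demo(), 'list_vnfs'))  # -> ['vnf1', 'vnf2']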
Code Example #3
File: profiler.py Project: stevenvanrossem/son-cli
    def __init__(self, input_msd_path, output_msd_path, experiment, **kwargs):

        self.configuration_space = experiment.configuration_space_list
        self.pre_config_commands = experiment.pre_configuration
        self.overload_vnf_list = experiment.overload_vnf_list
        self.timeout = int(experiment.time_limit)
        self.experiment_name = experiment.name
        self.experiment = experiment

        # Grafana dashboard title
        defaults = {
            'title':'son-profile',
        }
        defaults.update(kwargs)
        self.title = defaults.get('title')

        # results file
        self.results_file = defaults.get('results_file', RESULT_FILE)
        # graph only option
        self.graph_only = defaults.get('graph_only', False)
        # display option
        self.no_display = defaults.get('no_display', False)

        # generate profiling results
        self.profiling_results = list()
        self.profile_calc = ProfileCalculator(self.experiment)

        # only display graph of previous profile run
        if self.graph_only:
            self.profile_calc.display_graph(file=self.results_file)
            return
        elif not self.no_display:
            self.profile_calc.start_plot()
            self.profile_calc.enable_updating.set()

        #class to control son-emu (export/query metrics)
        self.emu = Emu(SON_EMU_API)
        # list of class Metric
        self.input_metrics = []
        self.input_msd = None
        if input_msd_path:
            self.input_msd = msd(input_msd_path, self.emu, title=self.title)
            self.input_metrics = self.input_msd.get_metrics_list()
            LOG.info('input metrics:{0}'.format(self.input_metrics))
        self.output_metrics = []
        self.output_msd = None
        if output_msd_path:
            self.output_msd = msd(output_msd_path, self.emu, title=self.title)
            self.output_metrics = self.output_msd.get_metrics_list()
            LOG.debug('output metrics:{0}'.format(self.output_metrics))

        # the configuration commands that need to be executed before the load starts
        LOG.info("configuration commands:{0}".format(self.pre_config_commands))

        # time the test that is running
        if self.timeout < 11:
            LOG.warning("timeout should be > 10 to allow overload detection")

        # the resource configuration of the current experiment
        self.resource_configuration = defaultdict(dict)
        # check if prometheus is running
        sonmonitor.monitor.start_containers()

        # export msd's to Grafana
        overwrite = True
        if input_msd_path:
            self.input_msd.start(overwrite=overwrite)
            overwrite = False
        if output_msd_path:
            self.output_msd.start(overwrite=overwrite)

        LOG.info('overload_vnf_list: {0}'.format(self.overload_vnf_list))
        self.overload_monitor = Overload_Monitor(vnf_list=self.overload_vnf_list)
        # host overload flag
        self.overload = self.overload_monitor.overload_flag

        # profiling threaded function
        self.profiling_thread = threading.Thread(target=self.profiling_loop)

        # the number of the current profiling run
        self.run_number = 1
Code Example #4
File: profiler.py Project: stevenvanrossem/son-cli
class Emu_Profiler():

    def __init__(self, input_msd_path, output_msd_path, experiment, **kwargs):

        self.configuration_space = experiment.configuration_space_list
        self.pre_config_commands = experiment.pre_configuration
        self.overload_vnf_list = experiment.overload_vnf_list
        self.timeout = int(experiment.time_limit)
        self.experiment_name = experiment.name
        self.experiment = experiment

        # Grafana dashboard title
        defaults = {
            'title':'son-profile',
        }
        defaults.update(kwargs)
        self.title = defaults.get('title')

        # results file
        self.results_file = defaults.get('results_file', RESULT_FILE)
        # graph only option
        self.graph_only = defaults.get('graph_only', False)
        # display option
        self.no_display = defaults.get('no_display', False)

        # generate profiling results
        self.profiling_results = list()
        self.profile_calc = ProfileCalculator(self.experiment)

        # only display graph of previous profile run
        if self.graph_only:
            self.profile_calc.display_graph(file=self.results_file)
            return
        elif not self.no_display:
            self.profile_calc.start_plot()
            self.profile_calc.enable_updating.set()

        #class to control son-emu (export/query metrics)
        self.emu = Emu(SON_EMU_API)
        # list of class Metric
        self.input_metrics = []
        self.input_msd = None
        if input_msd_path:
            self.input_msd = msd(input_msd_path, self.emu, title=self.title)
            self.input_metrics = self.input_msd.get_metrics_list()
            LOG.info('input metrics:{0}'.format(self.input_metrics))
        self.output_metrics = []
        self.output_msd = None
        if output_msd_path:
            self.output_msd = msd(output_msd_path, self.emu, title=self.title)
            self.output_metrics = self.output_msd.get_metrics_list()
            LOG.debug('output metrics:{0}'.format(self.output_metrics))

        # the configuration commands that need to be executed before the load starts
        LOG.info("configuration commands:{0}".format(self.pre_config_commands))

        # time the test that is running
        if self.timeout < 11:
            LOG.warning("timeout should be > 10 to allow overload detection")

        # the resource configuration of the current experiment
        self.resource_configuration = defaultdict(dict)
        # check if prometheus is running
        sonmonitor.monitor.start_containers()

        # export msd's to Grafana
        overwrite = True
        if input_msd_path:
            self.input_msd.start(overwrite=overwrite)
            overwrite = False
        if output_msd_path:
            self.output_msd.start(overwrite=overwrite)

        LOG.info('overload_vnf_list: {0}'.format(self.overload_vnf_list))
        self.overload_monitor = Overload_Monitor(vnf_list=self.overload_vnf_list)
        # host overload flag
        self.overload = self.overload_monitor.overload_flag

        # profiling threaded function
        self.profiling_thread = threading.Thread(target=self.profiling_loop)

        # the number of the current profiling run
        self.run_number = 1




    def start_experiment(self):
        if self.graph_only:
            return

        # start pre-configuration commands
        for vnf_name, cmd_list in self.pre_config_commands.items():
            for cmd in cmd_list:
                self.emu.exec(vnf_name=vnf_name, cmd=cmd)

        # start overload detection
        #if len(self.overload_vnf_list) > 0 :
        #self.overload_monitor.start(self.emu)

        # start the profiling loop
        self.profiling_thread.start()

        # if self.no_display == False:
        #     # nicely print values
        #     rows, columns = os.popen('stty size', 'r').read().split()
        #     # Set the Terminal window size larger than its default
        #     # to make sure the profiling results are fitting
        #     if int(rows) < 40 or int(columns) < 130:
        #         sys.stdout.write("\x1b[8;{rows};{cols}t".format(rows=40, cols=130))
        #     # print something to reset terminal
        #     print("")
        #     n = os.system("clear")
        #     # Add a delay to allow settings to settle...
        #     time.sleep(1)
        #     curses.wrapper(self.display_loop)
        # else:
        #     # wait for profiling thread to end
        #     self.profiling_thread.join()

        # wait for profiling thread to end
        self.profiling_thread.join()

        # stop overload detection
        self.overload_monitor.stop(self.emu)

        # write results to file
        self.write_results_to_file(self.results_file)

        #finalize the calculation of the performance profile
        if not self.no_display:
            self.profile_calc.finalize_graph()


    def profiling_loop(self):

        # start with empty results
        self.profiling_results.clear()
        self.run_number = 1

        # one cmd_dict per profile run
        for experiment in self.configuration_space:

            # parse the experiment's parameters
            resource_dict = defaultdict(dict)
            cmd_dict = {}
            vnf_name2order = dict()
            for key, value in experiment.items():
                array = key.split(':')
                if len(array) < 3:
                    continue
                type, vnf_name, param = array
                if type == 'measurement_point' and param == 'cmd':
                    cmd_dict[vnf_name] = value
                elif type == 'measurement_point' and param == 'cmd_order':
                    vnf_name2order[vnf_name] = int(value)
                elif type == 'resource_limitation':
                    resource_dict[vnf_name][param] = value

            self.resource_configuration = resource_dict
            LOG.info("resource config: {0}".format(resource_dict))


            # create ordered list of vnf_names, so the commands are always executed in a defined order
            vnforder_dict = OrderedDict(sorted(vnf_name2order.items(), key=operator.itemgetter(1)))
            vnforder_list = [vnf_name for vnf_name, order in vnforder_dict.items()]
            # also get the vnfs which do not have a cmd_order specified, and add them to the list
            leftover_vnfs = [vnf_name for vnf_name in cmd_dict if vnf_name not in vnforder_list]
            vnforder_list = vnforder_list + leftover_vnfs
            LOG.debug("vnf order:{0}".format(vnforder_list))

            # allocate the specified resources
            self.set_resources()

            # reset metrics
            for metric in self.input_metrics + self.output_metrics:
                metric.reset()

            LOG.info("All vnf commands: {0}".format(cmd_dict))
            # start the load
            for vnf_name in vnforder_list:
                cmd = cmd_dict.get(vnf_name)
                LOG.info("vnf command: {0}: {1}".format(vnf_name, cmd))
                self.emu.exec(vnf_name=vnf_name, cmd=cmd)

            # let the load stabilize and metrics get loaded in Prometheus
            time.sleep(2)
            # reset the overload monitor
            self.overload_monitor.reset()

            # monitor the metrics
            start_time = time.time()
            LOG.info('waiting {} seconds while gathering metrics...'.format(self.timeout))

            input_metrics = []
            output_metrics = []
            while((time.time()-start_time) < self.timeout):
                # add the new metric values to the list
                input_metrics = self.query_metrics(self.input_metrics)
                output_metrics = self.query_metrics(self.output_metrics)
                time.sleep(1)
                if self.overload.is_set():
                    LOG.info('overload detected')

            # all metrics gathered, do final calculations (average, CI)
            for metric in input_metrics + output_metrics:
                metric.doCalc()

            # stop the load
            LOG.info('end of experiment: {0} - run{1}/{2}'.format(self.experiment_name, self.run_number, len(self.configuration_space)))
            for vnf_name, cmd in cmd_dict.items():
                self.emu.exec(vnf_name=vnf_name, cmd=cmd, action='stop')

            # add the result of this profiling run to the results list
            profiling_result = dict(
                resource_alloc=(self.resource_configuration),
                metrics=input_metrics + output_metrics,
                name=self.experiment_name,
                run=self.run_number,
                total=len(self.configuration_space)
            )
            #result = self.filter_profile_results(profiling_result)
            self.profiling_results.append(deepcopy(profiling_result))
            # update the plot
            if not self.no_display:
                self.profile_calc.update_results(deepcopy(profiling_result))

            self.run_number += 1

        LOG.info('end of experiment: {}'.format(self.experiment_name))


    def display_loop(self, stdscr):
        # while profiling loop is running, display the metrics on the CLI
        # Clear screen
        stdscr.clear()
        # screen = curses.initscr()

        maxy, maxx = stdscr.getmaxyx()


        log_height = 10
        log_begin_y = maxy - log_height
        width = maxx
        logwin = curses.newwin(log_height, width, log_begin_y, 0)
        logwin.scrollok(True)

        height = maxy - log_height
        width = maxx
        # take large window to hold results
        resultwin = curses.newpad(height, 10000)
        resultwin.scrollok(True)

        # curses.setsyx(-1, -1)
        # win.setscrreg(begin_y, begin_y+height)
        # win.idlok(True)
        # win.leaveok(True)

        # LOG.removeHandler(logging.StreamHandler())
        LOG.propagate = False
        logging.getLogger('son_emu_lib').propagate=False
        LOG.addHandler(CursesHandler(logwin))

        stdscr.clear()

        i = 0
        resultwin.addstr(i, 0, "---- Run: {2}/{3}  -----  Timer: {0} secs ----".format(
            0, self.timeout, self.run_number, len(self.configuration_space)))
        i += 1
        resultwin.addstr(i, 0, "------------ resource allocation ------------")
        i += 1
        len_resources = 0
        for vnf_name, resource_dict in self.resource_configuration.items():
            for resource in resource_dict:
                resultwin.addstr(len_resources+i, 0, "{0}".format(resource))
                len_resources += 1

        i += 2 + len_resources
        resultwin.addstr(i, 0, "------------ input metrics ------------")
        i += 1
        for metric in self.input_metrics:
            resultwin.addstr(i, 0, "{0} ({1})".format(metric.metric_name, metric.unit))
            i += 1

        i += 2
        resultwin.addstr(i, 0, "------------ output metrics ------------")
        i += 1
        for metric in self.output_metrics:
            resultwin.addstr(i, 0, "{0} ({1})".format(metric.metric_name, metric.unit))
            i += 1

        maxy, maxx = stdscr.getmaxyx()
        resultwin.refresh(0, 0, 0, 0, height, maxx-1)

        time_counter = 0
        while self.profiling_thread.is_alive():
            resultwin.addstr(0, 0, "---- Run: {2}/{3}  ----- Timer: {0} secs ----".format(
                time_counter, self.timeout, self.run_number, len(self.configuration_space)))
            i = 2
            for vnf_name, resource_dict in self.resource_configuration.items():
                for resource, value in resource_dict.items():
                    resultwin.addstr(i, 50, "{0}".format(value))
                    i += 1

            # start from length of resource parameters
            i += 3
            for metric in self.input_metrics:
                resultwin.addstr(i, 50, "{0:.2f}".format(metric.last_value))
                i += 1

            i += 3
            for metric in self.output_metrics:
                resultwin.addstr(i, 50, "{0:.2f}".format(metric.last_value))
                i += 1

            maxy, maxx = stdscr.getmaxyx()
            resultwin.refresh(0, 0, 0, 0, height, maxx-1)
            time.sleep(1)
            time_counter += 1

        # print the final result
        result_number = 1
        for result in self.profiling_results:

            # start from length of resource parameters
            i = len_resources + 5
            for metric in result['input_metrics']:
                resultwin.addstr(i, 40 * result_number,
                              "{0:.2f} ({1:.2f},{2:.2f})".format(metric['average'], metric['CI_low'], metric['CI_high']))
                i += 1

            i += 3
            for metric in result['output_metrics']:
                resultwin.addstr(i, 40 * result_number,
                              "{0:.2f} ({1:.2f},{2:.2f})".format(metric['average'], metric['CI_low'], metric['CI_high']))
                i += 1

            result_number += 1



        #wait for input keypress
        resultwin.addstr(i + 1, 0, "press a key to close this window...")
        maxy, maxx = stdscr.getmaxyx()
        resultwin.refresh(0, 0, 0, 0, height, maxx - 1)
        #stdscr.refresh()
        resultwin.getkey()
        LOG.removeHandler(CursesHandler(logwin))
        LOG.propagate = True
        logging.getLogger('son_emu_lib').propagate = True
        # curses.endwin()
        # LOG.addHandler(logging.StreamHandler())
        # wait until curses is finished
        # while not curses.isendwin():
        #    time.sleep(0.5)

    def write_results_to_file(self, file_name):
        write_yaml(file_name, self.profiling_results)

    def filter_profile_results(self, profile_result):
        result = dict()
        result['name'] = profile_result['name'] + str(profile_result['run'])
        result['input_metrics'] = []
        result['output_metrics'] = []
        result['resource_alloc'] = dict()
        for metric in profile_result['input_metrics']:
            metric_dict = dict(
                name = metric.metric_name,
                type = metric.metric_type,
                unit = metric.unit,
                average = metric.average,
                desc = metric.desc,
                CI_low = float(metric.CI[0]),
                CI_high = float(metric.CI[1]),
            )
            result['input_metrics'].append(metric_dict)

        for metric in profile_result['output_metrics']:
            metric_dict = dict(
                name=metric.metric_name,
                type=metric.metric_type,
                unit=metric.unit,
                average=metric.average,
                desc=metric.desc,
                CI_low=float(metric.CI[0]),
                CI_high=float(metric.CI[1]),
            )
            result['output_metrics'].append(metric_dict)

        # avoid copying empty dicts into the results
        for vnf_name in profile_result['resource_alloc']:
            if len(profile_result['resource_alloc'][vnf_name]) > 0:
                result['resource_alloc'][vnf_name] = profile_result['resource_alloc'][vnf_name]

        return result

    def stop_experiment(self):
        self.input_msd.stop()
        self.output_msd.stop()

    def query_metrics(self, metrics):
        # fill the values of the metrics
        for metric in metrics:
            query = metric.query
            ret = None
            try:
                ret = query_Prometheus(query)
                metric.addValue(float(ret[1]))
                LOG.debug('metric: {0}={1}'.format(metric.metric_name, float(ret[1])))
            except Exception:
                LOG.info('Prometheus query failed: {0} \nquery: {1} \nerror:{2}'.format(ret, query, sys.exc_info()[0]))
                continue
            #metric_name = metric.metric_name
            #metric_unit = metric.unit
            #LOG.info("metric query: {1} {0} {2}".format(metric.value, metric_name, metric_unit))
        return metrics

    def set_resources(self):
        """
        Allocate the specified resources
        :param resource_dict:
        {"vnf_name1" : {"param1":value,...},
         "vnf_name2" : {"param1":value,...},
         ...
        }
        :return:
        """
        res = copy.deepcopy(self.resource_configuration)
        for vnf_name in res:
            resource_config = res[vnf_name]
            self.emu.update_vnf_resources(vnf_name, resource_config)
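A hedged usage sketch of the experiment-based Emu_Profiler shown above. The import path is an assumption (the class lives in profiler.py of son-cli), and the experiment descriptor is assumed to be built elsewhere with the attributes the constructor reads (configuration_space_list, pre_configuration, overload_vnf_list, time_limit, name).

from son.profile.profiler import Emu_Profiler  # assumed import path

def run_profiling(experiment, input_msd_path, output_msd_path):
    # sketch: run one profiling experiment without the curses display / live plot
    profiler = Emu_Profiler(input_msd_path, output_msd_path, experiment,
                            title='son-profile',
                            results_file='profile_results.yml',
                            no_display=True)
    profiler.start_experiment()   # runs profiling_loop and writes the results file
    profiler.stop_experiment()    # removes the exported msd dashboards
    return profiler.profiling_results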
Code Example #5
    def __init__(self, input_msd_path, output_msd_path, input_commands,
                 configuration_commands, **kwargs):

        # Grafana dashboard title
        defaults = {
            'title': 'son-profile',
            'timeout': 20,
            'overload_vnf_list': [],
            'resource_configuration': kwargs.get('resource_configuration',
                                                 [{}]),
            'vnforder_list': kwargs.get('vnforder_list', {})
        }
        defaults.update(kwargs)
        self.title = defaults.get('title')
        #class to control son-emu (export/query metrics)
        self.emu = Emu(SON_EMU_API)
        # list of class Metric
        self.input_msd = msd(input_msd_path, self.emu, title=self.title)
        self.input_metrics = self.input_msd.get_metrics_list()
        LOG.info('input metrics:{0}'.format(self.input_metrics))

        self.output_msd = msd(output_msd_path, self.emu, title=self.title)
        self.output_metrics = self.output_msd.get_metrics_list()
        LOG.info('output metrics:{0}'.format(self.output_metrics))

        # each list item is a dict with {vnf_name:"cmd_to_execute", ..}
        self.input_commands = input_commands
        LOG.info('input commands:{0}'.format(self.input_commands))

        # the configuration commands that need to be executed before the load starts
        self.configuration_commands = configuration_commands
        LOG.info("configuration commands:{0}".format(
            self.configuration_commands))
        # the order in which the vnf_commands need to be executed
        self.vnforder_list = defaults.get('vnforder_list')
        LOG.info("vnf order:{0}".format(self.vnforder_list))

        # the resource configuration that needs to be allocated before the load starts
        self.resource_configuration = defaults.get('resource_configuration')
        LOG.info("resource  configuration:{0}".format(
            self.resource_configuration))

        self.timeout = int(defaults.get('timeout'))
        if self.timeout < 11:
            LOG.warning("timeout should be > 10 to allow overload detection")

        # check if prometheus is running
        sonmonitor.monitor.start_containers()

        # export msd's to Grafana
        self.input_msd.start()
        self.output_msd.start(overwrite=False)

        overload_vnf_list = defaults.get('overload_vnf_list')
        self.overload_monitor = Overload_Monitor(vnf_list=overload_vnf_list)
        # host overload flag
        self.overload = self.overload_monitor.overload_flag

        # profiling threaded function
        self.profiling_thread = threading.Thread(target=self.profling_loop)

        # list of dict for profiling results
        self.profiling_results = list()

        # the number of the current profiling run
        self.run_number = 0

        # display option
        self.no_display = defaults.get('no_display', False)
Code Example #6
class Emu_Profiler():
    def __init__(self, input_msd_path, output_msd_path, input_commands,
                 configuration_commands, **kwargs):

        # Grafana dashboard title
        defaults = {
            'title': 'son-profile',
            'timeout': 20,
            'overload_vnf_list': [],
            'resource_configuration': kwargs.get('resource_configuration',
                                                 [{}]),
            'vnforder_list': kwargs.get('vnforder_list', {})
        }
        defaults.update(kwargs)
        self.title = defaults.get('title')
        #class to control son-emu (export/query metrics)
        self.emu = Emu(SON_EMU_API)
        # list of class Metric
        self.input_msd = msd(input_msd_path, self.emu, title=self.title)
        self.input_metrics = self.input_msd.get_metrics_list()
        LOG.info('input metrics:{0}'.format(self.input_metrics))

        self.output_msd = msd(output_msd_path, self.emu, title=self.title)
        self.output_metrics = self.output_msd.get_metrics_list()
        LOG.info('output metrics:{0}'.format(self.output_metrics))

        # each list item is a dict with {vnf_name:"cmd_to_execute", ..}
        self.input_commands = input_commands
        LOG.info('input commands:{0}'.format(self.input_commands))

        # the configuration commands that need to be executed before the load starts
        self.configuration_commands = configuration_commands
        LOG.info("configuration commands:{0}".format(
            self.configuration_commands))
        # the order in which the vnf_commands need to be executed
        self.vnforder_list = defaults.get('vnforder_list')
        LOG.info("vnf order:{0}".format(self.vnforder_list))

        # the resource configuration that needs to be allocated before the load starts
        self.resource_configuration = defaults.get('resource_configuration')
        LOG.info("resource  configuration:{0}".format(
            self.resource_configuration))

        self.timeout = int(defaults.get('timeout'))
        if self.timeout < 11:
            LOG.warning("timeout should be > 10 to allow overload detection")

        # check if prometheus is running
        sonmonitor.monitor.start_containers()

        # export msd's to Grafana
        self.input_msd.start()
        self.output_msd.start(overwrite=False)

        overload_vnf_list = defaults.get('overload_vnf_list')
        self.overload_monitor = Overload_Monitor(vnf_list=overload_vnf_list)
        # host overload flag
        self.overload = self.overload_monitor.overload_flag

        # profiling threaded function
        self.profiling_thread = threading.Thread(target=self.profling_loop)

        # list of dict for profiling results
        self.profiling_results = list()

        # the number of the current profiling run
        self.run_number = 0

        # display option
        self.no_display = defaults.get('no_display', False)

    def start_experiment(self):
        # start configuration commands
        for vnf_name, cmd_list in self.configuration_commands.items():
            for cmd in cmd_list:
                self.emu.docker_exec(vnf_name=vnf_name, cmd=cmd)

        # start overload detection
        self.overload_monitor.start(self.emu)

        # start the profiling loop
        self.profiling_thread.start()

        if self.no_display == False:
            # nicely print values
            rows, columns = os.popen('stty size', 'r').read().split()
            # Set the Terminal window size larger than its default
            # to make sure the profiling results are fitting
            if int(rows) < 40 or int(columns) < 130:
                sys.stdout.write("\x1b[8;{rows};{cols}t".format(rows=40,
                                                                cols=130))
            # print something to reset terminal
            print("")
            n = os.system("clear")
            # Add a delay to allow settings to settle...
            time.sleep(1)
            curses.wrapper(self.display_loop)
        else:
            # wait for profiling thread to end
            self.profiling_thread.join()

        # stop overload detection
        self.overload_monitor.stop(self.emu)

        # write results to file
        self.write_results_to_file("test_results.yml")

    def profling_loop(self):

        # start with empty results
        self.profiling_results.clear()
        self.run_number = 1

        # one cmd_dict per profile run
        for cmd_dict in self.input_commands:
            # configure all resource settings for every input command
            for resource_dict in self.resource_configuration:

                # reset metrics
                for metric in self.input_metrics + self.output_metrics:
                    metric.reset()

                self.set_resources(resource_dict)

                # start the load
                for vnf_name in self.vnforder_list:
                    cmd = cmd_dict[vnf_name]
                    self.emu.docker_exec(vnf_name=vnf_name, cmd=cmd)
                #for vnf_name, cmd in cmd_dict:
                #    self.emu.docker_exec(vnf_name=vnf_name, cmd=cmd, ensure=True)

                # let the load stabilize
                time.sleep(1)
                # reset the overload monitor
                self.overload_monitor.reset()

                # monitor the metrics
                start_time = time.time()

                while ((time.time() - start_time) < self.timeout):
                    # add the new metric values to the list
                    input_metrics = self.query_metrics(self.input_metrics)
                    output_metrics = self.query_metrics(self.output_metrics)
                    time.sleep(1)
                    if self.overload.is_set():
                        LOG.info('overload detected')

                # stop the load
                for vnf_name, cmd in cmd_dict.items():
                    self.emu.docker_exec(vnf_name=vnf_name,
                                         cmd=cmd,
                                         action='stop')

                # add the result of this profiling run to the results list
                profiling_result = dict(
                    resource_alloc=copy.deepcopy(resource_dict),
                    input_metrics=copy.deepcopy(input_metrics),
                    output_metrics=copy.deepcopy(output_metrics))
                self.profiling_results.append(profiling_result)
                self.run_number += 1

    def display_loop(self, stdscr):
        # while profiling loop is running, display the metrics
        # Clear screen
        stdscr.clear()
        # screen = curses.initscr()

        maxy, maxx = stdscr.getmaxyx()

        log_height = 10
        log_begin_y = maxy - log_height
        width = maxx
        logwin = curses.newwin(log_height, width, log_begin_y, 0)
        logwin.scrollok(True)

        height = maxy - log_height
        width = maxx
        # take large window to hold results
        resultwin = curses.newpad(height, 10000)
        resultwin.scrollok(True)

        # curses.setsyx(-1, -1)
        # win.setscrreg(begin_y, begin_y+height)
        # win.idlok(True)
        # win.leaveok(True)

        # LOG.removeHandler(logging.StreamHandler())
        LOG.propagate = False
        logging.getLogger('son_emu_lib').propagate = False
        LOG.addHandler(CursesHandler(logwin))

        stdscr.clear()

        resultwin.addstr(0, 0, "------------ resource allocation ------------")
        i = 1
        for resource, value in self.resource_configuration[self.run_number -
                                                           1].items():
            resultwin.addstr(i, 0, "{0}".format(resource))
            i += 1

        i += 2
        resultwin.addstr(i, 0, "------------ input metrics ------------")
        i += 1
        for metric in self.input_metrics:
            resultwin.addstr(
                i, 0, "{0} ({1})".format(metric.metric_name, metric.unit))
            i += 1

        i += 2
        resultwin.addstr(i, 0, "------------ output metrics ------------")
        i += 1
        for metric in self.output_metrics:
            resultwin.addstr(
                i, 0, "{0} ({1})".format(metric.metric_name, metric.unit))
            i += 1

        maxy, maxx = stdscr.getmaxyx()
        resultwin.refresh(0, 0, 0, 0, height, maxx - 1)

        while self.profiling_thread.is_alive():

            i = 1
            resource_configs = len(self.resource_configuration)
            for resource, value in self.resource_configuration[
                (self.run_number - 1) % resource_configs].items():
                resultwin.addstr(i, 40 * self.run_number, "{0}".format(value))
                i += 1

            # start from length of resource parameters
            i += 3
            for metric in self.input_metrics:
                resultwin.addstr(i, 40 * self.run_number,
                                 "{0:.2f}".format(metric.last_value))
                i += 1

            i += 3
            for metric in self.output_metrics:
                resultwin.addstr(i, 40 * self.run_number,
                                 "{0:.2f}".format(metric.last_value))
                i += 1

            # print the final result
            result_number = 1
            for result in self.profiling_results:

                # start from length of resource parameters
                i = len(self.resource_configuration[0]) + 4
                for metric in result['input_metrics']:
                    resultwin.addstr(
                        i, 40 * result_number,
                        "{0:.2f} ({1:.2f},{2:.2f})".format(
                            metric.average, metric.CI[0], metric.CI[1]))
                    i += 1

                i += 3
                for metric in result['output_metrics']:
                    resultwin.addstr(
                        i, 40 * result_number,
                        "{0:.2f} ({1:.2f},{2:.2f})".format(
                            metric.average, metric.CI[0], metric.CI[1]))
                    i += 1

                result_number += 1

            maxy, maxx = stdscr.getmaxyx()
            resultwin.refresh(0, 0, 0, 0, height, maxx - 1)
            time.sleep(1)

        # print the final result
        result_number = 1
        for result in self.profiling_results:

            # start from length of resource parameters
            i = len(self.resource_configuration[0]) + 4
            for metric in result['input_metrics']:
                resultwin.addstr(
                    i, 40 * result_number,
                    "{0:.2f} ({1:.2f},{2:.2f})".format(metric.average,
                                                       metric.CI[0],
                                                       metric.CI[1]))
                i += 1

            i += 3
            for metric in result['output_metrics']:
                resultwin.addstr(
                    i, 40 * result_number,
                    "{0:.2f} ({1:.2f},{2:.2f})".format(metric.average,
                                                       metric.CI[0],
                                                       metric.CI[1]))
                i += 1

            result_number += 1

        #wait for input keypress
        resultwin.addstr(i + 1, 0, "press a key to close this window...")
        maxy, maxx = stdscr.getmaxyx()
        resultwin.refresh(0, 0, 0, 0, height, maxx - 1)
        #stdscr.refresh()
        resultwin.getkey()
        LOG.removeHandler(CursesHandler(logwin))
        LOG.propagate = True
        logging.getLogger('son_emu_lib').propagate = True
        # curses.endwin()
        # LOG.addHandler(logging.StreamHandler())
        # wait until curses is finished
        # while not curses.isendwin():
        #    time.sleep(0.5)

    def write_results_to_file(self, file_name):
        write_yaml(file_name, self.profiling_results)

    def stop_experiment(self):
        self.input_msd.stop()
        self.output_msd.stop()

    def query_metrics(self, metrics):
        # fill the values of the metrics
        for metric in metrics:
            query = metric.query
            ret = None
            try:
                ret = query_Prometheus(query)
                metric.addValue(float(ret[1]))
            except Exception:
                LOG.info(
                    'Prometheus query failed: {0} \nquery: {1} \nerror:{2}'.
                    format(ret, query,
                           sys.exc_info()[0]))
                continue
            #metric_name = metric.metric_name
            #metric_unit = metric.unit
            #LOG.info("metric query: {1} {0} {2}".format(metric.value, metric_name, metric_unit))
        return metrics

    def set_resources(self, resource_dict):
        """
        Allocate the specified resources
        :param resource_dict:
        {"function1:parameter1" : 0.1,
         "functionN:parameterN" : 0.2,
        }
        :return:
        """

        if len(resource_dict) == 0:
            return

        # group resources per vnf
        def get_vnfname(key):
            return key.split(':')[0]

        resource_dict_grouped = {}
        for vnf_name, param_list in groupby(resource_dict, get_vnfname):
            if not resource_dict_grouped.get(vnf_name):
                resource_dict_grouped[vnf_name] = []
            resource_dict_grouped[vnf_name] += list(param_list)

        # execute resource allocation of all parameters per vnf
        for vnf_name in resource_dict_grouped:
            new_dict = {}
            for param in resource_dict_grouped[vnf_name]:
                if ':' not in param: continue
                new_dict[param.split(':')[1]] = resource_dict[param]
            if len(new_dict) > 0:
                LOG.debug('vnf {1} set resources: {0}'.format(
                    new_dict, vnf_name))
                self.emu.update_vnf_resources(vnf_name, new_dict)
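The set_resources variant above expects a flat dict keyed as "vnf_name:parameter" and regroups it per VNF before calling update_vnf_resources. Below is a small standalone sketch of that grouping step, runnable without son-emu; the VNF names and parameter names are illustrative only.

from itertools import groupby

def group_resources(resource_dict):
    # regroup {"vnf:param": value, ...} into {"vnf": {"param": value, ...}, ...}
    grouped = {}
    for vnf_name, keys in groupby(sorted(resource_dict), key=lambda k: k.split(':')[0]):
        grouped[vnf_name] = {k.split(':')[1]: resource_dict[k] for k in keys if ':' in k}
    return grouped

print(group_resources({'vnf1:cpu_bw': 0.1, 'vnf1:mem_limit': 256, 'vnf2:cpu_bw': 0.2}))
# -> {'vnf1': {'cpu_bw': 0.1, 'mem_limit': 256}, 'vnf2': {'cpu_bw': 0.2}}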
Code Example #7
SON_EMU_REST_API_PORT = 5001
SON_EMU_API = "http://{0}:{1}".format(SON_EMU_IP, SON_EMU_REST_API_PORT)
# API when docker network=host in son-cli, or when not started as container
#SON_EMU_API = "http://localhost:5001"
# API when started with docker-compose
#SON_EMU_API = "http://son-emu:5001"

# specify if son-emu is running in a separate VM that has ssh login
SON_EMU_IN_VM = False
SON_EMU_USER = '******'  # 'vagrant'
SON_EMU_PASSW = 'test'  # 'vagrant'

# initialize the VIMs accessible from the SDK
emu = Emu(SON_EMU_API,
          ip=SON_EMU_IP,
          vm=SON_EMU_IN_VM,
          user=SON_EMU_USER,
          password=SON_EMU_PASSW)

# tmp directories that will be mounted in the Prometheus and Grafana Docker containers by son-emu
tmp_dir = '/tmp/son-monitor'
docker_dir = '/tmp/son-monitor/docker'
prometheus_dir = '/tmp/son-monitor/prometheus'
grafana_dir = '/tmp/son-monitor/grafana'


class sonmonitor():
    def __init__(self):

        for dir in [docker_dir, prometheus_dir, grafana_dir]:
            if not os.path.exists(dir):
Code Example #8
File: profiler.py Project: CN-UPB/son-cli
class Emu_Profiler():

    def __init__(self, input_msd_path, output_msd_path, input_commands, configuration_commands, **kwargs):

        # Grafana dashboard title
        defaults = {
            'title':'son-profile',
            'timeout':20,
            'overload_vnf_list':[]
        }
        defaults.update(kwargs)
        self.title = defaults.get('title')
        #class to control son-emu (export/query metrics)
        self.emu = Emu(SON_EMU_API)
        # list of class Metric
        self.input_msd = msd(input_msd_path, self.emu, title=self.title)
        self.input_metrics = self.input_msd.get_metrics_list()
        LOG.info('input metrics:{0}'.format(self.input_metrics))

        self.output_msd = msd(output_msd_path, self.emu, title=self.title)
        self.output_metrics = self.output_msd.get_metrics_list()
        LOG.info('output metrics:{0}'.format(self.output_metrics))


        # each list item is a dict with {vnf_name:"cmd_to_execute", ..}
        self.input_commands = input_commands
        LOG.info('input commands:{0}'.format(self.input_commands))

        # the configuration commands that need to be executed before the load starts
        self.configuration_commands = configuration_commands
        LOG.info("configuration commands:{0}".format(self.configuration_commands))


        self.timeout = int(defaults.get('timeout'))

        # check if prometheus is running
        sonmonitor.monitor.start_containers()

        # export msd's to Grafana
        self.input_msd.start()
        self.output_msd.start(overwrite=False)

        overload_vnf_list = defaults.get('overload_vnf_list')
        self.overload_monitor = Overload_Monitor(vnf_list=overload_vnf_list)
        # host overload flag
        self.overload = self.overload_monitor.overload_flag

        # profiling threaded function
        self.profiling_thread = threading.Thread(target=self.profling_loop)

        # list of dict for profiling results
        self.profiling_results = list()

        # the number of the current profiling run
        self.run_number = 0

        # display option
        self.no_display = defaults.get('no_display', False)

    def start_experiment(self):
        # start configuration commands
        for vnf_name, cmd_list in self.configuration_commands.items():
            for cmd in cmd_list:
                self.emu.docker_exec(vnf_name=vnf_name, cmd=cmd)

        # start overload detection
        self.overload_monitor.start(self.emu)

        # start the profiling loop
        self.profiling_thread.start()

        if self.no_display == False:
            # nicely print values
            rows, columns = os.popen('stty size', 'r').read().split()
            # Set the Terminal window size larger than its default
            # to make sure the profiling results are fitting
            if int(rows) < 40 or int(columns) < 120:
                sys.stdout.write("\x1b[8;{rows};{cols}t".format(rows=40, cols=120))
            # print something to reset terminal
            print("")
            n = os.system("clear")
            # Add a delay to allow settings to settle...
            time.sleep(1)
            curses.wrapper(self.display_loop)


        # stop overload detection
        self.overload_monitor.stop(self.emu)


    def profling_loop(self):

        # start with empty results
        self.profiling_results.clear()
        self.run_number = 1

        # one cmd_dict per profile run
        for cmd_dict in self.input_commands:
            # reset metrics
            for metric in self.input_metrics+self.output_metrics:
                metric.reset()

            # start the load
            for vnf_name, cmd in cmd_dict.items():
                self.emu.docker_exec(vnf_name=vnf_name, cmd=cmd)

            # let the load stabilize
            time.sleep(2)
            # reset the overload monitor
            self.overload_monitor.reset()

            # monitor the metrics
            start_time = time.time()

            while((time.time()-start_time) < self.timeout):
                # add the new metric values to the list
                input_metrics = self.query_metrics(self.input_metrics)
                output_metrics = self.query_metrics(self.output_metrics)
                time.sleep(1)
                if self.overload.is_set():
                    LOG.info('overload detected')

            # stop the load
            for vnf_name, cmd in cmd_dict.items():
                self.emu.docker_exec(vnf_name=vnf_name, cmd=cmd, action='stop')

            # add the result of this profiling run to the results list
            profiling_result = dict(
                input_metrics=copy.deepcopy(input_metrics),
                output_metrics=copy.deepcopy(output_metrics)
            )
            self.profiling_results.append(profiling_result)
            self.run_number += 1


    def display_loop(self, stdscr):
        # while profiling loop is running, display the metrics
        # Clear screen
        stdscr.clear()
        # screen = curses.initscr()

        maxy, maxx = stdscr.getmaxyx()


        log_height = 10
        log_begin_y = maxy - log_height
        width = maxx
        logwin = curses.newwin(log_height, width, log_begin_y, 0)
        logwin.scrollok(True)

        height = maxy - log_height
        width = maxx
        # take large window to hold results
        resultwin = curses.newpad(height, 10000)
        resultwin.scrollok(True)

        # curses.setsyx(-1, -1)
        # win.setscrreg(begin_y, begin_y+height)
        # win.idlok(True)
        # win.leaveok(True)

        # LOG.removeHandler(logging.StreamHandler())
        LOG.addHandler(CursesHandler(logwin))

        stdscr.clear()

        resultwin.addstr(0, 0, "------------ input metrics ------------")
        i = 1
        for metric in self.input_metrics:
            resultwin.addstr(i, 0, "{0} ({1})".format(metric.metric_name, metric.unit))
            i += 1

        resultwin.addstr(len(self.input_metrics) + i, 0, "------------ output metrics ------------")
        i = len(self.input_metrics) + i + 1
        for metric in self.output_metrics:
            resultwin.addstr(i, 0, "{0} ({1})".format(metric.metric_name, metric.unit))
            i += 1

        maxy, maxx = stdscr.getmaxyx()
        resultwin.refresh(0, 0, 0, 0, height, maxx-1)

        while self.profiling_thread.is_alive():
            i = 1
            for metric in self.input_metrics:
                resultwin.addstr(i, 40*self.run_number, "{0:.2f}".format(metric.last_value))
                i += 1

            i = len(self.input_metrics) + i + 1
            for metric in self.output_metrics:
                resultwin.addstr(i, 40*self.run_number, "{0:.2f}".format(metric.last_value))
                i += 1

            # print the final result
            result_number = 1
            for result in self.profiling_results:

                i = 1
                for metric in result['input_metrics']:
                    resultwin.addstr(i, 40*result_number, "{0:.2f} ({1:.2f},{2:.2f})".format(metric.average, metric.CI[0], metric.CI[1]))
                    i += 1

                i = len(self.input_metrics) + i + 1
                for metric in result['output_metrics']:
                    resultwin.addstr(i, 40*result_number, "{0:.2f} ({1:.2f},{2:.2f})".format(metric.average, metric.CI[0], metric.CI[1]))
                    i += 1

                result_number += 1

            maxy, maxx = stdscr.getmaxyx()
            resultwin.refresh(0, 0, 0, 0, height, maxx-1)
            time.sleep(1)

        # print the final result
        result_number = 1
        for result in self.profiling_results:

            i = 1
            for metric in result['input_metrics']:
                resultwin.addstr(i, 40 * result_number,
                              "{0:.2f} ({1:.2f},{2:.2f})".format(metric.average, metric.CI[0], metric.CI[1]))
                i += 1

            i = len(self.input_metrics) + i + 1
            for metric in result['output_metrics']:
                resultwin.addstr(i, 40 * result_number,
                              "{0:.2f} ({1:.2f},{2:.2f})".format(metric.average, metric.CI[0], metric.CI[1]))
                i += 1

            result_number += 1



        #wait for input keypress
        resultwin.addstr(i + 1, 0, "press a key to close this window...")
        maxy, maxx = stdscr.getmaxyx()
        resultwin.refresh(0, 0, 0, 0, height, maxx - 1)
        #stdscr.refresh()
        resultwin.getkey()
        LOG.removeHandler(CursesHandler(logwin))
        # curses.endwin()
        # LOG.addHandler(logging.StreamHandler())
        # wait until curses is finished
        # while not curses.isendwin():
        #    time.sleep(0.5)

    def stop_experiment(self):
        self.input_msd.stop()
        self.output_msd.stop()

    def query_metrics(self, metrics):
        # fill the values of the metrics
        for metric in metrics:
            query = metric.query
            ret = None
            try:
                ret = query_Prometheus(query)
                metric.addValue(float(ret[1]))
            except Exception:
                LOG.info('Prometheus query failed: {0} \nquery: {1} \nerror:{2}'.format(ret, query, sys.exc_info()[0]))
                continue
            #metric_name = metric.metric_name
            #metric_unit = metric.unit
            #LOG.info("metric query: {1} {0} {2}".format(metric.value, metric_name, metric_unit))
        return metrics
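A hedged usage sketch of the input_commands based constructor shown above. The import path, VNF names and command strings are illustrative assumptions; input_commands holds one {vnf_name: cmd} dict per profiling run, and configuration_commands maps each VNF to the commands executed once before the load starts.

from son.profile.profiler import Emu_Profiler  # assumed import path

input_commands = [{'vnf_client': 'iperf -c 10.0.0.2'}]      # illustrative: one dict per run
configuration_commands = {'vnf_server': ['iperf -s -D']}    # illustrative pre-load setup per VNF

profiler = Emu_Profiler('input_msd.yml', 'output_msd.yml',
                        input_commands, configuration_commands,
                        timeout=20,              # seconds of metric gathering per run
                        overload_vnf_list=[])    # VNFs watched for host overload
profiler.start_experiment()   # runs the configuration commands, then the profiling loop
profiler.stop_experiment()    # removes the exported msd dashboards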
Code Example #9
    def __init__(self, input_msd_path, output_msd_path, experiment, **kwargs):

        self.configuration_space = experiment.configuration_space_list
        self.pre_config_commands = experiment.pre_configuration
        self.overload_vnf_list = experiment.overload_vnf_list
        self.timeout = int(experiment.time_limit)
        self.experiment_name = experiment.name
        self.experiment = experiment

        # Grafana dashboard title
        defaults = {
            'title': 'son-profile',
        }
        defaults.update(kwargs)
        self.title = defaults.get('title')

        # results file
        self.results_file = defaults.get('results_file', RESULT_FILE)
        # graph only option
        self.graph_only = defaults.get('graph_only', False)

        # generate profiling results
        self.profiling_results = list()
        self.profile_calc = ProfileCalculator(self.experiment)

        # only display graph of previous profile run
        if self.graph_only:
            self.profile_calc.display_graph(file=self.results_file)
            return
        else:
            self.profile_calc.start_plot()
            self.profile_calc.enable_updating.set()

        #class to control son-emu (export/query metrics)
        self.emu = Emu(SON_EMU_API)
        # list of class Metric
        self.input_metrics = []
        self.input_msd = None
        if input_msd_path:
            self.input_msd = msd(input_msd_path, self.emu, title=self.title)
            self.input_metrics = self.input_msd.get_metrics_list()
            LOG.info('input metrics:{0}'.format(self.input_metrics))
        self.output_metrics = []
        self.output_msd = None
        if output_msd_path:
            self.output_msd = msd(output_msd_path, self.emu, title=self.title)
            self.output_metrics = self.output_msd.get_metrics_list()
            LOG.debug('output metrics:{0}'.format(self.output_metrics))

        # the configuration commands that need to be executed before the load starts
        LOG.info("configuration commands:{0}".format(self.pre_config_commands))

        # time the test that is running
        if self.timeout < 11:
            LOG.warning("timeout should be > 10 to allow overload detection")

        # the resource configuration of the current experiment
        self.resource_configuration = defaultdict(dict)
        # check if prometheus is running
        sonmonitor.monitor.start_containers()

        # export msd's to Grafana
        overwrite = True
        if input_msd_path:
            self.input_msd.start(overwrite=overwrite)
            overwrite = False
        if output_msd_path:
            self.output_msd.start(overwrite=overwrite)

        LOG.info('overload_vnf_list: {0}'.format(self.overload_vnf_list))
        self.overload_monitor = Overload_Monitor(
            vnf_list=self.overload_vnf_list)
        # host overload flag
        self.overload = self.overload_monitor.overload_flag

        # profiling threaded function
        self.profiling_thread = threading.Thread(target=self.profiling_loop)

        # the number of the current profiling run
        self.run_number = 1

        # display option
        self.no_display = defaults.get('no_display', False)
Code Example #10
class Emu_Profiler():
    def __init__(self, input_msd_path, output_msd_path, experiment, **kwargs):

        self.configuration_space = experiment.configuration_space_list
        self.pre_config_commands = experiment.pre_configuration
        self.overload_vnf_list = experiment.overload_vnf_list
        self.timeout = int(experiment.time_limit)
        self.experiment_name = experiment.name
        self.experiment = experiment

        # Grafana dashboard title
        defaults = {
            'title': 'son-profile',
        }
        defaults.update(kwargs)
        self.title = defaults.get('title')

        # results file
        self.results_file = defaults.get('results_file', RESULT_FILE)
        # graph only option
        self.graph_only = defaults.get('graph_only', False)

        # generate profiling results
        self.profiling_results = list()
        self.profile_calc = ProfileCalculator(self.experiment)

        # only display graph of previous profile run
        if self.graph_only:
            self.profile_calc.display_graph(file=self.results_file)
            return
        else:
            self.profile_calc.start_plot()
            self.profile_calc.enable_updating.set()

        #class to control son-emu (export/query metrics)
        self.emu = Emu(SON_EMU_API)
        # list of class Metric
        self.input_metrics = []
        self.input_msd = None
        if input_msd_path:
            self.input_msd = msd(input_msd_path, self.emu, title=self.title)
            self.input_metrics = self.input_msd.get_metrics_list()
            LOG.info('input metrics:{0}'.format(self.input_metrics))
        self.output_metrics = []
        self.output_msd = None
        if output_msd_path:
            self.output_msd = msd(output_msd_path, self.emu, title=self.title)
            self.output_metrics = self.output_msd.get_metrics_list()
            LOG.debug('output metrics:{0}'.format(self.output_metrics))

        # the configuration commands that need to be executed before the load starts
        LOG.info("configuration commands:{0}".format(self.pre_config_commands))

        # time the test that is running
        if self.timeout < 11:
            LOG.warning("timeout should be > 10 to allow overload detection")

        # the resource configuration of the current experiment
        self.resource_configuration = defaultdict(dict)
        # check if prometheus is running
        sonmonitor.monitor.start_containers()

        # export msd's to Grafana
        overwrite = True
        if input_msd_path:
            self.input_msd.start(overwrite=overwrite)
            overwrite = False
        if output_msd_path:
            self.output_msd.start(overwrite=overwrite)

        LOG.info('overload_vnf_list: {0}'.format(self.overload_vnf_list))
        self.overload_monitor = Overload_Monitor(
            vnf_list=self.overload_vnf_list)
        # host overload flag
        self.overload = self.overload_monitor.overload_flag

        # profiling threaded function
        self.profiling_thread = threading.Thread(target=self.profiling_loop)

        # the number of the current profiling run
        self.run_number = 1

        # display option
        self.no_display = defaults.get('no_display', False)

    def start_experiment(self):
        if self.graph_only:
            return

        # start pre-configuration commands
        for vnf_name, cmd_list in self.pre_config_commands.items():
            for cmd in cmd_list:
                self.emu.exec(vnf_name=vnf_name, cmd=cmd)

        # start overload detection
        #if len(self.overload_vnf_list) > 0 :
        self.overload_monitor.start(self.emu)

        # start the profiling loop
        self.profiling_thread.start()

        if not self.no_display:
            # nicely print values
            rows, columns = os.popen('stty size', 'r').read().split()
            # Set the Terminal window size larger than its default
            # to make sure the profiling results are fitting
            if int(rows) < 40 or int(columns) < 130:
                sys.stdout.write("\x1b[8;{rows};{cols}t".format(rows=40,
                                                                cols=130))
            # print something to reset terminal
            print("")
            n = os.system("clear")
            # Add a delay to allow settings to settle...
            time.sleep(1)
            curses.wrapper(self.display_loop)
        else:
            # wait for profiling thread to end
            self.profiling_thread.join()

        # stop overload detection
        self.overload_monitor.stop(self.emu)

        # write results to file
        self.write_results_to_file(self.results_file)

        #finalize the calculation of the performance profile
        self.profile_calc.finalize_graph(show_final=self.no_display)

    def profiling_loop(self):

        # start with empty results
        self.profiling_results.clear()
        self.run_number = 1

        # one cmd_dict per profile run
        for experiment in self.configuration_space:

            # parse the experiment's parameters
            resource_dict = defaultdict(dict)
            cmd_dict = {}
            vnf_name2order = dict()
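            # experiment keys are expected as "<type>:<vnf_name>:<param>", e.g.
            # "measurement_point:<vnf>:cmd", "measurement_point:<vnf>:cmd_order"
            # or "resource_limitation:<vnf>:<resource_param>"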
            for key, value in experiment.items():
                array = key.split(':')
                if len(array) < 3:
                    continue
                type, vnf_name, param = array
                if type == 'measurement_point' and param == 'cmd':
                    cmd_dict[vnf_name] = value
                elif type == 'measurement_point' and param == 'cmd_order':
                    vnf_name2order[vnf_name] = int(value)
                elif type == 'resource_limitation':
                    resource_dict[vnf_name][param] = value

            self.resource_configuration = resource_dict
            LOG.info("resource config: {0}".format(resource_dict))

            # create ordered list of vnf_names, so the commands are always executed in a defined order
            vnforder_dict = OrderedDict(
                sorted(vnf_name2order.items(), key=operator.itemgetter(1)))
            vnforder_list = [
                vnf_name for vnf_name, order in vnforder_dict.items()
            ]
            # also get the vnfs which do not have a cmd_order specified, and add them to the list
            leftover_vnfs = [
                vnf_name for vnf_name in cmd_dict
                if vnf_name not in vnforder_list
            ]
            vnforder_list = vnforder_list + leftover_vnfs
            LOG.debug("vnf order:{0}".format(vnforder_list))

            # allocate the specified resources
            self.set_resources()

            # reset metrics
            for metric in self.input_metrics + self.output_metrics:
                metric.reset()

            LOG.info("vnf commands: {0}".format(cmd_dict))
            # start the load
            for vnf_name in vnforder_list:
                cmd = cmd_dict.get(vnf_name)
                self.emu.exec(vnf_name=vnf_name, cmd=cmd)

            # let the load stabilize
            time.sleep(1)
            # reset the overload monitor
            self.overload_monitor.reset()

            # monitor the metrics
            start_time = time.time()
            LOG.info('waiting {} seconds while gathering metrics...'.format(
                self.timeout))

            while ((time.time() - start_time) < self.timeout):
                # add the new metric values to the list
                input_metrics = self.query_metrics(self.input_metrics)
                output_metrics = self.query_metrics(self.output_metrics)
                time.sleep(1)
                if self.overload.is_set():
                    LOG.info('overload detected')

            # stop the load
            LOG.info('end of experiment: {0} - run{1}/{2}'.format(
                self.experiment_name, self.run_number,
                len(self.configuration_space)))
            for vnf_name, cmd in cmd_dict.items():
                self.emu.exec(vnf_name=vnf_name, cmd=cmd, action='stop')

            # add the result of this profiling run to the results list
            profiling_result = dict(
                resource_alloc=(self.resource_configuration),
                input_metrics=(input_metrics),
                output_metrics=(output_metrics),
                name=self.experiment_name,
                run=self.run_number,
                total=len(self.configuration_space))
            result = self.filter_profile_results(profiling_result)
            self.profiling_results.append(result)
            # update the plot
            self.profile_calc.update_results(result)

            self.run_number += 1

        LOG.info('end of experiment: {}'.format(self.experiment_name))

    def display_loop(self, stdscr):
        # while profiling loop is running, display the metrics on the CLI
        # Clear screen
        stdscr.clear()
        # screen = curses.initscr()

        maxy, maxx = stdscr.getmaxyx()

        log_height = 10
        log_begin_y = maxy - log_height
        width = maxx
        logwin = curses.newwin(log_height, width, log_begin_y, 0)
        logwin.scrollok(True)

        height = maxy - log_height
        width = maxx
        # take large window to hold results
        resultwin = curses.newpad(height, 10000)
        resultwin.scrollok(True)

        # curses.setsyx(-1, -1)
        # win.setscrreg(begin_y, begin_y+height)
        # win.idlok(True)
        # win.leaveok(True)

        # LOG.removeHandler(logging.StreamHandler())
        LOG.propagate = False
        logging.getLogger('son_emu_lib').propagate = False
        curses_handler = CursesHandler(logwin)
        LOG.addHandler(curses_handler)

        stdscr.clear()

        i = 0
        resultwin.addstr(
            i, 0, "---- Run: {2}/{3}  -----  Timer: {0} secs ----".format(
                0, self.timeout, self.run_number,
                len(self.configuration_space)))
        i += 1
        resultwin.addstr(i, 0, "------------ resource allocation ------------")
        i += 1
        len_resources = 0
        for vnf_name, resource_dict in self.resource_configuration.items():
            for resource in resource_dict:
                resultwin.addstr(len_resources + i, 0, "{0}".format(resource))
                len_resources += 1

        i += 2 + len_resources
        resultwin.addstr(i, 0, "------------ input metrics ------------")
        i += 1
        for metric in self.input_metrics:
            resultwin.addstr(
                i, 0, "{0} ({1})".format(metric.metric_name, metric.unit))
            i += 1

        i += 2
        resultwin.addstr(i, 0, "------------ output metrics ------------")
        i += 1
        for metric in self.output_metrics:
            resultwin.addstr(
                i, 0, "{0} ({1})".format(metric.metric_name, metric.unit))
            i += 1

        maxy, maxx = stdscr.getmaxyx()
        resultwin.refresh(0, 0, 0, 0, height, maxx - 1)

        time_counter = 0
        while self.profiling_thread.is_alive():
            resultwin.addstr(
                0, 0, "---- Run: {2}/{3}  ----- Timer: {0} secs ----".format(
                    time_counter, self.timeout, self.run_number,
                    len(self.configuration_space)))
            i = 2
            for vnf_name, resource_dict in self.resource_configuration.items():
                for resource, value in resource_dict.items():
                    resultwin.addstr(i, 50, "{0}".format(value))
                    i += 1

            # start from length of resource parameters
            i += 3
            for metric in self.input_metrics:
                resultwin.addstr(i, 50, "{0:.2f}".format(metric.last_value))
                i += 1

            i += 3
            for metric in self.output_metrics:
                resultwin.addstr(i, 50, "{0:.2f}".format(metric.last_value))
                i += 1

            maxy, maxx = stdscr.getmaxyx()
            resultwin.refresh(0, 0, 0, 0, height, maxx - 1)
            time.sleep(1)
            time_counter += 1

        # print the final result
        result_number = 1
        for result in self.profiling_results:

            # start from length of resource parameters
            i = len_resources + 5
            for metric in result['input_metrics']:
                resultwin.addstr(
                    i, 40 * result_number,
                    "{0:.2f} ({1:.2f},{2:.2f})".format(metric['average'],
                                                       metric['CI_low'],
                                                       metric['CI_high']))
                i += 1

            i += 3
            for metric in result['output_metrics']:
                resultwin.addstr(
                    i, 40 * result_number,
                    "{0:.2f} ({1:.2f},{2:.2f})".format(metric['average'],
                                                       metric['CI_low'],
                                                       metric['CI_high']))
                i += 1

            result_number += 1

        #wait for input keypress
        resultwin.addstr(i + 1, 0, "press a key to close this window...")
        maxy, maxx = stdscr.getmaxyx()
        resultwin.refresh(0, 0, 0, 0, height, maxx - 1)
        #stdscr.refresh()
        resultwin.getkey()
        LOG.removeHandler(curses_handler)
        LOG.propagate = True
        logging.getLogger('son_emu_lib').propagate = True
        # curses.endwin()
        # LOG.addHandler(logging.StreamHandler())
        # wait until curses is finished
        # while not curses.isendwin():
        #    time.sleep(0.5)

    def write_results_to_file(self, file_name):
        write_yaml(file_name, self.profiling_results)

    def filter_profile_results(self, profile_result):
        result = dict()
        result['name'] = profile_result['name'] + str(profile_result['run'])
        result['input_metrics'] = []
        result['output_metrics'] = []
        result['resource_alloc'] = dict()
        for metric in profile_result['input_metrics']:
            metric_dict = dict(
                name=metric.metric_name,
                type=metric.metric_type,
                unit=metric.unit,
                average=metric.average,
                desc=metric.desc,
                CI_low=float(metric.CI[0]),
                CI_high=float(metric.CI[1]),
            )
            result['input_metrics'].append(metric_dict)

        for metric in profile_result['output_metrics']:
            metric_dict = dict(
                name=metric.metric_name,
                type=metric.metric_type,
                unit=metric.unit,
                average=metric.average,
                desc=metric.desc,
                CI_low=float(metric.CI[0]),
                CI_high=float(metric.CI[1]),
            )
            result['output_metrics'].append(metric_dict)

        # avoid copying empty dicts into the results
        for vnf_name in profile_result['resource_alloc']:
            if len(profile_result['resource_alloc'][vnf_name]) > 0:
                result['resource_alloc'][vnf_name] = profile_result[
                    'resource_alloc'][vnf_name]

        return result

    def stop_experiment(self):
        if self.input_msd:
            self.input_msd.stop()
        if self.output_msd:
            self.output_msd.stop()

    def query_metrics(self, metrics):
        # fill the values of the metrics
        for metric in metrics:
            query = metric.query
            try:
                ret = query_Prometheus(query)
                # query_Prometheus is expected to return a Prometheus
                # [timestamp, value] pair; index 1 holds the sample value
                metric.addValue(float(ret[1]))
                LOG.debug('metric: {0}={1}'.format(metric.metric_name,
                                                   float(ret[1])))
            except Exception:
                # 'ret' may be unbound if the query itself raised, so only the
                # query and the exception are logged here
                LOG.info('Prometheus query failed: {0} \nerror: {1}'.format(
                    query, sys.exc_info()[0]))
                continue
            #metric_name = metric.metric_name
            #metric_unit = metric.unit
            #LOG.info("metric query: {1} {0} {2}".format(metric.value, metric_name, metric_unit))
        return metrics

    def set_resources(self):
        """
        Allocate the resources specified in self.resource_configuration:
        {"vnf_name1" : {"param1": value, ...},
         "vnf_name2" : {"param1": value, ...},
         ...
        }
        :return:
        """
        res = copy.deepcopy(self.resource_configuration)
        for vnf_name in res:
            resource_config = res[vnf_name]
            self.emu.update_vnf_resources(vnf_name, resource_config)
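
For orientation, here is a minimal usage sketch of the Emu_Profiler class shown above. The experiment object is assumed to be a parsed experiment descriptor exposing the attributes read in __init__ (configuration_space_list, pre_configuration, overload_vnf_list, time_limit, name); the msd paths and the results file name are placeholders, and a running son-emu instance with Prometheus/Grafana is required for this to do anything useful.

# minimal sketch, not taken from son-cli itself: 'experiment' and the file
# paths below are placeholders/assumptions
profiler = Emu_Profiler(
    input_msd_path='input_msd.yml',      # hypothetical msd file
    output_msd_path='output_msd.yml',    # hypothetical msd file
    experiment=experiment,               # assumed experiment descriptor, parsed elsewhere
    title='son-profile',
    no_display=True,                     # skip the curses UI, log only
    results_file='profile_results.yml')  # hypothetical output file
profiler.start_experiment()              # run every configuration, write the results file
profiler.stop_experiment()               # stop exporting the msd dashboards to Grafana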
Code example #11
    def start_containers(self):
        # docker-compose up -d
        cmd = ['docker-compose', '-p', 'sonmonitor', 'up', '-d']

        docker_cli = docker.from_env()
        # check if containers are already running
        c1 = docker_cli.containers.list(filters={
            'status': 'running',
            'name': 'prometheus'
        })
        if len(c1) >= 1:
            logging.info('prometheus is already running')
        c2 = docker_cli.containers.list(filters={
            'status': 'running',
            'name': 'grafana'
        })
        if len(c2) >= 1:
            logging.info('grafana is already running')
        if len(c1 + c2) > 0:
            return 'son-monitor not started'

        docker_based = os.getenv('SON_CLI_IN_DOCKER', False)
        if docker_based:
            # we are running son-cli in a docker container
            logging.info('son-cli is running inside a docker container')
            src_path = os.path.join('docker_compose_files',
                                    'docker-compose-docker.yml')
        else:
            # we are running son-cli locally
            src_path = os.path.join('docker_compose_files',
                                    'docker-compose-local.yml')
        srcfile = pkg_resources.resource_filename(__name__, src_path)
        # copy the docker compose file to a working directory
        copy(srcfile, os.path.join(docker_dir, 'docker-compose.yml'))

        # copy the prometheus config file for use in the prometheus docker container
        src_path = os.path.join('prometheus', 'prometheus_sdk.yml')
        srcfile = pkg_resources.resource_filename(__name__, src_path)
        copy(srcfile, prometheus_dir)

        # copy grafana directory
        src_path = os.path.join('grafana', 'grafana.db')
        srcfile = pkg_resources.resource_filename(__name__, src_path)
        copy(srcfile, grafana_dir)

        logging.info('Start son-monitor containers: {0}'.format(docker_dir))
        process = Popen(cmd, cwd=docker_dir)
        process.wait()

        # Wait a while for containers to be completely started
        self.started = False
        wait_time = 0
        while not self.started:
            emu = Emu(SON_EMU_API,
                      ip=SON_EMU_IP,
                      vm=SON_EMU_IN_VM,
                      user=SON_EMU_USER,
                      password=SON_EMU_PASSW)
            list1 = emu.docker_client.containers.list(filters={
                'status': 'running',
                'name': 'prometheus'
            })
            list2 = emu.docker_client.containers.list(filters={
                'status': 'running',
                'name': 'grafana'
            })
            if len(list1 + list2) >= 2:
                self.started = True
                sleep(8)
            if wait_time > 5:
                return 'son-monitor not started'
            sleep(1)
            wait_time += 1
        logging.info('son-monitor started')

        return 'son-monitor started'
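
As a follow-up, a hedged sketch of how this method is typically driven: in code example #10 it is invoked as sonmonitor.monitor.start_containers() before the profiling run starts, and the caller can only inspect the returned status string. Note that 'son-monitor not started' is returned both when the containers are already running and when the startup poll times out.

# minimal sketch; 'sonmonitor' is the monitor module as imported in example #10
status = sonmonitor.monitor.start_containers()
# 'son-monitor not started' covers both "already running" and "startup timed out"
logging.info('son-monitor status: {0}'.format(status))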