Example #1
    def __init__(self, msd_path, vim, title=None):

        # Parse the msd file
        LOG.info('parsing msd: {0}'.format(msd_path))
        self.msd_dict = load_yaml(msd_path)

        # the VIM class where the monitoring is installed (son-emu manager)
        self.vim = vim

        # initialize a new Grafana dashboard
        self.grafana = Grafana()

        # get msd file parameters
        if title is None:
            title = self.msd_dict.get('dashboard')
        self.dashboard = title
        self.version = self.msd_dict.get('version')

        # get msd VNF metrics to monitor
        self.vnf_metrics = self.msd_dict.get('vnf_metrics', [])
        # get msd NSD links to monitor
        self.nsd_links = self.msd_dict.get('nsd_links', [])

        # cookie integer, unique per monitored flow
        self.cookie_counter = COOKIE_START
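
For reference, the constructor above reads only a few top-level keys from the MSD file: dashboard, version, vnf_metrics and nsd_links. A minimal sketch of such a file is shown below, loaded with plain PyYAML in place of the module's load_yaml helper; the concrete VNF, link and metric names are illustrative assumptions, not taken from the original code.

import yaml

# Hypothetical minimal MSD document; only the keys read by __init__ are shown.
MSD_EXAMPLE = """
dashboard: my-service-dashboard    # becomes the Grafana dashboard title
version: '0.1'
vnf_metrics:                       # VNF-level metrics to monitor
  - description: cpu load of the firewall VNF
    metric_type: cpu
    vnf_ids:
      - vnf: fw1
nsd_links:                         # NSD link counters to monitor
  - description: traffic on the input link
    metric_type: packet_rate
    link_ids:
      - link_id: input-link
        source: input:port0
        destination: fw1:port0
        direction: rx
"""

msd_dict = yaml.safe_load(MSD_EXAMPLE)   # stands in for load_yaml(msd_path)
print(msd_dict.get('dashboard'), len(msd_dict.get('vnf_metrics', [])))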
Example #2
    def __init__(self, msd_path, vim, title=None):

        # Parse the msd file
        LOG.info('parsing msd: {0}'.format(msd_path))
        self.msd_dict = load_yaml(msd_path)

        # the VIM class where the monitoring is installed (son-emu manager)
        self.vim = vim

        # initialize a new Grafana dashboard
        self.grafana = Grafana()

        # get msd file parameters
        if title is None:
            title = self.msd_dict.get('dashboard')
        self.dashboard = title
        self.version = self.msd_dict.get('version')

        # get msd VNF metrics to monitor
        self.vnf_metrics = self.msd_dict.get('vnf_metrics', [])
        # get msd NSD links to monitor
        self.nsd_links = self.msd_dict.get('nsd_links', [])

        # cookie integer, unique per monitored flow
        self.cookie_counter = COOKIE_START
Example #3
    def start_msd(self, file=None, **kwargs):

        # also start son-monitor containers
        self.start_containers()

        # Parse the msd file
        logging.info("parsing msd: {0}".format(file))
        msd = load_yaml(file)

        # initialize a new Grafana dashboard
        self.grafana = Grafana()
        dashboard_name = msd["dashboard"]
        self.grafana.init_dashboard(title=dashboard_name)

        # Install the vnf metrics
        self.install_vnf_metrics(msd, dashboard_name)

        # install the link metrics
        # first make sure everything is stopped
        # self.install_nsd_links(msd, 'stop', dashboard_name)
        self.install_nsd_links(msd, "start", dashboard_name)

        # execute the SAP commands
        # first make sure everything is stopped
        # self.install_sap_commands(msd, "stop")
        self.install_sap_commands(msd, "start")

        return "msd metrics installed"
Example #4
    def stop_msd(self, file=None, **kwargs):

        logging.info("parsing msd: {0}".format(file))
        msd = load_yaml(file)

        # clear the dashboard
        self.grafana = Grafana()
        dashboard_name = msd["dashboard"]
        self.grafana.del_dashboard(title=dashboard_name)

        # delete all installed flow_metrics
        self.install_nsd_links(msd, "stop", dashboard_name)

        # kill all the SAP commands
        self.install_sap_commands(msd, "stop")

        sleep(3)
        # also stop son-monitor containers
        self.stop_containers()

        return "msd metrics deleted"
Example #5
class msd():

    def __init__(self, msd_file, vim):

        # Parse the msd file
        LOG.info('parsing msd: {0}'.format(msd_file))
        self.msd_dict = load_yaml(msd_file)

        # the VIM class where the monitoring is installed (son-emu manager)
        self.vim = vim

        # initialize a new Grafana dashboard
        self.grafana = Grafana()

        # get msd file parameters
        self.dashboard = self.msd_dict.get('dashboard')
        self.version = self.msd_dict.get('version')

        # get msd VNF metrics to monitor
        self.vnf_metrics = self.msd_dict.get('vnf_metrics')
        # get msd NSD links to monitor
        self.nsd_links = self.msd_dict.get('nsd_links')

        # cookie integer, unique per monitored flow
        self.cookie_counter = COOKIE_START

    def start(self):
        # init the dashboard
        self.grafana.init_dashboard(title=self.dashboard)

        # install and export metrics from the MSD file
        self.set_vnf_metrics('start')
        self.set_nsdlink_metrics('start')

    def stop(self):
        # clear the dashboard
        self.grafana.del_dashboard(title=self.dashboard)

        # remove metrics from the MSD file
        self.set_vnf_metrics('stop')
        self.set_nsdlink_metrics('stop')


    def set_vnf_metrics(self, action):
        for metric_group in self.vnf_metrics:
            title = metric_group['description']
            graph_list = self.vnfmetric_classifier(metric_group, action)
            if action == 'start':
                if 'count' in metric_group['metric_type']:
                    self.grafana.add_panel(metric_list=graph_list, title=title, dashboard_name=self.dashboard,
                                           graph_type='bars')
                else:
                    self.grafana.add_panel(metric_list=graph_list, title=title, dashboard_name=self.dashboard)


    # execute the correct function to start/stop the metric_type
    def vnfmetric_classifier(self, metric_group, action):
        compute_metric_dict = {'start': self.start_compute_metric}
        compute_metrics = ['cpu', 'mem']

        testvnf_metric_dict = {'start': self.start_testvnf_metric}
        testvnf_metrics = ['packet_loss', 'jitter']

        network_metric_dict = {'start': self.start_network_metric, 'stop': self.stop_network_metric}
        network_metrics = ['packet_rate', 'byte_rate', 'packet_count', 'byte_count',
                           'packet_rate_cadv', 'byte_rate_cadv', 'packet_count_cadv', 'byte_count_cadv']

        metric_type = metric_group['metric_type']
        LOG.info('metric_type:{0}'.format(metric_type))

        graph_list = []
        for vnf_id in metric_group.get('vnf_ids', []):
            graph_dict = {}

            # Monitor metrics exported by Test-VNFs
            if metric_type in testvnf_metrics:
                function = testvnf_metric_dict.get(action)
                if function:
                    graph_dict = function(metric_group, vnf_id)

            # monitor compute stats (exported by cAdvisor in son-emu)
            elif metric_type in compute_metrics:
                function = compute_metric_dict.get(action)
                if function:
                    graph_dict = function(metric_group, vnf_id)

            # monitor network stats (exported by Ryu/cAdvisor in son-emu)
            elif metric_type in network_metrics:
                function = network_metric_dict.get(action)
                if function:
                    graph_dict = function(metric_group, vnf_id)

            else:
                logging.info("No query found for metric type: {0}".format(metric_type))
                continue

            graph_list.append(graph_dict)

        return graph_list


    def start_testvnf_metric(self, metric_group, vnf_id):
        # make default description
        desc = vnf_id.get("description")
        if not desc:
            desc = vnf_id['vnf']

        metric_type = metric_group['metric_type']
        # set correct Prometheus query
        query = test2vnfquery[metric_type].format(vnf_id['vnf'])
        graph_dict = dict(desc=desc, metric=query)
        return graph_dict

    def start_compute_metric(self, metric_group, vnf_id):
        # make default description
        desc = vnf_id.get("description")
        if not desc:
            desc = vnf_id['vnf']

        metric_type = metric_group['metric_type']
        # set correct Prometheus query
        query = compute2vnfquery[metric_type].format(vnf_id['vnf'])
        graph_dict = dict(desc=desc, metric=query)
        return graph_dict

    # network metrics gathered by the network interface counters
    def start_network_metric(self, metric_group, vnf_id):
        metric_type = metric_group['metric_type']
        metric_type2 = vnf_id['direction'] + "_" + metric_type
        vnf_name = parse_vnf_name(vnf_id['vnf'])
        vnf_interface = parse_vnf_interface(vnf_id['vnf'])
        flow_metric = metric2flow_metric[metric_type2]

        # metrics of cAdvisor are already exported by default
        if not '_cadv' in metric_type:
            self.vim.monitor_interface('start', vnf_name + ':' + vnf_interface, flow_metric)

        query = network2vnfquery[metric_type2].format(vnf_name, vnf_interface)
        # make default description
        desc = vnf_id.get("description")
        if not desc:
            desc = vnf_id['vnf'] + ':' + vnf_id['direction']
        graph_dict = dict(desc=desc, metric=query)
        return graph_dict

    def stop_network_metric(self, metric_group, vnf_id):
        metric_type = metric_group['metric_type']
        metric_type2 = vnf_id['direction'] + "_" + metric_type
        vnf_name = parse_vnf_name(vnf_id['vnf'])
        vnf_interface = parse_vnf_interface(vnf_id['vnf'])
        flow_metric = metric2flow_metric[metric_type2]
        # metrics of cAdvisor are already exported by default
        if not '_cadv' in metric_type:
            self.vim.monitor_interface('stop', vnf_name + ':' + vnf_interface, flow_metric)
        return

    def set_nsdlink_metrics(self, action):
        for metric_group in self.nsd_links:
            title = metric_group['description']
            graph_list = self.nsdlink_classifier(metric_group, action)
            if action == 'start':
                if 'count' in metric_group['metric_type']:
                    self.grafana.add_panel(metric_list=graph_list, title=title, dashboard_name=self.dashboard,
                                           graph_type='bars')
                else:
                    self.grafana.add_panel(metric_list=graph_list, title=title, dashboard_name=self.dashboard)

    # execute the correct function to start/stop the metric_type
    def nsdlink_classifier(self, metric_group, action):
        nsdlink_metric_dict = {'start': self.start_nsdlink_metric, 'stop': self.stop_nsdlink_metric}
        nsdlink_metrics = ['packet_rate', 'byte_rate', 'packet_count', 'byte_count',
                           'packet_rate_cadv', 'byte_rate_cadv', 'packet_count_cadv', 'byte_count_cadv']

        graph_list = []

        metric_type = metric_group['metric_type']
        LOG.info('metric_type:{0}'.format(metric_type))

        #cookie = self.cookie_counter
        for nsdlink_id in metric_group.get('link_ids', []):
            graph_dict = {}

            # monitor network stats (exported by Ryu/cAdvisor in son-emu)
            if metric_type in nsdlink_metrics:
                function = nsdlink_metric_dict.get(action)
                if function:
                    graph_dict = function(metric_group, nsdlink_id)
                    # increment cookie when a match flow is installed
                    #if nsdlink_id.get('match'): cookie += 1

                else:
                    logging.info("No query found for metric type: {0}".format(metric_type))
                    continue

            graph_list.append(graph_dict)

        return graph_list

    # install flow_metrics for a specified chain
    def start_nsdlink_metric(self, metric_group, nsdlink_id):
        # install the link metric
        title = metric_group['description']
        metric_type = metric_group['metric_type']
        metric_type2 = nsdlink_id['direction'] + "_" + metric_type
        source = nsdlink_id['source']
        destination = nsdlink_id['destination']
        direction = nsdlink_id['direction']

        if 'rx' in direction:
            vnf_name = parse_vnf_name(destination)
            vnf_interface = parse_vnf_interface(destination)
        elif 'tx' in direction:
            vnf_name = parse_vnf_name(source)
            vnf_interface = parse_vnf_interface(source)

        # make default description
        desc = nsdlink_id.get("description")
        if not desc:
            desc = nsdlink_id['link_id'] + ':' + nsdlink_id['direction']

        # if match is empty then it is a total interface counter
        if not nsdlink_id.get('match'):
            flow_metric = metric2flow_metric[metric_type2]
            # metrics of cAdvisor are already exported by default
            if not '_cadv' in metric_type:
                self.vim.monitor_interface('start', vnf_name + ':' + vnf_interface, flow_metric)
            query = network2vnfquery[metric_type2].format(vnf_name, vnf_interface)
            graph_dict = dict(desc=desc, metric=query)

        # if a match is given, install a flow specific counter
        else:
            flow_metric = metric2flow_metric[metric_type2]
            source = nsdlink_id['source']
            destination = nsdlink_id['destination']
            match = nsdlink_id['match']
            # install the flow and export the metric
            self.vim.flow_total('start', source, destination, flow_metric, self.cookie_counter, match=match,
                            bidirectional=False, priority=MONITOR_FLOW_PRIORITY)
            query = metric2flowquery[metric_type2].format(self.cookie_counter, vnf_name, vnf_interface)
            graph_dict = dict(desc=desc, metric=query)
            self.cookie_counter += 1

        return graph_dict

    # delete flow_metrics for a specified chain
    def stop_nsdlink_metric(self, metric_group, nsdlink_id):
        # remove the link metrics
        title = metric_group['description']
        metric_type = metric_group['metric_type']
        metric_type2 = nsdlink_id['direction'] + "_" + metric_type
        source = nsdlink_id['source']
        destination = nsdlink_id['destination']
        direction = nsdlink_id['direction']

        if 'rx' in direction:
            vnf_name = parse_vnf_name(destination)
            vnf_interface = parse_vnf_interface(destination)
        elif 'tx' in direction:
            vnf_name = parse_vnf_name(source)
            vnf_interface = parse_vnf_interface(source)

        # if match is empty then it is a total interface counter
        if not nsdlink_id.get('match'):
            flow_metric = metric2flow_metric[metric_type2]
            # metrics of cAdvisor are already exported by default
            if not '_cadv' in metric_type:
                self.vim.monitor_interface('stop', vnf_name + ':' + vnf_interface, flow_metric)

        # if a match is given, uninstall a flow specific counter
        else:
            flow_metric = metric2flow_metric[metric_type2]
            source = nsdlink_id['source']
            destination = nsdlink_id['destination']
            match = nsdlink_id['match']
            # remove the flow and stop exporting the metric
            self.vim.flow_total('stop', source, destination, flow_metric, self.cookie_counter, match=match,
                                bidirectional=False, priority=MONITOR_FLOW_PRIORITY)
            self.cookie_counter += 1
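
One pattern worth noting in vnfmetric_classifier() and nsdlink_classifier() above: the 'start'/'stop' action string is resolved through a dict of bound methods, and actions without a registered handler simply fall through (compute and test-VNF metrics, for instance, have no 'stop' handler because nothing needs to be uninstalled for them). A self-contained sketch of that dispatch idea, with all names hypothetical:

class MetricHandlers:
    def start_cpu(self, vnf):
        # pretend to install/export a compute metric for the given VNF
        return 'installed cpu metric for {0}'.format(vnf)

    def classify(self, action, vnf):
        # 'stop' is intentionally absent: stopping a compute metric is a no-op
        handlers = {'start': self.start_cpu}
        function = handlers.get(action)
        if function:
            return function(vnf)
        return None


dispatcher = MetricHandlers()
print(dispatcher.classify('start', 'fw1'))   # -> installed cpu metric for fw1
print(dispatcher.classify('stop', 'fw1'))    # -> None (silently skipped)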
Example #6
class msd():

    def __init__(self, msd_path, vim, title=None):

        # Parse the msd file
        LOG.info('parsing msd: {0}'.format(msd_path))
        self.msd_dict = load_yaml(msd_path)

        # the VIM class where the monitoring is installed (son-emu manager)
        self.vim = vim

        # initialize a new Grafana dashboard
        self.grafana = Grafana()

        # get msd file parameters
        if title is None:
            title = self.msd_dict.get('dashboard')
        self.dashboard = title
        self.version = self.msd_dict.get('version')

        # get msd VNF metrics to monitor
        self.vnf_metrics = self.msd_dict.get('vnf_metrics', [])
        # get msd NSD links to monitor
        self.nsd_links = self.msd_dict.get('nsd_links', [])

        # cookie integer, unique per monitored flow
        self.cookie_counter = COOKIE_START

    def start(self, title=None, overwrite=True):
        if title is None:
            title = self.dashboard
        # init the dashboard
        self.grafana.init_dashboard(title=title, overwrite=overwrite)

        metrics = self.get_metrics()
        self.start_grafana(metrics)

    def stop(self):
        # clear the dashboard
        self.grafana.del_dashboard(title=self.dashboard)

        # remove metrics from the MSD file
        self.set_vnf_metrics('stop')
        self.set_nsdlink_metrics('stop')

    # install and return all metrics from the msd (without installing Grafana)
    def get_metrics(self):
        vnf_metrics_dict = self.set_vnf_metrics('start')
        link_metrics_dict = self.set_nsdlink_metrics('start')
        # merge the 2 dicts
        all_metrics = dict(vnf_metrics_dict, **link_metrics_dict)
        return all_metrics

    # install and return all metrics from the msd as a list (without installing Grafana)
    def get_metrics_list(self):
        vnf_metrics_dict = self.set_vnf_metrics('start')
        link_metrics_dict = self.set_nsdlink_metrics('start')
        # merge the 2 dicts
        all_metrics = dict(vnf_metrics_dict, **link_metrics_dict)
        flat_metric_list = []
        for metric_list in all_metrics.values():
            flat_metric_list += metric_list

        # return list of class Metric
        return flat_metric_list

    def start_grafana(self, metrics):
        for metric_group in metrics:
            title = metric_group
            metric_list = metrics[metric_group]

            # need list of dicts for Grafana lib
            graph_list = [metric.__dict__ for metric in metric_list]

            # check metric_type in first metric of the list
            if 'count' in metric_list[0].metric_type:
                self.grafana.add_panel(metric_list=graph_list, title=title, dashboard_name=self.dashboard,
                                       graph_type='bars')
            else:
                self.grafana.add_panel(metric_list=graph_list, title=title, dashboard_name=self.dashboard)

    def set_vnf_metrics(self, action=None):
        all_metrics = {}
        for metric_group in self.vnf_metrics:
            title = metric_group['description']
            metric_list = self.vnfmetric_classifier(metric_group, action)

            # group all metrics in dict
            all_metrics[title] = metric_list

        return all_metrics

    # execute the correct function to start/stop the metric_type
    def vnfmetric_classifier(self, metric_group, action):
        compute_metric_dict = {'start': self.start_compute_metric}

        testvnf_metric_dict = {'start': self.start_testvnf_metric}

        network_metric_dict = {'start': self.start_network_metric, 'stop': self.stop_network_metric}

        metric_type = metric_group['metric_type']
        LOG.info('metric_type:{0}'.format(metric_type))

        metric_list = []
        for vnf_id in metric_group.get('vnf_ids', []):
            metric = None

            # Monitor metrics exported by Test-VNFs
            if metric_type in testvnf_metrics:
                function = testvnf_metric_dict.get(action)
                if function:
                    metric = function(metric_group, vnf_id)

            # monitor compute stats (exported by cAdvisor in son-emu)
            elif metric_type in compute_metrics:
                function = compute_metric_dict.get(action)
                if function:
                    metric = function(metric_group, vnf_id)

            # monitor network stats (exported by Ryu/cAdvisor in son-emu)
            elif metric_type in network_metrics:
                function = network_metric_dict.get(action)
                if function:
                    metric = function(metric_group, vnf_id)

            else:
                logging.info("No query found for metric type: {0}".format(metric_type))
                continue

            metric_list.append(metric)

        return metric_list


    def start_testvnf_metric(self, metric_group, vnf_id):
        # make default description
        desc = vnf_id.get("description")
        if not desc:
            desc = vnf_id['vnf']

        metric_type = metric_group['metric_type']
        # set correct Prometheus query
        query = test2vnfquery[metric_type].query_template.format(vnf_id['vnf'])
        unit = test2vnfquery[metric_type].unit
        name = '@'.join([metric_type, vnf_id['vnf']])
        metric = Metric(metric_name=name, desc=desc, query=query, metric_type=metric_type, unit=unit)
        return metric

    def start_compute_metric(self, metric_group, vnf_id):
        # make default description
        desc = vnf_id.get("description")
        if not desc:
            desc = vnf_id['vnf']

        metric_type = metric_group['metric_type']
        # set correct Prometheus query
        query = compute2vnfquery[metric_type].query_template.format(vnf_id['vnf'])
        unit = compute2vnfquery[metric_type].unit
        name = '@'.join([metric_type, vnf_id['vnf']])
        metric = Metric(metric_name=name, desc=desc, query=query, metric_type=metric_type, unit=unit)
        return metric

    # network metrics gathered by the network interface counters
    def start_network_metric(self, metric_group, vnf_id):
        metric_type = metric_group['metric_type']
        metric_type2 = vnf_id['direction'] + "_" + metric_type
        vnf_name = parse_vnf_name(vnf_id['vnf'])
        vnf_interface = parse_vnf_interface(vnf_id['vnf'])

        # metrics of cadvisor are already exported by default
        if not '_cadv' in metric_type:
            flow_metric = metric2flow_metric[metric_type2]
            r = self.vim.monitor_interface(action='start', vnf_name=vnf_name + ':' + vnf_interface, metric=flow_metric)
            LOG.info('start metric ret:{0}'.format(r))
        query = network2vnfquery[metric_type2].query_template.format(vnf_name, vnf_interface)
        # make default description
        desc = vnf_id.get("description")
        if not desc:
            desc = vnf_id['vnf'] + ':' + vnf_id['direction']
        unit = network2vnfquery[metric_type2].unit
        name = '@'.join([metric_type2, vnf_id['vnf']])
        metric = Metric(metric_name=name, desc=desc, query=query, metric_type=metric_type2, unit=unit)
        return metric

    def stop_network_metric(self, metric_group, vnf_id):
        metric_type = metric_group['metric_type']
        metric_type2 = vnf_id['direction'] + "_" + metric_type
        vnf_name = parse_vnf_name(vnf_id['vnf'])
        vnf_interface = parse_vnf_interface(vnf_id['vnf'])

        # metrics of cadvisor are already exported by default
        if not '_cadv' in metric_type:
            flow_metric = metric2flow_metric[metric_type2]
            r = self.vim.monitor_interface('stop', vnf_name + ':' + vnf_interface, flow_metric)
            LOG.info('stop metric ret:{0}'.format(r))
        return

    def set_nsdlink_metrics(self, action=None):
        all_metrics = {}

        for metric_group in self.nsd_links:
            title = metric_group['description']
            metric_list = self.nsdlink_classifier(metric_group, action)

            # group all metrics in dict
            all_metrics[title] = metric_list

        return all_metrics

    # execute the correct function to start/stop the metric_type
    def nsdlink_classifier(self, metric_group, action):
        nsdlink_metric_dict = {'start': self.start_nsdlink_metric, 'stop': self.stop_nsdlink_metric}

        metric_list = []

        metric_type = metric_group['metric_type']
        LOG.info('metric_type:{0}'.format(metric_type))

        for nsdlink_id in metric_group.get('link_ids', []):
            # monitor network stats (exported by Ryu/cAdvisor in son-emu)
            if metric_type in nsdlink_metrics:
                function = nsdlink_metric_dict.get(action)
                if function:
                    metric = function(metric_group, nsdlink_id)
                    metric_list.append(metric)

                else:
                    logging.info("No query found for metric type: {0}".format(metric_type))
                    continue

        return metric_list

    # install flow_metrics for a specified chain
    def start_nsdlink_metric(self, metric_group, nsdlink_id):
        # install the link metric
        title = metric_group['description']
        metric_type = metric_group['metric_type']
        metric_type2 = nsdlink_id['direction'] + "_" + metric_type
        source = nsdlink_id['source']
        destination = nsdlink_id['destination']
        direction = nsdlink_id['direction']

        if 'rx' in direction:
            vnf_name = parse_vnf_name(destination)
            vnf_interface = parse_vnf_interface(destination)
        elif 'tx' in direction:
            vnf_name = parse_vnf_name(source)
            vnf_interface = parse_vnf_interface(source)

        # make default description
        desc = nsdlink_id.get("description")
        if not desc:
            desc = nsdlink_id['link_id'] + ':' + nsdlink_id['direction']

        # if match is empty then it is a total interface counter
        if not nsdlink_id.get('match'):
            flow_metric = metric2flow_metric[metric_type2]
            # metrics of cAdvisor are already exported by default
            if not '_cadv' in metric_type:
                if vnf_interface is None:
                    vnf_interface = ''
                r = self.vim.monitor_interface('start', vnf_name + ':' + vnf_interface, flow_metric)
                LOG.info('start link metric ret:{0}'.format(r))
            query = network2vnfquery[metric_type2].query_template.format(vnf_name, vnf_interface)
            unit = network2vnfquery[metric_type2].unit
            name = '{0}@{1}:{2}'.format(metric_type2, vnf_name, vnf_interface)
            metric = Metric(metric_name=name, desc=desc, query=query, metric_type=metric_type2, unit=unit)


        # if a match is given, install a flow specific counter
        else:
            flow_metric = metric2flow_metric[metric_type2]
            source = nsdlink_id['source']
            destination = nsdlink_id['destination']
            match = nsdlink_id['match']
            # install the flow and export the metric
            r = self.vim.flow_total('start', source, destination, flow_metric, self.cookie_counter, match=match,
                            bidirectional=False, priority=MONITOR_FLOW_PRIORITY)
            LOG.info('start link metric ret:{0}'.format(r))
            query = metric2flowquery[metric_type2].query_template.format(self.cookie_counter, vnf_name, vnf_interface)
            unit = network2vnfquery[metric_type2].unit
            name = '{0}@{1}:{2}:{3}'.format(metric_type2, vnf_name, vnf_interface, self.cookie_counter)
            metric = Metric(metric_name=name, desc=desc, query=query, metric_type=metric_type2, unit=unit)
            self.cookie_counter += 1

        return metric

    # delete flow_metrics for a specified chain
    def stop_nsdlink_metric(self, metric_group, nsdlink_id):

        # remove the link metrics
        title = metric_group['description']
        metric_type = metric_group['metric_type']
        metric_type2 = nsdlink_id['direction'] + "_" + metric_type
        source = nsdlink_id['source']
        destination = nsdlink_id['destination']
        direction = nsdlink_id['direction']

        if 'rx' in direction:
            vnf_name = parse_vnf_name(destination)
            vnf_interface = parse_vnf_interface(destination)
        elif 'tx' in direction:
            vnf_name = parse_vnf_name(source)
            vnf_interface = parse_vnf_interface(source)

        # if match is empty then it is a total interface counter
        if not nsdlink_id.get('match'):
            flow_metric = metric2flow_metric[metric_type2]
            # metrics of cAdvisor are already exported by default
            if not '_cadv' in metric_type:
                if vnf_interface is None:
                    vnf_interface = ''
                r = self.vim.monitor_interface('stop', vnf_name + ':' + vnf_interface, flow_metric)
                LOG.info('stop link metric ret:{0}'.format(r))

        # if a match is given, uninstall a flow specific counter
        else:
            flow_metric = metric2flow_metric[metric_type2]
            source = nsdlink_id['source']
            destination = nsdlink_id['destination']
            match = nsdlink_id['match']
            # remove the flow and stop exporting the metric
            r = self.vim.flow_total('stop', source, destination, flow_metric, self.cookie_counter, match=match,
                                bidirectional=False, priority=MONITOR_FLOW_PRIORITY)
            LOG.info('stop link metric ret:{0}'.format(r))
            self.cookie_counter += 1
Example #7
class msd():
    def __init__(self, msd_path, vim, title=None):

        # Parse the msd file
        LOG.info('parsing msd: {0}'.format(msd_path))
        self.msd_dict = load_yaml(msd_path)

        # the VIM class where the monitoring is installed (son-emu manager)
        self.vim = vim

        # initialize a new Grafana dashboard
        self.grafana = Grafana()

        # get msd file parameters
        if title is None:
            title = self.msd_dict.get('dashboard')
        self.dashboard = title
        self.version = self.msd_dict.get('version')

        # get msd VNF metrics to monitor
        self.vnf_metrics = self.msd_dict.get('vnf_metrics', [])
        # get msd NSD links to monitor
        self.nsd_links = self.msd_dict.get('nsd_links', [])

        # cookie integer, unique per monitored flow
        self.cookie_counter = COOKIE_START

    def start(self, title=None, overwrite=True):
        if title is None:
            title = self.dashboard
        # init the dashboard
        self.grafana.init_dashboard(title=title, overwrite=overwrite)

        metrics = self.get_metrics()
        self.start_grafana(metrics)

    def stop(self):
        # clear the dashboard
        self.grafana.del_dashboard(title=self.dashboard)

        # remove metrics from the MSD file
        self.set_vnf_metrics('stop')
        self.set_nsdlink_metrics('stop')

    # install and return all metrics from the msd (without installing Grafana)
    def get_metrics(self):
        vnf_metrics_dict = self.set_vnf_metrics('start')
        link_metrics_dict = self.set_nsdlink_metrics('start')
        # merge the 2 dicts
        all_metrics = dict(vnf_metrics_dict, **link_metrics_dict)
        return all_metrics

    # install and return all metrics from the msd as a list (without installing Grafana)
    def get_metrics_list(self):
        vnf_metrics_dict = self.set_vnf_metrics('start')
        link_metrics_dict = self.set_nsdlink_metrics('start')
        # merge the 2 dicts
        all_metrics = dict(vnf_metrics_dict, **link_metrics_dict)
        flat_metric_list = []
        for metric_list in all_metrics.values():
            flat_metric_list += metric_list

        # return list of class Metric
        return flat_metric_list

    def start_grafana(self, metrics):
        for metric_group in metrics:
            title = metric_group
            metric_list = metrics[metric_group]

            # need list of dicts for Grafana lib
            graph_list = [metric.__dict__ for metric in metric_list]

            # check metric_type in first metric of the list
            if 'count' in metric_list[0].metric_type:
                self.grafana.add_panel(metric_list=graph_list,
                                       title=title,
                                       dashboard_name=self.dashboard,
                                       graph_type='bars')
            else:
                self.grafana.add_panel(metric_list=graph_list,
                                       title=title,
                                       dashboard_name=self.dashboard)

    def set_vnf_metrics(self, action=None):
        all_metrics = {}
        for metric_group in self.vnf_metrics:
            title = metric_group['description']
            metric_list = self.vnfmetric_classifier(metric_group, action)

            # group all metrics in dict
            all_metrics[title] = metric_list

        return all_metrics

    # execute the correct function to start/stop the metric_type
    def vnfmetric_classifier(self, metric_group, action):
        compute_metric_dict = {'start': self.start_compute_metric}

        testvnf_metric_dict = {'start': self.start_testvnf_metric}

        network_metric_dict = {
            'start': self.start_network_metric,
            'stop': self.stop_network_metric
        }

        metric_type = metric_group['metric_type']
        LOG.info('metric_type:{0}'.format(metric_type))

        metric_list = []
        for vnf_id in metric_group.get('vnf_ids', []):
            metric = None

            # Monitor metrics exported by Test-VNFs
            if metric_type in testvnf_metrics:
                function = testvnf_metric_dict.get(action)
                if function:
                    metric = function(metric_group, vnf_id)

            # monitor compute stats (exported by cAdvisor in son-emu)
            elif metric_type in compute_metrics:
                function = compute_metric_dict.get(action)
                if function:
                    metric = function(metric_group, vnf_id)

            # monitor network stats (exported by Ryu/cAdvisor in son-emu)
            elif metric_type in network_metrics:
                function = network_metric_dict.get(action)
                if function:
                    metric = function(metric_group, vnf_id)

            else:
                logging.info(
                    "No query found for metric type: {0}".format(metric_type))
                continue

            metric_list.append(metric)

        return metric_list

    def start_testvnf_metric(self, metric_group, vnf_id):
        # make default description
        desc = vnf_id.get("description")
        if not desc:
            desc = vnf_id['vnf']

        metric_type = metric_group['metric_type']
        # set correct Prometheus query
        query = test2vnfquery[metric_type].query_template.format(vnf_id['vnf'])
        unit = test2vnfquery[metric_type].unit
        name = '@'.join([metric_type, vnf_id['vnf']])
        metric = Metric(metric_name=name,
                        desc=desc,
                        query=query,
                        metric_type=metric_type,
                        unit=unit)
        return metric

    def start_compute_metric(self, metric_group, vnf_id):
        # make default description
        desc = vnf_id.get("description")
        if not desc:
            desc = vnf_id['vnf']

        metric_type = metric_group['metric_type']
        # set correct Prometheus query
        query = compute2vnfquery[metric_type].query_template.format(
            vnf_id['vnf'])
        unit = compute2vnfquery[metric_type].unit
        name = '@'.join([metric_type, vnf_id['vnf']])
        metric = Metric(metric_name=name,
                        desc=desc,
                        query=query,
                        metric_type=metric_type,
                        unit=unit)
        return metric

    # network metrics gathered by the network interface counters
    def start_network_metric(self, metric_group, vnf_id):
        metric_type = metric_group['metric_type']
        metric_type2 = vnf_id['direction'] + "_" + metric_type
        vnf_name = parse_vnf_name(vnf_id['vnf'])
        vnf_interface = parse_vnf_interface(vnf_id['vnf'])

        # metrics of cadvisor are already exported by default
        if not '_cadv' in metric_type:
            flow_metric = metric2flow_metric[metric_type2]
            r = self.vim.monitor_interface(action='start',
                                           vnf_name=vnf_name + ':' +
                                           vnf_interface,
                                           metric=flow_metric)
            LOG.info('start metric ret:{0}'.format(r))
        query = network2vnfquery[metric_type2].query_template.format(
            vnf_name, vnf_interface)
        # make default description
        desc = vnf_id.get("description")
        if not desc:
            desc = vnf_id['vnf'] + ':' + vnf_id['direction']
        unit = network2vnfquery[metric_type2].unit
        name = '@'.join([metric_type2, vnf_id['vnf']])
        metric = Metric(metric_name=name,
                        desc=desc,
                        query=query,
                        metric_type=metric_type2,
                        unit=unit)
        return metric

    def stop_network_metric(self, metric_group, vnf_id):
        metric_type = metric_group['metric_type']
        metric_type2 = vnf_id['direction'] + "_" + metric_type
        vnf_name = parse_vnf_name(vnf_id['vnf'])
        vnf_interface = parse_vnf_interface(vnf_id['vnf'])

        # metrics of cadvisor are already exported by default
        if not '_cadv' in metric_type:
            flow_metric = metric2flow_metric[metric_type2]
            r = self.vim.monitor_interface('stop',
                                           vnf_name + ':' + vnf_interface,
                                           flow_metric)
            LOG.info('stop metric ret:{0}'.format(r))
        return

    def set_nsdlink_metrics(self, action=None):
        all_metrics = {}

        for metric_group in self.nsd_links:
            title = metric_group['description']
            metric_list = self.nsdlink_classifier(metric_group, action)

            # group all metrics in dict
            all_metrics[title] = metric_list

        return all_metrics

    # execute the correct function to start/stop the metric_type
    def nsdlink_classifier(self, metric_group, action):
        nsdlink_metric_dict = {
            'start': self.start_nsdlink_metric,
            'stop': self.stop_nsdlink_metric
        }

        metric_list = []

        metric_type = metric_group['metric_type']
        LOG.info('metric_type:{0}'.format(metric_type))

        for nsdlink_id in metric_group.get('link_ids', []):
            # monitor network stats (exported by Ryu/cAdvisor in son-emu)
            if metric_type in nsdlink_metrics:
                function = nsdlink_metric_dict.get(action)
                if function:
                    metric = function(metric_group, nsdlink_id)
                    metric_list.append(metric)

                else:
                    logging.info("No query found for metric type: {0}".format(
                        metric_type))
                    continue

        return metric_list

    # install flow_metrics for a specified chain
    def start_nsdlink_metric(self, metric_group, nsdlink_id):
        # install the link metric
        title = metric_group['description']
        metric_type = metric_group['metric_type']
        metric_type2 = nsdlink_id['direction'] + "_" + metric_type
        source = nsdlink_id['source']
        destination = nsdlink_id['destination']
        direction = nsdlink_id['direction']

        if 'rx' in direction:
            vnf_name = parse_vnf_name(destination)
            vnf_interface = parse_vnf_interface(destination)
        elif 'tx' in direction:
            vnf_name = parse_vnf_name(source)
            vnf_interface = parse_vnf_interface(source)

        # make default description
        desc = nsdlink_id.get("description")
        if not desc:
            desc = nsdlink_id['link_id'] + ':' + nsdlink_id['direction']

        # if match is empty then it is a total interface counter
        if not nsdlink_id.get('match'):
            flow_metric = metric2flow_metric[metric_type2]
            # metrics of cAdvisor are already exported by default
            if not '_cadv' in metric_type:
                if vnf_interface is None:
                    vnf_interface = ''
                r = self.vim.monitor_interface('start',
                                               vnf_name + ':' + vnf_interface,
                                               flow_metric)
                LOG.info('start link metric ret:{0}'.format(r))
            query = network2vnfquery[metric_type2].query_template.format(
                vnf_name, vnf_interface)
            unit = network2vnfquery[metric_type2].unit
            name = '{0}@{1}:{2}'.format(metric_type2, vnf_name, vnf_interface)
            metric = Metric(metric_name=name,
                            desc=desc,
                            query=query,
                            metric_type=metric_type2,
                            unit=unit)

        # if a match is given, install a flow specific counter
        else:
            flow_metric = metric2flow_metric[metric_type2]
            source = nsdlink_id['source']
            destination = nsdlink_id['destination']
            match = nsdlink_id['match']
            # install the flow and export the metric
            r = self.vim.flow_total('start',
                                    source,
                                    destination,
                                    flow_metric,
                                    self.cookie_counter,
                                    match=match,
                                    bidirectional=False,
                                    priority=MONITOR_FLOW_PRIORITY)
            LOG.info('start link metric ret:{0}'.format(r))
            query = metric2flowquery[metric_type2].query_template.format(
                self.cookie_counter, vnf_name, vnf_interface)
            unit = network2vnfquery[metric_type2].unit
            name = '{0}@{1}:{2}:{3}'.format(metric_type2, vnf_name,
                                            vnf_interface, self.cookie_counter)
            metric = Metric(metric_name=name,
                            desc=desc,
                            query=query,
                            metric_type=metric_type2,
                            unit=unit)
            self.cookie_counter += 1

        return metric

    # delete flow_metrics for a specified chain
    def stop_nsdlink_metric(self, metric_group, nsdlink_id):

        # remove the link metrics
        title = metric_group['description']
        metric_type = metric_group['metric_type']
        metric_type2 = nsdlink_id['direction'] + "_" + metric_type
        source = nsdlink_id['source']
        destination = nsdlink_id['destination']
        direction = nsdlink_id['direction']

        if 'rx' in direction:
            vnf_name = parse_vnf_name(destination)
            vnf_interface = parse_vnf_interface(destination)
        elif 'tx' in direction:
            vnf_name = parse_vnf_name(source)
            vnf_interface = parse_vnf_interface(source)

        # if match is empty then it is a total interface counter
        if not nsdlink_id.get('match'):
            flow_metric = metric2flow_metric[metric_type2]
            # metrics of cAdvisor are already exported by default
            if not '_cadv' in metric_type:
                if vnf_interface is None:
                    vnf_interface = ''
                r = self.vim.monitor_interface('stop',
                                               vnf_name + ':' + vnf_interface,
                                               flow_metric)
                LOG.info('stop link metric ret:{0}'.format(r))

        # if a match is given, uninstall a flow specific counter
        else:
            flow_metric = metric2flow_metric[metric_type2]
            source = nsdlink_id['source']
            destination = nsdlink_id['destination']
            match = nsdlink_id['match']
            # remove the flow and stop exporting the metric
            r = self.vim.flow_total('stop',
                                    source,
                                    destination,
                                    flow_metric,
                                    self.cookie_counter,
                                    match=match,
                                    bidirectional=False,
                                    priority=MONITOR_FLOW_PRIORITY)
            LOG.info('stop link metric ret:{0}'.format(r))
            self.cookie_counter += 1
Example #8
class emu:
    def __init__(self, REST_api):
        self.url = REST_api
        self.tmp_dir = "/tmp/son-monitor"
        self.docker_dir = "/tmp/son-monitor/docker"
        self.prometheus_dir = "/tmp/son-monitor/prometheus"
        self.grafana_dir = "/tmp/son-monitor/grafana"
        for dir in [self.docker_dir, self.prometheus_dir, self.grafana_dir]:
            if not os.path.exists(dir):
                # make local working directory
                os.makedirs(dir)

        self.docker_based = os.getenv("SON_CLI_IN_DOCKER", False)

        self.grafana = None

    def init(self, action, **kwargs):
        # startup SONATA SDK environment (cAdvisor, Prometheus, PushGateway, son-emu(experimental))
        actions = {"start": self.start_containers, "stop": self.stop_containers}
        return actions[action](**kwargs)

    def nsd(self, action, **kwargs):
        # install or remove the Grafana metrics described in an NSD file
        actions = {"start": self.start_nsd, "stop": self.stop_nsd}
        return actions[action](**kwargs)

    def msd(self, action, **kwargs):
        # install or remove the monitoring metrics described in an MSD file
        actions = {"start": self.start_msd, "stop": self.stop_msd}
        return actions[action](**kwargs)

    # parse the nsd file and install the grafana metrics
    def start_nsd(self, file=None, **kwargs):
        self.grafana = Grafana()
        self.grafana.init_dashboard()
        self.grafana.parse_nsd(file)

        return "nsd metrics installed"

    def stop_nsd(self, **kwargs):
        self.grafana.init_dashboard()

    # parse the msd file, export the metrics from son-emu and show them in Grafana
    def start_msd(self, file=None, **kwargs):

        # also start son-monitor containers
        self.start_containers()

        # Parse the msd file
        logging.info("parsing msd: {0}".format(file))
        msd = load_yaml(file)

        # initialize a new Grafana dashboard
        self.grafana = Grafana()
        dashboard_name = msd["dashboard"]
        self.grafana.init_dashboard(title=dashboard_name)

        # Install the vnf metrics
        self.install_vnf_metrics(msd, dashboard_name)

        # install the link metrics
        # first make sure everything is stopped
        # self.install_nsd_links(msd, 'stop', dashboard_name)
        self.install_nsd_links(msd, "start", dashboard_name)

        # execute the SAP commands
        # first make sure everything is stopped
        # self.install_sap_commands(msd, "stop")
        self.install_sap_commands(msd, "start")

        return "msd metrics installed"

    def stop_msd(self, file=None, **kwargs):

        logging.info("parsing msd: {0}".format(file))
        msd = load_yaml(file)

        # clear the dashboard
        self.grafana = Grafana()
        dashboard_name = msd["dashboard"]
        self.grafana.del_dashboard(title=dashboard_name)

        # delete all installed flow_metrics
        self.install_nsd_links(msd, "stop", dashboard_name)

        # kill all the SAP commands
        self.install_sap_commands(msd, "stop")

        sleep(3)
        # also stop son-monitor containers
        self.stop_containers()

        return "msd metrics deleted"

    # start or stop (kill) the sap commands
    def install_sap_commands(self, msd, action):
        # execute the SAP commands
        for sap in msd.get("saps", []):
            sap_docker_name = "mn." + sap["sap_name"]
            wait = sap.get("wait", False)
            for cmd in sap["commands"]:
                if sap["method"] == "son-emu-VM-ssh":
                    if action == "stop":
                        cmd = "sudo docker exec -it " + sap_docker_name + " pkill -9 -f '" + cmd + "'"
                        wait = True
                    else:
                        cmd = "sudo docker exec -it " + sap_docker_name + " " + cmd

                    thread = Thread(target=self.ssh_cmd, kwargs=dict(cmd=cmd, username="******", password="******"))
                    thread.start()
                    if wait:
                        thread.join()
                    # process = self.ssh_cmd(cmd, username='******', password='******', wait=wait)
                elif sap["method"] == "son-emu-local":
                    process = self.docker_exec_cmd(cmd, sap_docker_name)

    # Install the vnf metrics (cpu, mem, interface packet rate)
    def install_vnf_metrics(self, msd, dashboard_name):

        vnf_metrics = msd["vnf_metrics"]
        for metric_group in vnf_metrics:
            graph_list = []

            title = metric_group["desc"]
            metric_type = metric_group["type"]

            logging.info("metric_type:{0}".format(metric_type))
            if metric_type in ["jitter", "packet_loss"]:
                query = profile2vnfquery[metric_type]
                graph_dict = dict(desc=title, metric=query)
                graph_list.append(graph_dict)

            elif len(metric_group.get("vnf_ids", [])) == 0:
                # no vnfs need to be monitored
                continue

            for vnf_id in metric_group.get("vnf_ids", []):
                graph_dict = {}

                if metric_type in ["packet_rate", "byte_rate"]:
                    metric_type2 = vnf_id["direction"] + "_" + metric_type
                    vnf_name = parse_vnf_name(vnf_id["vnf"])
                    vnf_interface = parse_vnf_interface(vnf_id["vnf"])
                    flow_metric = metric2flow_metric[metric_type2]
                    self.interface("start", vnf_name + ":" + vnf_interface, flow_metric)
                    query = metric2vnfquery[metric_type2].format(vnf_name, vnf_interface)
                    desc = vnf_id["vnf"] + ":" + vnf_id["direction"]
                    graph_dict = dict(desc=desc, metric=query)

                elif metric_type in ["cpu", "mem"]:
                    query = metric2vnfquery[metric_type].format(vnf_id)
                    graph_dict = dict(desc=vnf_id, metric=query)

                else:
                    logging.info("No query found for metric type: {0}".format(metric_type))
                    continue

                graph_list.append(graph_dict)

            self.grafana.add_panel(metric_list=graph_list, title=title, dashboard_name=dashboard_name)

    # install or delete all installed flow_metrics
    def install_nsd_links(self, msd, action, dashboard_name):
        # install the link metrics
        cookie = COOKIE_START
        for nsd_link in msd["nsd_links"]:
            graph_list = []
            if nsd_link["metrics"] is None:
                # no vnfs need to be monitored
                break
            title = nsd_link["desc"]
            metric_type = nsd_link["metric_type"]
            source = nsd_link["source"]
            destination = nsd_link["destination"]
            if "rx" in metric_type:
                vnf_name = parse_vnf_name(destination)
                vnf_interface = parse_vnf_interface(destination)
            elif "tx" in metric_type:
                vnf_name = parse_vnf_name(source)
                vnf_interface = parse_vnf_interface(source)

            for metric in nsd_link["metrics"]:
                graph_dict = {}

                # an interface metric exported from cAdvisor
                if metric["type"] == "total":
                    if action == "stop":
                        continue
                    query = metric2total_query[metric_type].format(vnf_name, vnf_interface)
                    graph_dict = dict(desc=metric["desc"], metric=query)
                # an interface metric as an interface packet counter from the son-emu network switch
                elif metric["type"] == "flow_total":
                    flow_metric = metric2flow_metric[metric_type]
                    self.interface(action, vnf_name + ":" + vnf_interface, flow_metric)
                    if action == "stop":
                        continue
                    query = metric2totalflowquery[metric_type].format(vnf_name, vnf_interface)
                    graph_dict = dict(desc=metric["desc"], metric=query)
                # a metric as a custom flow counter from the son-emu network switch
                elif metric["type"] == "flow":
                    flow_metric = metric2flow_metric[metric_type]
                    source = nsd_link["source"]
                    destination = nsd_link["destination"]
                    match = metric["match"]
                    # install the flow and export the metric
                    if action == "stop":
                        self.flow_total(action, source, destination, flow_metric, cookie)
                        cookie += 1
                        continue
                    else:
                        self.flow_total(
                            action,
                            source,
                            destination,
                            flow_metric,
                            cookie,
                            match=match,
                            bidirectional=False,
                            priority=100,
                        )
                        query = metric2flowquery[metric_type].format(cookie, vnf_name, vnf_interface)
                        graph_dict = dict(desc=metric["desc"], metric=query)
                        cookie += 1

                if action == "start":
                    graph_list.append(graph_dict)

            if action == "start":
                self.grafana.add_panel(
                    metric_list=graph_list, title=title, dashboard_name=dashboard_name, graph_type="bars"
                )

    # start the sdk monitoring framework (cAdvisor, Prometheus, Pushgateway, ...)
    def start_containers(self, **kwargs):
        # docker-compose up -d
        cmd = ["docker-compose", "-p sonmonitor", "up", "-d"]

        if self.docker_based:
            # we are running son-cli in a docker container
            logging.info("son-cli is running inside a docker container")
            src_path = os.path.join("docker", "docker-compose-docker.yml")
        else:
            # we are running son-cli locally
            src_path = os.path.join("docker", "docker-compose-local.yml")
        srcfile = pkg_resources.resource_filename(__name__, src_path)
        # copy the docker compose file to a working directory
        copy(srcfile, os.path.join(self.docker_dir, "docker-compose.yml"))

        # copy the prometheus config file for use in the prometheus docker container
        src_path = os.path.join("prometheus", "prometheus_sdk.yml")
        srcfile = pkg_resources.resource_filename(__name__, src_path)
        copy(srcfile, self.prometheus_dir)

        # copy grafana directory
        src_path = os.path.join("grafana", "grafana.db")
        srcfile = pkg_resources.resource_filename(__name__, src_path)
        copy(srcfile, self.grafana_dir)

        logging.info("Start son-monitor containers: {0}".format(self.docker_dir))
        process = Popen(cmd, cwd=self.docker_dir)
        process.wait()

        # Wait a while for containers to be completely started
        sleep(2)
        return "son-monitor started"

    # start the sdk monitoring framework
    def stop_containers(self, **kwargs):
        """
        # hard stopping of containers
        cmd = [
            'docker',
            'rm',
            '-f',
            'grafana',
            'prometheus'
        ]
        logging.info('stop and remove son-monitor containers')
        process = Popen(cmd, cwd=self.docker_dir)
        process.wait()
        """
        # docker-compose down, remove volumes
        cmd = ["docker-compose", "-p sonmonitor", "down", "-v"]
        logging.info("stop and remove son-monitor containers")
        process = Popen(cmd, cwd=self.docker_dir)
        process.wait()
        # try to remove tmp directory
        try:
            if os.path.exists(self.tmp_dir):
                rmtree(self.tmp_dir)
        except:
            logging.info("cannot remove {0} (this is normal if mounted as a volume)".format(self.tmp_dir))

        return "son-monitor stopped"

    def ssh_cmd(self, cmd, host="localhost", port=22, username="******", password="******"):
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        # ssh.connect(mgmt_ip, username='******', password='******')
        ssh.connect(host, port=port, username=username, password=password)
        logging.info("executing command: {0}".format(cmd))
        stdin, stdout, stderr = ssh.exec_command(cmd, get_pty=True)

        # Wait for the command to terminate
        timer = 0
        while (not stdout.channel.exit_status_ready()) and timer < 3:
            # Only print data if there is data to read in the channel
            if stdout.channel.recv_ready():
                rl, wl, xl = select.select([stdout.channel], [], [], 0.0)
                if len(rl) > 0:
                    # Print data from stdout
                    logging.info(stdout.channel.recv(1024))
                    timer = 0
            else:
                timer += 1
                sleep(1)

        ssh.close()

    def docker_exec_cmd(self, cmd, docker_name):
        cmd_list = shlex.split(cmd)
        cmd = ["docker", "exec", "-it", docker_name]
        cmd = cmd + cmd_list
        logging.info("executing command: {0}".format(cmd))
        process = Popen(cmd)
        # process.wait()
        return process

    def interface(self, action, vnf_name, metric, **kwargs):
        # check required arguments
        actions = {"start": put, "stop": delete}
        if not valid_arguments(action, vnf_name, metric):
            return "Function arguments not valid"
        if actions.get(action) is None:
            return "Action argument not valid"

        vnf_name2 = parse_vnf_name(vnf_name)
        vnf_interface = parse_vnf_interface(vnf_name)

        url = construct_url(self.url, "restapi/monitor", vnf_name2, vnf_interface, metric)

        response = actions[action](url)
        return response.json()

    def flow_mon(self, action, vnf_name, metric, cookie, **kwargs):
        # check required arguments
        actions = {"start": put, "stop": delete}
        if not valid_arguments(action, vnf_name, metric, cookie):
            return "Function arguments not valid"
        if actions.get(action) is None:
            return "Action argument not valid"

        vnf_name2 = parse_vnf_name(vnf_name)
        vnf_interface = parse_vnf_interface(vnf_name)

        url = construct_url(self.url, "restapi/flowmon", vnf_name2, vnf_interface, metric, cookie)

        response = actions[action](url)

        return response.json()

    def flow_entry(self, action, source, destination, **args):
        # check required arguments
        actions = {"start": put, "stop": delete}
        if not valid_arguments(source, destination):
            return "arguments not valid"
        if actions.get(action) is None:
            return "Action argument not valid"

        vnf_src_name = parse_vnf_name(source)
        vnf_dst_name = parse_vnf_name(destination)

        params = create_dict(
            vnf_src_interface=parse_vnf_interface(source),
            vnf_dst_interface=parse_vnf_interface(destination),
            weight=args.get("weight"),
            match=args.get("match"),
            bidirectional=args.get("bidirectional"),
            priority=args.get("priority"),
            cookie=args.get("cookie"),
        )

        response = actions[action](
            "{0}/restapi/network/{1}/{2}".format(self.url, vnf_src_name, vnf_dst_name), json=params
        )

        return response.json()

    def flow_total(self, action, source, destination, metric, cookie, **kwargs):
        # check required arguments
        actions = {"start": put, "stop": delete}
        if not valid_arguments(source, destination, cookie):
            return "arguments not valid"
        if actions.get(action) is None:
            return "Action argument not valid"

        vnf_src_name = parse_vnf_name(source)
        vnf_dst_name = parse_vnf_name(destination)

        params = create_dict(
            vnf_src_interface=parse_vnf_interface(source),
            vnf_dst_interface=parse_vnf_interface(destination),
            weight=kwargs.get("weight"),
            match=kwargs.get("match"),
            bidirectional=kwargs.get("bidirectional"),
            priority=kwargs.get("priority"),
            cookie=cookie,
        )

        # first add this specific flow to the emulator network
        ret1 = self.flow_entry(action, source, destination, **params)
        # then export its metrics (from the src and dst vnf_interface)
        if kwargs.get("bidirectional") == True:
            ret3 = self.flow_mon(action, destination, metric, cookie)
            ret2 = self.flow_mon(action, source, metric, cookie)

        elif "rx" in metric:
            ret3 = self.flow_mon(action, destination, metric, cookie)
            ret2 = ""

        elif "tx" in metric:
            ret2 = self.flow_mon(action, source, metric, cookie)
            ret3 = ""

        return_value = "flow-entry:\n{0} \nflow-mon src:\n{1} \nflow-mon dst:\n{2}".format(ret1, ret2, ret3)
        return return_value

    def query(self, vnf_name, query, datacenter=None, **kwargs):
        vnf_name2 = parse_vnf_name(vnf_name)
        vnf_interface = parse_vnf_interface(vnf_name)

        if datacenter is None:
            datacenter = self._find_dc(vnf_name2)
        dc_label = datacenter
        vnf_status = get("{0}/restapi/compute/{1}/{2}".format(self.url, dc_label, vnf_name2)).json()
        uuid = vnf_status["id"]
        query = query.replace("<uuid>", uuid)

        r = query_Prometheus(query)
        return r

    def profile(self, args):

        return "not yet fully implemented"

        nw_list = list()
        if args.get("network") is not None:
            nw_list = parse_network(args.get("network"))

        params = create_dict(
            network=nw_list,
            command=args.get("docker_command"),
            image=args.get("image"),
            input=args.get("input"),
            output=args.get("output"),
        )

        profiler_emu = profiler.Emu_Profiler(self.url)

        # deploy the test service chain
        vnf_name = parse_vnf_name(args.get("vnf_name"))
        dc_label = args.get("datacenter")
        profiler_emu.deploy_chain(dc_label, vnf_name, params)

        # generate output table
        for output in profiler_emu.generate():
            print(output + "\n")

    def _find_dc(self, vnf_name):
        datacenter = None
        vnf_list = get("{0}/restapi/compute".format(self.url)).json()
        for vnf in vnf_list:
            if vnf[0] == vnf_name:
                datacenter = vnf[1]["datacenter"]
        return datacenter

    # find the public ip address where we can log into the node
    def _find_public_ip(self, vnf_name):
        dc_label = self._find_dc(vnf_name)
        vnf_status = get("{0}/restapi/compute/{1}/{2}".format(self.url, dc_label, vnf_name)).json()
        return vnf_status["docker_network"]
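
The interface(), flow_mon() and flow_entry() helpers above all share the same REST dispatch: the action string picks the HTTP verb (put to install, delete to remove) and the target URL is assembled from path segments against the son-emu REST API. A minimal standalone sketch of that idea follows, using requests directly in place of the module's put/delete and construct_url helpers; the exact URL layout and the endpoint address are assumptions.

import requests

def monitor_interface(base_url, action, vnf_name, interface, metric):
    # the action string selects the HTTP verb, mirroring actions = {"start": put, "stop": delete}
    actions = {'start': requests.put, 'stop': requests.delete}
    if action not in actions:
        return 'Action argument not valid'
    # hypothetical URL layout, modelled on construct_url(self.url, "restapi/monitor", ...)
    url = '{0}/restapi/monitor/{1}/{2}/{3}'.format(base_url, vnf_name, interface, metric)
    response = actions[action](url)   # requires a reachable son-emu REST API
    return response.json()

# e.g. monitor_interface('http://localhost:5001', 'start', 'fw1', 'port0', 'rx_packet_count')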
Example #9
    def start_nsd(self, file=None, **kwargs):
        self.grafana = Grafana()
        self.grafana.init_dashboard()
        self.grafana.parse_nsd(file)

        return "nsd metrics installed"