# Example #1
# 0
class NetworkServiceRFC3511(NetworkServiceBase):
    """Class handles RFC3511 Network service testing"""

    __scenario_type__ = "NSPerf-RFC3511"

    def __init__(self, scenario_cfg, context_cfg):  # pragma: no cover
        super(NetworkServiceRFC3511, self).__init__(scenario_cfg, context_cfg)

    def setup(self):
        """Setup infrastructure, provision VNFs

        Maps the topology onto the pod, loads the VNF models and
        instantiates every VNF, tearing all of them down again if any
        single instantiation fails.
        """
        self.map_topology_to_infrastructure()
        self.load_vnf_models()

        # instantiate traffic generators before the other VNFs so link
        # events raised during bring-up don't hit a running VNF application
        traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
        non_traffic_runners = [vnf for vnf in self.vnfs if not vnf.runs_traffic]
        try:
            for vnf in chain(traffic_runners, non_traffic_runners):
                LOG.info("Instantiating %s", vnf.name)
                vnf.instantiate(self.scenario_cfg, self.context_cfg)
                LOG.info("Waiting for %s to instantiate", vnf.name)
                vnf.wait_for_instantiate()
        except Exception:  # pylint: disable=broad-except
            # was a bare "except:", which also traps SystemExit and
            # KeyboardInterrupt; log, clean up every VNF and re-raise
            LOG.exception("")
            for vnf in self.vnfs:
                vnf.terminate()
            raise

        self._generate_pod_yaml()

    def run(self, output):
        """ Run experiment

        :param output: scenario output to push results
        :return: None
        """

        self._fill_traffic_profile()

        traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]

        # start listeners first to avoid losing packets
        for traffic_gen in traffic_runners:
            traffic_gen.listen_traffic(self.traffic_profile)

        self.collector = Collector(self.vnfs,
                                   context_base.Context.get_physical_nodes())
        self.collector.start()

        for traffic_gen in traffic_runners:
            LOG.info("Run traffic on %s", traffic_gen.name)
            traffic_gen.run_traffic(self.traffic_profile)

        # guarantee the collector is stopped even if KPI collection or the
        # output push raises
        try:
            output.push(self.collector.get_kpi())
        finally:
            self.collector.stop()
# Example #2
# 0
class NetworkServiceTestCase(base.Scenario):
    """Class handles Generic framework to do pre-deployment VNF &
       Network service testing  """

    __scenario_type__ = "NSPerf"

    def __init__(self, scenario_cfg, context_cfg):  # Yardstick API
        """Load and parse the topology descriptor referenced by the scenario.

        :param scenario_cfg: scenario config; must contain "topology" and
            "task_path"
        :param context_cfg: context config with the "nodes"/"networks" maps
        """
        super(NetworkServiceTestCase, self).__init__()
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg

        # fixme: create schema to validate all fields have been provided
        with open_relative_file(scenario_cfg["topology"],
                                scenario_cfg['task_path']) as stream:
            topology_yaml = yaml_load(stream)

        self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0]
        self.vnfs = []
        self.collector = None
        self.traffic_profile = None

    def _get_ip_flow_range(self, ip_start_range):
        """Resolve a flow IP range from a node interface or literal range.

        :param ip_start_range: {node_name: interface_name_or_range} mapping;
            an empty mapping falls back to "0.0.0.0"
        :return: "first-last" range string, or a single IP when the network
            is too small to provide a usable range
        """
        node_name, range_or_interface = next(iter(ip_start_range.items()),
                                             (None, '0.0.0.0'))
        if node_name is not None:
            node = self.context_cfg["nodes"].get(node_name, {})
            try:
                # the ip_range is the interface name
                interface = node.get("interfaces", {})[range_or_interface]
            except KeyError:
                ip = "0.0.0.0"
                mask = "255.255.255.0"
            else:
                ip = interface["local_ip"]
                # we can't default these values, they must both exist to be valid
                mask = interface["netmask"]

            ipaddr = ipaddress.ip_network(six.text_type('{}/{}'.format(
                ip, mask)),
                                          strict=False)
            # NOTE(review): hosts() materializes every address in the network;
            # potentially expensive for very wide masks -- confirm expected
            # netmask sizes with the pod configs
            hosts = list(ipaddr.hosts())
            if len(hosts) > 2:
                # skip the first host in case of gateway
                ip_addr_range = "{}-{}".format(hosts[1], hosts[-1])
            else:
                LOG.warning("Only single IP in range %s", ipaddr)
                # fall back to single IP range
                ip_addr_range = ip
        else:
            # we are manually specifying the range
            ip_addr_range = range_or_interface
        return ip_addr_range

    def _get_traffic_flow(self):
        """Build the flow dict from scenario options.

        :return: {"flow": {...}} mapping; the inner dict is empty when the
            scenario has no "flow" options
        """
        flow = {}
        try:
            fflow = self.scenario_cfg["options"]["flow"]
            for index, src in enumerate(fflow.get("src_ip", [])):
                flow["src_ip{}".format(index)] = self._get_ip_flow_range(src)

            for index, dst in enumerate(fflow.get("dst_ip", [])):
                flow["dst_ip{}".format(index)] = self._get_ip_flow_range(dst)

            for index, publicip in enumerate(fflow.get("publicip", [])):
                flow["public_ip{}".format(index)] = publicip

            # "count" is mandatory when a flow section is present
            flow["count"] = fflow["count"]
        except KeyError:
            flow = {}
        return {"flow": flow}

    def _get_traffic_imix(self):
        """Return {"imix": framesize options} or {} when not configured."""
        try:
            imix = {"imix": self.scenario_cfg['options']['framesize']}
        except KeyError:
            imix = {}
        return imix

    def _get_traffic_profile(self):
        """Read and return the raw traffic-profile template text."""
        profile = self.scenario_cfg["traffic_profile"]
        path = self.scenario_cfg["task_path"]
        with open_relative_file(profile, path) as infile:
            return infile.read()

    def _fill_traffic_profile(self):
        """Render the traffic-profile template and build the profile object.

        :return: the instantiated TrafficProfile (also stored on self)
        """
        traffic_mapping = self._get_traffic_profile()
        traffic_map_data = {
            'flow': self._get_traffic_flow(),
            'imix': self._get_traffic_imix(),
            'private': {},
            'public': {},
        }

        traffic_vnfd = vnfdgen.generate_vnfd(traffic_mapping, traffic_map_data)
        self.traffic_profile = TrafficProfile.get(traffic_vnfd)
        return self.traffic_profile

    def _find_vnf_name_from_id(self, vnf_id):
        """Map a member-vnf-index to its vnfd-id-ref, or None."""
        return next((vnfd["vnfd-id-ref"]
                     for vnfd in self.topology["constituent-vnfd"]
                     if vnf_id == vnfd["member-vnf-index"]), None)

    @staticmethod
    def get_vld_networks(networks):
        """Re-key the context networks mapping by vld_id."""
        return {n['vld_id']: n for n in networks.values()}

    def _resolve_topology(self):
        """Resolve topology VLDs against context nodes.

        First pass annotates each endpoint interface with names, vld_id,
        peer data and destination MAC/IP; second pass attaches a shallow
        copy of each peer interface (copy avoids a circular reference).

        :raises IncorrectConfig: malformed VLD or missing interface
        """
        for vld in self.topology["vld"]:
            try:
                node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise IncorrectConfig("Topology file corrupted, "
                                      "wrong endpoint count for connection")

            node0_name = self._find_vnf_name_from_id(
                node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(
                node1_data["member-vnf-index-ref"])

            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]

            try:
                nodes = self.context_cfg["nodes"]
                node0_if = nodes[node0_name]["interfaces"][node0_if_name]
                node1_if = nodes[node1_name]["interfaces"][node1_if_name]

                # names so we can do reverse lookups
                node0_if["ifname"] = node0_if_name
                node1_if["ifname"] = node1_if_name

                node0_if["node_name"] = node0_name
                node1_if["node_name"] = node1_name

                vld_networks = self.get_vld_networks(
                    self.context_cfg["networks"])
                node0_if["vld_id"] = vld["id"]
                node1_if["vld_id"] = vld["id"]

                # set peer name
                node0_if["peer_name"] = node1_name
                node1_if["peer_name"] = node0_name

                # set peer interface name
                node0_if["peer_ifname"] = node1_if_name
                node1_if["peer_ifname"] = node0_if_name

                # just load the network
                node0_if["network"] = vld_networks.get(vld["id"], {})
                node1_if["network"] = vld_networks.get(vld["id"], {})

                node0_if["dst_mac"] = node1_if["local_mac"]
                node0_if["dst_ip"] = node1_if["local_ip"]

                node1_if["dst_mac"] = node0_if["local_mac"]
                node1_if["dst_ip"] = node0_if["local_ip"]

            except KeyError:
                LOG.exception("")
                raise IncorrectConfig("Required interface not found, "
                                      "topology file corrupted")

        for vld in self.topology['vld']:
            try:
                node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise IncorrectConfig("Topology file corrupted, "
                                      "wrong endpoint count for connection")

            node0_name = self._find_vnf_name_from_id(
                node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(
                node1_data["member-vnf-index-ref"])

            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]

            nodes = self.context_cfg["nodes"]
            node0_if = nodes[node0_name]["interfaces"][node0_if_name]
            node1_if = nodes[node1_name]["interfaces"][node1_if_name]

            # add peer interface dict, but remove circular link
            # TODO: don't waste memory
            node0_copy = node0_if.copy()
            node1_copy = node1_if.copy()
            node0_if["peer_intf"] = node1_copy
            node1_if["peer_intf"] = node0_copy

    def _find_vnfd_from_vnf_idx(self, vnf_idx):
        """Map a member-vnf-index to its constituent-vnfd entry, or None."""
        return next((vnfd for vnfd in self.topology["constituent-vnfd"]
                     if vnf_idx == vnfd["member-vnf-index"]), None)

    def _update_context_with_topology(self):
        """Merge each constituent VNFD into its context node entry."""
        for vnfd in self.topology["constituent-vnfd"]:
            vnf_idx = vnfd["member-vnf-index"]
            vnf_name = self._find_vnf_name_from_id(vnf_idx)
            vnfd = self._find_vnfd_from_vnf_idx(vnf_idx)
            self.context_cfg["nodes"][vnf_name].update(vnfd)

    @staticmethod
    def _sort_dpdk_port_num(netdevs):
        # dpdk_port_num is PCI BUS ID ordering, lowest first
        s = sorted(netdevs.values(), key=itemgetter('pci_bus_id'))
        for dpdk_port_num, netdev in enumerate(s):
            netdev['dpdk_port_num'] = dpdk_port_num

    def _probe_netdevs(self, node, node_dict):
        """SSH to the node and collect netdev info from sysfs.

        :param node: node name (used in error messages)
        :param node_dict: node config; gains a 'netdevs' entry on success
        :return: dict of parsed netdev info, empty if no SSH connection
        :raises IncorrectSetup: missing ip tool or unreadable sysfs
        """
        cmd = "PATH=$PATH:/sbin:/usr/sbin ip addr show"
        netdevs = {}
        with SshManager(node_dict) as conn:
            if conn:
                exit_status = conn.execute(cmd)[0]
                if exit_status != 0:
                    raise IncorrectSetup("Node's %s lacks ip tool." % node)
                exit_status, stdout, _ = conn.execute(
                    self.FIND_NETDEVICE_STRING)
                if exit_status != 0:
                    # bug fix: the original message had no conversion
                    # specifier, so "..." % node raised TypeError instead of
                    # IncorrectSetup
                    raise IncorrectSetup(
                        "Cannot find netdev info in sysfs on node %s" % node)
                netdevs = node_dict['netdevs'] = self.parse_netdev_info(stdout)
        return netdevs

    @classmethod
    def _probe_missing_values(cls, netdevs, network):
        """Fill driver/vpci/ifindex for the netdev matching local_mac."""
        mac_lower = network['local_mac'].lower()
        for netdev in netdevs.values():
            if netdev['address'].lower() != mac_lower:
                continue
            network.update({
                'driver': netdev['driver'],
                'vpci': netdev['pci_bus_id'],
                'ifindex': netdev['ifindex'],
            })

    # every interface must provide these fields, either from the pod file
    # or via SSH probing
    TOPOLOGY_REQUIRED_KEYS = frozenset(
        {"vpci", "local_ip", "netmask", "local_mac", "driver"})

    def map_topology_to_infrastructure(self):
        """ This method should verify if the available resources defined in pod.yaml
        match the topology.yaml file.

        :return: None. Side effect: context_cfg is updated
        """
        for node, node_dict in self.context_cfg["nodes"].items():

            for network in node_dict["interfaces"].values():
                missing = self.TOPOLOGY_REQUIRED_KEYS.difference(network)
                if not missing:
                    continue

                # only ssh probe if there are missing values
                # ssh probe won't work on Ixia, so we had better define all our values
                try:
                    netdevs = self._probe_netdevs(node, node_dict)
                except (SSHError, SSHTimeout):
                    raise IncorrectConfig(
                        "Unable to probe missing interface fields '%s', on node %s "
                        "SSH Error" % (', '.join(missing), node))
                try:
                    self._probe_missing_values(netdevs, network)
                except KeyError:
                    pass
                else:
                    missing = self.TOPOLOGY_REQUIRED_KEYS.difference(network)
                if missing:
                    raise IncorrectConfig(
                        "Require interface fields '%s' not found, topology file "
                        "corrupted" % ', '.join(missing))

        # 3. Use topology file to find connections & resolve dest address
        self._resolve_topology()
        self._update_context_with_topology()

    FIND_NETDEVICE_STRING = r"""find /sys/devices/pci* -type d -name net -exec sh -c '{ grep -sH ^ \
$1/ifindex $1/address $1/operstate $1/device/vendor $1/device/device \
$1/device/subsystem_vendor $1/device/subsystem_device ; \
printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
' sh  \{\}/* \;
"""
    BASE_ADAPTER_RE = re.compile(
        '^/sys/devices/(.*)/net/([^/]*)/([^:]*):(.*)$', re.M)

    @classmethod
    def parse_netdev_info(cls, stdout):
        """Parse FIND_NETDEVICE_STRING output into {ifname: info-dict}."""
        network_devices = defaultdict(dict)
        matches = cls.BASE_ADAPTER_RE.findall(stdout)
        for bus_path, interface_name, name, value in matches:
            dirname, bus_id = os.path.split(bus_path)
            if 'virtio' in bus_id:
                # for some stupid reason VMs include virtio1/
                # in PCI device path
                bus_id = os.path.basename(dirname)
            # remove extra 'device/' from 'device/vendor,
            # device/subsystem_vendor', etc.
            if 'device/' in name:
                name = name.split('/')[1]
            network_devices[interface_name][name] = value
            network_devices[interface_name]['interface_name'] = interface_name
            network_devices[interface_name]['pci_bus_id'] = bus_id
        # convert back to regular dict
        return dict(network_devices)

    @classmethod
    def get_vnf_impl(cls, vnf_model_id):
        """ Find the implementing class from vnf_model["vnf"]["name"] field

        :param vnf_model_id: parsed vnfd model ID field
        :return: subclass of GenericVNF
        """
        import_modules_from_package(
            "yardstick.network_services.vnf_generic.vnf")
        expected_name = vnf_model_id
        classes_found = []

        def impl():
            for name, class_ in ((c.__name__, c)
                                 for c in itersubclasses(GenericVNF)):
                if name == expected_name:
                    yield class_
                classes_found.append(name)

        try:
            return next(impl())
        except StopIteration:
            pass

        raise IncorrectConfig("No implementation for %s found in %s" %
                              (expected_name, classes_found))

    @staticmethod
    def update_interfaces_from_node(vnfd, node):
        """Merge node interface data into the VDU external-interfaces."""
        for intf in vnfd["vdu"][0]["external-interface"]:
            node_intf = node['interfaces'][intf['name']]
            intf['virtual-interface'].update(node_intf)

    def load_vnf_models(self, scenario_cfg=None, context_cfg=None):
        """ Create VNF objects based on YAML descriptors

        :param scenario_cfg: scenario config (defaults to self.scenario_cfg)
        :type scenario_cfg: dict
        :param context_cfg: context config (defaults to self.context_cfg)
        :return: list of instantiated VNF objects (also stored on self.vnfs)
        """
        # make sure the TRex client library is first on sys.path
        trex_lib_path = get_nsb_option('trex_client_lib')
        sys.path[:] = list(
            chain([trex_lib_path],
                  (x for x in sys.path if x != trex_lib_path)))

        if scenario_cfg is None:
            scenario_cfg = self.scenario_cfg

        if context_cfg is None:
            context_cfg = self.context_cfg

        vnfs = []
        # we assume OrderedDict for consistency in instantiation
        for node_name, node in context_cfg["nodes"].items():
            LOG.debug(node)
            file_name = node["VNF model"]
            file_path = scenario_cfg['task_path']
            with open_relative_file(file_name, file_path) as stream:
                vnf_model = stream.read()
            vnfd = vnfdgen.generate_vnfd(vnf_model, node)
            # TODO: here add extra context_cfg["nodes"] regardless of template
            vnfd = vnfd["vnfd:vnfd-catalog"]["vnfd"][0]
            self.update_interfaces_from_node(vnfd, node)
            vnf_impl = self.get_vnf_impl(vnfd['id'])
            vnf_instance = vnf_impl(node_name, vnfd)
            vnfs.append(vnf_instance)

        self.vnfs = vnfs
        return vnfs

    def setup(self):
        """ Setup infrastructure, provision VNFs & start traffic

        :return:
        """
        # 1. Verify if infrastructure mapping can meet topology
        self.map_topology_to_infrastructure()
        # 1a. Load VNF models
        self.load_vnf_models()
        # 1b. Fill traffic profile with information from topology
        self._fill_traffic_profile()

        # 2. Provision VNFs

        # link events will cause VNF application to exit
        # so we should start traffic runners before VNFs
        traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
        non_traffic_runners = [
            vnf for vnf in self.vnfs if not vnf.runs_traffic
        ]
        try:
            for vnf in chain(traffic_runners, non_traffic_runners):
                LOG.info("Instantiating %s", vnf.name)
                vnf.instantiate(self.scenario_cfg, self.context_cfg)
                LOG.info("Waiting for %s to instantiate", vnf.name)
                vnf.wait_for_instantiate()
        except RuntimeError:
            for vnf in self.vnfs:
                vnf.terminate()
            raise

        # 3. Run experiment
        # Start listeners first to avoid losing packets
        for traffic_gen in traffic_runners:
            traffic_gen.listen_traffic(self.traffic_profile)

        # register collector with yardstick for KPI collection.
        self.collector = Collector(self.vnfs, self.traffic_profile)
        self.collector.start()

        # Start the actual traffic
        for traffic_gen in traffic_runners:
            LOG.info("Starting traffic on %s", traffic_gen.name)
            traffic_gen.run_traffic(self.traffic_profile)

    def run(self, result):  # yardstick API
        """ Yardstick calls run() at intervals defined in the yaml and
            produces timestamped samples

        :param result: dictionary with results to update
        :return: None
        """

        for vnf in self.vnfs:
            # Result example:
            # {"VNF1: { "tput" : [1000, 999] }, "VNF2": { "latency": 100 }}
            LOG.debug("collect KPI for %s", vnf.name)
            result.update(self.collector.get_kpi(vnf))

    def teardown(self):
        """ Stop the collector and terminate VNF & TG instance

        :return
        """

        self.collector.stop()
        for vnf in self.vnfs:
            LOG.info("Stopping %s", vnf.name)
            vnf.terminate()
# Example #3
# 0
class NetworkServiceTestCase(scenario_base.Scenario):
    """Class handles Generic framework to do pre-deployment VNF &
       Network service testing  """

    __scenario_type__ = "NSPerf"

    def __init__(self, scenario_cfg, context_cfg):  # pragma: no cover
        """Store the configs, render the topology and init runtime state."""
        super(NetworkServiceTestCase, self).__init__()
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg

        # scenario_cfg must be stored first: the topology template and its
        # extra_args are read from it
        self._render_topology()

        # runtime state, populated during setup()/run()
        self.bin_path = get_nsb_option('bin_path', '')
        self.collector = None
        self.node_netdevs = {}
        self.traffic_profile = None
        self.vnfs = []
        self._mq_ids = []

    def _get_ip_flow_range(self, ip_start_range):
        """Retrieve a CIDR first and last viable IPs

        :param ip_start_range: could be the IP range itself or a dictionary
               with the host name and the port.
        :return: (str) IP range (min, max) with this format "x.x.x.x-y.y.y.y"
        """
        # a plain string is already an explicit range; pass it through
        if isinstance(ip_start_range, six.string_types):
            return ip_start_range

        node_name, range_or_interface = next(iter(ip_start_range.items()),
                                             (None, '0.0.0.0'))
        if node_name is None:
            # no node reference: the value itself is the range
            return range_or_interface

        node = self.context_cfg['nodes'].get(node_name, {})
        interface = node.get('interfaces', {}).get(range_or_interface)
        ip, mask = '0.0.0.0', '255.255.255.0'
        if interface:
            ip, mask = interface['local_ip'], interface['netmask']

        cidr = six.text_type('{}/{}'.format(ip, mask))
        network = ipaddress.ip_network(cidr, strict=False)
        if network.prefixlen + 2 < network.max_prefixlen:
            # skip the network address and the first host (possible gateway)
            return '{}-{}'.format(network[2], network[-2])

        LOG.warning('Only single IP in range %s', network)
        return ip

    def _get_traffic_flow(self):
        """Assemble the flow section of the traffic-profile context.

        :return: {"flow": {...}}; the inner dict is empty when the scenario
            has no "flow" options
        """
        flow = {}
        try:
            # TODO: should be .0  or .1 so we can use list
            # but this also roughly matches uplink_0, downlink_0
            fflow = self.scenario_cfg["options"]["flow"]

            # IP fields are resolved through _get_ip_flow_range
            for option in ("src_ip", "dst_ip"):
                for index, value in enumerate(fflow.get(option, [])):
                    key = "{}_{}".format(option, index)
                    flow[key] = self._get_ip_flow_range(value)

            # these fields are copied through verbatim
            for option in ("public_ip", "src_port", "dst_port"):
                for index, value in enumerate(fflow.get(option, [])):
                    flow["{}_{}".format(option, index)] = value

            # optional scalars
            for scalar in ("count", "seed"):
                if scalar in fflow:
                    flow[scalar] = fflow[scalar]

        except KeyError:
            flow = {}
        return {"flow": flow}

    def _get_traffic_imix(self):
        """Return {"imix": framesize options} or {} when not configured."""
        options = self.scenario_cfg.get('options', {})
        if 'framesize' in options:
            return {"imix": options['framesize']}
        return {}

    def _get_traffic_profile(self):
        """Read and return the raw traffic-profile template text."""
        with utils.open_relative_file(
                self.scenario_cfg["traffic_profile"],
                self.scenario_cfg["task_path"]) as infile:
            return infile.read()

    def _get_duration(self):
        """Return the scenario duration option, or the profile default."""
        default = tprofile_base.TrafficProfileConfig.DEFAULT_DURATION
        return self.scenario_cfg.get('options', {}).get('duration', default)

    def _fill_traffic_profile(self):
        """Render the traffic-profile template and build the profile object.

        The template is expanded with the scenario flow/imix options, any
        extra_args and the configured duration, then handed to
        TrafficProfile.get() to instantiate the matching profile class.
        """
        template = self._get_traffic_profile()
        render_context = {
            'flow': self._get_traffic_flow(),
            'imix': self._get_traffic_imix(),
            tprofile_base.TrafficProfile.UPLINK: {},
            tprofile_base.TrafficProfile.DOWNLINK: {},
            'extra_args': self.scenario_cfg.get('extra_args', {}),
            'duration': self._get_duration(),
        }
        rendered = vnfdgen.generate_vnfd(template, render_context)
        self.traffic_profile = tprofile_base.TrafficProfile.get(rendered)

    def _get_topology(self):
        """Read and return the raw topology template text."""
        with utils.open_relative_file(
                self.scenario_cfg["topology"],
                self.scenario_cfg["task_path"]) as infile:
            return infile.read()

    def _render_topology(self):
        """Render the topology template and store the first NSD entry."""
        template = self._get_topology()
        render_context = {
            'extra_args': self.scenario_cfg.get('extra_args', {}),
        }
        topology_yaml = vnfdgen.generate_vnfd(template, render_context)
        self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0]

    def _find_vnf_name_from_id(self, vnf_id):  # pragma: no cover
        """Map a member-vnf-index to its vnfd-id-ref, or None."""
        for vnfd in self.topology["constituent-vnfd"]:
            if vnfd["member-vnf-index"] == vnf_id:
                return vnfd["vnfd-id-ref"]
        return None

    def _find_vnfd_from_vnf_idx(self, vnf_id):  # pragma: no cover
        """Map a member-vnf-index to its constituent-vnfd entry, or None."""
        for vnfd in self.topology["constituent-vnfd"]:
            if vnfd["member-vnf-index"] == vnf_id:
                return vnfd
        return None

    @staticmethod
    def find_node_if(nodes, name, if_name, vld_id):  # pragma: no cover
        """Return the interface dict for *if_name* on node *name*.

        Interfaces may be keyed by their real name (xe0, xe1, ...) or by the
        vld_id (uplink_0, downlink_0, ...); in the latter case the entry is
        re-keyed under the topology interface name.
        """
        interfaces = nodes[name]["interfaces"]
        if if_name not in interfaces:
            # keyed by vld_id: pop and re-insert under the topology name
            interfaces[if_name] = interfaces.pop(vld_id)
        return interfaces[if_name]

    def _resolve_topology(self):
        """Resolve the topology VLDs against the context nodes.

        First pass: for every virtual link descriptor (VLD) look up the two
        endpoint interfaces in ``self.context_cfg['nodes']`` and annotate
        each with its name, owning node, vld_id, peer node/interface names,
        the attached network, and the peer's MAC/IP as destination
        addresses.

        Second pass: attach a shallow-copied snapshot of each peer interface
        under ``peer_intf``; the copy avoids a circular reference between
        the two interface dicts.

        :raises exceptions.IncorrectConfig: when a VLD does not have exactly
            two endpoints, or a referenced interface cannot be found.
        """
        for vld in self.topology["vld"]:
            try:
                # every link must connect exactly two endpoints
                node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise exceptions.IncorrectConfig(
                    error_msg='Topology file corrupted, wrong endpoint count '
                    'for connection')

            node0_name = self._find_vnf_name_from_id(
                node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(
                node1_data["member-vnf-index-ref"])

            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]

            # any KeyError below (missing node, interface, local_mac or
            # local_ip) is reported as a corrupted topology file
            try:
                nodes = self.context_cfg["nodes"]
                node0_if = self.find_node_if(nodes, node0_name, node0_if_name,
                                             vld["id"])
                node1_if = self.find_node_if(nodes, node1_name, node1_if_name,
                                             vld["id"])

                # names so we can do reverse lookups
                node0_if["ifname"] = node0_if_name
                node1_if["ifname"] = node1_if_name

                node0_if["node_name"] = node0_name
                node1_if["node_name"] = node1_name

                node0_if["vld_id"] = vld["id"]
                node1_if["vld_id"] = vld["id"]

                # set peer name
                node0_if["peer_name"] = node1_name
                node1_if["peer_name"] = node0_name

                # set peer interface name
                node0_if["peer_ifname"] = node1_if_name
                node1_if["peer_ifname"] = node0_if_name

                # just load the network
                # networks are keyed by vld_id when they carry one, else by
                # their context name
                vld_networks = {
                    n.get('vld_id', name): n
                    for name, n in self.context_cfg["networks"].items()
                }

                node0_if["network"] = vld_networks.get(vld["id"], {})
                node1_if["network"] = vld_networks.get(vld["id"], {})

                node0_if["dst_mac"] = node1_if["local_mac"]
                node0_if["dst_ip"] = node1_if["local_ip"]

                node1_if["dst_mac"] = node0_if["local_mac"]
                node1_if["dst_ip"] = node0_if["local_ip"]

            except KeyError:
                LOG.exception("")
                raise exceptions.IncorrectConfig(
                    error_msg='Required interface not found, topology file '
                    'corrupted')

        # second pass: runs after ALL interfaces are annotated so the
        # peer_intf snapshots contain the data set above
        for vld in self.topology['vld']:
            try:
                node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise exceptions.IncorrectConfig(
                    error_msg='Topology file corrupted, wrong endpoint count '
                    'for connection')

            node0_name = self._find_vnf_name_from_id(
                node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(
                node1_data["member-vnf-index-ref"])

            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]

            nodes = self.context_cfg["nodes"]
            node0_if = self.find_node_if(nodes, node0_name, node0_if_name,
                                         vld["id"])
            node1_if = self.find_node_if(nodes, node1_name, node1_if_name,
                                         vld["id"])

            # add peer interface dict, but remove circular link
            # TODO: don't waste memory
            node0_copy = node0_if.copy()
            node1_copy = node1_if.copy()
            node0_if["peer_intf"] = node1_copy
            node1_if["peer_intf"] = node0_copy

    def _update_context_with_topology(self):  # pragma: no cover
        """Merge each constituent VNFD into its context node entry."""
        for constituent in self.topology["constituent-vnfd"]:
            vnf_idx = constituent["member-vnf-index"]
            vnf_name = self._find_vnf_name_from_id(vnf_idx)
            vnfd = self._find_vnfd_from_vnf_idx(vnf_idx)
            self.context_cfg["nodes"][vnf_name].update(vnfd)

    def _generate_pod_yaml(self):  # pragma: no cover
        """Dump the resolved context as pod-<task_id>.yaml under LOG_DIR."""
        pod_path = os.path.join(
            LOG_DIR, "pod-{}.yaml".format(self.scenario_cfg['task_id']))
        # pod.yaml expects "nodes" as a list, not the OrderedDict we keep
        serialized_nodes = []
        for node in self.context_cfg["nodes"].values():
            serialized_nodes.append(self._serialize_node(node))
        pod_dict = {
            "nodes": serialized_nodes,
            "networks": self.context_cfg["networks"],
        }
        with open(pod_path, "w") as context_out:
            yaml.safe_dump(pod_dict, context_out, default_flow_style=False,
                           explicit_start=True)

    @staticmethod
    def _serialize_node(node):  # pragma: no cover
        """Return a pod.yaml-safe deep copy of *node*."""
        serialized = copy.deepcopy(node)
        # name field is required; strip the context suffix
        serialized["name"] = node['name'].split('.')[0]
        try:
            serialized["pkey"] = ssh.convert_key_to_str(node["pkey"])
        except KeyError:
            # no private key attached to this node
            pass
        return serialized

    def map_topology_to_infrastructure(self):
        """Verify that the pod resources match the topology description.

        Uses the topology file to find connections and resolve destination
        addresses, then merges the constituent VNFDs into the context.

        :return: None. Side effect: context_cfg is updated
        """
        self._resolve_topology()
        self._update_context_with_topology()

    @classmethod
    def get_vnf_impl(cls, vnf_model_id):  # pragma: no cover
        """ Find the implementing class from vnf_model["vnf"]["name"] field

        :param vnf_model_id: parsed vnfd model ID field
        :return: subclass of GenericVNF
        :raises exceptions.IncorrectConfig: when no subclass matches
        """
        # import all VNF implementations so they register as subclasses
        utils.import_modules_from_package(
            "yardstick.network_services.vnf_generic.vnf")

        seen = []
        for subclass in utils.itersubclasses(GenericVNF):
            if subclass.__name__ == vnf_model_id:
                return subclass
            seen.append(subclass.__name__)

        message = ('No implementation for %s found in %s' %
                   (vnf_model_id, seen))
        raise exceptions.IncorrectConfig(error_msg=message)

    @staticmethod
    def create_interfaces_from_node(vnfd, node):  # pragma: no cover
        """Rebuild the VDU external-interface list from the node interfaces.

        Only interfaces carrying a vld_id are exported: interfaces without
        one never show up here, and only interfaces with traffic profiles
        are used by the generators.  Sorting keeps a stable order with xe0
        first.
        """
        ext_intfs = vnfd["vdu"][0]["external-interface"] = []
        for intf_name, intf in sorted(node['interfaces'].items()):
            if not intf.get('vld_id'):
                continue
            if 'dpdk_port_num' in intf:
                # force dpdk_port_num to int so we can do reverse lookup
                intf['dpdk_port_num'] = int(intf['dpdk_port_num'])
            ext_intfs.append({
                "name": intf_name,
                "virtual-interface": intf,
                "vnfd-connection-point-ref": intf_name,
            })

    def load_vnf_models(self, scenario_cfg=None, context_cfg=None):
        """ Create VNF objects based on YAML descriptors

        :param scenario_cfg: scenario config dict; defaults to
            self.scenario_cfg when None
        :type scenario_cfg: dict
        :param context_cfg: context config dict with a "nodes" mapping;
            defaults to self.context_cfg when None
        :return: list of instantiated VNF objects (also stored on self.vnfs)
        """
        # make sure the TRex client library is found first on sys.path,
        # removing any duplicate entry so the path does not keep growing
        trex_lib_path = get_nsb_option('trex_client_lib')
        sys.path[:] = list(
            chain([trex_lib_path],
                  (x for x in sys.path if x != trex_lib_path)))

        if scenario_cfg is None:
            scenario_cfg = self.scenario_cfg

        if context_cfg is None:
            context_cfg = self.context_cfg

        vnfs = []
        # we assume OrderedDict for consistency in instantiation
        for node_name, node in context_cfg["nodes"].items():
            LOG.debug(node)
            try:
                file_name = node["VNF model"]
            except KeyError:
                # nodes without a VNF model (e.g. plain servers) are skipped
                LOG.debug("no model for %s, skipping", node_name)
                continue
            file_path = scenario_cfg['task_path']
            with utils.open_relative_file(file_name, file_path) as stream:
                vnf_model = stream.read()
            # render the VNF descriptor template with this node's values
            vnfd = vnfdgen.generate_vnfd(vnf_model, node)
            # TODO: here add extra context_cfg["nodes"] regardless of template
            vnfd = vnfd["vnfd:vnfd-catalog"]["vnfd"][0]
            # force inject pkey if it exists
            # we want to standardize Heat using pkey as a string so we don't rely
            # on the filesystem
            try:
                vnfd['mgmt-interface']['pkey'] = node['pkey']
            except KeyError:
                pass
            self.create_interfaces_from_node(vnfd, node)
            vnf_impl = self.get_vnf_impl(vnfd['id'])
            vnf_instance = vnf_impl(node_name, vnfd, scenario_cfg['task_id'])
            vnfs.append(vnf_instance)

        self.vnfs = vnfs
        return vnfs

    def setup(self):
        """Setup infrastructure, provision VNFs & start traffic.

        Instantiates all VNFs (terminating them all if any instantiation
        fails), generates pod.yaml, starts the KPI collector and finally
        starts traffic, recording each traffic generator's MQ producer ID.
        """
        # 1. Verify if infrastructure mapping can meet topology
        self.map_topology_to_infrastructure()
        # 1a. Load VNF models
        self.load_vnf_models()
        # 1b. Fill traffic profile with information from topology
        self._fill_traffic_profile()

        # 2. Provision VNFs

        # link events will cause VNF application to exit
        # so we should start traffic runners before VNFs
        traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
        non_traffic_runners = [
            vnf for vnf in self.vnfs if not vnf.runs_traffic
        ]
        try:
            for vnf in chain(traffic_runners, non_traffic_runners):
                LOG.info("Instantiating %s", vnf.name)
                vnf.instantiate(self.scenario_cfg, self.context_cfg)
                LOG.info("Waiting for %s to instantiate", vnf.name)
                vnf.wait_for_instantiate()
        except BaseException:
            # BaseException (not a bare except) so even SystemExit /
            # KeyboardInterrupt during instantiation still triggers VNF
            # cleanup before the exception propagates
            LOG.exception("VNF instantiation failed, terminating VNFs")
            for vnf in self.vnfs:
                vnf.terminate()
            raise

        # we have to generate pod.yaml here after VNF has probed so we know vpci and driver
        self._generate_pod_yaml()

        # 3. Run experiment
        # Start listeners first to avoid losing packets
        for traffic_gen in traffic_runners:
            traffic_gen.listen_traffic(self.traffic_profile)

        # register collector with yardstick for KPI collection.
        self.collector = Collector(self.vnfs,
                                   context_base.Context.get_physical_nodes())
        self.collector.start()

        # Start the actual traffic
        for traffic_gen in traffic_runners:
            LOG.info("Starting traffic on %s", traffic_gen.name)
            traffic_gen.run_traffic(self.traffic_profile)
            self._mq_ids.append(traffic_gen.get_mq_producer_id())

    def get_mq_ids(self):  # pragma: no cover
        """Return stored MQ producer IDs.

        The IDs are collected by setup() from each traffic generator
        after its traffic is started.
        """
        return self._mq_ids

    def run(self, result):  # yardstick API
        """ Yardstick calls run() at intervals defined in the yaml and
            produces timestamped samples

        :param result: dictionary with results to update
        :return: None
        """

        # this is the only method that is checked by the runner
        # so if we have any fatal error it must be raised via these methods
        # otherwise we will not terminate

        # KPIs come from the Collector started in setup()
        result.update(self.collector.get_kpi())

    def teardown(self):
        """Stop the KPI collector and terminate every VNF & TG instance.

        :return
        """

        try:
            try:
                self.collector.stop()
                stopped = []
                for vnf in self.vnfs:
                    LOG.info("Stopping %s", vnf.name)
                    vnf.terminate()
                    stopped.append(vnf.name)
                LOG.debug("all VNFs terminated: %s", ", ".join(stopped))
            finally:
                # always reap child processes, even if a terminate() failed
                terminate_children()
        except Exception:
            # catch any exception in teardown and convert to simple exception
            # never pass exceptions back to multiprocessing, because some exceptions can
            # be unpicklable
            # https://bugs.python.org/issue9400
            LOG.exception("")
            raise RuntimeError("Error in teardown")

    def pre_run_wait_time(self, time_seconds):  # pragma: no cover
        """Sleep for *time_seconds* before the run method executes."""
        time.sleep(time_seconds)

    def post_run_wait_time(self, time_seconds):  # pragma: no cover
        """Time waited after executing the run method; intentionally a no-op."""
        pass
예제 #4
0
class NetworkServiceTestCase(base.Scenario):
    """Class handles Generic framework to do pre-deployment VNF &
       Network service testing  """

    __scenario_type__ = "NSPerf"

    def __init__(self, scenario_cfg, context_cfg):  # Yardstick API
        super(NetworkServiceTestCase, self).__init__()
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg

        # fixme: create schema to validate all fields have been provided
        with open_relative_file(scenario_cfg["topology"],
                                scenario_cfg['task_path']) as stream:
            topology_yaml = yaml_load(stream)

        self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0]
        self.vnfs = []
        self.collector = None
        self.traffic_profile = None
        # cache of per-node netdev info so we only SSH-probe each node once
        self.node_netdevs = {}

    def _get_ip_flow_range(self, ip_start_range):
        """Resolve a flow IP spec to an 'x.x.x.x-y.y.y.y' range string.

        The spec is either already a range string, or a single-entry dict
        mapping a node name to an interface name (or a literal range).
        """
        # IP range is specified as 'x.x.x.x-y.y.y.y'
        if isinstance(ip_start_range, six.string_types):
            return ip_start_range

        node_name, range_or_interface = next(iter(ip_start_range.items()), (None, '0.0.0.0'))
        if node_name is None:
            # we are manually specifying the range
            ip_addr_range = range_or_interface
        else:
            node = self.context_cfg["nodes"].get(node_name, {})
            try:
                # the ip_range is the interface name
                interface = node.get("interfaces", {})[range_or_interface]
            except KeyError:
                ip = "0.0.0.0"
                mask = "255.255.255.0"
            else:
                ip = interface["local_ip"]
                # we can't default these values, they must both exist to be valid
                mask = interface["netmask"]

            ipaddr = ipaddress.ip_network(six.text_type('{}/{}'.format(ip, mask)), strict=False)
            hosts = list(ipaddr.hosts())
            if len(hosts) > 2:
                # skip the first host in case of gateway
                ip_addr_range = "{}-{}".format(hosts[1], hosts[-1])
            else:
                LOG.warning("Only single IP in range %s", ipaddr)
                # fall back to single IP range
                ip_addr_range = ip
        return ip_addr_range

    def _get_traffic_flow(self):
        """Build the flow dict for the traffic profile from scenario options."""
        flow = {}
        try:
            # TODO: should be .0  or .1 so we can use list
            # but this also roughly matches uplink_0, downlink_0
            fflow = self.scenario_cfg["options"]["flow"]
            for index, src in enumerate(fflow.get("src_ip", [])):
                flow["src_ip_{}".format(index)] = self._get_ip_flow_range(src)

            for index, dst in enumerate(fflow.get("dst_ip", [])):
                flow["dst_ip_{}".format(index)] = self._get_ip_flow_range(dst)

            for index, publicip in enumerate(fflow.get("public_ip", [])):
                flow["public_ip_{}".format(index)] = publicip

            for index, src_port in enumerate(fflow.get("src_port", [])):
                flow["src_port_{}".format(index)] = src_port

            for index, dst_port in enumerate(fflow.get("dst_port", [])):
                flow["dst_port_{}".format(index)] = dst_port

            flow["count"] = fflow["count"]
        except KeyError:
            # missing "options"/"flow"/"count" means no flow configuration
            flow = {}
        return {"flow": flow}

    def _get_traffic_imix(self):
        """Return {"imix": framesize options} or {} when not configured."""
        try:
            imix = {"imix": self.scenario_cfg['options']['framesize']}
        except KeyError:
            imix = {}
        return imix

    def _get_traffic_profile(self):
        """Read the traffic profile template text, relative to the task path."""
        profile = self.scenario_cfg["traffic_profile"]
        path = self.scenario_cfg["task_path"]
        with open_relative_file(profile, path) as infile:
            return infile.read()

    def _fill_traffic_profile(self):
        """Render the traffic profile template and instantiate it.

        Side effect: stores the TrafficProfile on self.traffic_profile.
        """
        traffic_mapping = self._get_traffic_profile()
        traffic_map_data = {
            'flow': self._get_traffic_flow(),
            'imix': self._get_traffic_imix(),
            TrafficProfile.UPLINK: {},
            TrafficProfile.DOWNLINK: {},
        }

        traffic_vnfd = vnfdgen.generate_vnfd(traffic_mapping, traffic_map_data)
        self.traffic_profile = TrafficProfile.get(traffic_vnfd)
        return self.traffic_profile

    def _find_vnf_name_from_id(self, vnf_id):
        """Map a member-vnf-index to its vnfd-id-ref name, or None."""
        return next((vnfd["vnfd-id-ref"]
                     for vnfd in self.topology["constituent-vnfd"]
                     if vnf_id == vnfd["member-vnf-index"]), None)

    @staticmethod
    def get_vld_networks(networks):
        """Index context networks by vld_id, falling back to network name."""
        # network name is vld_id
        vld_map = {}
        for name, n in networks.items():
            try:
                vld_map[n['vld_id']] = n
            except KeyError:
                vld_map[name] = n
        return vld_map

    @staticmethod
    def find_node_if(nodes, name, if_name, vld_id):
        """Return the node's interface dict, renaming a vld_id key to if_name.

        :raises KeyError: when neither if_name nor vld_id exists on the node
        """
        try:
            # check for xe0, xe1
            intf = nodes[name]["interfaces"][if_name]
        except KeyError:
            # if not xe0, then maybe vld_id,  uplink_0, downlink_0
            # pop it and re-insert with the correct name from topology
            intf = nodes[name]["interfaces"].pop(vld_id)
            nodes[name]["interfaces"][if_name] = intf
        return intf

    def _resolve_topology(self):
        """Cross-link each vld's two endpoint interfaces inside context_cfg.

        :raises IncorrectConfig: on malformed topology or missing interfaces
        """
        for vld in self.topology["vld"]:
            try:
                node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise IncorrectConfig("Topology file corrupted, "
                                      "wrong endpoint count for connection")

            node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])

            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]

            try:
                nodes = self.context_cfg["nodes"]
                node0_if = self.find_node_if(nodes, node0_name, node0_if_name, vld["id"])
                node1_if = self.find_node_if(nodes, node1_name, node1_if_name, vld["id"])

                # names so we can do reverse lookups
                node0_if["ifname"] = node0_if_name
                node1_if["ifname"] = node1_if_name

                node0_if["node_name"] = node0_name
                node1_if["node_name"] = node1_name

                node0_if["vld_id"] = vld["id"]
                node1_if["vld_id"] = vld["id"]

                # set peer name
                node0_if["peer_name"] = node1_name
                node1_if["peer_name"] = node0_name

                # set peer interface name
                node0_if["peer_ifname"] = node1_if_name
                node1_if["peer_ifname"] = node0_if_name

                # just load the network
                vld_networks = self.get_vld_networks(self.context_cfg["networks"])
                node0_if["network"] = vld_networks.get(vld["id"], {})
                node1_if["network"] = vld_networks.get(vld["id"], {})

                node0_if["dst_mac"] = node1_if["local_mac"]
                node0_if["dst_ip"] = node1_if["local_ip"]

                node1_if["dst_mac"] = node0_if["local_mac"]
                node1_if["dst_ip"] = node0_if["local_ip"]

            except KeyError:
                LOG.exception("")
                raise IncorrectConfig("Required interface not found, "
                                      "topology file corrupted")

        # second pass: attach a (non-circular) copy of each peer interface
        for vld in self.topology['vld']:
            try:
                node0_data, node1_data = vld["vnfd-connection-point-ref"]
            except (ValueError, TypeError):
                raise IncorrectConfig("Topology file corrupted, "
                                      "wrong endpoint count for connection")

            node0_name = self._find_vnf_name_from_id(node0_data["member-vnf-index-ref"])
            node1_name = self._find_vnf_name_from_id(node1_data["member-vnf-index-ref"])

            node0_if_name = node0_data["vnfd-connection-point-ref"]
            node1_if_name = node1_data["vnfd-connection-point-ref"]

            nodes = self.context_cfg["nodes"]
            node0_if = self.find_node_if(nodes, node0_name, node0_if_name, vld["id"])
            node1_if = self.find_node_if(nodes, node1_name, node1_if_name, vld["id"])

            # add peer interface dict, but remove circular link
            # TODO: don't waste memory
            node0_copy = node0_if.copy()
            node1_copy = node1_if.copy()
            node0_if["peer_intf"] = node1_copy
            node1_if["peer_intf"] = node0_copy

    def _find_vnfd_from_vnf_idx(self, vnf_idx):
        """Return the constituent-vnfd entry for a member-vnf-index, or None."""
        return next((vnfd for vnfd in self.topology["constituent-vnfd"]
                     if vnf_idx == vnfd["member-vnf-index"]), None)

    def _update_context_with_topology(self):
        """Merge each constituent vnfd's fields into its context node."""
        for vnfd in self.topology["constituent-vnfd"]:
            vnf_idx = vnfd["member-vnf-index"]
            vnf_name = self._find_vnf_name_from_id(vnf_idx)
            vnfd = self._find_vnfd_from_vnf_idx(vnf_idx)
            self.context_cfg["nodes"][vnf_name].update(vnfd)

    def _probe_netdevs(self, node, node_dict, timeout=120):
        """SSH into a node and collect its netdev info from sysfs (cached).

        :raises IncorrectSetup: when the ip tool or sysfs info is missing
        """
        try:
            return self.node_netdevs[node]
        except KeyError:
            pass

        netdevs = {}
        cmd = "PATH=$PATH:/sbin:/usr/sbin ip addr show"

        with SshManager(node_dict, timeout=timeout) as conn:
            if conn:
                exit_status = conn.execute(cmd)[0]
                if exit_status != 0:
                    raise IncorrectSetup("Node's %s lacks ip tool." % node)
                exit_status, stdout, _ = conn.execute(
                    self.FIND_NETDEVICE_STRING)
                if exit_status != 0:
                    # BUG FIX: the original format string had no %s
                    # placeholder, so raising it crashed with TypeError
                    raise IncorrectSetup(
                        "Cannot find netdev info in sysfs on node %s" % node)
                netdevs = node_dict['netdevs'] = self.parse_netdev_info(stdout)

        self.node_netdevs[node] = netdevs
        return netdevs

    @classmethod
    def _probe_missing_values(cls, netdevs, network):
        """Fill driver/vpci/ifindex on a network from a MAC-matched netdev."""
        mac_lower = network['local_mac'].lower()
        for netdev in netdevs.values():
            if netdev['address'].lower() != mac_lower:
                continue
            network.update({
                'driver': netdev['driver'],
                'vpci': netdev['pci_bus_id'],
                'ifindex': netdev['ifindex'],
            })

    def _generate_pod_yaml(self):
        """Dump the current nodes/networks context to pod-<task_id>.yaml."""
        context_yaml = os.path.join(LOG_DIR, "pod-{}.yaml".format(self.scenario_cfg['task_id']))
        # convert OrderedDict to a list
        # pod.yaml nodes is a list
        nodes = [self._serialize_node(node) for node in self.context_cfg["nodes"].values()]
        pod_dict = {
            "nodes": nodes,
            "networks": self.context_cfg["networks"]
        }
        with open(context_yaml, "w") as context_out:
            yaml.safe_dump(pod_dict, context_out, default_flow_style=False,
                           explicit_start=True)

    @staticmethod
    def _serialize_node(node):
        """Return a YAML-safe copy of a node dict (name de-suffixed, pkey str)."""
        new_node = copy.deepcopy(node)
        # name field is required
        # remove context suffix
        new_node["name"] = node['name'].split('.')[0]
        try:
            new_node["pkey"] = ssh.convert_key_to_str(node["pkey"])
        except KeyError:
            pass
        return new_node

    # fields every topology interface must provide (or be probed for)
    TOPOLOGY_REQUIRED_KEYS = frozenset({
        "vpci", "local_ip", "netmask", "local_mac", "driver"})

    def map_topology_to_infrastructure(self):
        """ This method should verify if the available resources defined in pod.yaml
        match the topology.yaml file.

        :return: None. Side effect: context_cfg is updated
        """
        num_nodes = len(self.context_cfg["nodes"])
        # OpenStack instance creation time is probably proportional to the number
        # of instances
        timeout = 120 * num_nodes
        for node, node_dict in self.context_cfg["nodes"].items():

            for network in node_dict["interfaces"].values():
                missing = self.TOPOLOGY_REQUIRED_KEYS.difference(network)
                if not missing:
                    continue

                # only ssh probe if there are missing values
                # ssh probe won't work on Ixia, so we had better define all our values
                try:
                    netdevs = self._probe_netdevs(node, node_dict, timeout=timeout)
                except (SSHError, SSHTimeout):
                    raise IncorrectConfig(
                        "Unable to probe missing interface fields '%s', on node %s "
                        "SSH Error" % (', '.join(missing), node))
                try:
                    self._probe_missing_values(netdevs, network)
                except KeyError:
                    pass
                else:
                    missing = self.TOPOLOGY_REQUIRED_KEYS.difference(
                        network)
                if missing:
                    raise IncorrectConfig(
                        "Require interface fields '%s' not found, topology file "
                        "corrupted" % ', '.join(missing))

        # we have to generate pod.yaml here so we have vpci and driver
        self._generate_pod_yaml()
        # 3. Use topology file to find connections & resolve dest address
        self._resolve_topology()
        self._update_context_with_topology()

    FIND_NETDEVICE_STRING = r"""find /sys/devices/pci* -type d -name net -exec sh -c '{ grep -sH ^ \
$1/ifindex $1/address $1/operstate $1/device/vendor $1/device/device \
$1/device/subsystem_vendor $1/device/subsystem_device ; \
printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
' sh  \{\}/* \;
"""
    BASE_ADAPTER_RE = re.compile(
        '^/sys/devices/(.*)/net/([^/]*)/([^:]*):(.*)$', re.M)

    @classmethod
    def parse_netdev_info(cls, stdout):
        """Parse FIND_NETDEVICE_STRING output into {ifname: attrs} dicts."""
        network_devices = defaultdict(dict)
        matches = cls.BASE_ADAPTER_RE.findall(stdout)
        for bus_path, interface_name, name, value in matches:
            dirname, bus_id = os.path.split(bus_path)
            if 'virtio' in bus_id:
                # for some stupid reason VMs include virtio1/
                # in PCI device path
                bus_id = os.path.basename(dirname)
            # remove extra 'device/' from 'device/vendor,
            # device/subsystem_vendor', etc.
            if 'device/' in name:
                name = name.split('/')[1]
            network_devices[interface_name][name] = value
            network_devices[interface_name][
                'interface_name'] = interface_name
            network_devices[interface_name]['pci_bus_id'] = bus_id
        # convert back to regular dict
        return dict(network_devices)

    @classmethod
    def get_vnf_impl(cls, vnf_model_id):
        """ Find the implementing class from vnf_model["vnf"]["name"] field

        :param vnf_model_id: parsed vnfd model ID field
        :return: subclass of GenericVNF
        :raises IncorrectConfig: when no subclass name matches
        """
        import_modules_from_package(
            "yardstick.network_services.vnf_generic.vnf")
        expected_name = vnf_model_id
        classes_found = []

        def impl():
            for name, class_ in ((c.__name__, c) for c in itersubclasses(GenericVNF)):
                if name == expected_name:
                    yield class_
                classes_found.append(name)

        try:
            return next(impl())
        except StopIteration:
            pass

        raise IncorrectConfig("No implementation for %s found in %s" %
                              (expected_name, classes_found))

    @staticmethod
    def create_interfaces_from_node(vnfd, node):
        """Populate vnfd["vdu"][0]["external-interface"] from node interfaces."""
        ext_intfs = vnfd["vdu"][0]["external-interface"] = []
        # have to sort so xe0 goes first
        for intf_name, intf in sorted(node['interfaces'].items()):
            # only interfaces with vld_id are added.
            # Thus there are two layers of filters, only intefaces with vld_id
            # show up in interfaces, and only interfaces with traffic profiles
            # are used by the generators
            if intf.get('vld_id'):
                # force dpdk_port_num to int so we can do reverse lookup
                try:
                    intf['dpdk_port_num'] = int(intf['dpdk_port_num'])
                except KeyError:
                    pass
                ext_intf = {
                    "name": intf_name,
                    "virtual-interface": intf,
                    "vnfd-connection-point-ref": intf_name,
                }
                ext_intfs.append(ext_intf)

    def load_vnf_models(self, scenario_cfg=None, context_cfg=None):
        """ Create VNF objects based on YAML descriptors

        :param scenario_cfg: scenario config dict; defaults to
            self.scenario_cfg when None
        :type scenario_cfg: dict
        :param context_cfg: context config dict with a "nodes" mapping;
            defaults to self.context_cfg when None
        :return: list of instantiated VNF objects (also stored on self.vnfs)
        """
        trex_lib_path = get_nsb_option('trex_client_lib')
        sys.path[:] = list(chain([trex_lib_path], (x for x in sys.path if x != trex_lib_path)))

        if scenario_cfg is None:
            scenario_cfg = self.scenario_cfg

        if context_cfg is None:
            context_cfg = self.context_cfg

        vnfs = []
        # we assume OrderedDict for consistency in instantiation
        for node_name, node in context_cfg["nodes"].items():
            LOG.debug(node)
            try:
                file_name = node["VNF model"]
            except KeyError:
                LOG.debug("no model for %s, skipping", node_name)
                continue
            file_path = scenario_cfg['task_path']
            with open_relative_file(file_name, file_path) as stream:
                vnf_model = stream.read()
            vnfd = vnfdgen.generate_vnfd(vnf_model, node)
            # TODO: here add extra context_cfg["nodes"] regardless of template
            vnfd = vnfd["vnfd:vnfd-catalog"]["vnfd"][0]
            # force inject pkey if it exists
            # we want to standardize Heat using pkey as a string so we don't rely
            # on the filesystem
            try:
                vnfd['mgmt-interface']['pkey'] = node['pkey']
            except KeyError:
                pass
            self.create_interfaces_from_node(vnfd, node)
            vnf_impl = self.get_vnf_impl(vnfd['id'])
            vnf_instance = vnf_impl(node_name, vnfd)
            vnfs.append(vnf_instance)

        self.vnfs = vnfs
        return vnfs

    def setup(self):
        """ Setup infrastructure, provission VNFs & start traffic

        :return:
        """
        # 1. Verify if infrastructure mapping can meet topology
        self.map_topology_to_infrastructure()
        # 1a. Load VNF models
        self.load_vnf_models()
        # 1b. Fill traffic profile with information from topology
        self._fill_traffic_profile()

        # 2. Provision VNFs

        # link events will cause VNF application to exit
        # so we should start traffic runners before VNFs
        traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
        non_traffic_runners = [vnf for vnf in self.vnfs if not vnf.runs_traffic]
        try:
            for vnf in chain(traffic_runners, non_traffic_runners):
                LOG.info("Instantiating %s", vnf.name)
                vnf.instantiate(self.scenario_cfg, self.context_cfg)
                LOG.info("Waiting for %s to instantiate", vnf.name)
                vnf.wait_for_instantiate()
        except BaseException:
            # BaseException (not a bare except) so even SystemExit /
            # KeyboardInterrupt during instantiation still triggers VNF
            # cleanup before the exception propagates
            LOG.exception("VNF instantiation failed, terminating VNFs")
            for vnf in self.vnfs:
                vnf.terminate()
            raise

        # 3. Run experiment
        # Start listeners first to avoid losing packets
        for traffic_gen in traffic_runners:
            traffic_gen.listen_traffic(self.traffic_profile)

        # register collector with yardstick for KPI collection.
        self.collector = Collector(self.vnfs, self.context_cfg["nodes"], self.traffic_profile)
        self.collector.start()

        # Start the actual traffic
        for traffic_gen in traffic_runners:
            LOG.info("Starting traffic on %s", traffic_gen.name)
            traffic_gen.run_traffic(self.traffic_profile)

    def run(self, result):  # yardstick API
        """ Yardstick calls run() at intervals defined in the yaml and
            produces timestamped samples

        :param result: dictionary with results to update
        :return: None
        """

        # this is the only method that is checked by the runner
        # so if we have any fatal error it must be raised via these methods
        # otherwise we will not terminate

        result.update(self.collector.get_kpi())

    def teardown(self):
        """ Stop the collector and terminate VNF & TG instance

        :return
        """

        try:
            try:
                self.collector.stop()
                for vnf in self.vnfs:
                    LOG.info("Stopping %s", vnf.name)
                    vnf.terminate()
                LOG.debug("all VNFs terminated: %s", ", ".join(vnf.name for vnf in self.vnfs))
            finally:
                terminate_children()
        except Exception:
            # catch any exception in teardown and convert to simple exception
            # never pass exceptions back to multiprocessing, because some exceptions can
            # be unpicklable
            # https://bugs.python.org/issue9400
            LOG.exception("")
            raise RuntimeError("Error in teardown")
예제 #5
0
class NetworkServiceTestCase(base.Scenario):
    """Class handles Generic framework to do pre-deployment VNF &
       Network service testing  """

    __scenario_type__ = "NSPerf"

    def __init__(self, scenario_cfg, context_cfg):  # Yardstick API
        """Load and parse the topology descriptor for this scenario.

        :param scenario_cfg: scenario section of the task yaml
        :param context_cfg: context section describing nodes/interfaces
        """
        super(NetworkServiceTestCase, self).__init__()
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg

        # fixme: create schema to validate all fields have been provided
        # safe_load: topology files are plain data; yaml.load without an
        # explicit Loader can construct arbitrary Python objects.
        with open(scenario_cfg["topology"]) as stream:
            self.topology = yaml.safe_load(stream)["nsd:nsd-catalog"]["nsd"][0]
        self.vnfs = []
        self.collector = None
        self.traffic_profile = None

    @classmethod
    def _get_traffic_flow(cls, scenario_cfg):
        """Return the optional flow spec, or {} when absent or unreadable."""
        try:
            with open(scenario_cfg["traffic_options"]["flow"]) as fflow:
                flow = yaml.safe_load(fflow)
        except (KeyError, IOError, OSError):
            # the flow spec is optional; fall back to an empty mapping
            flow = {}
        return flow

    @classmethod
    def _get_traffic_imix(cls, scenario_cfg):
        """Return the optional imix spec, or {} when absent or unreadable."""
        try:
            with open(scenario_cfg["traffic_options"]["imix"]) as fimix:
                imix = yaml.safe_load(fimix)
        except (KeyError, IOError, OSError):
            # the imix spec is optional; fall back to an empty mapping
            imix = {}
        return imix

    @classmethod
    def _get_traffic_profile(cls, scenario_cfg, context_cfg):
        """Read the raw traffic-profile template text.

        Unlike flow/imix the profile is mandatory, so KeyError/IOError/
        OSError deliberately propagate to the caller.

        :return: [template_text, private_dict, public_dict]
        """
        private = {}
        public = {}
        with open(scenario_cfg["traffic_profile"]) as infile:
            traffic_profile_tpl = infile.read()

        return [traffic_profile_tpl, private, public]

    def _fill_traffic_profile(self, scenario_cfg, context_cfg):
        """Render the traffic-profile template with flow/imix data.

        :return: a TrafficProfile instance built from the rendered yaml
        """
        flow = self._get_traffic_flow(scenario_cfg)

        imix = self._get_traffic_imix(scenario_cfg)

        traffic_mapping, private, public = \
            self._get_traffic_profile(scenario_cfg, context_cfg)

        traffic_profile = vnfdgen.generate_vnfd(traffic_mapping, {
            "imix": imix,
            "flow": flow,
            "private": private,
            "public": public
        })

        return TrafficProfile.get(traffic_profile)

    @classmethod
    def _find_vnf_name_from_id(cls, topology, vnf_id):
        """Map a member-vnf-index to its vnfd-id-ref, or None if unknown."""
        return next((vnfd["vnfd-id-ref"]
                     for vnfd in topology["constituent-vnfd"]
                     if vnf_id == vnfd["member-vnf-index"]), None)

    def _resolve_topology(self, context_cfg, topology):
        """Fill vld_id/dst_mac/dst_ip on both endpoints of every link.

        :raises IncorrectConfig: malformed vld entry or missing interface
        """
        for vld in topology["vld"]:
            # point-to-point links only: exactly two endpoints per vld
            if len(vld["vnfd-connection-point-ref"]) > 2:
                raise IncorrectConfig("Topology file corrupted, "
                                      "too many endpoint for connection")

            node_0, node_1 = vld["vnfd-connection-point-ref"]

            node0 = self._find_vnf_name_from_id(topology,
                                                node_0["member-vnf-index-ref"])
            node1 = self._find_vnf_name_from_id(topology,
                                                node_1["member-vnf-index-ref"])

            if0 = node_0["vnfd-connection-point-ref"]
            if1 = node_1["vnfd-connection-point-ref"]

            try:
                nodes = context_cfg["nodes"]
                nodes[node0]["interfaces"][if0]["vld_id"] = vld["id"]
                nodes[node1]["interfaces"][if1]["vld_id"] = vld["id"]

                # each side's destination is the peer's local address
                nodes[node0]["interfaces"][if0]["dst_mac"] = \
                    nodes[node1]["interfaces"][if1]["local_mac"]
                nodes[node0]["interfaces"][if0]["dst_ip"] = \
                    nodes[node1]["interfaces"][if1]["local_ip"]

                nodes[node1]["interfaces"][if1]["dst_mac"] = \
                    nodes[node0]["interfaces"][if0]["local_mac"]
                nodes[node1]["interfaces"][if1]["dst_ip"] = \
                    nodes[node0]["interfaces"][if0]["local_ip"]
            except KeyError:
                raise IncorrectConfig("Required interface not found,"
                                      "topology file corrupted")

    @classmethod
    def _find_list_index_from_vnf_idx(cls, topology, vnf_idx):
        """Return the constituent-vnfd list position for a member index."""
        return next((topology["constituent-vnfd"].index(vnfd)
                     for vnfd in topology["constituent-vnfd"]
                     if vnf_idx == vnfd["member-vnf-index"]), None)

    def _update_context_with_topology(self, context_cfg, topology):
        """Merge each constituent-vnfd entry into its context node dict."""
        for idx in topology["constituent-vnfd"]:
            vnf_idx = idx["member-vnf-index"]
            nodes = context_cfg["nodes"]
            node = self._find_vnf_name_from_id(topology, vnf_idx)
            list_idx = self._find_list_index_from_vnf_idx(topology, vnf_idx)
            nodes[node].update(topology["constituent-vnfd"][list_idx])

    def map_topology_to_infrastructure(self, context_cfg, topology):
        """ This method should verify if the available resources defined in pod.yaml
        match the topology.yaml file.

        :param topology:
        :return: None. Side effect: context_cfg is updated
        :raises IncorrectSetup: a node is missing the `ip` tool
        :raises IncorrectConfig: an interface lacks required fields
        """

        for node, node_dict in context_cfg["nodes"].items():

            cmd = "PATH=$PATH:/sbin:/usr/sbin ip addr show"
            with SshManager(node_dict) as conn:
                exit_status = conn.execute(cmd)[0]
                if exit_status != 0:
                    raise IncorrectSetup("Node's %s lacks ip tool." % node)

                for interface in node_dict["interfaces"]:
                    network = node_dict["interfaces"][interface]
                    keys = [
                        "vpci", "local_ip", "netmask", "local_mac", "driver",
                        "dpdk_port_num"
                    ]
                    missing = set(keys).difference(network)
                    if missing:
                        raise IncorrectConfig("Require interface fields '%s' "
                                              "not found, topology file "
                                              "corrupted" % ', '.join(missing))

        # 3. Use topology file to find connections & resolve dest address
        self._resolve_topology(context_cfg, topology)
        self._update_context_with_topology(context_cfg, topology)

    @classmethod
    def get_vnf_impl(cls, vnf_model):
        """ Find the implementing class from vnf_model["vnf"]["name"] field

        :param vnf_model: dictionary containing a parsed vnfd
        :return: subclass of GenericVNF
        :raises IncorrectConfig: no GenericVNF subclass matches the id
        """
        import_modules_from_package(
            "yardstick.network_services.vnf_generic.vnf")
        expected_name = vnf_model['id']
        impl = (c for c in itersubclasses(GenericVNF)
                if c.__name__ == expected_name)
        try:
            return next(impl)
        except StopIteration:
            # bug fix: %-format the message eagerly; exception constructors
            # do not apply printf-style arguments
            raise IncorrectConfig("No implementation for %s" % expected_name)

    def load_vnf_models(self, context_cfg):
        """ Create VNF objects based on YAML descriptors

        :param context_cfg:
        :return: list of instantiated GenericVNF subclasses, one per node
        """
        vnfs = []
        for node in context_cfg["nodes"]:
            LOG.debug(context_cfg["nodes"][node])
            with open(context_cfg["nodes"][node]["VNF model"]) as stream:
                vnf_model = stream.read()
            # render the descriptor template with the node's own values
            vnfd = vnfdgen.generate_vnfd(vnf_model, context_cfg["nodes"][node])
            vnf_impl = self.get_vnf_impl(vnfd["vnfd:vnfd-catalog"]["vnfd"][0])
            vnf_instance = vnf_impl(vnfd["vnfd:vnfd-catalog"]["vnfd"][0])
            vnf_instance.name = node
            vnfs.append(vnf_instance)

        return vnfs

    def setup(self):
        """ Setup infrastructure, provission VNFs & start traffic

        :return:
        """

        # 1. Verify if infrastructure mapping can meet topology
        self.map_topology_to_infrastructure(self.context_cfg, self.topology)
        # 1a. Load VNF models
        self.vnfs = self.load_vnf_models(self.context_cfg)
        # 1b. Fill traffic profile with information from topology
        self.traffic_profile = self._fill_traffic_profile(
            self.scenario_cfg, self.context_cfg)

        # 2. Provision VNFs
        try:
            for vnf in self.vnfs:
                LOG.info("Instantiating %s", vnf.name)
                vnf.instantiate(self.scenario_cfg, self.context_cfg)
        except RuntimeError:
            # clean up everything that already started before re-raising
            for vnf in self.vnfs:
                vnf.terminate()
            raise

        # 3. Run experiment
        # Start listeners first to avoid losing packets
        traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
        for traffic_gen in traffic_runners:
            traffic_gen.listen_traffic(self.traffic_profile)

        # register collector with yardstick for KPI collection.
        self.collector = Collector(self.vnfs, self.traffic_profile)
        self.collector.start()

        # Start the actual traffic
        for traffic_gen in traffic_runners:
            LOG.info("Starting traffic on %s", traffic_gen.name)
            traffic_gen.run_traffic(self.traffic_profile)

    def run(self, result):  # yardstick API
        """ Yardstick calls run() at intervals defined in the yaml and
            produces timestamped samples

        :param result: dictionary with results to update
        :return: None
        """

        for vnf in self.vnfs:
            # Result example:
            # {"VNF1: { "tput" : [1000, 999] }, "VNF2": { "latency": 100 }}
            LOG.debug("vnf")
            result.update(self.collector.get_kpi(vnf))

    def teardown(self):
        """ Stop the collector and terminate VNF & TG instance

        :return
        """

        self.collector.stop()
        for vnf in self.vnfs:
            LOG.info("Stopping %s", vnf.name)
            vnf.terminate()
예제 #6
0
class NetworkServiceTestCase(base.Scenario):
    """Class handles Generic framework to do pre-deployment VNF &
       Network service testing  """

    __scenario_type__ = "NSPerf"

    def __init__(self, scenario_cfg, context_cfg):  # Yardstick API
        """Load and parse the topology descriptor for this scenario.

        :param scenario_cfg: scenario section of the task yaml
        :param context_cfg: context section describing nodes/interfaces
        """
        super(NetworkServiceTestCase, self).__init__()
        self.scenario_cfg = scenario_cfg
        self.context_cfg = context_cfg

        # fixme: create schema to validate all fields have been provided
        # safe_load: topology files are plain data; yaml.load without an
        # explicit Loader can construct arbitrary Python objects.
        with open_relative_file(scenario_cfg["topology"],
                                scenario_cfg['task_path']) as stream:
            topology_yaml = yaml.safe_load(stream)

        self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0]
        self.vnfs = []
        self.collector = None
        self.traffic_profile = None

    @classmethod
    def _get_traffic_flow(cls, scenario_cfg):
        """Return the optional flow spec, or {} when absent or unreadable."""
        try:
            with open(scenario_cfg["traffic_options"]["flow"]) as fflow:
                flow = yaml.safe_load(fflow)
        except (KeyError, IOError, OSError):
            # the flow spec is optional; fall back to an empty mapping
            flow = {}
        return flow

    @classmethod
    def _get_traffic_imix(cls, scenario_cfg):
        """Return the optional imix spec, or {} when absent or unreadable."""
        try:
            with open(scenario_cfg["traffic_options"]["imix"]) as fimix:
                imix = yaml.safe_load(fimix)
        except (KeyError, IOError, OSError):
            # the imix spec is optional; fall back to an empty mapping
            imix = {}
        return imix

    @classmethod
    def _get_traffic_profile(cls, scenario_cfg, context_cfg):
        """Read the raw traffic-profile template text.

        Unlike flow/imix the profile is mandatory, so KeyError/IOError/
        OSError deliberately propagate to the caller.

        :return: [template_text, private_dict, public_dict]
        """
        private = {}
        public = {}
        with open_relative_file(scenario_cfg["traffic_profile"],
                                scenario_cfg["task_path"]) as infile:
            traffic_profile_tpl = infile.read()

        return [traffic_profile_tpl, private, public]

    def _fill_traffic_profile(self, scenario_cfg, context_cfg):
        """Render the traffic-profile template with flow/imix data.

        :return: a TrafficProfile instance built from the rendered yaml
        """
        flow = self._get_traffic_flow(scenario_cfg)

        imix = self._get_traffic_imix(scenario_cfg)

        traffic_mapping, private, public = \
            self._get_traffic_profile(scenario_cfg, context_cfg)

        traffic_profile = vnfdgen.generate_vnfd(traffic_mapping, {
            "imix": imix,
            "flow": flow,
            "private": private,
            "public": public
        })

        return TrafficProfile.get(traffic_profile)

    @classmethod
    def _find_vnf_name_from_id(cls, topology, vnf_id):
        """Map a member-vnf-index to its vnfd-id-ref, or None if unknown."""
        return next((vnfd["vnfd-id-ref"]
                     for vnfd in topology["constituent-vnfd"]
                     if vnf_id == vnfd["member-vnf-index"]), None)

    def _resolve_topology(self, context_cfg, topology):
        """Fill vld_id/dst_mac/dst_ip on both endpoints of every link.

        :raises IncorrectConfig: malformed vld entry or missing interface
        """
        for vld in topology["vld"]:
            # point-to-point links only: exactly two endpoints per vld
            if len(vld["vnfd-connection-point-ref"]) > 2:
                raise IncorrectConfig("Topology file corrupted, "
                                      "too many endpoint for connection")

            node_0, node_1 = vld["vnfd-connection-point-ref"]

            node0 = self._find_vnf_name_from_id(topology,
                                                node_0["member-vnf-index-ref"])
            node1 = self._find_vnf_name_from_id(topology,
                                                node_1["member-vnf-index-ref"])

            if0 = node_0["vnfd-connection-point-ref"]
            if1 = node_1["vnfd-connection-point-ref"]

            try:
                nodes = context_cfg["nodes"]
                nodes[node0]["interfaces"][if0]["vld_id"] = vld["id"]
                nodes[node1]["interfaces"][if1]["vld_id"] = vld["id"]

                # each side's destination is the peer's local address
                nodes[node0]["interfaces"][if0]["dst_mac"] = \
                    nodes[node1]["interfaces"][if1]["local_mac"]
                nodes[node0]["interfaces"][if0]["dst_ip"] = \
                    nodes[node1]["interfaces"][if1]["local_ip"]

                nodes[node1]["interfaces"][if1]["dst_mac"] = \
                    nodes[node0]["interfaces"][if0]["local_mac"]
                nodes[node1]["interfaces"][if1]["dst_ip"] = \
                    nodes[node0]["interfaces"][if0]["local_ip"]
            except KeyError:
                raise IncorrectConfig("Required interface not found,"
                                      "topology file corrupted")

    @classmethod
    def _find_list_index_from_vnf_idx(cls, topology, vnf_idx):
        """Return the constituent-vnfd list position for a member index."""
        return next((topology["constituent-vnfd"].index(vnfd)
                     for vnfd in topology["constituent-vnfd"]
                     if vnf_idx == vnfd["member-vnf-index"]), None)

    def _update_context_with_topology(self, context_cfg, topology):
        """Merge each constituent-vnfd entry into its context node dict."""
        for idx in topology["constituent-vnfd"]:
            vnf_idx = idx["member-vnf-index"]
            nodes = context_cfg["nodes"]
            node = self._find_vnf_name_from_id(topology, vnf_idx)
            list_idx = self._find_list_index_from_vnf_idx(topology, vnf_idx)
            nodes[node].update(topology["constituent-vnfd"][list_idx])

    @staticmethod
    def _sort_dpdk_port_num(netdevs):
        """Assign each netdev a dpdk_port_num in ascending PCI-bus-id order."""
        # dpdk_port_num is PCI BUS ID ordering, lowest first
        # NOTE(review): enumeration starts at 1 — confirm that port ids are
        # not expected to be 0-based.
        s = sorted(netdevs.values(), key=itemgetter('pci_bus_id'))
        for dpdk_port_num, netdev in enumerate(s, 1):
            netdev['dpdk_port_num'] = dpdk_port_num

    @classmethod
    def _probe_missing_values(cls, netdevs, network, missing):
        """Copy driver/vpci/dpdk_port_num/ifindex from the netdev whose MAC
        matches the interface's local_mac.

        ``missing`` is accepted for interface compatibility; the probe
        always copies the full set of discovered values.
        """
        mac = network['local_mac']
        for netdev in netdevs.values():
            if netdev['address'].lower() == mac.lower():
                network['driver'] = netdev['driver']
                network['vpci'] = netdev['pci_bus_id']
                network['dpdk_port_num'] = netdev['dpdk_port_num']
                network['ifindex'] = netdev['ifindex']

    # every interface in the topology must carry these fields before the
    # VNFs can be provisioned
    TOPOLOGY_REQUIRED_KEYS = frozenset({
        "vpci", "local_ip", "netmask", "local_mac", "driver", "dpdk_port_num"
    })

    def map_topology_to_infrastructure(self, context_cfg, topology):
        """ This method should verify if the available resources defined in pod.yaml
        match the topology.yaml file.

        :param topology:
        :return: None. Side effect: context_cfg is updated
        :raises IncorrectSetup: a node lacks the `ip` tool or netdev sysfs info
        :raises IncorrectConfig: an interface still lacks required fields
            after probing
        """

        for node, node_dict in context_cfg["nodes"].items():

            cmd = "PATH=$PATH:/sbin:/usr/sbin ip addr show"
            with SshManager(node_dict) as conn:
                exit_status = conn.execute(cmd)[0]
                if exit_status != 0:
                    raise IncorrectSetup("Node's %s lacks ip tool." % node)
                exit_status, stdout, _ = conn.execute(
                    self.FIND_NETDEVICE_STRING)
                if exit_status != 0:
                    # bug fix: the original message had no %-placeholder, so
                    # formatting it with `node` raised TypeError instead of
                    # reporting the actual problem
                    raise IncorrectSetup(
                        "Cannot find netdev info in sysfs on node %s" % node)
                netdevs = node_dict['netdevs'] = self.parse_netdev_info(stdout)
                self._sort_dpdk_port_num(netdevs)

                for network in node_dict["interfaces"].values():
                    missing = self.TOPOLOGY_REQUIRED_KEYS.difference(network)
                    if missing:
                        # try to auto-fill the missing fields from sysfs data
                        try:
                            self._probe_missing_values(netdevs, network,
                                                       missing)
                        except KeyError:
                            pass
                        else:
                            missing = self.TOPOLOGY_REQUIRED_KEYS.difference(
                                network)
                        if missing:
                            raise IncorrectConfig(
                                "Require interface fields '%s' "
                                "not found, topology file "
                                "corrupted" % ', '.join(missing))

        # 3. Use topology file to find connections & resolve dest address
        self._resolve_topology(context_cfg, topology)
        self._update_context_with_topology(context_cfg, topology)

    # shell snippet that dumps ifindex/address/driver/... for every PCI
    # network device on the remote node; parsed by parse_netdev_info()
    FIND_NETDEVICE_STRING = r"""find /sys/devices/pci* -type d -name net -exec sh -c '{ grep -sH ^ \
$1/ifindex $1/address $1/operstate $1/device/vendor $1/device/device \
$1/device/subsystem_vendor $1/device/subsystem_device ; \
printf "%s/driver:" $1 ; basename $(readlink -s $1/device/driver); } \
' sh  \{\}/* \;
"""
    BASE_ADAPTER_RE = re.compile(
        '^/sys/devices/(.*)/net/([^/]*)/([^:]*):(.*)$', re.M)

    @classmethod
    def parse_netdev_info(cls, stdout):
        """Parse FIND_NETDEVICE_STRING output into {ifname: attrs} dicts."""
        network_devices = defaultdict(dict)
        matches = cls.BASE_ADAPTER_RE.findall(stdout)
        for bus_path, interface_name, name, value in matches:
            dirname, bus_id = os.path.split(bus_path)
            if 'virtio' in bus_id:
                # for some stupid reason VMs include virtio1/
                # in PCI device path
                bus_id = os.path.basename(dirname)
            # remove extra 'device/' from 'device/vendor,
            # device/subsystem_vendor', etc.
            if 'device/' in name:
                name = name.split('/')[1]
            network_devices[interface_name][name] = value
            network_devices[interface_name]['interface_name'] = interface_name
            network_devices[interface_name]['pci_bus_id'] = bus_id
        # convert back to regular dict
        return dict(network_devices)

    @classmethod
    def get_vnf_impl(cls, vnf_model):
        """ Find the implementing class from vnf_model["vnf"]["name"] field

        :param vnf_model: dictionary containing a parsed vnfd
        :return: subclass of GenericVNF
        :raises IncorrectConfig: no GenericVNF subclass matches the id
        """
        import_modules_from_package(
            "yardstick.network_services.vnf_generic.vnf")
        expected_name = vnf_model['id']
        impl = (c for c in itersubclasses(GenericVNF)
                if c.__name__ == expected_name)
        try:
            return next(impl)
        except StopIteration:
            # bug fix: %-format the message eagerly; exception constructors
            # do not apply printf-style arguments
            raise IncorrectConfig("No implementation for %s" % expected_name)

    def load_vnf_models(self, scenario_cfg, context_cfg):
        """ Create VNF objects based on YAML descriptors

        :param scenario_cfg:
        :type scenario_cfg:
        :param context_cfg:
        :return: list of instantiated GenericVNF subclasses, one per node
        """
        vnfs = []
        for node_name, node in context_cfg["nodes"].items():
            LOG.debug(node)
            with open_relative_file(node["VNF model"],
                                    scenario_cfg['task_path']) as stream:
                vnf_model = stream.read()
            # render the descriptor template with the node's own values
            vnfd = vnfdgen.generate_vnfd(vnf_model, node)
            vnf_impl = self.get_vnf_impl(vnfd["vnfd:vnfd-catalog"]["vnfd"][0])
            vnf_instance = vnf_impl(vnfd["vnfd:vnfd-catalog"]["vnfd"][0])
            vnf_instance.name = node_name
            vnfs.append(vnf_instance)

        return vnfs

    def setup(self):
        """ Setup infrastructure, provission VNFs & start traffic

        :return:
        """
        # 1. Verify if infrastructure mapping can meet topology
        self.map_topology_to_infrastructure(self.context_cfg, self.topology)
        # 1a. Load VNF models
        self.vnfs = self.load_vnf_models(self.scenario_cfg, self.context_cfg)
        # 1b. Fill traffic profile with information from topology
        self.traffic_profile = self._fill_traffic_profile(
            self.scenario_cfg, self.context_cfg)

        # 2. Provision VNFs
        try:
            for vnf in self.vnfs:
                LOG.info("Instantiating %s", vnf.name)
                vnf.instantiate(self.scenario_cfg, self.context_cfg)
        except RuntimeError:
            # clean up everything that already started before re-raising
            for vnf in self.vnfs:
                vnf.terminate()
            raise

        # 3. Run experiment
        # Start listeners first to avoid losing packets
        traffic_runners = [vnf for vnf in self.vnfs if vnf.runs_traffic]
        for traffic_gen in traffic_runners:
            traffic_gen.listen_traffic(self.traffic_profile)

        # register collector with yardstick for KPI collection.
        self.collector = Collector(self.vnfs, self.traffic_profile)
        self.collector.start()

        # Start the actual traffic
        for traffic_gen in traffic_runners:
            LOG.info("Starting traffic on %s", traffic_gen.name)
            traffic_gen.run_traffic(self.traffic_profile)

    def run(self, result):  # yardstick API
        """ Yardstick calls run() at intervals defined in the yaml and
            produces timestamped samples

        :param result: dictionary with results to update
        :return: None
        """

        for vnf in self.vnfs:
            # Result example:
            # {"VNF1: { "tput" : [1000, 999] }, "VNF2": { "latency": 100 }}
            LOG.debug("vnf")
            result.update(self.collector.get_kpi(vnf))

    def teardown(self):
        """ Stop the collector and terminate VNF & TG instance

        :return
        """

        self.collector.stop()
        for vnf in self.vnfs:
            LOG.info("Stopping %s", vnf.name)
            vnf.terminate()