def start(self, ports=None, *args, **kwargs): # pylint: disable=keyword-arg-before-vararg
    """Start the TRex server process on the remote host.

    Frees the TRex sync/async TCP ports, kills any stale ``rex``
    processes, then launches ``t-rex-64`` (without the scapy server)
    using one core per port by default.

    :param ports: unused here; kept for interface compatibility
    """
    # NOTE(ralonsoh): defining keyworded arguments before variable
    # positional arguments is a bug. This function definition doesn't work
    # in Python 2, although it works in Python 3. Reference:
    # https://www.python.org/dev/peps/pep-3102/
    cmd = "sudo fuser -n tcp {0.SYNC_PORT} {0.ASYNC_PORT} -k > /dev/null 2>&1"
    self.ssh_helper.execute(cmd.format(self))
    self.ssh_helper.execute("sudo pkill -9 rex > /dev/null 2>&1")
    # We MUST default to 1 because TRex won't work on single-queue devices with
    # more than one core per port
    # We really should be trying to find the number of queues in the driver,
    # but there doesn't seem to be a way to do this
    # TRex Error: the number of cores should be 1 when the driver
    # support only one tx queue and one rx queue. Please use -c 1
    threads_per_port = try_int(
        self.scenario_helper.options.get("queues_per_port"), 1)
    trex_path = self.ssh_helper.join_bin_path("trex", "scripts")
    path = get_nsb_option("trex_path", trex_path)
    cmd = "./t-rex-64 --no-scapy-server -i -c {} --cfg '{}'".format(
        threads_per_port, self.CONF_FILE)
    if self.scenario_helper.options.get("trex_server_debug"):
        # if there are errors we want to see them
        redir = ""
    else:
        redir = ">/dev/null"
    # we have to sudo cd because the path might be owned by root
    trex_cmd = """sudo bash -c "cd '{}' ; {}" {}""".format(path, cmd, redir)
    LOG.debug(trex_cmd)
    self.ssh_helper.execute(trex_cmd)
def start(self, ports=None, *args, **kwargs):
    """Kill stale TRex processes and launch the TRex server.

    :param ports: unused here; kept for interface compatibility
    """
    # Free the sync/async TCP ports and kill any leftover rex processes.
    kill_ports = "sudo fuser -n tcp {0.SYNC_PORT} {0.ASYNC_PORT} -k > /dev/null 2>&1"
    self.ssh_helper.execute(kill_ports.format(self))
    self.ssh_helper.execute("sudo pkill -9 rex > /dev/null 2>&1")
    # We MUST default to 1 because TRex won't work on single-queue devices with
    # more than one core per port.
    # We really should be trying to find the number of queues in the driver,
    # but there doesn't seem to be a way to do this.
    # TRex Error: the number of cores should be 1 when the driver
    # support only one tx queue and one rx queue. Please use -c 1
    threads_per_port = try_int(
        self.scenario_helper.options.get("queues_per_port"), 1)
    default_scripts = self.ssh_helper.join_bin_path("trex", "scripts")
    trex_dir = get_nsb_option("trex_path", default_scripts)
    server_cmd = "./t-rex-64 --no-scapy-server -i -c {} --cfg '{}'".format(
        threads_per_port, self.CONF_FILE)
    # In debug mode keep stdout visible so errors can be seen.
    if self.scenario_helper.options.get("trex_server_debug"):
        redirect = ""
    else:
        redirect = ">/dev/null"
    # we have to sudo cd because the path might be owned by root
    full_cmd = """sudo bash -c "cd '{}' ; {}" {}""".format(
        trex_dir, server_cmd, redirect)
    LOG.debug(full_cmd)
    self.ssh_helper.execute(full_cmd)
def deploy(self): """don't need to deploy""" # Todo: NFVi deploy (sriov, vswitch, ovs etc) based on the config. if not self.vm_deploy: return self.connection = ssh.SSH.from_node(self.host_mgmt) self.dpdk_nic_bind = provision_tool( self.connection, os.path.join(get_nsb_option("bin_path"), "dpdk-devbind.py")) # Check dpdk/ovs version, if not present install self.check_ovs_dpdk_env() # Todo: NFVi deploy (sriov, vswitch, ovs etc) based on the config. StandaloneContextHelper.install_req_libs(self.connection) self.networks = StandaloneContextHelper.get_nic_details( self.connection, self.networks, self.dpdk_nic_bind) self.setup_ovs() self.start_ovs_serverswitch() self.setup_ovs_bridge_add_flows() self.nodes = self.setup_ovs_dpdk_context() LOG.debug("Waiting for VM to come up...") self.nodes = StandaloneContextHelper.wait_for_vnfs_to_start( self.connection, self.servers, self.nodes)
def __init__(self, name, vnfd, setup_env_helper_type=None,
             resource_helper_type=None):
    """Build the SSH, setup and resource helpers for a traffic generator.

    :param name: VNF name used for the scenario helper
    :param vnfd: VNF descriptor dict
    :param setup_env_helper_type: helper class; defaults to SetupEnvHelper
    :param resource_helper_type: helper class; defaults to ClientResourceHelper
    """
    super(SampleVNFTrafficGen, self).__init__(name, vnfd)
    self.bin_path = get_nsb_option('bin_path', '')
    self.scenario_helper = ScenarioHelper(self.name)
    self.ssh_helper = VnfSshHelper(
        self.vnfd_helper.mgmt_interface, self.bin_path, wait=True)
    # Fall back to the generic helper classes when no specialization given.
    setup_cls = setup_env_helper_type if setup_env_helper_type is not None \
        else SetupEnvHelper
    self.setup_helper = setup_cls(
        self.vnfd_helper, self.ssh_helper, self.scenario_helper)
    resource_cls = resource_helper_type if resource_helper_type is not None \
        else ClientResourceHelper
    self.resource_helper = resource_cls(self.setup_helper)
    # Traffic-generation bookkeeping.
    self.runs_traffic = True
    self.traffic_finished = False
    self._tg_process = None
    self._traffic_process = None
def __init__(self, name, vnfd, task_id, setup_env_helper_type=None,
             resource_helper_type=None):
    """Build helpers and per-run state for a sample VNF.

    :param name: VNF name used for the scenario helper
    :param vnfd: VNF descriptor dict
    :param task_id: task identifier forwarded to the superclass
    :param setup_env_helper_type: helper class; defaults to SetupEnvHelper
    :param resource_helper_type: helper class; defaults to ResourceHelper
    """
    super(SampleVNF, self).__init__(name, vnfd, task_id)
    self.bin_path = get_nsb_option('bin_path', '')
    self.scenario_helper = ScenarioHelper(self.name)
    self.ssh_helper = VnfSshHelper(self.vnfd_helper.mgmt_interface,
                                   self.bin_path)
    # Fall back to the generic helper classes when no specialization given.
    setup_cls = setup_env_helper_type if setup_env_helper_type is not None \
        else SetupEnvHelper
    self.setup_helper = setup_cls(
        self.vnfd_helper, self.ssh_helper, self.scenario_helper)
    self.deploy_helper = SampleVNFDeployHelper(vnfd, self.ssh_helper)
    resource_cls = resource_helper_type if resource_helper_type is not None \
        else ResourceHelper
    self.resource_helper = resource_cls(self.setup_helper)
    # Runtime state filled in while the VNF is instantiated/run.
    self.context_cfg = None
    self.pipeline_kwargs = {}
    self.uplink_ports = None
    self.downlink_ports = None
    # NOTE(esm): make QueueFileWrapper invert-able so that we
    # never have to manage the queues
    self.q_in = Queue()
    self.q_out = Queue()
    self.queue_wrapper = None
    self.run_kwargs = {}
    self.used_drivers = {}
    self.vnf_port_pairs = None
    self._vnf_process = None
def __init__(self, vnfs, nodes, traffic_profile, timeout=3600):
    """Store collection inputs and build per-node resource profiles.

    Only nodes carrying a "collectd" section get a ResourceProfile.

    :param vnfs: VNF instances to collect KPIs from
    :param nodes: mapping of node name -> node dict
    :param traffic_profile: traffic profile in use
    :param timeout: seconds passed to each resource profile (default 3600)
    """
    super(Collector, self).__init__()
    self.traffic_profile = traffic_profile
    self.vnfs = vnfs
    self.nodes = nodes
    self.timeout = timeout
    self.bin_path = get_nsb_option('bin_path', '')
    self.resource_profiles = {}
    for node_name, node in self.nodes.items():
        if node.get("collectd"):
            self.resource_profiles[node_name] = \
                ResourceProfile.make_from_node(node, self.timeout)
def __init__(self, name, vnfd):
    """Build the SSH and vcmtsd setup helpers for this VNF.

    :param name: VNF name used for the scenario helper
    :param vnfd: VNF descriptor dict
    """
    super(VcmtsVNF, self).__init__(name, vnfd)
    self.name = name
    self.bin_path = get_nsb_option('bin_path', '')
    scenario_helper = ScenarioHelper(self.name)
    ssh_helper = VnfSshHelper(self.vnfd_helper.mgmt_interface, self.bin_path)
    self.scenario_helper = scenario_helper
    self.ssh_helper = ssh_helper
    self.setup_helper = VcmtsdSetupEnvHelper(
        self.vnfd_helper, ssh_helper, scenario_helper)
def __init__(self, vnfd):
    """Store the descriptor and default state for a generic VNF.

    :param vnfd: VNF descriptor dict
    """
    super(GenericVNF, self).__init__()
    # fixme: parse this into a structure
    self.vnfd = vnfd
    # List of statistics we can obtain from this VNF
    # - ETSI MANO 6.3.1.1 monitoring_parameter
    self.kpi = self._get_kpi_definition(vnfd)
    # Standard dictionary containing params like thread no, buffer size etc
    self.config = {}
    # Generic VNFs do not generate traffic themselves.
    self.runs_traffic = False
    # name in topology file
    self.name = "vnf__1"
    self.bin_path = get_nsb_option("bin_path", "")
def __init__(self, scenario_cfg, context_cfg):
    """Store scenario/context configs and render the test topology.

    :param scenario_cfg: scenario configuration dict
    :param context_cfg: context configuration dict
    """
    # Yardstick API
    super(NetworkServiceTestCase, self).__init__()
    self.scenario_cfg = scenario_cfg
    self.context_cfg = context_cfg
    self.bin_path = get_nsb_option('bin_path', '')
    self._render_topology()
    # Populated later during setup/run.
    self.vnfs = []
    self.collector = None
    self.traffic_profile = None
    self.node_netdevs = {}
def _start_collectd(self, connection, bin_path):
    """Provision and start collectd (with its AMQP transport) on a node.

    Kills any running collectd, checks that the collectd binary exists
    under the NSB bin path (warns and returns if not), optionally
    downloads the intel_pmu event list, writes the collectd config,
    resets the rabbitmq broker and admin user used to ship stats, and
    finally launches collectd with a raised file-descriptor limit.

    :param connection: SSH connection to the target node
    :param bin_path: ignored — overwritten from the NSB "bin_path" option
    """
    LOG.debug("Starting collectd to collect NFVi stats")
    connection.execute('sudo pkill -x -9 collectd')
    # NOTE(review): the bin_path parameter is immediately overwritten here.
    bin_path = get_nsb_option("bin_path")
    collectd_path = os.path.join(bin_path, "collectd", "sbin", "collectd")
    config_file_path = os.path.join(bin_path, "collectd", "etc")
    exit_status = connection.execute(
        "which %s > /dev/null 2>&1" % collectd_path)[0]
    if exit_status != 0:
        LOG.warning("%s is not present disabling", collectd_path)
        # disable auto-provisioning because it requires Internet access
        # collectd_installer = os.path.join(bin_path, "collectd.sh")
        # provision_tool(connection, collectd)
        # http_proxy = os.environ.get('http_proxy', '')
        # https_proxy = os.environ.get('https_proxy', '')
        # connection.execute("sudo %s '%s' '%s'" % (
        #     collectd_installer, http_proxy, https_proxy))
        return
    if "intel_pmu" in self.plugins:
        LOG.debug("Downloading event list for pmu_stats plugin")
        cmd = 'sudo bash -c \'cd /opt/tempT/pmu-tools/; python event_download_local.py\''
        connection.execute(cmd)
    LOG.debug("Starting collectd to collect NFVi stats")
    # ensure collectd.conf.d exists to avoid error/warning
    connection.execute("sudo mkdir -p /etc/collectd/collectd.conf.d")
    self._prepare_collectd_conf(config_file_path)
    # Reset amqp queue
    LOG.debug("reset and setup amqp to collect data from collectd")
    connection.execute("sudo rm -rf /var/lib/rabbitmq/mnesia/rabbit*")
    connection.execute("sudo service rabbitmq-server start")
    connection.execute("sudo rabbitmqctl stop_app")
    connection.execute("sudo rabbitmqctl reset")
    connection.execute("sudo rabbitmqctl start_app")
    connection.execute("sudo service rabbitmq-server restart")
    LOG.debug(
        "Creating admin user for rabbitmq in order to collect data from collectd")
    connection.execute("sudo rabbitmqctl delete_user guest")
    connection.execute("sudo rabbitmqctl add_user admin admin")
    connection.execute("sudo rabbitmqctl authenticate_user admin admin")
    connection.execute(
        "sudo rabbitmqctl set_permissions -p / admin '.*' '.*' '.*'")
    LOG.debug("Start collectd service..... %s second timeout", self.timeout)
    # intel_pmu plug requires large numbers of files open, so try to set
    # ulimit -n to a large value
    connection.execute("sudo bash -c 'ulimit -n 1000000 ; %s'" % collectd_path,
                       timeout=self.timeout)
    LOG.debug("Done")
def __init__(self, vnfs, contexts_nodes, timeout=3600):
    """Store collection inputs and build per-node resource profiles.

    Profile keys are "<node name>.<context name>"; only nodes with a
    "collectd" section are profiled.

    :param vnfs: VNF instances to collect KPIs from
    :param contexts_nodes: mapping of context name -> list of node dicts
    :param timeout: seconds passed to each resource profile (default 3600)
    """
    super(Collector, self).__init__()
    self.vnfs = vnfs
    self.nodes = contexts_nodes
    self.bin_path = get_nsb_option('bin_path', '')
    self.resource_profiles = {
        ".".join([node['name'], ctx_name]):
            ResourceProfile.make_from_node(node, timeout)
        for ctx_name, nodes in contexts_nodes.items()
        for node in nodes
        if node.get('collectd')
    }
def __init__(self, name, vnfd, setup_env_helper_type=None,
             resource_helper_type=None):
    """Wrap a ProxApproxVnf and mirror its helpers onto this instance.

    :param name: VNF name
    :param vnfd: VNF descriptor dict (deep-copied for the superclass)
    :param setup_env_helper_type: passed through to the wrapped VNF
    :param resource_helper_type: passed through to the wrapped VNF
    """
    # Superclass gets its own deep copy so the wrapped VNF keeps the
    # original descriptor untouched.
    super(ProxIrq, self).__init__(name, copy.deepcopy(vnfd))
    wrapped = ProxApproxVnf(
        name, vnfd, setup_env_helper_type, resource_helper_type)
    self._vnf_wrapper = wrapped
    self.bin_path = get_nsb_option('bin_path', '')
    # Mirror the wrapper's helpers so callers can use them directly.
    self.name = wrapped.name
    self.ssh_helper = wrapped.ssh_helper
    self.setup_helper = wrapped.setup_helper
    self.resource_helper = wrapped.resource_helper
    self.scenario_helper = wrapped.scenario_helper
    self.irq_cores = None
def __init__(self, name, vnfd, setup_env_helper_type=None,
             resource_helper_type=None):
    """Build a Prox traffic-gen wrapper without calling the superclass.

    :param name: VNF name
    :param vnfd: VNF descriptor dict
    :param setup_env_helper_type: passed through to the wrapped VNF
    :param resource_helper_type: passed through to the wrapped VNF
    """
    # don't call superclass, use custom wrapper of ProxApproxVnf
    wrapped = ProxApproxVnf(
        name, vnfd, setup_env_helper_type, resource_helper_type)
    self._vnf_wrapper = wrapped
    self.bin_path = get_nsb_option('bin_path', '')
    # Mirror the wrapper's helpers so callers can use them directly.
    self.name = wrapped.name
    self.ssh_helper = wrapped.ssh_helper
    self.setup_helper = wrapped.setup_helper
    self.resource_helper = wrapped.resource_helper
    self.scenario_helper = wrapped.scenario_helper
    # Traffic-generation bookkeeping.
    self.runs_traffic = True
    self.traffic_finished = False
    self._tg_process = None
    self._traffic_process = None
def _start_server(self):
    """Start the TRex server on the management host over SSH.

    Frees the TRex sync/async ports first, then launches t-rex-64 from
    the configured (or default) trex scripts directory.
    """
    server = ssh.SSH.from_node(self.vnfd["mgmt-interface"])
    server.wait()
    # Release any process still holding the TRex control ports.
    server.execute("fuser -n tcp %s %s -k > /dev/null 2>&1" %
                   (TREX_SYNC_PORT, TREX_ASYNC_PORT))
    default_path = os.path.join(self.bin_path, "trex/scripts")
    path = get_nsb_option("trex_path", default_path)
    server.execute("cd " + path + "; sudo ./t-rex-64 -i > /dev/null 2>&1")
def load_vnf_models(self, scenario_cfg=None, context_cfg=None):
    """ Create VNF objects based on YAML descriptors

    :param scenario_cfg: scenario config; defaults to ``self.scenario_cfg``
    :type scenario_cfg: dict
    :param context_cfg: context config; defaults to ``self.context_cfg``
    :return: list of instantiated VNF objects (also stored in ``self.vnfs``)
    """
    # Put the TRex client library first on sys.path, removing duplicates.
    trex_lib_path = get_nsb_option('trex_client_lib')
    sys.path[:] = list(
        chain([trex_lib_path], (x for x in sys.path if x != trex_lib_path)))
    if scenario_cfg is None:
        scenario_cfg = self.scenario_cfg
    if context_cfg is None:
        context_cfg = self.context_cfg
    vnfs = []
    # we assume OrderedDict for consistency in instantiation
    for node_name, node in context_cfg["nodes"].items():
        LOG.debug(node)
        try:
            file_name = node["VNF model"]
        except KeyError:
            # Nodes without a model (e.g. plain hosts) are skipped.
            LOG.debug("no model for %s, skipping", node_name)
            continue
        file_path = scenario_cfg['task_path']
        with utils.open_relative_file(file_name, file_path) as stream:
            vnf_model = stream.read()
        vnfd = vnfdgen.generate_vnfd(vnf_model, node)
        # TODO: here add extra context_cfg["nodes"] regardless of template
        vnfd = vnfd["vnfd:vnfd-catalog"]["vnfd"][0]
        # force inject pkey if it exists
        # we want to standardize Heat using pkey as a string so we don't rely
        # on the filesystem
        try:
            vnfd['mgmt-interface']['pkey'] = node['pkey']
        except KeyError:
            pass
        self.create_interfaces_from_node(vnfd, node)
        vnf_impl = self.get_vnf_impl(vnfd['id'])
        vnf_instance = vnf_impl(node_name, vnfd, scenario_cfg['task_id'])
        vnfs.append(vnf_instance)
    self.vnfs = vnfs
    return vnfs
def __init__(self):
    """Initialize SR-IOV standalone context state with empty defaults."""
    self.file_path = None
    self.sriov = []
    self.first_run = True
    self.dpdk_devbind = os.path.join(get_nsb_option('bin_path'),
                                     'dpdk-devbind.py')
    # Per-deployment collections, filled in while the context is deployed.
    self.vm_names = []
    self.nfvi_host = []
    self.nodes = []
    self.networks = {}
    self.attrs = {}
    self.drivers = []
    self.vm_flavor = None
    self.servers = None
    self.helper = model.StandaloneContextHelper()
    self.vnf_node = model.Server()
    super(SriovContext, self).__init__()
def start(self, ports=None, *args, **kwargs):
    """Kill stale TRex processes and launch the TRex server.

    :param ports: unused here; kept for interface compatibility
    """
    # Free the sync/async TCP ports and kill any leftover rex processes.
    kill_ports = "sudo fuser -n tcp {0.SYNC_PORT} {0.ASYNC_PORT} -k > /dev/null 2>&1"
    self.ssh_helper.execute(kill_ports.format(self))
    self.ssh_helper.execute("sudo pkill -9 rex > /dev/null 2>&1")
    default_scripts = self.ssh_helper.join_bin_path("trex", "scripts")
    path = get_nsb_option("trex_path", default_scripts)
    server_cmd = "./t-rex-64 -i --cfg '{}'".format(self.CONF_FILE)
    # we have to sudo cd because the path might be owned by root
    trex_cmd = """sudo bash -c "cd '{}' ; {}" >/dev/null""".format(
        path, server_cmd)
    self.ssh_helper.execute(trex_cmd)
def __init__(self, name, vnfd, setup_env_helper_type=None,
             resource_helper_type=None):
    """Wrap a ProxApproxVnf as a traffic generator.

    :param name: VNF name
    :param vnfd: VNF descriptor dict (deep-copied for the superclass)
    :param setup_env_helper_type: passed through to the wrapped VNF
    :param resource_helper_type: passed through to the wrapped VNF
    """
    # Superclass gets its own deep copy so the wrapped VNF keeps the
    # original descriptor untouched.
    super(ProxTrafficGen, self).__init__(name, copy.deepcopy(vnfd))
    wrapped = ProxApproxVnf(
        name, vnfd, setup_env_helper_type, resource_helper_type)
    self._vnf_wrapper = wrapped
    self.bin_path = get_nsb_option('bin_path', '')
    # Mirror the wrapper's helpers so callers can use them directly.
    self.name = wrapped.name
    self.ssh_helper = wrapped.ssh_helper
    self.setup_helper = wrapped.setup_helper
    self.resource_helper = wrapped.resource_helper
    self.scenario_helper = wrapped.scenario_helper
    # Traffic-generation bookkeeping.
    self.runs_traffic = True
    self.traffic_finished = False
    self._tg_process = None
    self._traffic_process = None
def ssh_remote_machine(self):
    """Open the SSH connection to the SR-IOV host and provision dpdk_nic_bind.

    Uses password auth when the first sriov entry requests it, otherwise
    key-based auth on ``self.ssh_port`` (or the default SSH port).
    """
    if self.sriov[0]['auth_type'] == "password":
        self.connection = ssh.SSH(self.user, self.ssh_ip,
                                  password=self.passwd)
    else:
        port = self.ssh_port if self.ssh_port is not None else ssh.DEFAULT_PORT
        self.connection = ssh.SSH(self.user, self.ssh_ip, port=port,
                                  key_filename=self.key_filename)
    # Both branches wait for the connection before provisioning the tool.
    self.connection.wait()
    self.dpdk_nic_bind = provision_tool(
        self.connection,
        os.path.join(get_nsb_option("bin_path"), "dpdk_nic_bind.py"))
def check_ovs_dpdk_env(self):
    """Validate the configured OvS/DPDK version pair and install OvS if absent.

    Cleans up any previous OvS-DPDK state, checks that the requested ovs
    version maps to a compatible dpdk major.minor, and deploys OvS when
    ``ovs-vsctl -V`` does not already report the requested version.

    :raises Exception: when the ovs version is unsupported or its mapped
        dpdk version does not match the requested one
    """
    self.cleanup_ovs_dpdk_env()
    version = self.ovs_properties.get("version", {})
    ovs_ver = version.get("ovs", self.DEFAULT_OVS)
    dpdk_ver = version.get("dpdk", "16.07.2").split('.')
    supported_version = self.SUPPORTED_OVS_TO_DPDK_MAP.get(ovs_ver, None)
    # Only the major.minor components of the dpdk version must agree.
    if supported_version is None or supported_version.split(
            '.')[:2] != dpdk_ver[:2]:
        # NOTE(review): a bare Exception is hard for callers to catch
        # precisely; a dedicated exception type would be preferable —
        # confirm against the project's exceptions module.
        raise Exception(
            "Unsupported ovs '{}'. Please check the config...".format(
                ovs_ver))
    # Non-zero status means the requested version is not installed yet.
    status = self.connection.execute("ovs-vsctl -V | grep -i '%s'"
                                     % ovs_ver)[0]
    if status:
        deploy = OvsDeploy(self.connection,
                           get_nsb_option("bin_path"),
                           self.ovs_properties)
        deploy.ovs_deploy()
def _start_collectd(self, connection, bin_path):
    """Provision and start collectd (with its AMQP transport) on a node.

    Kills any running collectd, checks that the collectd binary exists
    under the NSB bin path (warns and returns if not), writes the
    collectd config, resets the rabbitmq broker and admin user used to
    ship stats, and finally launches collectd.

    :param connection: SSH connection to the target node
    :param bin_path: ignored — overwritten from the NSB "bin_path" option
    """
    LOG.debug("Starting collectd to collect NFVi stats")
    connection.execute('sudo pkill -9 collectd')
    # NOTE: the bin_path parameter is immediately overwritten here.
    bin_path = get_nsb_option("bin_path")
    collectd_path = os.path.join(bin_path, "collectd", "collectd")
    exit_status = connection.execute(
        "which %s > /dev/null 2>&1" % collectd_path)[0]
    if exit_status != 0:
        LOG.warning("%s is not present disabling", collectd_path)
        # disable auto-provisioning because it requires Internet access
        # collectd_installer = os.path.join(bin_path, "collectd.sh")
        # provision_tool(connection, collectd)
        # http_proxy = os.environ.get('http_proxy', '')
        # https_proxy = os.environ.get('https_proxy', '')
        # connection.execute("sudo %s '%s' '%s'" % (
        #     collectd_installer, http_proxy, https_proxy))
        return
    LOG.debug("Starting collectd to collect NFVi stats")
    self._prepare_collectd_conf(bin_path)
    # Reset amqp queue
    LOG.debug("reset and setup amqp to collect data from collectd")
    connection.execute("sudo rm -rf /var/lib/rabbitmq/mnesia/rabbit*")
    connection.execute("sudo service rabbitmq-server start")
    connection.execute("sudo rabbitmqctl stop_app")
    connection.execute("sudo rabbitmqctl reset")
    connection.execute("sudo rabbitmqctl start_app")
    connection.execute("sudo service rabbitmq-server restart")
    # Fixed typo in log message: "amdin" -> "admin".
    LOG.debug(
        "Creating admin user for rabbitmq in order to collect data from collectd")
    connection.execute("sudo rabbitmqctl delete_user guest")
    connection.execute("sudo rabbitmqctl add_user admin admin")
    connection.execute("sudo rabbitmqctl authenticate_user admin admin")
    connection.execute(
        "sudo rabbitmqctl set_permissions -p / admin '.*' '.*' '.*'")
    LOG.debug("Start collectd service.....")
    connection.execute("sudo %s" % collectd_path)
    LOG.debug("Done")
def check_ovs_dpdk_env(self):
    """Validate the configured OvS/DPDK version pair and install OvS if absent.

    :raises exceptions.OVSUnsupportedVersion: when the requested ovs
        version is not in ``SUPPORTED_OVS_TO_DPDK_MAP`` or its mapped
        dpdk major.minor does not match the requested dpdk version
    """
    self.cleanup_ovs_dpdk_env()
    requested = self.ovs_properties.get("version", {})
    ovs_version = requested.get("ovs", self.DEFAULT_OVS)
    dpdk_parts = requested.get("dpdk", "16.07.2").split('.')
    mapped_dpdk = self.SUPPORTED_OVS_TO_DPDK_MAP.get(ovs_version, None)
    # Only the major.minor components of the dpdk version must agree.
    if mapped_dpdk is None or mapped_dpdk.split('.')[:2] != dpdk_parts[:2]:
        raise exceptions.OVSUnsupportedVersion(
            ovs_version=ovs_version,
            ovs_to_dpdk_map=self.SUPPORTED_OVS_TO_DPDK_MAP)
    # Non-zero status means the requested version is not installed yet.
    rc = self.connection.execute(
        "ovs-vsctl -V | grep -i '%s'" % ovs_version)[0]
    if rc:
        deployer = model.OvsDeploy(self.connection,
                                   utils.get_nsb_option("bin_path"),
                                   self.ovs_properties)
        deployer.ovs_deploy()
def load_vnf_models(self, scenario_cfg=None, context_cfg=None):
    """ Create VNF objects based on YAML descriptors

    :param scenario_cfg: scenario config; defaults to ``self.scenario_cfg``
    :type scenario_cfg: dict
    :param context_cfg: context config; defaults to ``self.context_cfg``
    :return: list of instantiated VNF objects (also stored in ``self.vnfs``)
    """
    # Put the TRex client library first on sys.path, removing duplicates.
    trex_lib_path = get_nsb_option('trex_client_lib')
    sys.path[:] = list(
        chain([trex_lib_path], (x for x in sys.path if x != trex_lib_path)))
    if scenario_cfg is None:
        scenario_cfg = self.scenario_cfg
    if context_cfg is None:
        context_cfg = self.context_cfg
    vnfs = []
    # we assume OrderedDict for consistency in instantiation
    for node_name, node in context_cfg["nodes"].items():
        LOG.debug(node)
        file_name = node["VNF model"]
        file_path = scenario_cfg['task_path']
        with open_relative_file(file_name, file_path) as stream:
            vnf_model = stream.read()
        vnfd = vnfdgen.generate_vnfd(vnf_model, node)
        # TODO: here add extra context_cfg["nodes"] regardless of template
        vnfd = vnfd["vnfd:vnfd-catalog"]["vnfd"][0]
        self.update_interfaces_from_node(vnfd, node)
        vnf_impl = self.get_vnf_impl(vnfd['id'])
        vnf_instance = vnf_impl(node_name, vnfd)
        vnfs.append(vnf_instance)
    self.vnfs = vnfs
    return vnfs
def test_get_nsb_option_default(self):
    """An unknown option returns exactly the supplied default object."""
    sentinel = object()
    self.assertIs(utils.get_nsb_option("nosuch", sentinel), sentinel)
def test_get_nsb_option_is_invalid_key(self):
    """An invalid/partial key falls back to the supplied default (None)."""
    result = utils.get_nsb_option("bin", None)
    # assertIsNone is the canonical unittest check for None (identity,
    # not equality) and gives a clearer failure message.
    self.assertIsNone(result)
def test_get_nsb_options(self):
    """A known option ("bin_path") returns the configured NSB root."""
    self.assertEqual(utils.get_nsb_option("bin_path", None),
                     utils.NSB_ROOT)