def run_one_scenario(self, scenario_cfg, output_file):
    """run one scenario using context

    Resolves the host/target(s)/nodes named in ``scenario_cfg`` into
    concrete server dicts via the Context registry, then builds and
    starts a runner for the scenario.

    :param scenario_cfg: scenario configuration dict; contains "runner"
        and optionally "host", "target", "targets", "nodes", "options"
    :param output_file: filename handed to the runner for its results
    :returns: the started runner instance
    """
    runner_cfg = scenario_cfg["runner"]
    runner_cfg['output_filename'] = output_file

    options = scenario_cfg.get('options', {})
    scenario_cfg['options'] = self._parse_options(options)

    # TODO support get multi hosts/vms info
    context_cfg = {}
    if "host" in scenario_cfg:
        context_cfg['host'] = Context.get_server(scenario_cfg["host"])

    if "target" in scenario_cfg:
        if is_ip_addr(scenario_cfg["target"]):
            # target given as a raw IP address: no server lookup needed
            context_cfg['target'] = {}
            context_cfg['target']["ipaddr"] = scenario_cfg["target"]
        else:
            context_cfg['target'] = Context.get_server(
                scenario_cfg["target"])
            # use the private IP when host and target live in the same
            # Heat context, the public one otherwise
            if self._is_same_heat_context(scenario_cfg["host"],
                                          scenario_cfg["target"]):
                context_cfg["target"]["ipaddr"] = \
                    context_cfg["target"]["private_ip"]
            else:
                context_cfg["target"]["ipaddr"] = \
                    context_cfg["target"]["ip"]

    if "targets" in scenario_cfg:
        # multiple targets: collect one IP per target into a CSV string
        # NOTE(review): a raw-IP target resets context_cfg['target'] to {},
        # clobbering any server dict resolved for a previous target in the
        # same list -- looks intentional only for homogeneous lists; confirm.
        ip_list = []
        for target in scenario_cfg["targets"]:
            if is_ip_addr(target):
                ip_list.append(target)
                context_cfg['target'] = {}
            else:
                context_cfg['target'] = Context.get_server(target)
                if self._is_same_heat_context(scenario_cfg["host"], target):
                    ip_list.append(context_cfg["target"]["private_ip"])
                else:
                    ip_list.append(context_cfg["target"]["ip"])
        context_cfg['target']['ipaddr'] = ','.join(ip_list)

    if "nodes" in scenario_cfg:
        context_cfg["nodes"] = parse_nodes_with_context(scenario_cfg)
        context_cfg["networks"] = get_networks_from_nodes(
            context_cfg["nodes"])

    runner = base_runner.Runner.get(runner_cfg)

    print("Starting runner of type '%s'" % runner_cfg["type"])
    runner.run(scenario_cfg, context_cfg)

    return runner
def run_one_scenario(task_id, scenario_cfg, output_file):
    """Run one scenario using its context.

    Host/target server names are suffixed with the first 8 characters of
    ``task_id`` so servers belonging to concurrent tasks do not collide.

    :param task_id: UUID string of the owning task
    :param scenario_cfg: scenario configuration dict; contains "runner"
        and optionally "host", "target", "targets", "nodes"
    :param output_file: filename handed to the runner for its results
    :returns: the started runner instance
    """
    runner_cfg = scenario_cfg["runner"]
    runner_cfg['output_filename'] = output_file

    # TODO support get multi hosts/vms info
    context_cfg = {}
    if "host" in scenario_cfg:
        scenario_cfg['host'] = scenario_cfg['host'] + '-' + task_id[:8]
        context_cfg['host'] = Context.get_server(scenario_cfg["host"])

    if "target" in scenario_cfg:
        scenario_cfg['target'] = scenario_cfg['target'] + '-' + task_id[:8]
        if is_ip_addr(scenario_cfg["target"]):
            # target given as a raw IP address: no server lookup needed
            context_cfg['target'] = {}
            context_cfg['target']["ipaddr"] = scenario_cfg["target"]
        else:
            context_cfg['target'] = Context.get_server(scenario_cfg["target"])
            # private IP when host and target share a Heat context
            if _is_same_heat_context(scenario_cfg["host"],
                                     scenario_cfg["target"]):
                context_cfg["target"]["ipaddr"] = \
                    context_cfg["target"]["private_ip"]
            else:
                context_cfg["target"]["ipaddr"] = \
                    context_cfg["target"]["ip"]

    if "targets" in scenario_cfg:
        ip_list = []
        for target in scenario_cfg["targets"]:
            if is_ip_addr(target):
                ip_list.append(target)
                context_cfg['target'] = {}
            else:
                context_cfg['target'] = Context.get_server(target)
                if _is_same_heat_context(scenario_cfg["host"], target):
                    ip_list.append(context_cfg["target"]["private_ip"])
                else:
                    ip_list.append(context_cfg["target"]["ip"])
        context_cfg['target']['ipaddr'] = ','.join(ip_list)

    if "nodes" in scenario_cfg:
        context_cfg["nodes"] = parse_nodes_with_context(scenario_cfg)

    runner = base_runner.Runner.get(runner_cfg)

    # BUGFIX: was a Python 2-only print statement (a SyntaxError under
    # Python 3); use the function form like the rest of the file
    print("Starting runner of type '%s'" % runner_cfg["type"])
    runner.run(scenario_cfg, context_cfg)

    return runner
def test_get_physical_nodes(self):
    """A registered dummy context yields a {None: None} node mapping."""
    dummy_ctx = DummyContextClass()
    self.addCleanup(self._remove_ctx, dummy_ctx)
    nodes = Context.get_physical_nodes()
    self.assertEqual(nodes, {None: None})
def run_one_scenario(scenario_cfg, output_file):
    """Run one scenario using its context.

    :param scenario_cfg: scenario configuration dict; contains "runner"
        and optionally "host", "target", "targets", "nodes"
    :param output_file: filename handed to the runner for its results
    :returns: the started runner instance
    """
    runner_cfg = scenario_cfg["runner"]
    runner_cfg['output_filename'] = output_file

    # TODO support get multi hosts/vms info
    context_cfg = {}
    if "host" in scenario_cfg:
        context_cfg['host'] = Context.get_server(scenario_cfg["host"])

    if "target" in scenario_cfg:
        if is_ip_addr(scenario_cfg["target"]):
            # target given as a raw IP address: no server lookup needed
            context_cfg['target'] = {}
            context_cfg['target']["ipaddr"] = scenario_cfg["target"]
        else:
            context_cfg['target'] = Context.get_server(scenario_cfg["target"])
            # private IP when host and target share a Heat context
            if _is_same_heat_context(scenario_cfg["host"],
                                     scenario_cfg["target"]):
                context_cfg["target"]["ipaddr"] = \
                    context_cfg["target"]["private_ip"]
            else:
                context_cfg["target"]["ipaddr"] = \
                    context_cfg["target"]["ip"]

    if "targets" in scenario_cfg:
        ip_list = []
        for target in scenario_cfg["targets"]:
            if is_ip_addr(target):
                ip_list.append(target)
                context_cfg['target'] = {}
            else:
                context_cfg['target'] = Context.get_server(target)
                if _is_same_heat_context(scenario_cfg["host"], target):
                    ip_list.append(context_cfg["target"]["private_ip"])
                else:
                    ip_list.append(context_cfg["target"]["ip"])
        context_cfg['target']['ipaddr'] = ','.join(ip_list)

    if "nodes" in scenario_cfg:
        context_cfg["nodes"] = parse_nodes_with_context(scenario_cfg)

    runner = base_runner.Runner.get(runner_cfg)

    # BUGFIX: was a Python 2-only print statement (a SyntaxError under
    # Python 3); use the function form like the rest of the file
    print("Starting runner of type '%s'" % runner_cfg["type"])
    runner.run(scenario_cfg, context_cfg)

    return runner
def parse_nodes_with_context(scenario_cfg):
    """Resolve each entry of the scenario's 'nodes' mapping into a
    server dict via the Context registry.
    """
    return {name: Context.get_server(server)
            for name, server in scenario_cfg["nodes"].items()}
def instantiate(self, scenario_cfg, context_cfg):
    """Deploy the VNF application and start it.

    Stores the scenario/context configuration, resolves the NFVI context
    for this VNF's node, deploys the application, sets up resources and
    launches the VNF process.

    :param scenario_cfg: scenario configuration dict
    :param context_cfg: resolved context (hosts/targets/nodes) dict
    """
    self.scenario_helper.scenario_cfg = scenario_cfg
    self.context_cfg = context_cfg
    self.nfvi_context = Context.get_context_from_server(
        self.scenario_helper.nodes[self.name])
    # removed stale commented-out code that nulled nfvi_context
    self.deploy_helper.deploy_vnfs(self.APP_NAME)
    self.resource_helper.setup()
    self._start_vnf()
def parse_nodes_with_context(scenario_cfg):
    """Parse the 'nodes' field in the scenario into server dicts."""
    return {nodename: Context.get_server(spec)
            for nodename, spec in scenario_cfg["nodes"].items()}
def test_get_physical_node_from_server(self, mock_get_ctx):
    """Lookup against a dummy context returns None for the server."""
    dummy_ctx = DummyContextClass()
    self.addCleanup(self._remove_ctx, dummy_ctx)
    mock_get_ctx.return_value = dummy_ctx

    node = Context.get_physical_node_from_server("mock_server")

    mock_get_ctx.assert_called_once()
    self.assertIsNone(node)
def instantiate(self, scenario_cfg, context_cfg):
    """Optionally deploy the VNF application, then start it.

    :param scenario_cfg: scenario configuration dict
    :param context_cfg: resolved context (hosts/targets/nodes) dict
    """
    self.scenario_helper.scenario_cfg = scenario_cfg
    self.context_cfg = context_cfg
    self.nfvi_context = Context.get_context_from_server(
        self.scenario_helper.nodes[self.name])
    # In-band VNF deploy is unsupported (handled by ansible playbooks
    # instead), so only deploy when the scenario explicitly opts in.
    if self.scenario_helper.options.get("vnf_deploy", False):
        self.deploy_helper.deploy_vnfs(self.APP_NAME)
    self.resource_helper.setup()
    self._start_vnf()
def config_context_target(cfg):
    """Fill ``context_cfg['target']`` from ``cfg['target']``.

    A raw IP address is used verbatim as the target's "ipaddr";
    otherwise the named server is resolved via the Context, and its
    private IP is used when host and target share the same context,
    its public IP otherwise.

    :param cfg: dict with at least a 'target' key (and 'host' when the
        target is not a raw IP)
    :raises KeyError: when 'target' (or 'host') is missing from cfg
    """
    # NOTE(review): closure helper -- reads ``context_cfg`` and ``self``
    # from an enclosing method scope; not usable at module level. Confirm
    # the enclosing definition provides both.
    target = cfg['target']
    if is_ip_addr(target):
        context_cfg['target'] = {"ipaddr": target}
    else:
        context_cfg['target'] = Context.get_server(target)
        if self._is_same_context(cfg["host"], target):
            context_cfg['target']["ipaddr"] = context_cfg['target'][
                "private_ip"]
        else:
            context_cfg['target']["ipaddr"] = context_cfg['target'][
                "ip"]
def collect_kpi(self):
    """Collect KPIs for this traffic generator.

    :returns: dict with "physical_node" and "collect_stats"
    """
    physical_node = Context.get_physical_node_from_server(
        self.scenario_helper.nodes[self.name])
    result = {"physical_node": physical_node}

    # verify the tg processes have not exited before collecting stats
    check_if_process_failed(self._tg_process)
    check_if_process_failed(self._traffic_process)

    result["collect_stats"] = self.resource_helper.collect_kpi()
    LOG.debug("%s collect KPIs %s", self.APP_NAME, result)
    return result
def parse_task(self, task_id, task_args=None, task_args_file=None):
    """Parse the task file; return scenarios, contexts and run flags.

    :param task_id: UUID string of the task, used to tag contexts and
        scenarios
    :param task_args: template arguments for rendering the task file
    :param task_args_file: path to a file holding template arguments
    :returns: dict with keys 'scenarios', 'run_in_parallel',
        'meet_precondition', 'contexts' and 'rendered'
    """
    LOG.info("Parsing task config: %s", self.path)

    cfg, rendered = self._render_task(task_args, task_args_file)
    self._check_schema(cfg["schema"], "task")
    meet_precondition = self._check_precondition(cfg)

    # TODO: support one or many contexts? Many would simpler and precise
    # TODO: support hybrid context type
    if "context" in cfg:
        context_cfgs = [cfg["context"]]
    elif "contexts" in cfg:
        context_cfgs = cfg["contexts"]
    else:
        # no context configured: fall back to the no-op Dummy context
        context_cfgs = [{"type": "Dummy"}]

    contexts = []
    for cfg_attrs in context_cfgs:
        # tag each context with the owning task
        cfg_attrs['task_id'] = task_id
        # default to Heat context because we are testing OpenStack
        context_type = cfg_attrs.get("type", "Heat")
        context = Context.get(context_type)
        context.init(cfg_attrs)
        # Update the name in case the context has used the name_suffix
        cfg_attrs['name'] = context.name
        contexts.append(context)

    run_in_parallel = cfg.get("run_in_parallel", False)

    # add tc and task id for influxdb extended tags
    for scenario in cfg["scenarios"]:
        task_name = os.path.splitext(os.path.basename(self.path))[0]
        scenario["tc"] = task_name
        scenario["task_id"] = task_id
        # embed task path into scenario so we can load other files
        # relative to task path
        scenario["task_path"] = os.path.dirname(self.path)

        self._change_node_names(scenario, contexts)

    # TODO we need something better here, a class that represent the file
    return {'scenarios': cfg['scenarios'],
            'run_in_parallel': run_in_parallel,
            'meet_precondition': meet_precondition,
            'contexts': contexts,
            'rendered': rendered}
def instantiate(self, scenario_cfg, context_cfg):
    """Set up resources and launch the traffic-gen server process.

    :param scenario_cfg: scenario configuration dict
    :param context_cfg: resolved context (hosts/targets/nodes) dict
    """
    self.scenario_helper.scenario_cfg = scenario_cfg
    self.resource_helper.update_from_context(
        Context.get_context_from_server(self.scenario_helper.nodes[self.name]),
        self.scenario_helper.nodes[self.name]
    )
    self.resource_helper.setup()
    # must generate_cfg after DPDK bind because we need port number
    self.resource_helper.generate_cfg()

    LOG.info("Starting %s server...", self.APP_NAME)
    name = "{}-{}-{}".format(self.name, self.APP_NAME, os.getpid())
    # run the server in a separate process so this method can return
    self._tg_process = Process(name=name, target=self._start_server)
    self._tg_process.start()
def get_networks_from_nodes(nodes):
    """Build a name->network mapping for every vld_id found on the
    nodes' interfaces.
    """
    networks = {}
    for node in (n for n in nodes.values() if n):
        ifaces = node.get('interfaces', {})
        for iface in ifaces.values():
            vld_id = iface.get('vld_id')
            if vld_id:  # mgmt network doesn't have vld_id
                network = Context.get_network({"vld_id": vld_id})
                if network:
                    networks[network['name']] = network
    return networks
def get_networks_from_nodes(nodes):
    """Build a name->network mapping for every network_name found on
    the nodes' interfaces.
    """
    networks = {}
    for node in (n for n in nodes.values() if n):
        ifaces = node.get('interfaces', {})
        for iface in ifaces.values():
            # vld_id is network_name
            network_name = iface.get('network_name')
            if network_name:
                network = Context.get_network(network_name)
                if network:
                    networks[network['name']] = network
    return networks
def collect_kpi(self):
    """Collect KPIs parsed out of the VNF's stats output.

    :returns: dict with "physical_node", the counters extracted via
        COLLECT_KPI/COLLECT_MAP plus "collect_stats" on a regex match,
        or zeroed packet counters when the stats output did not match
    """
    # we can't get KPIs if the VNF is down
    check_if_process_failed(self._vnf_process, 0.01)
    stats = self.get_stats()
    m = re.search(self.COLLECT_KPI, stats, re.MULTILINE)
    physical_node = Context.get_physical_node_from_server(
        self.scenario_helper.nodes[self.name])
    result = {"physical_node": physical_node}
    if m:
        # COLLECT_MAP maps result keys to regex group names/indices
        result.update({k: int(m.group(v)) for k, v in self.COLLECT_MAP.items()})
        result["collect_stats"] = self.resource_helper.collect_kpi()
    else:
        result.update({"packets_in": 0,
                       "packets_fwd": 0,
                       "packets_dropped": 0})
    LOG.debug("%s collect KPIs %s", self.APP_NAME, result)
    return result
def collect_kpi(self):
    """Collect link statistics via ``ip -s link`` on the router VNF.

    :returns: dict with "physical_node", zeroed packet counters and the
        parsed "link_stats"
    """
    cmd = '/sbin/ip -s link'
    output = self.ssh_helper.execute(cmd)[1]
    link_stats = self.get_stats(output)

    physical_node = Context.get_physical_node_from_server(
        self.scenario_helper.nodes[self.name])

    # packet counters are not derived here; only raw link stats are reported
    result = {"physical_node": physical_node,
              "packets_in": 0,
              "packets_dropped": 0,
              "packets_fwd": 0,
              "link_stats": link_stats}

    LOG.debug("%s collect KPIs %s", "RouterVNF", result)
    return result
def collect_kpi(self):
    """Collect per-core irq statistics, normalized over the test interval.

    Bucket/overflow counters are divided by the elapsed time since the
    previous collection; zero-valued ones are dropped from the result.

    :returns: dict with "physical_node" and (when the resource helper is
        available) "collect_stats"
    """
    # check if the tg processes have exited
    physical_node = Context.get_physical_node_from_server(
        self.scenario_helper.nodes[self.name])
    result = {"physical_node": physical_node}
    for proc in (self._tg_process, self._traffic_process):
        check_if_process_failed(proc)

    if self.resource_helper is None:
        return result

    if self.irq_cores is None:
        self.setup_helper.build_config_file()
        self.irq_cores = self.get_irq_cores()

    data = self.resource_helper.sut.irq_core_stats(self.irq_cores)
    new_data = copy.deepcopy(data)

    self.end_test_time = time.time()
    self.resource_helper.sut.reset_stats()

    if self.start_test_time is None:
        # first collection: no interval to normalize against yet
        new_data = {}
    else:
        test_time = self.end_test_time - self.start_test_time
        for index, item in data.items():
            for counter, value in item.items():
                if counter.startswith("bucket_") or \
                        counter.startswith("overflow"):
                    # BUGFIX: was ``value is 0`` -- identity comparison on
                    # an int is implementation dependent; compare by value.
                    if value == 0:
                        del new_data[index][counter]
                    else:
                        new_data[index][counter] = float(value) / test_time

    self.start_test_time = time.time()

    result["collect_stats"] = new_data
    LOG.debug("%s collect KPIs %s", self.APP_NAME, result)
    return result
def instantiate(self, scenario_cfg, context_cfg):
    """Store configuration, resolve the NFVI context and set up routes.

    :param scenario_cfg: scenario configuration dict
    :param context_cfg: resolved context (hosts/targets/nodes) dict
    """
    self.scenario_helper.scenario_cfg = scenario_cfg
    self.context_cfg = context_cfg
    self.nfvi_context = Context.get_context_from_server(
        self.scenario_helper.nodes[self.name])
    self.configure_routes(self.name, scenario_cfg, context_cfg)
def parse_task(self, task_args=None, task_args_file=None):
    """Parse the task file and return context and scenario instances.

    :param task_args: template arguments for rendering the task file
    :param task_args_file: path to a file holding template arguments
    :returns: (scenarios, run_in_parallel, meet_precondition)
    """
    # BUGFIX: converted Python 2-only print statements (SyntaxError under
    # Python 3) to the function form used elsewhere in the file
    print("Parsing task config:", self.path)

    try:
        kw = {}
        if task_args_file:
            with open(task_args_file) as f:
                kw.update(parse_task_args("task_args_file", f.read()))
        kw.update(parse_task_args("task_args", task_args))
    except TypeError:
        raise TypeError()

    try:
        with open(self.path) as f:
            try:
                input_task = f.read()
                rendered_task = TaskTemplate.render(input_task, **kw)
            except Exception as e:
                print(("Failed to render template:\n%(task)s\n%(err)s\n") % {
                    "task": input_task,
                    "err": e
                })
                raise e
            print(("Input task is:\n%s\n") % rendered_task)
            # NOTE(review): yaml.load without an explicit Loader can run
            # arbitrary constructors on untrusted input; consider
            # yaml.safe_load for task files from untrusted sources.
            cfg = yaml.load(rendered_task)
    except IOError as ioerror:
        sys.exit(ioerror)

    self._check_schema(cfg["schema"], "task")
    meet_precondition = self._check_precondition(cfg)

    # TODO: support one or many contexts? Many would simpler and precise
    # TODO: support hybrid context type
    if "context" in cfg:
        context_cfgs = [cfg["context"]]
    elif "contexts" in cfg:
        context_cfgs = cfg["contexts"]
    else:
        context_cfgs = [{"type": "Dummy"}]

    for cfg_attrs in context_cfgs:
        context_type = cfg_attrs.get("type", "Heat")
        if "Heat" == context_type and "networks" in cfg_attrs:
            # bugfix: if there are more than one network,
            # only add "external_network" on first one.
            # the name of network should follow this rule:
            # test, test2, test3 ...
            # sort network with the length of network's name
            sorted_networks = sorted(cfg_attrs["networks"].keys())
            # config external_network based on env var
            cfg_attrs["networks"][sorted_networks[0]]["external_network"] \
                = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
        context = Context.get(context_type)
        context.init(cfg_attrs)

    run_in_parallel = cfg.get("run_in_parallel", False)

    # add tc and task id for influxdb extended tags
    task_id = str(uuid.uuid4())
    for scenario in cfg["scenarios"]:
        task_name = os.path.splitext(os.path.basename(self.path))[0]
        scenario["tc"] = task_name
        scenario["task_id"] = task_id

    # TODO we need something better here, a class that represent the file
    return cfg["scenarios"], run_in_parallel, meet_precondition
def parse_nodes_with_context(scenario_cfg):
    """Resolve the scenario's 'nodes' entries into server dicts.

    Nodes are processed in sorted-name order so the instantiation order
    is deterministic across runs.
    """
    nodes = scenario_cfg["nodes"]
    resolved = OrderedDict()
    for nodename in sorted(nodes):
        resolved[nodename] = Context.get_server(nodes[nodename])
    return resolved
def parse_task(self, task_id, task_args=None, task_args_file=None):
    """Parse the task file and return context and scenario instances.

    Context and server names are suffixed with the first 8 characters
    of ``task_id`` so concurrent tasks do not collide.

    :param task_id: UUID string of the task
    :param task_args: template arguments for rendering the task file
    :param task_args_file: path to a file holding template arguments
    :returns: (scenarios, run_in_parallel, meet_precondition, contexts)
    """
    print("Parsing task config:", self.path)

    try:
        kw = {}
        if task_args_file:
            with open(task_args_file) as f:
                kw.update(parse_task_args("task_args_file", f.read()))
        kw.update(parse_task_args("task_args", task_args))
    except TypeError:
        raise TypeError()

    try:
        with open(self.path) as f:
            try:
                input_task = f.read()
                rendered_task = TaskTemplate.render(input_task, **kw)
            except Exception as e:
                print("Failed to render template:\n%(task)s\n%(err)s\n" % {
                    "task": input_task,
                    "err": e
                })
                raise e
            print("Input task is:\n%s\n" % rendered_task)
            cfg = yaml_load(rendered_task)
    except IOError as ioerror:
        sys.exit(ioerror)

    self._check_schema(cfg["schema"], "task")
    meet_precondition = self._check_precondition(cfg)

    # TODO: support one or many contexts? Many would simpler and precise
    # TODO: support hybrid context type
    if "context" in cfg:
        context_cfgs = [cfg["context"]]
    elif "contexts" in cfg:
        context_cfgs = cfg["contexts"]
    else:
        # no context configured: fall back to the no-op Dummy context
        context_cfgs = [{"type": "Dummy"}]

    contexts = []
    name_suffix = '-{}'.format(task_id[:8])
    for cfg_attrs in context_cfgs:
        try:
            cfg_attrs['name'] = '{}{}'.format(cfg_attrs['name'],
                                              name_suffix)
        except KeyError:
            pass
        # default to Heat context because we are testing OpenStack
        context_type = cfg_attrs.get("type", "Heat")
        context = Context.get(context_type)
        context.init(cfg_attrs)
        contexts.append(context)

    run_in_parallel = cfg.get("run_in_parallel", False)

    # add tc and task id for influxdb extended tags
    for scenario in cfg["scenarios"]:
        task_name = os.path.splitext(os.path.basename(self.path))[0]
        scenario["tc"] = task_name
        scenario["task_id"] = task_id
        # embed task path into scenario so we can load other files
        # relative to task path
        scenario["task_path"] = os.path.dirname(self.path)

        change_server_name(scenario, name_suffix)

        try:
            # apply the same suffix to node names as to servers
            for node in scenario['nodes']:
                scenario['nodes'][node] += name_suffix
        except KeyError:
            pass

    # TODO we need something better here, a class that represent the file
    return cfg["scenarios"], run_in_parallel, meet_precondition, contexts
def parse_task(self, task_args=None, task_args_file=None):
    """Parse the task file and return context and scenario instances.

    :param task_args: template arguments for rendering the task file
    :param task_args_file: path to a file holding template arguments
    :returns: (scenarios, run_in_parallel, meet_precondition)
    """
    # BUGFIX: converted Python 2-only print statements (SyntaxError under
    # Python 3) to the function form used elsewhere in the file
    print("Parsing task config:", self.path)

    try:
        kw = {}
        if task_args_file:
            with open(task_args_file) as f:
                kw.update(parse_task_args("task_args_file", f.read()))
        kw.update(parse_task_args("task_args", task_args))
    except TypeError:
        raise TypeError()

    try:
        with open(self.path) as f:
            try:
                input_task = f.read()
                rendered_task = TaskTemplate.render(input_task, **kw)
            except Exception as e:
                print(("Failed to render template:\n%(task)s\n%(err)s\n")
                      % {"task": input_task, "err": e})
                raise e
            print(("Input task is:\n%s\n") % rendered_task)
            # NOTE(review): yaml.load without an explicit Loader can run
            # arbitrary constructors on untrusted input; consider
            # yaml.safe_load for task files from untrusted sources.
            cfg = yaml.load(rendered_task)
    except IOError as ioerror:
        sys.exit(ioerror)

    self._check_schema(cfg["schema"], "task")
    meet_precondition = self._check_precondition(cfg)

    # TODO: support one or many contexts? Many would simpler and precise
    # TODO: support hybrid context type
    if "context" in cfg:
        context_cfgs = [cfg["context"]]
    elif "contexts" in cfg:
        context_cfgs = cfg["contexts"]
    else:
        context_cfgs = [{"type": "Dummy"}]

    for cfg_attrs in context_cfgs:
        context_type = cfg_attrs.get("type", "Heat")
        if "Heat" == context_type and "networks" in cfg_attrs:
            # bugfix: if there are more than one network,
            # only add "external_network" on first one.
            # the name of network should follow this rule:
            # test, test2, test3 ...
            # sort network with the length of network's name
            sorted_networks = sorted(cfg_attrs["networks"].keys())
            # config external_network based on env var
            cfg_attrs["networks"][sorted_networks[0]]["external_network"] \
                = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
        context = Context.get(context_type)
        context.init(cfg_attrs)

    run_in_parallel = cfg.get("run_in_parallel", False)

    # add tc and task id for influxdb extended tags
    task_id = str(uuid.uuid4())
    for scenario in cfg["scenarios"]:
        task_name = os.path.splitext(os.path.basename(self.path))[0]
        scenario["tc"] = task_name
        scenario["task_id"] = task_id

    # TODO we need something better here, a class that represent the file
    return cfg["scenarios"], run_in_parallel, meet_precondition
def run_one_scenario(self, scenario_cfg, output_file):
    """run one scenario using context

    Resolves host/target(s)/nodes named in ``scenario_cfg`` (preferring
    the names given under options.server_name, when present) into
    concrete server dicts via the Context registry, then builds and
    starts a runner.

    :param scenario_cfg: scenario configuration dict; contains "runner"
        and optionally "host", "target", "targets", "nodes", "options"
    :param output_file: filename handed to the runner for its results
    :returns: the started runner instance
    """
    runner_cfg = scenario_cfg["runner"]
    runner_cfg['output_filename'] = output_file

    options = scenario_cfg.get('options', {})
    scenario_cfg['options'] = self._parse_options(options)

    # TODO support get multi hosts/vms info
    context_cfg = {}
    server_name = scenario_cfg.get('options', {}).get('server_name', {})

    def config_context_target(cfg):
        """Fill context_cfg['target'] from cfg['target'] (raw IP or
        Context-resolved server); raises KeyError when absent."""
        target = cfg['target']
        if is_ip_addr(target):
            context_cfg['target'] = {"ipaddr": target}
        else:
            context_cfg['target'] = Context.get_server(target)
            # private IP when host and target share the same context
            if self._is_same_context(cfg["host"], target):
                context_cfg['target']["ipaddr"] = context_cfg['target'][
                    "private_ip"]
            else:
                context_cfg['target']["ipaddr"] = context_cfg['target'][
                    "ip"]

    host_name = server_name.get('host', scenario_cfg.get('host'))
    if host_name:
        context_cfg['host'] = Context.get_server(host_name)

    # try options.server_name first, then the scenario itself; the
    # try/else breaks out after the first source that has a 'target'
    for item in [server_name, scenario_cfg]:
        try:
            config_context_target(item)
        except KeyError:
            pass
        else:
            break

    if "targets" in scenario_cfg:
        # multiple targets: collect one IP per target into a CSV string
        ip_list = []
        for target in scenario_cfg["targets"]:
            if is_ip_addr(target):
                ip_list.append(target)
                context_cfg['target'] = {}
            else:
                context_cfg['target'] = Context.get_server(target)
                if self._is_same_context(scenario_cfg["host"], target):
                    ip_list.append(context_cfg["target"]["private_ip"])
                else:
                    ip_list.append(context_cfg["target"]["ip"])
        context_cfg['target']['ipaddr'] = ','.join(ip_list)

    if "nodes" in scenario_cfg:
        context_cfg["nodes"] = parse_nodes_with_context(scenario_cfg)
        context_cfg["networks"] = get_networks_from_nodes(
            context_cfg["nodes"])

    runner = base_runner.Runner.get(runner_cfg)

    LOG.info("Starting runner of type '%s'", runner_cfg["type"])
    runner.run(scenario_cfg, context_cfg)

    return runner