def parse_plugin(self):
    """Parse the plugin file and return a plugins instance
       and a deployment instance.
    """
    print("Parsing plugin config:", self.path)

    try:
        kw = {}
        with open(self.path) as f:
            try:
                input_plugin = f.read()
                rendered_plugin = TaskTemplate.render(input_plugin, **kw)
            except Exception as e:
                print("Failed to render template:\n%(plugin)s\n%(err)s\n"
                      % {"plugin": input_plugin, "err": e})
                raise e
            print("Input plugin is:\n%s\n" % rendered_plugin)
            cfg = yaml_load(rendered_plugin)
    except IOError as ioerror:
        sys.exit(ioerror)

    self._check_schema(cfg["schema"], "plugin")
    return cfg["plugins"], cfg["deployment"]
def read_config_file(self):
    """Read from config file"""
    with open(self.file_path) as stream:
        LOG.info("Parsing pod file: %s", self.file_path)
        cfg = yaml_load(stream)
    return cfg
def get_param(key, default=''):
    # we have to defer this to runtime so that we can mock
    # os.environ.get in unittests
    conf_file = os.environ.get('CONF_FILE', '/etc/yardstick/yardstick.yaml')

    # don't re-parse yaml for each lookup
    if not CONF:
        # do not use yardstick.common.utils.parse_yaml
        # since yardstick.common.utils creates a logger
        # and so it cannot be imported before this code
        try:
            with open(conf_file) as f:
                value = yaml_load(f)
        except IOError:
            pass
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        else:
            CONF.update(value)

    try:
        return reduce(lambda a, b: a[b], key.split('.'), CONF)
    except KeyError:
        if not default:
            raise
        return default
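# A minimal standalone sketch (not part of the module above) of the dotted-key
# lookup that get_param performs with reduce(); 'sample_conf' and 'lookup' are
# made-up names used only for illustration.
from functools import reduce

sample_conf = {'dir': {'conf': '/etc/yardstick'}}

def lookup(conf, key, default=''):
    # walk the nested mapping one dotted segment at a time
    try:
        return reduce(lambda a, b: a[b], key.split('.'), conf)
    except KeyError:
        if not default:
            raise
        return default

assert lookup(sample_conf, 'dir.conf') == '/etc/yardstick'
assert lookup(sample_conf, 'dir.missing', '/tmp') == '/tmp'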
def _render_task(self, task_args, task_args_file):
    """Render the input task with the given arguments

    :param task_args: (dict) arguments to render the task
    :param task_args_file: (str) file containing the arguments to render
                           the task
    :return: (tuple) parsed task (dict) and rendered task (str)
    """
    try:
        kw = {}
        if task_args_file:
            with open(task_args_file) as f:
                kw.update(parse_task_args('task_args_file', f.read()))
        kw.update(parse_task_args('task_args', task_args))
    except TypeError:
        raise y_exc.TaskRenderArgumentError()

    input_task = None
    try:
        with open(self.path) as f:
            input_task = f.read()
        rendered_task = task_template.TaskTemplate.render(input_task, **kw)
        LOG.debug('Input task is:\n%s', rendered_task)
        parsed_task = yaml_load(rendered_task)
    except (IOError, OSError):
        raise y_exc.TaskReadError(task_file=self.path)
    except Exception:
        raise y_exc.TaskRenderError(input_task=input_task)

    return parsed_task, rendered_task
def _read_config(self):
    # TODO: add some error handling in case of empty or non-existing file
    try:
        with open(self._config_file) as f:
            self._options = yaml_load(f)
    except Exception as e:
        LOG.exception("Failed to load the yaml %s", e)
        raise
def __init__(self, config, context):
    if not BaseOperation.operation_cfgs:
        with open(operation_conf_path) as stream:
            BaseOperation.operation_cfgs = yaml_load(stream)
    self.key = ''
    self._config = config
    self._context = context
    self.intermediate_variables = {}
def build_config(self):
    vnf_cfg = self.scenario_helper.vnf_cfg
    task_path = self.scenario_helper.task_path
    config_file = vnf_cfg.get('file')
    lb_count = vnf_cfg.get('lb_count', 3)
    lb_config = vnf_cfg.get('lb_config', 'SW')
    worker_config = vnf_cfg.get('worker_config', '1C/1T')
    worker_threads = vnf_cfg.get('worker_threads', 3)

    traffic_type = self.scenario_helper.all_options.get('traffic_type', 4)
    traffic_options = {
        'traffic_type': traffic_type,
        'pkt_type': 'ipv%s' % traffic_type,
        'vnf_type': self.VNF_TYPE,
    }

    # read actions/rules from file
    acl_options = None
    acl_file_name = self.scenario_helper.options.get('rules')
    if acl_file_name:
        with utils.open_relative_file(acl_file_name, task_path) as infile:
            acl_options = yaml_loader.yaml_load(infile)

    config_tpl_cfg = utils.find_relative_file(self.DEFAULT_CONFIG_TPL_CFG,
                                              task_path)
    config_basename = posixpath.basename(self.CFG_CONFIG)
    script_basename = posixpath.basename(self.CFG_SCRIPT)
    multiport = MultiPortConfig(self.scenario_helper.topology,
                                config_tpl_cfg,
                                config_basename,
                                self.vnfd_helper,
                                self.VNF_TYPE,
                                lb_count,
                                worker_threads,
                                worker_config,
                                lb_config,
                                self.socket)

    multiport.generate_config()
    if config_file:
        with utils.open_relative_file(config_file, task_path) as infile:
            new_config = ['[EAL]']
            vpci = []
            for port in self.vnfd_helper.port_pairs.all_ports:
                interface = self.vnfd_helper.find_interface(name=port)
                vpci.append(interface['virtual-interface']["vpci"])
            new_config.extend('w = {0}'.format(item) for item in vpci)
            new_config = '\n'.join(new_config) + '\n' + infile.read()
    else:
        with open(self.CFG_CONFIG) as handle:
            new_config = handle.read()
        new_config = self._update_traffic_type(new_config, traffic_options)
        new_config = self._update_packet_type(new_config, traffic_options)

    self.ssh_helper.upload_config_file(config_basename, new_config)
    self.ssh_helper.upload_config_file(
        script_basename,
        multiport.generate_script(self.vnfd_helper,
                                  self.get_flows_config(acl_options)))

    LOG.info("Provision and start the %s", self.APP_NAME)
    self._build_pipeline_kwargs()
    return self.PIPELINE_COMMAND.format(**self.pipeline_kwargs)
def __init__(self, config, context):
    if not BaseAttacker.attacker_cfgs:
        with open(attacker_conf_path) as stream:
            BaseAttacker.attacker_cfgs = yaml_load(stream)

    self._config = config
    self._context = context
    self.data = {}
    self.setup_done = False
def __init__(self, config, context, data):
    if not BaseMonitor.monitor_cfgs:
        with open(monitor_conf_path) as stream:
            BaseMonitor.monitor_cfgs = yaml_load(stream)
    multiprocessing.Process.__init__(self)
    self._config = config
    self._context = context
    self._queue = multiprocessing.Queue()
    self._event = multiprocessing.Event()
    self.monitor_data = data
    self.setup_done = False
def __init__(self, config, context):
    if not BaseAttacker.attacker_cfgs:
        with open(attacker_conf_path) as stream:
            BaseAttacker.attacker_cfgs = yaml_load(stream)

    self._config = config
    self._context = context
    self.data = {}
    self.setup_done = False
    self.intermediate_variables = {}
    self.mandatory = False
def parse_yaml(file_path):
    try:
        with open(file_path) as f:
            value = yaml_load(f)
    except IOError:
        return {}
    except OSError as e:
        if e.errno != errno.EEXIST:
            raise
    else:
        return value
def __init__(self, config, context):
    if not BaseResultChecker.resultchecker_cfgs:
        with open(resultchecker_conf_path) as stream:
            BaseResultChecker.resultchecker_cfgs = yaml_load(stream)
    self.actualResult = object()
    self.expectedResult = object()
    self.success = False

    self._config = config
    self._context = context
    self.setup_done = False
def _parse_testcase(self, testcase_info):
    rendered_testcase = TaskTemplate.render(testcase_info)
    testcase_cfg = yaml_load(rendered_testcase)

    test_precondition = testcase_cfg.get('precondition', {})
    installer_type = test_precondition.get('installer_type', 'all')
    deploy_scenarios = test_precondition.get('deploy_scenarios', 'all')

    description = self._get_description(testcase_cfg)

    return description, installer_type, deploy_scenarios
def __init__(self, scenario_cfg, context_cfg):  # Yardstick API
    super(NetworkServiceTestCase, self).__init__()
    self.scenario_cfg = scenario_cfg
    self.context_cfg = context_cfg

    # fixme: create schema to validate all fields have been provided
    with open_relative_file(scenario_cfg["topology"],
                            scenario_cfg['task_path']) as stream:
        topology_yaml = yaml_load(stream)

    self.topology = topology_yaml["nsd:nsd-catalog"]["nsd"][0]
    self.vnfs = []
    self.collector = None
    self.traffic_profile = None
def __init__(self, scenario_cfg, context_cfg):
    self.scenario_cfg = scenario_cfg
    self.context_cfg = context_cfg
    self.options = self.scenario_cfg.get('options', {})

    server = self.options['server']
    self.server_id = server['id']
    self.host = self._get_current_host_name(self.server_id)

    node_file = os.path.join(consts.YARDSTICK_ROOT_PATH,
                             self.options.get('file'))

    with open(node_file) as f:
        nodes = yaml_load(TaskTemplate.render(f.read()))
    self.nodes = {a['host_name']: a for a in nodes['nodes']}
def upload_pod_file(self, args):
    try:
        pod_file = args['file']
    except KeyError:
        return result_handler(consts.API_ERROR, 'file must be provided')

    LOG.info('Checking file')
    data = yaml_load(pod_file.read())
    if not isinstance(data, collections.Mapping):
        return result_handler(consts.API_ERROR, 'invalid yaml file')

    LOG.info('Writing file')
    with open(consts.POD_FILE, 'w') as f:
        yaml.dump(data, f, default_flow_style=False)
    LOG.info('Writing finished')

    return result_handler(consts.API_SUCCESS, {'pod_info': data})
def parse_suite(self):
    """Parse the suite file and return a list of task config file paths
       and lists of optional parameters if present.
    """
    LOG.info("\nParsing suite file:%s", self.path)

    try:
        with open(self.path) as stream:
            cfg = yaml_load(stream)
    except IOError as ioerror:
        sys.exit(ioerror)

    self._check_schema(cfg["schema"], "suite")
    LOG.info("\nStarting scenario:%s", cfg["name"])

    test_cases_dir = cfg.get("test_cases_dir", test_cases_dir_default)
    test_cases_dir = os.path.join(constants.YARDSTICK_ROOT_PATH,
                                  test_cases_dir)
    if test_cases_dir[-1] != os.sep:
        test_cases_dir += os.sep

    cur_pod = os.environ.get('NODE_NAME', None)
    cur_installer = os.environ.get('INSTALLER_TYPE', None)

    valid_task_files = []
    valid_task_args = []
    valid_task_args_fnames = []

    for task in cfg["test_cases"]:
        # 1. check file_name
        if "file_name" in task:
            task_fname = task.get('file_name', None)
            if task_fname is None:
                continue
        else:
            continue
        # 2. check constraint
        if self._meet_constraint(task, cur_pod, cur_installer):
            valid_task_files.append(test_cases_dir + task_fname)
        else:
            continue
        # 3. fetch task parameters
        task_args, task_args_fnames = self._get_task_para(task, cur_pod)
        valid_task_args.append(task_args)
        valid_task_args_fnames.append(task_args_fnames)

    return valid_task_files, valid_task_args, valid_task_args_fnames
def parse_task_args(src_name, args):
    if isinstance(args, collections.Mapping):
        return args

    try:
        kw = args and yaml_load(args)
        kw = {} if kw is None else kw
    except yaml.parser.ParserError as e:
        print_invalid_header(src_name, args)
        print("%(source)s has to be YAML. Details:\n\n%(err)s\n"
              % {"source": src_name, "err": e})
        raise TypeError()

    if not isinstance(kw, dict):
        print_invalid_header(src_name, args)
        print("%(src)s had to be dict, actually %(src_type)s\n"
              % {"src": src_name, "src_type": type(kw)})
        raise TypeError()
    return kw
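# Illustrative-only sketch of how parse_task_args behaves for common inputs;
# the argument values below are made up and not taken from the source tree:
#
#   parse_task_args("task_args", '{"public_network": "ext-net"}')
#       -> {'public_network': 'ext-net'}   # YAML/JSON string parsed to a dict
#   parse_task_args("task_args", None)
#       -> {}                              # empty input is normalised to {}
#   parse_task_args("task_args", "[1, 2, 3]")
#       -> raises TypeError                # parses as YAML, but is not a mapping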
def upload_pod_file(self, args):
    try:
        upload_file = args['file']
    except KeyError:
        return result_handler(consts.API_ERROR, 'file must be provided')

    try:
        environment_id = args['environment_id']
    except KeyError:
        return result_handler(consts.API_ERROR,
                              'environment_id must be provided')

    try:
        uuid.UUID(environment_id)
    except ValueError:
        return result_handler(consts.API_ERROR, 'invalid environment id')

    LOG.info('writing pod file: %s', consts.POD_FILE)
    upload_file.save(consts.POD_FILE)

    with open(consts.POD_FILE) as f:
        data = yaml_load(TaskTemplate.render(f.read()))
    LOG.debug('pod content is: %s', data)

    LOG.info('create pod in database')
    pod_id = str(uuid.uuid4())
    pod_handler = V2PodHandler()
    pod_init_data = {
        'uuid': pod_id,
        'environment_id': environment_id,
        'content': jsonutils.dumps(data)
    }
    pod_handler.insert(pod_init_data)

    LOG.info('update pod in environment')
    environment_handler = V2EnvironmentHandler()
    environment_handler.update_attr(environment_id, {'pod_id': pod_id})

    return result_handler(consts.API_SUCCESS, {'uuid': pod_id, 'pod': data})
def generate_vnfd(vnf_model, node):
    """
    :param vnf_model: VNF definition template, e.g. tg_ping_tpl.yaml
    :param node: node configuration taken from pod.yaml
    :return: Complete VNF Descriptor that will be taken as input for
             GenericVNF.__init__
    """
    # get is unused as global method inside template
    # node["get"] = key_flatten_get
    node["get"] = deepgetitem
    # Set Node details to default if not defined in pod file
    # we CANNOT use TaskTemplate.render because it does not allow
    # for missing variables, we need to allow password for key_filename
    # to be undefined
    rendered_vnfd = render(vnf_model, **node)
    # This is done to get rid of issues with serializing node
    del node["get"]
    filled_vnfd = yaml_load(rendered_vnfd)
    return filled_vnfd
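# Hedged sketch of what generate_vnfd does with its inputs; the template text
# and node dict below are made-up minimal examples, not real Yardstick files:
#
#   vnf_model = ("vnfd:\n"
#                "  mgmt-interface:\n"
#                "    ip: '{{ ip }}'\n"
#                "    user: '{{ user }}'\n")
#   node = {"ip": "10.0.0.5", "user": "root"}
#
#   generate_vnfd(vnf_model, node)
#   # -> {'vnfd': {'mgmt-interface': {'ip': '10.0.0.5', 'user': 'root'}}}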
def parse_task(self, task_id, task_args=None, task_args_file=None):
    """Parse the task file and return context and scenario instances."""
    print("Parsing task config:", self.path)

    try:
        kw = {}
        if task_args_file:
            with open(task_args_file) as f:
                kw.update(parse_task_args("task_args_file", f.read()))
        kw.update(parse_task_args("task_args", task_args))
    except TypeError:
        raise TypeError()

    try:
        with open(self.path) as f:
            try:
                input_task = f.read()
                rendered_task = TaskTemplate.render(input_task, **kw)
            except Exception as e:
                print("Failed to render template:\n%(task)s\n%(err)s\n"
                      % {"task": input_task, "err": e})
                raise e
            print("Input task is:\n%s\n" % rendered_task)
            cfg = yaml_load(rendered_task)
    except IOError as ioerror:
        sys.exit(ioerror)

    self._check_schema(cfg["schema"], "task")
    meet_precondition = self._check_precondition(cfg)

    # TODO: support one or many contexts? Many would be simpler and more precise
    # TODO: support hybrid context type
    if "context" in cfg:
        context_cfgs = [cfg["context"]]
    elif "contexts" in cfg:
        context_cfgs = cfg["contexts"]
    else:
        context_cfgs = [{"type": "Dummy"}]

    contexts = []
    name_suffix = '-{}'.format(task_id[:8])
    for cfg_attrs in context_cfgs:
        try:
            cfg_attrs['name'] = '{}{}'.format(cfg_attrs['name'],
                                              name_suffix)
        except KeyError:
            pass
        # default to Heat context because we are testing OpenStack
        context_type = cfg_attrs.get("type", "Heat")
        context = Context.get(context_type)
        context.init(cfg_attrs)
        contexts.append(context)

    run_in_parallel = cfg.get("run_in_parallel", False)

    # add tc and task id for influxdb extended tags
    for scenario in cfg["scenarios"]:
        task_name = os.path.splitext(os.path.basename(self.path))[0]
        scenario["tc"] = task_name
        scenario["task_id"] = task_id
        # embed task path into scenario so we can load other files
        # relative to task path
        scenario["task_path"] = os.path.dirname(self.path)

        change_server_name(scenario, name_suffix)

        try:
            for node in scenario['nodes']:
                scenario['nodes'][node] += name_suffix
        except KeyError:
            pass

    # TODO we need something better here, a class that represents the file
    return cfg["scenarios"], run_in_parallel, meet_precondition, contexts
def test_render_unicode_dict(self):
    tmpl = "{{ routing_table }}"
    self.assertEqual(yaml_load(vnfdgen.render(tmpl, **NODE_CFG)),
                     NODE_CFG["routing_table"])
def test_render_none(self):
    tmpl = "{{ routing_table }}"
    self.assertEqual(vnfdgen.render(tmpl, routing_table=None), u'~')
    self.assertEqual(yaml_load(vnfdgen.render(tmpl, routing_table=None)),
                     None)
def read_yaml_file(path):
    """Read yaml file"""
    with open(path) as stream:
        data = yaml_load(stream)
    return data
def test_parse_to_value_exception(self):
    self.assertEqual(yaml_loader.yaml_load("string"), u"string")