def __init__(self, scenario_cfg, context_cfg):
    """Cache scenario/context config, SSH credentials and the node map.

    Reads SSH access details from the context 'host' entry, creates a
    nova client, renders and parses the node file, and resolves the
    controller/compute node lists from the 'host' option.
    """
    self.scenario_cfg = scenario_cfg
    self.context_cfg = context_cfg

    host_cfg = self.context_cfg['host']
    self.user = host_cfg.get('user', 'ubuntu')
    self.port = host_cfg.get("ssh_port", ssh.DEFAULT_PORT)
    self.ip = host_cfg.get('ip')
    self.key_filename = host_cfg.get('key_filename', '/root/.ssh/id_rsa')
    self.password = host_cfg.get('password')

    self.nova_client = openstack_utils.get_nova_client()

    node_file = os.path.join(consts.YARDSTICK_ROOT_PATH,
                             scenario_cfg.get('node_file'))
    with open(node_file) as node_fd:
        nodes = yaml.safe_load(TaskTemplate.render(node_fd.read()))
    self.nodes = {node['name']: node for node in nodes['nodes']}

    opts = self.scenario_cfg.get('options', {})
    requested_hosts = opts.get('host', '').split(',')
    self.controller_nodes = self._get_host_node(requested_hosts, 'Controller')
    self.compute_nodes = self._get_host_node(requested_hosts, 'Compute')
    self.cpu_set = opts.get('cpu_set', '1,2,3,4,5,6')
def parse_plugin(self):
    """parses the plugin file and return a plugins instance
       and a deployment instance
    """
    print("Parsing plugin config:", self.path)
    # no template arguments are supported for plugin files yet
    kw = {}
    try:
        with open(self.path) as plugin_fd:
            try:
                input_plugin = plugin_fd.read()
                rendered_plugin = TaskTemplate.render(input_plugin, **kw)
            except Exception as e:
                print("Failed to render template:\n%(plugin)s\n%(err)s\n"
                      % {"plugin": input_plugin, "err": e})
                raise e
            print("Input plugin is:\n%s\n" % rendered_plugin)
            cfg = yaml.safe_load(rendered_plugin)
    except IOError as ioerror:
        sys.exit(ioerror)
    self._check_schema(cfg["schema"], "plugin")
    return cfg["plugins"], cfg["deployment"]
def parse_plugin(self):
    """Parse the plugin file.

    Renders the file as a task template, loads the result as YAML,
    validates its schema and returns the plugin and deployment sections.

    :returns: tuple of (plugins config, deployment config)
    :raises SystemExit: if the plugin file cannot be opened or read
    """
    # Python 3 compatible print (the old `print x, y` form is a
    # SyntaxError under Python 3); output is unchanged.
    print("Parsing plugin config:", self.path)
    try:
        kw = {}
        with open(self.path) as f:
            try:
                input_plugin = f.read()
                rendered_plugin = TaskTemplate.render(input_plugin, **kw)
            except Exception as e:
                print("Failed to render template:\n%(plugin)s\n%(err)s\n"
                      % {"plugin": input_plugin, "err": e})
                raise e
            print("Input plugin is:\n%s\n" % rendered_plugin)
            # safe_load: plugin files are plain data; plain yaml.load can
            # construct arbitrary Python objects from untrusted input
            cfg = yaml.safe_load(rendered_plugin)
    except IOError as ioerror:
        sys.exit(ioerror)
    self._check_schema(cfg["schema"], "plugin")
    return cfg["plugins"], cfg["deployment"]
def _get_cases_from_suite_file(testsuite):
    """Return a comma-separated string of test case names from a suite file."""
    with open(testsuite) as suite_fd:
        rendered = TaskTemplate.render(suite_fd.read())
    suite_dic = yaml.safe_load(rendered)
    # each entry's 'file_name' is a file name; strip the extension
    names = [os.path.splitext(case['file_name'])[0]
             for case in suite_dic['test_cases']]
    return ','.join(names)
def _parse_testcase(self, testcase_info):
    """Extract the description and preconditions of a test case definition."""
    rendered = TaskTemplate.render(testcase_info)
    cfg = yaml_load(rendered)

    precondition = cfg.get('precondition', {})
    installer_type = precondition.get('installer_type', 'all')
    deploy_scenarios = precondition.get('deploy_scenarios', 'all')

    description = self._get_description(cfg)
    return description, installer_type, deploy_scenarios
def _parse_testcase(self, testcase_info):
    """Extract the description and preconditions of a test case definition.

    :param testcase_info: raw text of the test case file
    :returns: tuple (description, installer_type, deploy_scenarios)
    """
    kw = {}
    rendered_testcase = TaskTemplate.render(testcase_info, **kw)
    # safe_load: test case files are data; plain yaml.load can construct
    # arbitrary Python objects
    testcase_cfg = yaml.safe_load(rendered_testcase)

    # 'precondition' may be absent or explicitly null; fall back to 'all'
    # for both fields in either case
    test_precondition = testcase_cfg.get('precondition') or {}
    installer_type = test_precondition.get('installer_type', 'all')
    deploy_scenarios = test_precondition.get('deploy_scenarios', 'all')

    # NOTE(review): assumes the description is the third line of the file,
    # behind a one-character comment prefix -- fragile, confirm file layout
    description = testcase_info.split("\n")[2][1:].strip()
    return description, installer_type, deploy_scenarios
def __init__(self, scenario_cfg, context_cfg):
    """Record scenario/context config and index pod nodes by host name."""
    self.scenario_cfg = scenario_cfg
    self.context_cfg = context_cfg
    self.options = self.scenario_cfg.get('options', {})

    server = self.options['server']
    self.server_id = server['id']
    self.host = self._get_current_host_name(self.server_id)

    node_file = os.path.join(consts.YARDSTICK_ROOT_PATH,
                             self.options.get('file'))
    with open(node_file) as pod_fd:
        nodes = yaml.safe_load(TaskTemplate.render(pod_fd.read()))
    self.nodes = {node['host_name']: node for node in nodes['nodes']}
def __init__(self, scenario_cfg, context_cfg):
    """Cache scenario/context config and load the pod file node map."""
    self.scenario_cfg = scenario_cfg
    self.context_cfg = context_cfg
    self.options = self.scenario_cfg['options']

    self.server = self.options.get("server")
    self.server_id = self.server["id"]
    self.server_host = self.server["OS-EXT-SRV-ATTR:host"]
    self.connection = None

    pod_file = os.path.join(consts.YARDSTICK_ROOT_PATH,
                            self.options.get("pod_file"))
    with open(pod_file) as pod_fd:
        nodes = yaml.safe_load(TaskTemplate.render(pod_fd.read()))
    self.nodes = {node['host_name']: node for node in nodes['nodes']}

    self.setup_done = False
def generate_vnfd(vnf_model, node):
    """Render a VNF descriptor template with node data.

    :param vnf_model: VNF definition template, e.g. tg_ping_tpl.yaml
    :param node: node configuration taken from pod.yaml
    :return: Complete VNF Descriptor that will be taken as input for
        GenericVNF.__init__
    """
    # expose 'get' as a global helper usable inside the template
    node["get"] = get
    try:
        # Set Node details to default if not defined in pod file
        rendered_vnfd = TaskTemplate.render(vnf_model, **node)
    finally:
        # always remove the helper again so the node dict stays
        # serializable, even when rendering raises
        del node["get"]
    # safe_load: descriptor content is data, never Python object payloads
    filled_vnfd = yaml.safe_load(rendered_vnfd)
    return filled_vnfd
def __init__(self, scenario_cfg, context_cfg):
    """Store scenario/context config, create a nova client, load node file."""
    self.scenario_cfg = scenario_cfg
    self.context_cfg = context_cfg
    self.options = self.scenario_cfg['options']

    self.cpu_set = self.options.get("cpu_set", None)
    self.host_str = self.options.get("host", "node4")
    self.host_list = self.host_str.split(',')

    self.nova_client = op_utils.get_nova_client()
    self.instance = None
    self.client = None

    node_file = os.path.join(consts.YARDSTICK_ROOT_PATH,
                             self.options.get("file"))
    with open(node_file) as node_fd:
        nodes = yaml.safe_load(TaskTemplate.render(node_fd.read()))
    self.nodes = {node['name']: node for node in nodes['nodes']}

    self.setup_done = False
def upload_pod_file(self, args):
    """Validate the request, persist the uploaded pod file and register it.

    The file is written to consts.POD_FILE, rendered and YAML-parsed, the
    parsed content is inserted into the pod table, and the owning
    environment record is updated to reference the new pod.
    """
    try:
        upload_file = args['file']
    except KeyError:
        return result_handler(consts.API_ERROR, 'file must be provided')

    try:
        environment_id = args['environment_id']
    except KeyError:
        return result_handler(consts.API_ERROR,
                              'environment_id must be provided')

    try:
        uuid.UUID(environment_id)
    except ValueError:
        return result_handler(consts.API_ERROR, 'invalid environment id')

    LOG.info('writing pod file: %s', consts.POD_FILE)
    upload_file.save(consts.POD_FILE)

    with open(consts.POD_FILE) as pod_fd:
        data = yaml_load(TaskTemplate.render(pod_fd.read()))
    LOG.debug('pod content is: %s', data)

    LOG.info('create pod in database')
    pod_id = str(uuid.uuid4())
    V2PodHandler().insert({
        'uuid': pod_id,
        'environment_id': environment_id,
        'content': jsonutils.dumps(data),
    })

    LOG.info('update pod in environment')
    V2EnvironmentHandler().update_attr(environment_id, {'pod_id': pod_id})

    return result_handler(consts.API_SUCCESS, {'uuid': pod_id, 'pod': data})
def __init__(self, scenario_cfg, context_cfg):
    """Keep scenario/context config and build the node lookup table."""
    self.scenario_cfg = scenario_cfg
    self.context_cfg = context_cfg
    self.options = self.scenario_cfg['options']

    self.server_id = self.options.get("server_id", None)
    self.node_type = self.options.get("node_type", None)
    # NOTE(review): if the 'host' option is missing this stays None and
    # the split below raises AttributeError -- confirm callers always set it
    self.host_str = self.options.get("host", None)
    self.host_list = self.host_str.split(',')
    self.ssh_client = None

    node_file = os.path.join(consts.YARDSTICK_ROOT_PATH,
                             self.options.get("file"))
    with open(node_file) as node_fd:
        nodes = yaml.safe_load(TaskTemplate.render(node_fd.read()))
    self.nodes = {node['name']: node for node in nodes['nodes']}

    self.setup_done = False
def __init__(self, scenario_cfg, context_cfg):
    """Store config, create OpenStack clients and load the node file."""
    self.scenario_cfg = scenario_cfg
    self.context_cfg = context_cfg
    self.options = self.scenario_cfg['options']

    self.host_str = self.options.get("host", "node4")
    self.host_list = self.host_str.split(',')
    self.image = self.options.get("image", 'cirros-0.3.3')
    self.external_network = os.getenv("EXTERNAL_NETWORK")

    self.nova_client = op_utils.get_nova_client()
    self.neutron_client = op_utils.get_neutron_client()
    self.glance_client = op_utils.get_glance_client()
    self.instance = None
    self.instance_2 = None
    self.client = None

    node_file = os.path.join(consts.YARDSTICK_ROOT_PATH,
                             self.options.get("file"))
    with open(node_file) as node_fd:
        nodes = yaml.safe_load(TaskTemplate.render(node_fd.read()))
    self.nodes = {node['name']: node for node in nodes['nodes']}

    self.setup_done = False
def upload_pod_file(self, args):
    """Store an uploaded pod file and link it to an existing environment.

    Writes the upload to consts.POD_FILE, parses the rendered YAML,
    creates a pod record and points the environment at it.
    """
    try:
        pod_upload = args['file']
    except KeyError:
        return result_handler(consts.API_ERROR, 'file must be provided')

    try:
        env_id = args['environment_id']
    except KeyError:
        return result_handler(consts.API_ERROR,
                              'environment_id must be provided')

    # reject malformed environment ids up front
    try:
        uuid.UUID(env_id)
    except ValueError:
        return result_handler(consts.API_ERROR, 'invalid environment id')

    LOG.info('writing pod file: %s', consts.POD_FILE)
    pod_upload.save(consts.POD_FILE)

    with open(consts.POD_FILE) as saved_pod:
        pod_content = yaml_load(TaskTemplate.render(saved_pod.read()))
    LOG.debug('pod content is: %s', pod_content)

    LOG.info('create pod in database')
    new_pod_id = str(uuid.uuid4())
    pod_handler = V2PodHandler()
    pod_handler.insert({
        'uuid': new_pod_id,
        'environment_id': env_id,
        'content': jsonutils.dumps(pod_content),
    })

    LOG.info('update pod in environment')
    environment_handler = V2EnvironmentHandler()
    environment_handler.update_attr(env_id, {'pod_id': new_pod_id})

    return result_handler(consts.API_SUCCESS,
                          {'uuid': new_pod_id, 'pod': pod_content})
def parse_task(self, task_id, task_args=None, task_args_file=None):
    """parses the task file and return an context and scenario instances"""
    print("Parsing task config:", self.path)
    # collect template arguments from the args file first, then from the
    # inline args so the inline values take precedence
    try:
        kw = {}
        if task_args_file:
            with open(task_args_file) as f:
                kw.update(parse_task_args("task_args_file", f.read()))
        kw.update(parse_task_args("task_args", task_args))
    except TypeError:
        raise TypeError()
    try:
        with open(self.path) as f:
            try:
                input_task = f.read()
                rendered_task = TaskTemplate.render(input_task, **kw)
            except Exception as e:
                print("Failed to render template:\n%(task)s\n%(err)s\n"
                      % {"task": input_task, "err": e})
                raise e
            print("Input task is:\n%s\n" % rendered_task)
            cfg = yaml_load(rendered_task)
    except IOError as ioerror:
        sys.exit(ioerror)
    self._check_schema(cfg["schema"], "task")
    meet_precondition = self._check_precondition(cfg)

    # TODO: support one or many contexts? Many would simpler and precise
    # TODO: support hybrid context type
    if "context" in cfg:
        context_cfgs = [cfg["context"]]
    elif "contexts" in cfg:
        context_cfgs = cfg["contexts"]
    else:
        context_cfgs = [{"type": "Dummy"}]

    contexts = []
    # the first 8 chars of the task id make contexts from concurrent
    # task runs unique
    name_suffix = '-{}'.format(task_id[:8])
    for cfg_attrs in context_cfgs:
        try:
            cfg_attrs['name'] = '{}{}'.format(cfg_attrs['name'],
                                              name_suffix)
        except KeyError:
            # context config without a name keeps its type default
            pass
        # default to Heat context because we are testing OpenStack
        context_type = cfg_attrs.get("type", "Heat")
        context = Context.get(context_type)
        context.init(cfg_attrs)
        contexts.append(context)

    run_in_parallel = cfg.get("run_in_parallel", False)

    # add tc and task id for influxdb extended tags
    for scenario in cfg["scenarios"]:
        task_name = os.path.splitext(os.path.basename(self.path))[0]
        scenario["tc"] = task_name
        scenario["task_id"] = task_id
        # embed task path into scenario so we can load other files
        # relative to task path
        scenario["task_path"] = os.path.dirname(self.path)

        change_server_name(scenario, name_suffix)

        # suffix the node names as well so they match the renamed context
        try:
            for node in scenario['nodes']:
                scenario['nodes'][node] += name_suffix
        except KeyError:
            pass

    # TODO we need something better here, a class that represent the file
    return cfg["scenarios"], run_in_parallel, meet_precondition, contexts
def parse_task(self, task_args=None, task_args_file=None):
    """Parse the task file.

    :param task_args: optional inline task arguments rendered into the
        template (override values from task_args_file)
    :param task_args_file: optional file holding task arguments
    :returns: tuple (scenarios, run_in_parallel, meet_precondition)
    :raises SystemExit: if the task file cannot be opened or read
    """
    # Python 3 compatible print (the old `print x, y` statement form is a
    # SyntaxError under Python 3); output is unchanged.
    print("Parsing task config:", self.path)
    try:
        kw = {}
        if task_args_file:
            with open(task_args_file) as f:
                kw.update(parse_task_args("task_args_file", f.read()))
        kw.update(parse_task_args("task_args", task_args))
    except TypeError:
        raise TypeError()
    try:
        with open(self.path) as f:
            try:
                input_task = f.read()
                rendered_task = TaskTemplate.render(input_task, **kw)
            except Exception as e:
                print("Failed to render template:\n%(task)s\n%(err)s\n"
                      % {"task": input_task, "err": e})
                raise e
            print("Input task is:\n%s\n" % rendered_task)
            # safe_load: task files are data; plain yaml.load can
            # construct arbitrary Python objects
            cfg = yaml.safe_load(rendered_task)
    except IOError as ioerror:
        sys.exit(ioerror)
    self._check_schema(cfg["schema"], "task")
    meet_precondition = self._check_precondition(cfg)

    # TODO: support one or many contexts? Many would simpler and precise
    # TODO: support hybrid context type
    if "context" in cfg:
        context_cfgs = [cfg["context"]]
    elif "contexts" in cfg:
        context_cfgs = cfg["contexts"]
    else:
        context_cfgs = [{"type": "Dummy"}]

    for cfg_attrs in context_cfgs:
        # default to Heat context (OpenStack under test)
        context_type = cfg_attrs.get("type", "Heat")
        if "Heat" == context_type and "networks" in cfg_attrs:
            # bugfix: if there are more than one network,
            # only add "external_network" on first one.
            # the name of network should follow this rule:
            # test, test2, test3 ...
            # sort network with the length of network's name
            sorted_networks = sorted(cfg_attrs["networks"].keys())
            # config external_network based on env var
            cfg_attrs["networks"][sorted_networks[0]]["external_network"] \
                = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
        context = Context.get(context_type)
        context.init(cfg_attrs)

    run_in_parallel = cfg.get("run_in_parallel", False)

    # add tc and task id for influxdb extended tags
    task_id = str(uuid.uuid4())
    # the task name is the same for every scenario; compute it once
    task_name = os.path.splitext(os.path.basename(self.path))[0]
    for scenario in cfg["scenarios"]:
        scenario["tc"] = task_name
        scenario["task_id"] = task_id

    # TODO we need something better here, a class that represent the file
    return cfg["scenarios"], run_in_parallel, meet_precondition
def parse_task(self, task_args=None, task_args_file=None):
    """Parse the task file and return scenarios with their contexts.

    :param task_args: optional inline task arguments rendered into the
        template (override values from task_args_file)
    :param task_args_file: optional file holding task arguments
    :returns: tuple (scenarios, run_in_parallel, meet_precondition)
    :raises SystemExit: if the task file cannot be opened or read
    """
    # use the print() function: the legacy `print x, y` statement is a
    # SyntaxError on Python 3; output is unchanged
    print("Parsing task config:", self.path)
    try:
        kw = {}
        if task_args_file:
            with open(task_args_file) as f:
                kw.update(parse_task_args("task_args_file", f.read()))
        kw.update(parse_task_args("task_args", task_args))
    except TypeError:
        raise TypeError()
    try:
        with open(self.path) as f:
            try:
                input_task = f.read()
                rendered_task = TaskTemplate.render(input_task, **kw)
            except Exception as e:
                print("Failed to render template:\n%(task)s\n%(err)s\n"
                      % {"task": input_task, "err": e})
                raise e
            print("Input task is:\n%s\n" % rendered_task)
            # safe_load instead of yaml.load: task files must stay pure data
            cfg = yaml.safe_load(rendered_task)
    except IOError as ioerror:
        sys.exit(ioerror)
    self._check_schema(cfg["schema"], "task")
    meet_precondition = self._check_precondition(cfg)

    # TODO: support one or many contexts? Many would simpler and precise
    # TODO: support hybrid context type
    if "context" in cfg:
        context_cfgs = [cfg["context"]]
    elif "contexts" in cfg:
        context_cfgs = cfg["contexts"]
    else:
        context_cfgs = [{"type": "Dummy"}]

    for cfg_attrs in context_cfgs:
        # default to Heat context (OpenStack under test)
        context_type = cfg_attrs.get("type", "Heat")
        if "Heat" == context_type and "networks" in cfg_attrs:
            # bugfix: if there are more than one network,
            # only add "external_network" on first one.
            # the name of network should follow this rule:
            # test, test2, test3 ...
            # sort network with the length of network's name
            sorted_networks = sorted(cfg_attrs["networks"].keys())
            # config external_network based on env var
            cfg_attrs["networks"][sorted_networks[0]]["external_network"] \
                = os.environ.get("EXTERNAL_NETWORK", "net04_ext")
        context = Context.get(context_type)
        context.init(cfg_attrs)

    run_in_parallel = cfg.get("run_in_parallel", False)

    # add tc and task id for influxdb extended tags
    task_id = str(uuid.uuid4())
    # task name is loop-invariant; derive it once from the file name
    task_name = os.path.splitext(os.path.basename(self.path))[0]
    for scenario in cfg["scenarios"]:
        scenario["tc"] = task_name
        scenario["task_id"] = task_id

    # TODO we need something better here, a class that represent the file
    return cfg["scenarios"], run_in_parallel, meet_precondition