def bench(env=None, **kwargs):
    """Run the benchmarks described in the workload directory.

    Reads ``<workload>/run.yml``, builds the cartesian product of the
    benchmark arguments, and runs the ``run-bench.yml`` playbook once
    per argument combination and enabled scenario.

    :param env: environment dict (provides ``resultdir``)
    :param kwargs: CLI arguments; ``--workload`` is the path to the
        workload directory holding ``run.yml`` and the scenario files
    """
    def cartesian(d):
        """Returns the cartesian product of the args."""
        logging.debug(d)
        f = []
        for k, v in d.items():
            if isinstance(v, list):
                # one sublist of [key, value] pairs per candidate value
                f.append([[k, vv] for vv in v])
            else:
                f.append([[k, v]])
        logging.debug(f)
        product = []
        for e in itertools.product(*f):
            product.append(dict(e))
        return product

    logging.debug('phase[bench]: args=%s' % kwargs)
    playbook_values = mk_enos_values(env)
    workload_dir = kwargs["--workload"]
    with open(os.path.join(workload_dir, "run.yml")) as workload_f:
        # NOTE(review): yaml.load without an explicit Loader is deprecated
        # and unsafe on untrusted input -- consider yaml.safe_load.
        workload = yaml.load(workload_f)
    for bench_type, desc in workload.items():
        scenarios = desc.get("scenarios", [])
        for scenario in scenarios:
            # Merge the scenario args over the top-level args. Work on a
            # copy: the previous code updated desc["args"] in place, which
            # leaked one scenario's args into the following scenarios.
            args = dict(desc.get("args", {}))
            args.update(scenario.get("args", {}))
            # merging enabled, skipping if not enabled
            top_enabled = desc.get("enabled", True)
            enabled = scenario.get("enabled", True)
            if not (top_enabled and enabled):
                continue
            for a in cartesian(args):
                playbook_path = os.path.join(ANSIBLE_DIR, 'run-bench.yml')
                inventory_path = os.path.join(env['resultdir'], 'multinode')
                # NOTE(msimonin) all the scenarios and plugins
                # must reside on the workload directory
                scenario_location = os.path.join(workload_dir,
                                                 scenario["file"])
                scenario_location = os.path.abspath(scenario_location)
                bench = {
                    'type': bench_type,
                    'scenario_location': scenario_location,
                    'file': scenario["file"],
                    'args': a
                }
                if "plugin" in scenario:
                    plugin = os.path.abspath(
                        os.path.join(workload_dir, scenario["plugin"]))
                    if os.path.isdir(plugin):
                        # trailing slash -- presumably so the playbook
                        # copies the directory content; TODO confirm
                        plugin = plugin + "/"
                    bench['plugin_location'] = plugin
                playbook_values.update(bench=bench)
                run_ansible([playbook_path], inventory_path,
                            extra_vars=playbook_values)
def backup(env=None, **kwargs):
    """Backup the environment into the backup directory.

    The directory is taken from ``--backup_dir`` and falls back to the
    symlinked current deployment; it is created when missing and its
    absolute path is recorded in the configuration before running the
    ``backup.yml`` playbook.

    :param env: environment dict (mutated: ``config['backup_dir']``)
    :param kwargs: CLI arguments ('--backup_dir')
    """
    target = kwargs['--backup_dir']
    if target is None:
        target = SYMLINK_NAME
    target = to_abs_path(target)

    # create the destination if necessary
    if not os.path.isdir(target):
        os.mkdir(target)

    # make the path available to the playbook through the config
    env['config']['backup_dir'] = target

    run_ansible([os.path.join(ANSIBLE_DIR, 'backup.yml')],
                os.path.join(SYMLINK_NAME, 'multinode'),
                env['config'])
def install_os(env=None, **kwargs):
    """Deploy OpenStack on the resources with Kolla.

    Generates the Kolla configuration files, (re)clones Kolla, applies
    the local patches playbook and finally invokes ``kolla-ansible``
    (``deploy`` or ``reconfigure``).

    :param env: environment dict (provides ips, eths, config, resultdir,
        inventory, kolla_repo, kolla_branch)
    :param kwargs: CLI arguments ('--reconfigure', '--tags')
    """
    logging.debug('phase[os]: args=%s' % kwargs)
    # Generates kolla globals.yml, passwords.yml
    generated_kolla_vars = {
        # Kolla + common specific
        'neutron_external_address': env['ips'][NEUTRON_IP],
        'network_interface': env['eths'][NETWORK_IFACE],
        # Kolla specific
        'kolla_internal_vip_address': env['ips'][INTERNAL_IP],
        'neutron_external_interface': env['eths'][EXTERNAL_IFACE]
    }
    generate_kolla_files(env['config']["kolla"],
                         generated_kolla_vars,
                         env['resultdir'])

    # Clone or pull Kolla
    kolla_path = os.path.join(env['resultdir'], 'kolla')
    if os.path.isdir(kolla_path):
        logging.info("Remove previous Kolla installation")
        call("rm -rf %s" % kolla_path, shell=True)
    logging.info("Cloning Kolla")
    # NOTE(review): building shell commands by string interpolation breaks
    # on paths with spaces; a list + shell=False would be safer.
    call("git clone %s -b %s %s > /dev/null" %
         (env['kolla_repo'], env['kolla_branch'], kolla_path),
         shell=True)

    # Bugfix: the original passed a 2-tuple to logging.warning (trailing
    # comma inside the parentheses), which logged the tuple repr instead
    # of the intended concatenated message.
    logging.warning("Patching kolla, this should be "
                    "deprecated with the new version of Kolla")
    playbook = os.path.join(ANSIBLE_DIR, "patches.yml")
    run_ansible([playbook], env['inventory'], env['config'])

    kolla_cmd = [os.path.join(kolla_path, "tools", "kolla-ansible")]
    if kwargs['--reconfigure']:
        kolla_cmd.append('reconfigure')
    else:
        kolla_cmd.append('deploy')
    kolla_cmd.extend([
        "-i", "%s/multinode" % SYMLINK_NAME,
        "--passwords", "%s/passwords.yml" % SYMLINK_NAME,
        "--configdir", "%s" % SYMLINK_NAME
    ])
    if kwargs['--tags']:
        kolla_cmd.extend(['--tags', kwargs['--tags']])
    call(kolla_cmd)
def backup(env=None, **kwargs):
    """Backup the environment into the backup directory.

    The destination is the first non-empty value among ``--backup_dir``,
    ``--env`` and the symlinked current deployment. It is created if
    missing, recorded in the configuration, and the ``backup.yml``
    playbook is run against the result-directory inventory.

    :param env: environment dict (mutated: ``config['backup_dir']``)
    :param kwargs: CLI arguments ('--backup_dir', '--env')
    """
    destination = os.path.abspath(
        kwargs['--backup_dir'] or kwargs['--env'] or SYMLINK_NAME)

    # create the destination if necessary
    if not os.path.isdir(destination):
        os.mkdir(destination)

    # make the path available to the playbook through the config
    env['config']['backup_dir'] = destination

    run_ansible([os.path.join(ANSIBLE_DIR, 'backup.yml')],
                os.path.join(env['resultdir'], 'multinode'),
                extra_vars=env['config'])
def bench(env=None, **kwargs):
    """Run the benchmarks described in the workload directory.

    Reads ``<workload>/run.yml``, builds the cartesian product of the
    benchmark arguments, and runs the ``run-bench.yml`` playbook once
    per argument combination and enabled scenario.

    :param env: environment dict (mutated: ``config['bench']``)
    :param kwargs: CLI arguments; ``--workload`` is the path to the
        workload directory holding ``run.yml`` and the scenario files
    """
    def cartesian(d):
        """Returns the cartesian product of the args."""
        logging.debug(d)
        f = []
        for k, v in d.items():
            if isinstance(v, list):
                # one sublist of [key, value] pairs per candidate value
                f.append([[k, vv] for vv in v])
            else:
                f.append([[k, v]])
        logging.debug(f)
        product = []
        for e in itertools.product(*f):
            product.append(dict(e))
        return product

    logging.debug('phase[bench]: args=%s' % kwargs)
    workload_dir = kwargs["--workload"]
    with open(os.path.join(workload_dir, "run.yml")) as workload_f:
        # NOTE(review): yaml.load without an explicit Loader is deprecated
        # and unsafe on untrusted input -- consider yaml.safe_load.
        workload = yaml.load(workload_f)
    for bench_type, desc in workload.items():
        scenarios = desc.get("scenarios", [])
        for scenario in scenarios:
            # Merge the scenario args over the top-level args. Work on a
            # copy: the previous code updated desc["args"] in place, which
            # leaked one scenario's args into the following scenarios.
            args = dict(desc.get("args", {}))
            args.update(scenario.get("args", {}))
            # merging enabled, skipping if not enabled
            top_enabled = desc.get("enabled", True)
            enabled = scenario.get("enabled", True)
            if not (top_enabled and enabled):
                continue
            for a in cartesian(args):
                playbook_path = os.path.join(ANSIBLE_DIR, 'run-bench.yml')
                inventory_path = os.path.join(SYMLINK_NAME, 'multinode')
                # NOTE(msimonin) all the scenarios must reside on the
                # workload directory
                env['config']['bench'] = {
                    'type': bench_type,
                    'location': os.path.abspath(
                        os.path.join(workload_dir, scenario["file"])),
                    'file': scenario["file"],
                    'args': a
                }
                run_ansible([playbook_path], inventory_path, env['config'])
def up(provider=None, env=None, **kwargs):
    """Acquire resources from the provider and run the ``up.yml`` playbook.

    Loads the configuration file given by ``-f``, asks the provider for
    resources, creates a timestamped result directory plus the ansible
    inventory, sets the playbook variables, runs ``up.yml`` between the
    provider hooks, and finally symlinks the result directory.

    :param provider: provider object offering ``init``,
        ``before_preintsall`` and ``after_preintsall``
    :param env: mutable environment dict, updated in place with the
        acquired resources and generated paths
    :param kwargs: CLI arguments ('-f', '--force-deploy', '--tags')
    """
    logging.debug('phase[up]: args=%s' % kwargs)
    # Loads the configuration file
    config_file = kwargs['-f']
    if os.path.isfile(config_file):
        env['config_file'] = config_file
        with open(config_file, 'r') as f:
            # NOTE(review): yaml.load without a Loader is deprecated and
            # unsafe on untrusted input -- consider yaml.safe_load
            env['config'].update(yaml.load(f))
        logging.info("Reloaded config %s", env['config'])
    else:
        # a missing configuration file is fatal
        logging.error('Configuration file %s does not exist', config_file)
        sys.exit(1)

    # Calls the provider and initialise resources
    rsc, ips, eths = provider.init(env['config'], kwargs['--force-deploy'])
    env['rsc'] = rsc
    env['ips'] = ips
    env['eths'] = eths

    # Generates a directory for results, named after the current date
    resultdir_name = 'enos_' + datetime.today().isoformat()
    resultdir = os.path.join(CALL_PATH, resultdir_name)
    os.mkdir(resultdir)
    logging.info('Generates result directory %s' % resultdir_name)
    env['resultdir'] = resultdir

    # Generates inventory for ansible/kolla
    base_inventory = env['config']['inventory']
    inventory = os.path.join(resultdir, 'multinode')
    generate_inventory(env['rsc'], base_inventory, inventory)
    logging.info('Generates inventory %s' % inventory)
    env['inventory'] = inventory

    # Set variables required by playbooks of the application
    env['config'].update({
        # Enos specific
        'vip': ips[INTERNAL_IP],
        'registry_vip': ips[REGISTRY_IP],
        'influx_vip': ips[INFLUX_IP],
        'grafana_vip': ips[GRAFANA_IP],
        # Kolla + common specific
        'neutron_external_address': ips[NEUTRON_IP],
        'network_interface': eths[NETWORK_IFACE],
        # Kolla specific
        'kolla_internal_vip_address': ips[INTERNAL_IP],
        'neutron_external_interface': eths[EXTERNAL_IFACE]
    })
    # merge the default passwords into the config
    passwords = os.path.join(TEMPLATE_DIR, "passwords.yml")
    with open(passwords) as f:
        env['config'].update(yaml.load(f))

    # Executes hooks and runs playbook that initializes resources (eg,
    # installs the registry, install monitoring tools, ...)
    provider.before_preintsall(env)
    up_playbook = os.path.join(ANSIBLE_DIR, 'up.yml')
    run_ansible([up_playbook], inventory, env['config'], kwargs['--tags'])
    provider.after_preintsall(env)

    # Symlink current directory to the freshly created result directory
    link = os.path.abspath(SYMLINK_NAME)
    try:
        # drop any stale link before re-creating it
        os.remove(link)
    except OSError:
        # the link did not exist yet -- nothing to clean up
        pass
    os.symlink(resultdir, link)
    logging.info("Symlinked %s to %s" % (resultdir, link))
def up(env=None, **kwargs):
    """Prepare the experiment: result directory, configuration, provider
    resources, inventory and the ``up.yml`` playbook.

    :param env: mutable environment dict, updated in place
    :param kwargs: CLI arguments ('--env', '-f', '--force-deploy',
        '--tags')
    """
    logging.debug('phase[up]: args=%s' % kwargs)

    # Generates a directory for results
    dirname = kwargs['--env'] or \
        'enos_' + datetime.today().isoformat()
    results = os.path.join(CALL_PATH, dirname)

    # A regular file with that name prevents creating the directory
    if os.path.isfile(results):
        logging.error("Result directory cannot be created due to %s"
                      % results)
        sys.exit(1)

    # Create the result directory if it does not exist
    if not os.path.isdir(results):
        os.mkdir(results)
    logging.info('Generate results in %s' % results)
    env['resultdir'] = results

    # Symlink the result directory with the current directory
    link = os.path.abspath(SYMLINK_NAME)
    if os.path.lexists(link):
        os.remove(link)
    try:
        os.symlink(env['resultdir'], link)
        logging.info("Symlinked %s to %s" % (env['resultdir'], link))
    except OSError:
        # An harmless error can occur due to a race condition when
        # multiple regions are simultaneously deployed
        logging.warning("Symlink %s to %s failed" % (env['resultdir'], link))

    # Loads the configuration file (fatal when missing)
    config_file = kwargs['-f']
    if not os.path.isfile(config_file):
        logging.error('Configuration file %s does not exist', config_file)
        sys.exit(1)
    env['config_file'] = config_file
    with open(config_file, 'r') as f:
        env['config'].update(yaml.load(f))
    logging.info("Reloaded config %s", env['config'])

    # Calls the provider and initialise resources
    provider = make_provider(env)
    rsc, provider_net, eths = provider.init(
        env['config'], CALL_PATH, kwargs['--force-deploy'])
    env['rsc'] = rsc
    env['provider_net'] = provider_net
    env['eths'] = eths

    # Generates inventory for ansible/kolla
    inventory = os.path.join(env['resultdir'], 'multinode')
    generate_inventory(env['rsc'], env['config']['inventory'], inventory)
    logging.info('Generates inventory %s' % inventory)
    env['inventory'] = inventory

    wait_ssh(env)

    # Set variables required by playbooks of the application; the registry
    # vip comes from the config when pinned, otherwise from the ip pool
    registry_conf = env['config']['registry']
    registry_vip = registry_conf['ip'] if 'ip' in registry_conf \
        else pop_ip(env)
    env['config'].update({
        'vip': pop_ip(env),
        'registry_vip': registry_vip,
        'influx_vip': pop_ip(env),
        'grafana_vip': pop_ip(env),
        'network_interface': eths[NETWORK_IFACE],
        'resultdir': env['resultdir'],
        'rabbitmq_password': "******",
        'database_password': "******",
        'external_vip': pop_ip(env)
    })

    # Executes hooks and runs playbook that initializes resources (eg,
    # installs the registry, install monitoring tools, ...)
    provider.before_preintsall(env)
    run_ansible([os.path.join(ANSIBLE_DIR, 'up.yml')], inventory,
                extra_vars=env['config'], tags=kwargs['--tags'])
    provider.after_preintsall(env)
def tc(env=None, **kwargs):
    """Enforce network constraints.

    1) Retrieve the list of ips for all nodes (ansible)
    2) Build all the constraints (python)
        {source:src, target: ip_dest, device: if, rate:x, delay:y}
    3) Enforce those constraints (ansible)

    With ``--test`` only step 3 is replaced by a check of the currently
    applied constraints.

    :param env: environment dict (provides resultdir, eths, inventory,
        config, rsc)
    :param kwargs: CLI arguments ('--test')
    """
    test = kwargs['--test']
    if test:
        # test mode: only verify what is currently enforced, then stop
        logging.info('Checking the constraints')
        utils_playbook = os.path.join(ANSIBLE_DIR, 'utils.yml')
        # NOTE(msimonin): we retrieve eth name from the env instead
        # of env['config'] in case os hasn't been called
        options = {
            'action': 'test',
            'tc_output_dir': env['resultdir'],
            'network_interface': env['eths'][NETWORK_IFACE]
        }
        run_ansible([utils_playbook], env['inventory'], extra_vars=options)
        return

    # 1. getting ips/devices information; the playbook dumps them into
    # ips_file which is read back below
    logging.info('Getting the ips of all nodes')
    utils_playbook = os.path.join(ANSIBLE_DIR, 'utils.yml')
    ips_file = os.path.join(env['resultdir'], 'ips.txt')
    # NOTE(msimonin): we retrieve eth name from the env instead
    # of env['config'] in case os hasn't been called
    options = {
        'action': 'ips',
        'ips_file': ips_file,
        'network_interface': env['eths'][NETWORK_IFACE],
        'neutron_external_interface': env['eths'][EXTERNAL_IFACE]
    }
    run_ansible([utils_playbook], env['inventory'], extra_vars=options)

    # 2.a building the group constraints
    logging.info('Building all the constraints')
    topology = env['config']['topology']
    network_constraints = env['config']['network_constraints']
    constraints = build_grp_constraints(topology, network_constraints)
    # 2.b Building the ip/device level constaints
    with open(ips_file) as f:
        ips = yaml.load(f)
        # will hold every single constraint
        ips_with_constraints = build_ip_constraints(env['rsc'],
                                                    ips,
                                                    constraints)
        # dumping it for debugging purpose
        ips_with_constraints_file = os.path.join(env['resultdir'],
                                                 'ips_with_constraints.yml')
        with open(ips_with_constraints_file, 'w') as g:
            yaml.dump(ips_with_constraints, g)

    # 3. Enforcing those constraints
    logging.info('Enforcing the constraints')
    # enabling/disabling network constraints (also persists the default
    # back into the config via setdefault)
    enable = network_constraints.setdefault('enable', True)
    utils_playbook = os.path.join(ANSIBLE_DIR, 'utils.yml')
    options = {
        'action': 'tc',
        'ips_with_constraints': ips_with_constraints,
        'tc_enable': enable,
    }
    run_ansible([utils_playbook], env['inventory'], extra_vars=options)
def up(env=None, **kwargs):
    """Get resources from the provider and run the ``up.yml`` playbook.

    Sets up the result directory, loads the configuration file, asks the
    provider for resources, generates the ansible inventory, waits for
    ssh, fills in the playbook variables and runs ``up.yml``.

    :param env: mutable environment dict, updated in place
    :param kwargs: CLI arguments ('--env', '-f', '--force-deploy',
        '--tags')
    :raises EnosFilePathError: when the configuration file is missing
    """
    logging.debug('phase[up]: args=%s' % kwargs)
    # Generate or get the directory for results
    env['resultdir'] = _set_resultdir(kwargs['--env'])
    logging.info("Directory for experiment results is %s",
                 env['resultdir'])
    # Loads the configuration file
    config_file = os.path.abspath(kwargs['-f'])
    if os.path.isfile(config_file):
        env['config_file'] = config_file
        with open(config_file, 'r') as f:
            # NOTE(review): yaml.load without a Loader is deprecated and
            # unsafe on untrusted input -- consider yaml.safe_load
            env['config'].update(yaml.load(f))
        logging.info("Reloaded configuration file %s", env['config_file'])
        logging.debug("Configuration is %s", env['config'])
    else:
        raise EnosFilePathError(
            config_file,
            "Configuration file %s does not exist" % config_file)

    # Calls the provider and initialise resources
    provider = make_provider(env)
    config = load_config(env['config'],
                         provider.topology_to_resources,
                         provider.default_config())
    rsc, provider_net, eths = \
        provider.init(config, kwargs['--force-deploy'])
    env['rsc'] = rsc
    env['provider_net'] = provider_net
    env['eths'] = eths
    logging.debug("Provider ressources: %s", env['rsc'])
    logging.debug("Provider network information: %s", env['provider_net'])
    logging.debug("Provider network interfaces: %s", env['eths'])

    # Generates inventory for ansible/kolla
    base_inventory = seekpath(env['config']['inventory'])
    inventory = os.path.join(env['resultdir'], 'multinode')
    generate_inventory(env['rsc'], base_inventory, inventory)
    logging.info('Generates inventory %s' % inventory)
    env['inventory'] = inventory

    # Wait for resources to be ssh reachable
    wait_ssh(env)

    # Set variables required by playbooks of the application; the registry
    # vip comes from the config when pinned, otherwise from the ip pool
    env['config'].update({
        'vip': pop_ip(env),
        'registry_vip': env['config']['registry'].get('ip') or pop_ip(env),
        'influx_vip': pop_ip(env),
        'grafana_vip': pop_ip(env),
        'network_interface': eths[NETWORK_IFACE],
        'resultdir': env['resultdir'],
        'rabbitmq_password': "******",
        'database_password': "******",
        'external_vip': pop_ip(env)
    })

    # Runs playbook that initializes resources (eg,
    # installs the registry, install monitoring tools, ...)
    up_playbook = os.path.join(ANSIBLE_DIR, 'up.yml')
    run_ansible([up_playbook], inventory, extra_vars=env['config'],
                tags=kwargs['--tags'])