def destroy(env=None, **kwargs):
    """Destroy the deployment.

    With ``--hard`` the provider resources themselves are released;
    otherwise only the deployed services are removed by running the
    enos destroy playbook followed by kolla's own destroy command.
    """
    if kwargs['--hard']:
        # Hard destroy: release the infrastructure at the provider level.
        logging.info('Destroying all the resources')
        provider = make_provider(env['config']['provider'])
        provider.destroy(env)
        return

    # Soft destroy: ask kolla to remove its containers
    # (and optionally the images).
    command = ['destroy', '--yes-i-really-really-mean-it']
    if kwargs['--include-images']:
        command.append('--include-images')
    kolla_kwargs = {
        '--': True,
        '--env': kwargs['--env'],
        '-v': kwargs['-v'],
        '<command>': command,
        '--silent': kwargs['--silent'],
        'kolla': True,
    }

    # Destroying enos resources
    run_ansible([os.path.join(ANSIBLE_DIR, 'enos.yml')],
                os.path.join(env['resultdir'], 'multinode'),
                extra_vars={"enos_action": "destroy"})
    # Destroying kolla resources
    _kolla(env=env, **kolla_kwargs)
def deploy(self):
    """Deploy docker and, optionally, a docker registry cache."""
    playbook = os.path.join(SERVICE_PATH, "docker.yml")
    extra_vars = {
        "registry": self.registry_opts,
        "enos_action": "deploy",
    }
    # Only forward the bind mounts when some were configured.
    if self.bind_volumes:
        extra_vars["bind_volumes"] = self.bind_volumes
    run_ansible([playbook], roles=self._roles, extra_vars=extra_vars)
def bootstrap_kolla(env):
    """Prepare everything needed to call kolla-ansible.

    On the local host: patch the kolla-ansible sources (if any) and
    generate globals.yml, password.yml and admin-openrc into the
    result dir.  On every host: remove the ip addresses set on the
    neutron_external_interface (see the inventory hostvars).
    """
    # Build the globals.yml content and dump it in the result dir.
    #
    # FIXME: Find a neat way to put this into the next bootsrap_kolla
    # playbook. Then, remove this util function and call directly the
    # playbook from `enos os`.
    globals_values = get_kolla_required_values(env)
    globals_values.update(env['config']['kolla'])
    globals_values.update(cwd=env['cwd'])
    globals_path = os.path.join(env['resultdir'], 'globals.yml')
    with open(globals_path, 'w') as f:
        yaml.dump(globals_values, f, default_flow_style=False)

    # Patch kolla-ansible sources and write admin-openrc plus
    # password.yml in the result dir.
    api.run_ansible([os.path.join(ANSIBLE_DIR, 'bootstrap_kolla.yml')],
                    env['inventory'],
                    extra_vars=mk_enos_values(env))
def validate(self, *, output_dir=None):
    """Validate the network parameters (latency, bandwidth ...)

    Runs flent/ping based checks against the constraints set by
    :py:meth:`enoslib.service.netem.Netem.deploy`.  Reports are
    written in the tmp directory used by enos.

    Args:
        output_dir (str): directory where validation files will be
            stored.  Defaults to
            :py:const:`enoslib.constants.TMP_DIRNAME`.
    """
    logger.debug("Checking the constraints")
    target_dir = output_dir or os.path.join(os.getcwd(), TMP_DIRNAME)
    target_dir = os.path.abspath(target_dir)
    _check_tmpdir(target_dir)
    options = self._build_options(
        {"enos_action": "tc_validate", "tc_output_dir": target_dir})
    run_ansible([os.path.join(SERVICE_PATH, "netem.yml")],
                roles=self.roles, extra_vars=options)
def prepare(env=None, db='cockroachdb', locality=False, **kwargs):
    """
    usage: juice prepare [--db {mariadb,cockroachdb}] [--locality]

    Configure the resources, requires both g5k and inventory executions

    --db DATABASE         Database to deploy on [default: cockroachdb]
    --locality            Use follow-the-workload (only for CockroachDB)
    """
    db_validation(db)
    # Build the variables for the prepare playbook.
    extra_vars = {
        "registry": env["config"]["registry"],
        "db": db,
        "locality": locality,
        # Set monitoring to True by default
        "enable_monitoring": env['config'].get('enable_monitoring', True),
        # use deploy of each role
        "enos_action": "deploy",
    }
    env["db"] = db
    run_ansible([os.path.join(JUICE_PATH, "ansible/prepare.yml")],
                env["inventory"], extra_vars=extra_vars)
    env["tasks_ran"].append('prepare')
def prepare(env=None, broker=BROKER, **kwargs):
    """Deploy the message bus under evaluation (rabbitmq or qdr).

    Saves the qdr router configuration in the env when relevant, then
    runs the site playbook with ``enos_action: deploy``.

    Raises:
        Exception: when ``broker`` is neither "rabbitmq" nor "qdr".
    """
    # Generate inventory
    extra_vars = {
        "registry": {
            "type": "internal"
        },
        "broker": broker
    }
    # Preparing the installation of the bus under evaluation
    # Need to pass specific options
    if broker == "rabbitmq":
        # Nothing to do
        pass
    elif broker == "qdr":
        # Building the graph of routers
        roles = env["roles"]
        machines = [desc.alias for desc in roles["bus"]]
        graph = generate(GRAPH_TYPE, *GRAPH_ARGS)
        confs = get_conf(graph, machines, round_robin)
        # NOTE(review): materialize the values into a list — a bare
        # dict_values view is not JSON-serializable when handed to
        # ansible as extra_vars (python3), and the sibling prepare
        # task already passes list(bus_conf.values()).
        qdr_confs = {"qdr_confs": list(confs.values())}
        extra_vars.update(qdr_confs)
        env.update(qdr_confs)
    else:
        raise Exception("Unknown broker chosen")

    # use deploy of each role
    extra_vars.update({"enos_action": "deploy"})
    run_ansible(["ansible/site.yml"], env["inventory"],
                extra_vars=extra_vars)
    env["broker"] = broker
def start_virtualmachines(provider_conf, g5k_init, vmong5k_roles):
    """Boot the virtual machines on their physical hosts.

    Args:
        provider_conf: provider configuration (supplies the base image).
        g5k_init: the g5k provider init result (roles, networks), used
            to generate the physical-machine inventory.
        vmong5k_roles: role->vms mapping; the vms get indexed by host.
    """
    vms_by_host = _index_by_host(vmong5k_roles)
    extra_vars = {'vms': vms_by_host,
                  'base_image': provider_conf.image}
    pm_inventory_path = os.path.join(os.getcwd(), "pm_hosts")
    generate_inventory(*g5k_init, pm_inventory_path)

    # deploy virtual machines with ansible playbook
    # NOTE(review): pass extra_vars by keyword as every other call site
    # does — the positional form silently binds to the wrong parameter
    # if run_ansible has any argument declared before extra_vars.
    run_ansible([PLAYBOOK_PATH], pm_inventory_path, extra_vars=extra_vars)
def up(config, config_file=None, env=None, **kwargs): logging.debug('phase[up]: args=%s' % kwargs) # Calls the provider and initialise resources provider_conf = config['provider'] provider = make_provider(provider_conf) # Applying default configuration config = load_config(config, provider.default_config()) env['config'] = config env['config_file'] = config_file logging.debug("Loaded config: %s", config) rsc, networks = \ provider.init(env['config'], kwargs['--force-deploy']) env['rsc'] = rsc env['networks'] = networks logging.debug("Provider ressources: %s", env['rsc']) logging.debug("Provider network information: %s", env['networks']) # Generates inventory for ansible/kolla inventory = os.path.join(env['resultdir'], 'multinode') inventory_conf = env['config'].get('inventory') if not inventory_conf: logging.debug("No inventory specified, using the sample.") base_inventory = os.path.join(INVENTORY_DIR, 'inventory.sample') else: base_inventory = seekpath(inventory_conf) generate_inventory(env['rsc'], env['networks'], base_inventory, inventory) logging.info('Generates inventory %s' % inventory) env['inventory'] = inventory # Set variables required by playbooks of the application vip_pool = get_vip_pool(networks) env['config'].update({ 'vip': pop_ip(vip_pool), 'registry_vip': pop_ip(vip_pool), 'influx_vip': pop_ip(vip_pool), 'grafana_vip': pop_ip(vip_pool), 'resultdir': env['resultdir'], 'rabbitmq_password': "******", 'database_password': "******" }) options = {} options.update(env['config']) enos_action = "pull" if kwargs.get("--pull") else "deploy" options.update(enos_action=enos_action) # Runs playbook that initializes resources (eg, # installs the registry, install monitoring tools, ...) up_playbook = os.path.join(ANSIBLE_DIR, 'enos.yml') run_ansible([up_playbook], env['inventory'], extra_vars=options, tags=kwargs['--tags'])
def post_install(**kwargs):
    """Run the post-install playbook with the user-defined variables."""
    env = kwargs["env"]
    extra_vars = {"enos_action": "post-install"}
    # Forward the custom variables declared in the configuration.
    extra_vars.update(env["config"].get("vars", {}))
    playbook = os.path.join(ANSIBLE_DIR, "post_install.yml")
    run_ansible([playbook], env["inventory"], extra_vars=extra_vars)
def prepare(env=None, broker=BROKER, **kwargs):
    """Deploy the bus under evaluation and the control bus.

    Builds a minimal configuration dict for every agent of the bus;
    the subsequent test* tasks rely on it to configure the ombt
    agents.
    """
    # Generate inventory
    extra_vars = {"registry": env["config"]["registry"], "broker": broker}

    roles = env["roles"]
    bus_machines = [host.alias for host in roles["bus"]]
    # Get the specific configuration from the file
    config = env["config"][broker]

    if broker == "rabbitmq":
        # NOTE(msimonin): generate the configuration for rabbitmq
        # Assuming only one node for now
        bus_conf = [{
            "port": 5672,
            "management_port": 15672,
            "machine": bus_machines[0],
        }]
        # saving the conf object
        env.update({"bus_conf": [RabbitMQConf(c) for c in bus_conf]})
        # but passing its serialization to ansible
        ansible_bus_conf = {"bus_conf": bus_conf}
    elif broker == "qdr":
        # Building the graph of routers
        graph = generate(config["type"], *config["args"])
        bus_conf = get_conf(graph, bus_machines, round_robin)
        env.update({"bus_conf": [QdrConf(c) for c in bus_conf.values()]})
        ansible_bus_conf = {"bus_conf": list(bus_conf.values())}
    else:
        raise Exception("Unknown broker chosen")

    # The control bus is a single rabbitmq node.
    control_machines = [host.alias for host in roles["control-bus"]]
    control_bus_conf = [{
        "port": 5672,
        "management_port": 15672,
        "machine": control_machines[0],
    }]
    env.update(
        {"control_bus_conf": [RabbitMQConf(c) for c in control_bus_conf]})
    ansible_control_bus_conf = {"control_bus_conf": control_bus_conf}

    # use deploy of each role
    extra_vars.update({"enos_action": "deploy"})
    extra_vars.update(ansible_bus_conf)
    extra_vars.update(ansible_control_bus_conf)
    # Finally let's give ansible the bus_conf
    if config:
        extra_vars.update(config)
    run_ansible(["ansible/site.yml"], env["inventory"],
                extra_vars=extra_vars)
    env["broker"] = broker
def destroy(env=None, **kwargs):
    """Call destroy on each component of the deployment."""
    extra_vars = {
        "enos_action": "destroy",
        "broker": env["broker"],
        "qdr_confs": env.get("qdr_confs"),
    }
    # Tear down the bus first, then the ombt agents.
    for playbook in ("ansible/site.yml", "ansible/ombt.yml"):
        run_ansible([playbook], env["inventory"], extra_vars=extra_vars)
def destroy(env=None, **kwargs):
    """Call destroy on each component of the deployment."""
    extra_vars = {}
    # Call destroy on each component
    extra_vars.update({
        "enos_action": "destroy",
        "broker": env["broker"],
        # NOTE(review): default to an empty list — calling destroy
        # before prepare left "bus_conf" unset and env.get(...)
        # returned None, crashing the comprehension with a TypeError.
        "bus_conf": [o.to_dict() for o in env.get("bus_conf", [])]
    })
    run_ansible(["ansible/site.yml"], env["inventory"],
                extra_vars=extra_vars)
    run_ansible(["ansible/ombt.yml"], env["inventory"],
                extra_vars=extra_vars)
def prepare(**kwargs):
    """Emulate the network then deploy the services on the hosts."""
    env = kwargs["env"]
    # Apply the traffic shaping rules described in the configuration.
    emulate_network(env["roles"], env["inventory"],
                    env["config"]["traffic"])
    run_ansible([os.path.join(ANSIBLE_DIR, "site.yml")],
                env["inventory"],
                extra_vars={"enos_action": "deploy"})
def deploy(self):
    """Install the python prerequisites then run the skydive playbook."""
    # Some requirements
    with play_on(pattern_hosts="all", roles=self.roles) as play:
        play.apt(
            display_name="[Preinstall] Installing python-pip",
            name=["python3", "python-pip", "python3-pip"],
            state="present",
            update_cache=True,
        )
        play.pip(display_name="[Preinstall] Installing pyyaml",
                 name="pyyaml")
    playbook = os.path.join(SERVICE_PATH, "skydive", "skydive.yml")
    run_ansible([playbook], roles=self.roles, extra_vars=self.extra_vars)
def destroy(env=None, **kwargs):
    """
    usage: juice destroy

    Destroy all the running dockers (not destroying the resources),
    requires g5k and inventory executions
    """
    extra_vars = {
        "enos_action": "destroy",
        "db": env.get('db', 'cockroachdb'),
        "tasks_ran": env["tasks_ran"],
        # Set monitoring to True by default
        "enable_monitoring": env['config'].get('enable_monitoring', True),
    }
    # Every playbook honours enos_action=destroy.
    for playbook in ("ansible/prepare.yml", "ansible/stress.yml",
                     "ansible/openstack.yml", "ansible/rally.yml"):
        run_ansible([os.path.join(JUICE_PATH, playbook)],
                    env["inventory"], extra_vars=extra_vars)
    env["tasks_ran"].append('destroy')
def backup(env=None, **kwargs):
    """
    usage: juice backup

    Backup the environment, requires g5k, inventory and prepare executions
    """
    db = env.get('db', 'cockroachdb')
    nb_nodes = len(env["roles"]["database"])
    latency = env["latency"]
    extra_vars = {
        "enos_action": "backup",
        # One backup directory per (cluster size, db, latency) tuple.
        "backup_dir": os.path.join(
            os.getcwd(),
            "current/backup/%snodes-%s-%s" % (nb_nodes, db, latency)),
        "tasks_ran": env["tasks_ran"],
        # Set monitoring to True by default
        "enable_monitoring": env['config'].get('enable_monitoring', True),
    }
    # Every playbook honours enos_action=backup.
    for playbook in ("ansible/prepare.yml", "ansible/stress.yml",
                     "ansible/openstack.yml", "ansible/rally.yml"):
        run_ansible([os.path.join(JUICE_PATH, playbook)],
                    env["inventory"], extra_vars=extra_vars)
    env["tasks_ran"].append('backup')
def start_virtualmachines(provider_conf, g5k_roles, vmong5k_roles):
    """Deploy the virtual machines with the ansible playbook."""
    extra_vars = {
        'vms': _index_by_host(vmong5k_roles),
        'base_image': provider_conf.image,
        # push the g5k user in the env
        'g5k_user': os.environ.get('USER'),
        'working_dir': provider_conf.working_dir,
        'strategy': provider_conf.strategy,
    }
    run_ansible([PLAYBOOK_PATH], roles=g5k_roles, extra_vars=extra_vars)
def destroy(self):
    """Reset the network constraints (latency, bandwidth ...)

    Removes every filter that has been applied to shape the traffic.
    """
    logger.debug("Reset the constraints")
    workdir = os.path.join(os.getcwd(), TMP_DIRNAME)
    _check_tmpdir(workdir)
    options = self._build_options(
        {"enos_action": "tc_reset", "tc_output_dir": workdir})
    run_ansible([os.path.join(SERVICE_PATH, "netem.yml")],
                roles=self.roles, extra_vars=options)
def backup(env=None, **kwargs):
    """Backup the deployment.

    The backup directory comes from ``--backup_dir``, then ``--env``,
    then falls back to SYMLINK_NAME.  It is created when missing and
    its absolute path is recorded in the env before the enos playbook
    runs with ``enos_action: backup``.
    """
    backup_dir = kwargs['--backup_dir'] \
        or kwargs['--env'] \
        or SYMLINK_NAME

    backup_dir = os.path.abspath(backup_dir)
    # create if necessary
    # NOTE(review): makedirs instead of mkdir — also handles a nested
    # path whose parents don't exist, and exist_ok avoids the race
    # between the isdir check and the creation.
    os.makedirs(backup_dir, exist_ok=True)
    # update the env
    env['config']['backup_dir'] = backup_dir
    options = {}
    options.update(env['config'])
    options.update({'enos_action': 'backup'})
    playbook_path = os.path.join(ANSIBLE_DIR, 'enos.yml')
    inventory_path = os.path.join(env['resultdir'], 'multinode')
    run_ansible([playbook_path], inventory_path, extra_vars=options)
def openstack(db, env=None, **kwargs):
    """
    usage: juice openstack [--db {mariadb,cockroachdb}]

    Launch OpenStack

    --db DATABASE         Database to test [default: cockroachdb]
    """
    db_validation(db)
    # Build the variables for the openstack playbook.
    extra_vars = {
        "registry": env["config"]["registry"],
        "db": db,
        # use deploy of each role
        "enos_action": "deploy",
    }
    run_ansible([os.path.join(JUICE_PATH, "ansible/openstack.yml")],
                env["inventory"], extra_vars=extra_vars)
    env["tasks_ran"].append('openstack')
def stress(db, env=None, **kwargs):
    """
    usage: juice stress [--db {mariadb,cockroachdb}]

    Launch sysbench tests

    --db DATABASE         Database to test [default: cockroachdb]
    """
    db_validation(db)
    # Generate inventory
    extra_vars = {
        "registry": env["config"]["registry"],
        "db": db,
    }
    # use deploy of each role
    extra_vars.update({"enos_action": "deploy"})
    # NOTE(review): anchor the playbook on JUICE_PATH like every other
    # juice task — the bare relative path only worked when the current
    # directory happened to be the project root.
    run_ansible([os.path.join(JUICE_PATH, "ansible/stress.yml")],
                env["inventory"], extra_vars=extra_vars)
    env["tasks_ran"].append('stress')
def prepare(**kwargs):
    """Run the common site playbook then install k8s with Kubespray.

    Clones the pinned Kubespray version into the result dir, builds
    its inventory from the enos one, dumps the user variables and
    finally runs Kubespray's cluster.yml.
    """
    env = kwargs["env"]
    # common tasks
    extra_vars = {"enos_action": "deploy", "context": env["context"]}
    run_ansible([os.path.join(ANSIBLE_DIR, "site.yml")],
                env["inventory"], extra_vars=extra_vars)

    kspray_path = os.path.join(env["resultdir"], KUBESPRAY_PATH)
    logger.info("Remove previous Kubespray installation")
    check_call("rm -rf %s" % kspray_path, shell=True)

    # NOTE(review): the previous log message was garbled (a pod name
    # had been pasted into the middle of the word "repository").
    logger.info("Cloning Kubespray repository...")
    check_call(
        "git clone -b {ref} --depth 1 --single-branch --quiet {url} {dest}".
        format(ref=KUBESPRAY_VERSION, url=KUBESPRAY_URL, dest=kspray_path),
        shell=True,
    )

    in_kubespray("cd %s && pip install -r requirements.txt" % kspray_path)

    # Build Kubespray's inventory from the enos one.
    kspray_inventory_path = os.path.join(kspray_path, "inventory",
                                         "mycluster", "hosts.ini")
    in_kubespray("cd %s && cp -rfp inventory/sample inventory/mycluster" %
                 kspray_path)
    in_kubespray("cd %s && cp %s %s" %
                 (kspray_path, env["inventory"], kspray_inventory_path))

    k_vars = env["config"].get("vars", {})
    update_k_vars(k_vars)

    # Dumping overriden vars
    extra_vars_file = os.path.join(env["resultdir"], "extra_vars.yaml")
    with open(extra_vars_file, "w") as f:
        f.write(yaml.dump(env["config"].get("vars", {})))

    in_kubespray("cd %s && ansible-playbook -i inventory/mycluster/hosts.ini"
                 " "
                 "cluster.yml -e @%s" % (kspray_path, extra_vars_file))
def _start_virtualmachines(provider_conf, vmong5k_roles):
    """Deploy the virtual machines with the ansible playbook."""
    extra_vars = {
        "vms": _index_by_host(vmong5k_roles),
        "base_image": provider_conf.image,
        # push the g5k user in the env
        "g5k_user": os.environ.get("USER"),
        "working_dir": provider_conf.working_dir,
        "strategy": provider_conf.strategy,
        "enable_taktuk": provider_conf.enable_taktuk,
    }
    # Gather every physical machine hosting a vm in a single group.
    all_pms = []
    for machine in provider_conf.machines:
        all_pms.extend(machine.undercloud)
    run_ansible([PLAYBOOK_PATH], roles={"all": all_pms},
                extra_vars=extra_vars)
def init_os(env=None, **kwargs):
    """Initialise OpenStack once deployed (networks, external ips)."""
    logging.debug('phase[init]: args=%s' % kwargs)
    playbook_values = mk_enos_values(env)
    playbook_path = os.path.join(ANSIBLE_DIR, 'init_os.yml')
    inventory_path = os.path.join(env['resultdir'], 'multinode')

    # Yes, if the external network isn't found we take the external ip in the
    # pool used for OpenStack services (like the apis) This mimic what was done
    # before the enoslib integration. An alternative solution would be to
    # provision a external pool of ip regardless the number of nic available
    # (in g5k this would be a kavlan) but in this case we'll need to know
    # whether the network is physicaly attached (or no) to the physical nics
    candidates = [NEUTRON_EXTERNAL_INTERFACE, NETWORK_INTERFACE]
    provider_net = lookup_network(env['networks'], candidates)
    if not provider_net:
        raise Exception("External network not found, define %s networks" %
                        " or ".join(candidates))

    playbook_values.update(
        provider_net=provider_net,
        enos_action='pull' if kwargs.get('--pull') else 'deploy')
    run_ansible([playbook_path], inventory_path,
                extra_vars=playbook_values)
def rally(files, directory, burst, env=None, **kwargs):
    """
    usage: juice rally [--files FILE... | --directory DIRECTORY] [--burst]

    Benchmark the Openstack

    --files FILE           Files to use for rally scenarios
                           (name must be a path from rally scenarios folder).
    --directory DIRECTORY  Directory that contains rally scenarios.
                           [default: keystone]
    --burst                Use burst or not
    """
    # NOTE(review): log what will actually run — previously both
    # messages were emitted unconditionally, which was misleading.
    if files:
        logging.info("Launching rally using scenarios: %s" %
                     (', '.join(files)))
    else:
        logging.info("Launching rally using all scenarios in %s directory.",
                     directory)

    # Hosts of the database roles will run the rally agents.
    database_roles = [hosts for role, hosts in env['roles'].items()
                      if role.startswith('database')]
    if burst:
        # NOTE(review): `.iteritems()` was python2-only; `.items()`
        # behaves identically here and also works on python3.  The map
        # is materialized so it serializes cleanly into extra_vars.
        rally_nodes = list(map(operator.attrgetter('address'),
                               reduce(operator.add, database_roles)))
    else:
        # Only the first host of each database role.
        rally_nodes = [hosts[0].address for hosts in database_roles]
    # NOTE(review): was a leftover bare print()
    logging.debug(rally_nodes)

    extra_vars = {"registry": env["config"]["registry"],
                  "rally_nodes": rally_nodes}
    if files:
        extra_vars.update({"rally_files": files})
    else:
        extra_vars.update({"rally_directory": directory})

    # use deploy of each role
    extra_vars.update({"enos_action": "deploy"})
    run_ansible([os.path.join(JUICE_PATH, "ansible/rally.yml")],
                env["inventory"], extra_vars=extra_vars)
    env["tasks_ran"].append('rally')
def destroy(**kwargs):
    """Run the post-install playbook in destroy mode."""
    env = kwargs["env"]
    # NOTE(review): extra_vars was previously referenced without ever
    # being defined, so this task crashed with a NameError.  Mirror
    # the sibling post_install/backup tasks and set the action here.
    extra_vars = {"enos_action": "destroy"}
    run_ansible([os.path.join(ANSIBLE_DIR, "post_install.yml")],
                env["inventory"], extra_vars=extra_vars)
def backup(**kwargs):
    """Run the site playbook in backup mode."""
    environment = kwargs["env"]
    playbook = os.path.join(ANSIBLE_DIR, "site.yml")
    run_ansible([playbook], environment["inventory"],
                extra_vars={"enos_action": "backup"})
"cores": 1, "mem": 2048000, "mac": mac, "backing_file": "/tmp/%s.qcow2" % name, "ip": ip }) # Distribute vms to pms machines = roles["compute"] # the vms indexed by the pm hosting them all_vms = {} for index, vm in enumerate(vms): # host is set to the inventory hostname machine = machines[index % len(machines)].alias vm["host"] = machine all_vms.setdefault(machine, []) all_vms[machine].append(vm) logging.info(vms) run_ansible(["site.yml"], inventory, extra_vars={"vms": all_vms}) print("If everything went fine you can access one of those") print("+{:->16}+{:->16}+".format('', '')) for idx, vm in enumerate(vms): print('|{:16}|{:16}|'.format(vm["name"], vm["ip"])) print("+{:->16}+{:->16}+".format('', '')) # destroy the reservation # provider.destroy()
def test_case_1(nbr_clients=NBR_CLIENTS, nbr_servers=NBR_SERVERS, call_type=CALL_TYPE, nbr_calls=NBR_CALLS, pause=PAUSE, timeout=TIMEOUT, version=VERSION, backup_dir=BACKUP_DIR, length=LENGTH, executor=EXECUTOR, env=None, **kwargs): iteration_id = str("-".join([ "nbr_servers__%s" % nbr_servers, "nbr_clients__%s" % nbr_clients, "call_type__%s" % call_type, "nbr_calls__%s" % nbr_calls, "pause__%s" % pause ])) # Create the backup dir for this experiment # NOTE(msimonin): We don't need to identify the backup dir we could use a # dedicated env name for that backup_dir = os.path.join(os.getcwd(), "current/%s" % backup_dir) os.system("mkdir -p %s" % backup_dir) extra_vars = { "backup_dir": backup_dir, "ombt_version": version, } descs = [{ "agent_type": "rpc-client", "number": int(nbr_clients), "machines": env["roles"]["ombt-client"], "klass": OmbtClient, "kwargs": { "timeout": timeout, } }, { "agent_type": "rpc-server", "number": int(nbr_servers), "machines": env["roles"]["ombt-server"], "klass": OmbtServer, "kwargs": { "timeout": timeout, "executor": executor } }, { "agent_type": "controller", "number": 1, "machines": env["roles"]["ombt-control"], "klass": OmbtController, "kwargs": { "call_type": call_type, "nbr_calls": nbr_calls, "pause": pause, "timeout": timeout, "length": length } }] # Below we build the specific variables for each client/server # ombt_conf = { # "machine01": [confs], # ... 
# # } ombt_confs = {} bus_conf = env["bus_conf"] control_bus_conf = env["control_bus_conf"] for agent_desc in descs: machines = agent_desc["machines"] # make sure all the machines appears in the ombt_confs for machine in machines: ombt_confs.setdefault(machine.alias, []) for agent_index in range(agent_desc["number"]): agent_id = "%s-%s-%s" % (agent_desc["agent_type"], agent_index, iteration_id) # choose a machine machine = machines[agent_index % len(machines)].alias # choose a bus agent bus_agent = bus_conf[agent_index % len(bus_conf)] control_agent = control_bus_conf[agent_index % len(control_bus_conf)] kwargs = agent_desc["kwargs"] kwargs.update({ "agent_id": agent_id, "machine": machine, "bus_agents": [bus_agent], "control_agents": [control_agent] # TODO }) ombt_confs[machine].append(agent_desc["klass"](**kwargs)) ansible_ombt_confs = {} for m, confs in ombt_confs.items(): ansible_ombt_confs[m] = [o.to_dict() for o in confs] extra_vars.update({"ombt_confs": ansible_ombt_confs}) run_ansible(["ansible/test_case_1.yml"], env["inventory"], extra_vars=extra_vars) # saving the conf env["ombt_confs"] = ombt_confs
def backup(env=None, **kwargs):
    """Backup the experiment results into the `current` directory."""
    destination = os.path.join(os.getcwd(), "current")
    run_ansible(["ansible/site.yml"], env["inventory"],
                extra_vars={"enos_action": "backup",
                            "backup_dir": destination})