def start_virtualmachines(provider_conf, g5k_init, vmong5k_roles):
    """Boot the virtual machines on their physical hosts.

    Builds a host -> vms mapping from *vmong5k_roles*, writes an Ansible
    inventory for the physical machines, then runs the VM-deployment
    playbook with the mapping and the base image as extra vars.

    Args:
        provider_conf: provider configuration; only ``.image`` is read here.
        g5k_init: (roles, networks) pair unpacked into the inventory generator.
        vmong5k_roles: roles describing which VM lands on which host.
    """
    host_to_vms = _index_by_host(vmong5k_roles)
    playbook_vars = {
        'vms': host_to_vms,
        'base_image': provider_conf.image,
    }
    physical_inventory = os.path.join(os.getcwd(), "pm_hosts")
    generate_inventory(*g5k_init, physical_inventory)
    # Deploy the virtual machines with the Ansible playbook.
    run_ansible([PLAYBOOK_PATH], physical_inventory, playbook_vars)
def inventory(env=None, **kwargs):
    "Generate the Ansible inventory file, requires a g5k execution"
    # Pull the resources claimed by the previous g5k step out of the env.
    claimed_roles = env["roles"]
    claimed_networks = env["networks"]
    # The inventory lives next to the other experiment artifacts.
    env["inventory"] = os.path.join(env["resultdir"], "hosts")
    generate_inventory(
        claimed_roles,
        claimed_networks,
        env["inventory"],
        check_networks=True,
    )
    # Record that this task ran so later tasks can check the precondition.
    env["tasks_ran"].append('inventory')
def up(force, env=None, **kwargs):
    """Starts a new experiment using vagrant"""
    # Inventory is written into the current working directory.
    inventory_path = os.path.join(os.getcwd(), "hosts")
    # NOTE(review): provider_conf is a module-level global — confirm it is
    # defined before this task runs.
    vagrant_provider = Enos_vagrant(provider_conf)
    roles, networks = vagrant_provider.init(force_deploy=force)
    generate_inventory(roles, networks, inventory_path, check_networks=True)
    # Persist everything the follow-up tasks need.
    env["roles"], env["networks"] = roles, networks
    env["inventory"] = inventory_path
def up(force=True, env=None, **kwargs):
    "Starts a new experiment"
    inventory_path = os.path.join(os.getcwd(), "hosts")
    # Build the provider from the raw dict configuration.
    # (``from_dictionnary`` is the spelling used by the provider API.)
    vagrant_conf = Configuration.from_dictionnary(provider_conf)
    vagrant_provider = Enos_vagrant(vagrant_conf)
    roles, networks = vagrant_provider.init()
    generate_inventory(roles, networks, inventory_path, check_networks=True)
    # Persist the claimed resources for the next tasks.
    env["roles"], env["networks"] = roles, networks
    env["inventory"] = inventory_path
def generate_inventory(roles, networks, base_inventory, dest):
    """Write the full Ansible inventory to *dest*.

    A group is generated for each role in *roles*; the result is then
    concatenated with the *base_inventory* file. Empty sections are emitted
    for every Kolla-mandatory group missing from *roles*.
    """
    # NOTE(msimonin): if len(networks) is <= 1
    # provision a fake one that will map the external network
    fake_interfaces = []
    fake_networks = []
    provider_net = lookup_network(networks, [C.NEUTRON_EXTERNAL_INTERFACE])
    if not provider_net:
        logging.error(f"The {C.NEUTRON_EXTERNAL_INTERFACE} network is missing")
        logging.error("EnOS will try to fix that ....")
        fake_interfaces = [C.FAKE_NEUTRON_EXTERNAL_INTERFACE]
        fake_networks = [C.NEUTRON_EXTERNAL_INTERFACE]

    # Delegate the role-based part of the inventory to enoslib.
    elib_api.generate_inventory(roles,
                                networks,
                                dest,
                                check_networks=True,
                                fake_interfaces=fake_interfaces,
                                fake_networks=fake_networks)

    with open(dest, 'a') as out:
        out.write("\n")
        # Emit empty sections for the mandatory groups not covered by roles.
        for missing_group in (g for g in KOLLA_MANDATORY_GROUPS
                              if g not in roles.keys()):
            out.write("[%s]\n" % missing_group)
        # Append the static base inventory verbatim.
        with open(base_inventory, 'r') as base:
            out.writelines(base)
    logging.info("Inventory file written to " + dest)
def inventory(**kwargs):
    # The mutable experiment environment is passed through kwargs.
    env = kwargs["env"]
    # Place the inventory in the experiment result directory and record
    # its location for later tasks.
    env["inventory"] = os.path.join(env["resultdir"], "hosts")
    generate_inventory(
        env["roles"],
        env["networks"],
        env["inventory"],
        check_networks=True,
    )
"type": "slash_22", "role": "my_subnet", "site": "rennes", }] } } # path to the inventory inventory = os.path.join(os.getcwd(), "hosts") # claim the resources provider = G5k(provider_conf) roles, networks = provider.init() # generate an inventory compatible with ansible generate_inventory(roles, networks, inventory, check_networks=True) subnet = [n for n in networks if "my_subnet" in n["roles"]][0] mac_start = subnet["mac_start"] mac_end = subnet["mac_end"] vms = [] # Distribute mac addresses to vms for idx, (mac, ip) in enumerate(range_mac(mac_start, mac_end)): if len(vms) >= VMS: break name = "vm-%s" % idx vms.append({ "name": name, "cores": 1,
} } tc = { "enable": True, "default_delay": "20ms", "default_rate": "1gbit", } # claim the resources conf = Configuration.from_dictionnary(provider_conf) provider = Enos_vagrant(conf) roles, networks = provider.init() generate_inventory(roles, networks, "hosts.ini") check_networks(roles, networks) generate_inventory(roles, networks, "hosts.ini") # apply network constraints emulate_network(roles, tc) # validate network constraints validate_network(roles) # reset network constraints reset_network(roles) # validate network constraints and saving in an alternative validate_network(roles, output_dir="after_reset")
def deploy(conf, provider='g5k', force_deployment=False, xp_name=None,
           tags=('provide', 'inventory', 'scaffold'), env=None, **kwargs):
    """
usage: juice deploy [--conf CONFIG_PATH] [--provider PROVIDER]
                    [--force-deployment] [--xp-name NAME] [--tags TAGS...]

Claim resources from PROVIDER and configure them.

Options:
  --conf CONFIG_PATH    Path to the configuration file describing the
                        deployment [default: ./conf.yaml]
  --provider PROVIDER   Provider to target [default: g5k]
  --force-deployment    Force provider to redo the deployment
  --xp-name NAME        NAME of the folder generated by juice for this
                        new deployment.
  --tags TAGS           Only run tasks relative to the specific tags
                        [default: provide inventory scaffold]
    """
    # Read the configuration. ``conf`` is either a path to a yaml file or
    # an already-parsed dict.
    # FIX: ``tags`` default is now a tuple — the previous list default was a
    # mutable default argument shared across calls.
    if isinstance(conf, str):
        with open(conf) as f:
            # FIX: yaml.load() without an explicit Loader is unsafe on
            # untrusted input and raises TypeError with PyYAML >= 6.0;
            # safe_load only builds plain Python objects, which is all a
            # configuration file needs.
            config = yaml.safe_load(f)
    elif isinstance(conf, dict):
        config = conf
    else:
        # Data format error
        raise Exception(
            'conf is type {!r} while it should be a yaml file or a dict'.
            format(type(conf)))

    # Record the experiment parameters in the (mutable) env.
    env['db'] = config.get('database', 'cockroachdb')
    env['monitoring'] = config.get('monitoring', True)
    env['config'] = config

    # Claim resources on Grid'5000
    if 'provide' in tags:
        if provider == 'g5k' and 'g5k' in config:
            env['provider'] = 'g5k'
            updated_env = g5k_deploy(config['g5k'], env=xp_name,
                                     force_deploy=force_deployment)
            env.update(updated_env)
        else:
            raise Exception(
                'The provider {!r} is not supported or it lacks a configuration'
                .format(provider))

    # Generate the Ansible inventory file
    if 'inventory' in tags:
        env['inventory'] = os.path.join(env['resultdir'], 'hosts')
        generate_inventory(env['roles'], env['networks'], env['inventory'],
                           check_networks=True)
        _save_env(env)

    # Deploy the resources, requires both g5k and inventory executions
    if 'scaffold' in tags:
        run_ansible('scaffolding.yml',
                    extra_vars={
                        'registry': config['registry'],
                        'db': env['db'],
                        'monitoring': env['monitoring'],
                        'enos_action': 'deploy'
                    })
conf = Configuration.from_settings(job_name=name_job, walltime=duration, image="/grid5000/virt-images/debian9-x64-base.qcow2")\ .add_machine(roles=[role_name], cluster=clusters[i], flavour_desc={"core": 2, "mem": 4096}, number=6)\ .finalize() provider = VMonG5k(conf) roles, networks = provider.init() roles = discover_networks(roles, networks) inventory_file = "kubefed_inventory_cluster" + str(i) + ".ini" inventory = generate_inventory(roles, networks, inventory_file) master_nodes.append(roles[role_name][0].address) # Make sure k8s is not already running run_ansible(["reset_k8s.yml"], inventory_path=inventory_file) # Deploy k8s and dependencies run_ansible(["deploy_k8s_clusters.yml"], inventory_path=inventory_file) # Master nodes of each cluster print("Master nodes ........") print(master_nodes) # Modify k8s conctext configurations to give them unique names subprocess.check_call("./modify_kube_config.sh %s %s %s %s" % (str(master_nodes[0]), str(master_nodes[1]), str(master_nodes[2]), str(master_nodes[3])), shell=True)