def _to_enos_networks(networks):
    """Transform the networks returned by deploy5k into Enos network dicts.

    Args:
        networks (dict): networks returned by
            :py:func:`enoslib.infra.provider.Provider.init`

    Returns:
        list: one dict per network with at least ``cidr``, ``gateway``,
        ``dns``, ``roles`` and, when an IP range can be derived,
        ``start``/``end``.
    """
    nets = []
    for network in networks:
        net = {
            "cidr": str(network["network"]),
            "gateway": str(network["gateway"]),
            # Hard-coded Grid'5000 DNS server IP.
            "dns": "131.254.203.235",
            "roles": get_roles_as_list(network)
        }
        if network["type"] in KAVLAN_TYPE:
            # On the network, the first IP are reserved to g5k machines.
            # For a routed vlan I don't know exactly how many ip are
            # reserved. However, the specification is clear about global
            # vlan: "A global VLAN is a /18 subnet (16382 IP addresses).
            # It is split -- contiguously -- so that every site gets one
            # /23 (510 ip) in the global VLAN address space". There are 12
            # site. This means that we could take ip from 13th subnetwork.
            # Lets consider the strategy is the same for routed vlan. See,
            # https://www.grid5000.fr/mediawiki/index.php/Grid5000:Network#KaVLAN
            #
            # First, split network in /23 this leads to 32 subnetworks.
            # Then, (i) drops the 12 first subnetworks because they are
            # dedicated to g5k machines, and (ii) drops the last one
            # because some of ips are used for specific stuff such as
            # gateway, kavlan server...
            subnets = IPNetwork(network["network"])
            if network["vlan_id"] < 4:
                # vlan local
                subnets = list(subnets.subnet(24))
                subnets = subnets[4:7]
            else:
                subnets = list(subnets.subnet(23))
                subnets = subnets[13:31]

            # Finally, compute the range of available ips
            ips = IPSet(subnets).iprange()

            net.update({
                "start": str(IPAddress(ips.first)),
                "end": str(IPAddress(ips.last))
            })
        elif network["type"] in SUBNET_TYPE:
            net.update({
                "start": network["ipmac"][0],
                "end": network["ipmac"][-1]
            })
        # Fix: the original re-set "roles" here with the exact same value
        # already placed in the dict literal above; the redundant second
        # call to get_roles_as_list has been removed.
        nets.append(net)
    logger.debug(nets)
    return nets
def build_roles(resources, env, servers, keyfnc):
    """Map each declared role to the servers picked for it.

    Servers are first grouped into pools keyed by ``keyfnc``; each machine
    description in ``resources["machines"]`` then draws ``number`` servers
    of its ``flavor`` from the pools and registers them under every one of
    its roles.  ``env`` is accepted for interface compatibility but unused
    here.
    """
    pools = mk_pools(servers, keyfnc)
    result = {}
    for desc in resources["machines"]:
        picked = pick_things(pools, desc["flavor"], desc["number"])
        for role in get_roles_as_list(desc):
            result.setdefault(role, []).extend(picked)
    return result
def check_servers(session, resources, extra_prefix="",
                  force_deploy=False, key_name=None, image_id=None,
                  flavors='m1.medium', network=None, ext_net=None,
                  scheduler_hints=None):
    """Checks the servers status for the deployment.

    If needed, it creates new servers and add a floating ip to one of them.
    This server can be used as a gateway to the others.

    Args:
        session: keystone session used to build the nova client.
        resources (dict): resource description (``machines`` entries).
        extra_prefix (str): extra token inserted in the server names.
        force_deploy (bool): delete any matching existing server first.
        key_name (str): name of the keypair to inject.
        image_id (str): glance image id to boot from.
        flavors: either a single flavor name (str) applied to every
            machine, or a tuple ``(flavor_to_id, _)`` mapping the per
            machine flavor names to nova flavor ids.
        network (dict): network whose ``id`` is used for the nics.
        ext_net: unused here, kept for interface compatibility.
        scheduler_hints (dict): extra scheduler hints passed to nova.

    Returns:
        list: the nova server objects (reused or newly created).

    Raises:
        Exception: when an unexpected partial set of servers is found.
    """
    scheduler_hints = scheduler_hints or {}
    nclient = nova.Client(NOVA_VERSION, session=session)
    servers = nclient.servers.list(
        search_opts={'name': '-'.join([PREFIX, extra_prefix])})
    wanted = _get_total_wanted_machines(resources)
    if force_deploy:
        for server in servers:
            server.delete()
        servers = []

    if len(servers) == wanted:
        logger.info("[nova]: Reusing existing servers : %s", servers)
        return servers
    elif len(servers) > 0 and len(servers) < wanted:
        # Fix: the original interpolated the server *list* instead of its
        # length, producing messages like "Only [<Server...>]/3".
        raise Exception(
            "Only %s/%s servers found" % (len(servers), wanted))

    # starting the servers
    total = 0
    for machine in resources["machines"]:
        number = machine["number"]
        roles = get_roles_as_list(machine)
        logger.info("[nova]: Starting %s servers", number)
        logger.info("[nova]: for roles %s", roles)
        logger.info("[nova]: with extra hints %s", scheduler_hints)
        for _ in range(number):
            flavor = machine["flavor"]
            if isinstance(flavors, str):
                # A single flavor name overrides the per-machine flavor.
                flavor = flavors
            else:
                flavor_to_id, _ = flavors
                flavor = flavor_to_id[flavor]

            server = nclient.servers.create(
                name='-'.join([PREFIX, extra_prefix, str(total)]),
                image=image_id,
                flavor=flavor,
                nics=[{'net-id': network['id']}],
                key_name=key_name,
                security_groups=[SECGROUP_NAME],
                scheduler_hints=scheduler_hints)
            servers.append(server)
            total = total + 1
    return servers
def init(self, force_deploy=False):
    """Build the roles/networks tuple from the static resource description.

    ``force_deploy`` is accepted for interface compatibility but has no
    effect: the machines are assumed to already exist.
    """
    resources = self.provider_conf["resources"]
    roles = {}
    for machine in resources["machines"]:
        for role in get_roles_as_list(machine):
            # Deliberately build a distinct Host per (machine, role):
            # downstream code mutates host.extra per host instance.
            host = Host(machine["address"],
                        alias=machine.get("alias"),
                        user=machine.get("user"),
                        keyfile=machine.get("keyfile"),
                        port=machine.get("port"),
                        extra=machine.get("extra"))
            roles.setdefault(role, []).append(host)
    return roles, resources["networks"]
def _update_hosts(roles, facts, extra_mapping=None):
    """Update the extra vars of every host with its network devices.

    Args:
        roles (dict): role name -> list of hosts.
        facts (dict): ansible facts indexed by host alias.
        extra_mapping (dict): extra key/values merged into each host's
            ``extra`` dict.

    NOTE(msimonin): due to the deserialization between phases, hosts in rsc
    are unique instance so we need to update every single host in every
    single role.
    """
    extra_mapping = extra_mapping or {}
    for hosts in roles.values():
        for host in hosts:
            # Fix: extra_mapping was normalized but never applied; the
            # sibling implementation applies it to host.extra, do the same.
            host.extra.update(extra_mapping)
            networks = facts[host.alias]['networks']
            enos_devices = []
            for network in networks:
                device = network["device"]
                if device:
                    # network_role=eth_name for every role of this network
                    for role in get_roles_as_list(network):
                        host.extra.update({role: device})
                    enos_devices.append(device)

            # Add the list of devices in used by Enos
            host.extra.update({'enos_devices': enos_devices})
def _update_hosts(roles, facts, extra_mapping=None):
    """Push per-host network facts into each host's ``extra`` dict.

    NOTE(msimonin): due to the deserialization between phases, hosts in rsc
    are unique instance so we need to update every single host in every
    single role.
    """
    mapping = extra_mapping or {}
    for host_list in roles.values():
        for host in host_list:
            host_facts = facts[host.alias]
            devices_in_use = []
            host.extra.update(mapping)
            for net in host_facts['networks']:
                dev = net['device']
                if not dev:
                    continue
                for role in get_roles_as_list(net):
                    # backward compatibility:
                    # network_role=eth_name
                    host.extra.update({role: dev})
                    # we introduce some shortcuts (avoid infinite ansible
                    # templates) in other words, we sort of precompute them
                    # network_role_dev=eth_name
                    # network_role_ip=ip
                    #
                    # Use case:
                    # - node1 has eth1 for role: r1,
                    # - node2 has eth2 for role: r2
                    # the conf in node2 must point to the ip of eth1 in
                    # node1 node2 can use hostvars[node1].r1_ip as a
                    # template Note this can happen often in g5k between
                    # nodes of different clusters
                    host.extra.update({f"{role}_dev": dev})
                    address = host_facts[f"ansible_{dev}"]['ipv4']['address']
                    host.extra.update({f"{role}_ip": address})
                devices_in_use.append(dev)

            # Add the list of devices in used by Enos
            host.extra.update({'enos_devices': devices_in_use})
def init(self, force_deploy=False): """Reserve and deploys the vagrant boxes. Args: force_deploy (bool): True iff new machines should be started Examples: .. code-block:: yaml # in yaml --- backend: virtualbox user: root box: debian/jessie64 resources: machines: - roles: [telegraf] flavor: tiny number: 1 networks: [control_network, internal_network] - roles: - control - registry - prometheus - grafana - telegraf flavor: medium number: 1 networks: [control_network] The above will return a tuple (roles, networks) where: .. code-block:: yaml roles: telegraf: - !!python/object:enoslib.host.Host address: 127.0.0.1 alias: enos-1 extra: enos_devices: [eth1, eth2] control_network: eth1 internal_network: eth2 keyfile: ... port: '2205' user: root - !!python/object:enoslib.host.Host address: 127.0.0.1 alias: enos-0 extra: enos_devices: [eth1, eth2] control_network: eth1 internal_network: eth2 keyfile: ... port: '2204' user: root control: # machine with role control networks: - cidr: 192.168.142.0/24 dns: 8.8.8.8 end: 192.168.142.243 gateway: 192.168.142.1 roles: [control_network] start: 192.168.142.10 - cidr: 192.168.143.0/24 dns: 8.8.8.8 end: 192.168.143.244 gateway: 192.168.143.1 roles: [internal_network] start: 192.168.143.10 """ # Arbitrary net pool size slash_24 = [142 + x for x in range(0, 100)] slash_24 = [IPNetwork("192.168.%s.1/24" % x) for x in slash_24] net_pool = [list(x)[10:-10] for x in slash_24] machines = self.provider_conf["resources"]["machines"] # build the mapping network_name -> pool networks = [set(machine.get("networks", [])) for machine in machines] networks = reduce(set.union, networks) networks = dict(zip(networks, net_pool)) vagrant_machines = [] vagrant_roles = {} j = 0 for machine in machines: for _ in range(machine["number"]): vagrant_machine = { "name": "enos-%s" % j, "cpu": FLAVORS[machine["flavor"]]["cpu"], "mem": FLAVORS[machine["flavor"]]["mem"], "ips": [networks[n].pop() for n in machine["networks"]], } 
vagrant_machines.append(vagrant_machine) # Assign the machines to the right roles for role in get_roles_as_list(machine): vagrant_roles.setdefault(role, []).append(vagrant_machine) j = j + 1 logging.debug(vagrant_roles) loader = FileSystemLoader(searchpath=TEMPLATE_DIR) env = Environment(loader=loader, autoescape=True) template = env.get_template('Vagrantfile.j2') vagrantfile = template.render(machines=vagrant_machines, provider_conf=self.provider_conf) vagrantfile_path = os.path.join(os.getcwd(), "Vagrantfile") with open(vagrantfile_path, 'w') as f: f.write(vagrantfile) # Build env for Vagrant with a copy of env variables (needed by # subprocess opened by vagrant v_env = dict(os.environ) v_env['VAGRANT_DEFAULT_PROVIDER'] = self.provider_conf['backend'] v = vagrant.Vagrant(root=os.getcwd(), quiet_stdout=False, quiet_stderr=False, env=v_env) if force_deploy: v.destroy() v.up() v.provision() roles = {} for role, machines in vagrant_roles.items(): for machine in machines: keyfile = v.keyfile(vm_name=machine['name']) port = v.port(vm_name=machine['name']) address = v.hostname(vm_name=machine['name']) roles.setdefault(role, []).append( Host(address, alias=machine['name'], user=self.provider_conf['user'], port=port, keyfile=keyfile)) networks = [{ 'cidr': str(ipnet.cidr), 'start': str(pool[0]), 'end': str(pool[-1]), 'dns': '8.8.8.8', 'gateway': str(ipnet.ip), 'roles': [net] } for ipnet, pool, net in zip( slash_24, net_pool[0: len(networks.keys())], networks.keys())] logging.debug(roles) logging.debug(networks) return (roles, networks)
def test_role(self):
    """A lone "role" entry is exposed as a one-element role list."""
    description = {"role": "r1"}
    self.assertCountEqual(["r1"], get_roles_as_list(description))
def test_role_and_roles(self):
    """When both "role" and "roles" are present, "roles" wins."""
    description = {
        "role": "r1",
        "roles": ["r2", "r3"],
    }
    self.assertCountEqual(["r2", "r3"], get_roles_as_list(description))
def _to_enos_networks(networks):
    """Transform the networks returned by deploy5k into Enos network dicts.

    Args:
        networks (dict): networks returned by
            :py:func:`enoslib.infra.provider.Provider.init`

    Returns:
        list: one dict per network with ``cidr``, ``gateway``, ``dns``,
        ``roles`` and, depending on the network type, an ip range
        (``start``/``end``) and possibly a mac range
        (``mac_start``/``mac_end``).
    """
    nets = []
    for network in networks:
        net = {
            "cidr": str(network["network"]),
            "gateway": str(network["gateway"]),
            # NOTE(msimonin): This will point to the nameserver of the site
            # where the deployment is launched regardless the actual site in
            # the network description. Until now we used the global DNS IP
            # here. Finally this information couldn't be found in the API (dec.
            # 18) otherwise we'd move this logic in utils.concretize_networks
            # (like network and gateway)
            "dns": socket.gethostbyname(NAMESERVER),
            "roles": get_roles_as_list(network)
        }
        if network["type"] in KAVLAN_TYPE:
            # On the network, the first IP are reserved to g5k machines.
            # For a routed vlan I don't know exactly how many ip are
            # reserved. However, the specification is clear about global
            # vlan: "A global VLAN is a /18 subnet (16382 IP addresses).
            # It is split -- contiguously -- so that every site gets one
            # /23 (510 ip) in the global VLAN address space". There are 12
            # site. This means that we could take ip from 13th subnetwork.
            # Lets consider the strategy is the same for routed vlan. See,
            # https://www.grid5000.fr/mediawiki/index.php/Grid5000:Network#KaVLAN
            #
            # First, split network in /23 this leads to 32 subnetworks.
            # Then, (i) drops the 12 first subnetworks because they are
            # dedicated to g5k machines, and (ii) drops the last one
            # because some of ips are used for specific stuff such as
            # gateway, kavlan server...
            subnets = IPNetwork(network["network"])
            if network["vlan_id"] < 4:
                # vlan local
                subnets = list(subnets.subnet(24))
                subnets = subnets[4:7]
            else:
                subnets = list(subnets.subnet(23))
                subnets = subnets[13:31]

            # Finally, compute the range of available ips
            ips = IPSet(subnets).iprange()

            net.update({
                "start": str(IPAddress(ips.first)),
                "end": str(IPAddress(ips.last))
            })
        elif network["type"] in SUBNET_TYPE:
            # ipmac entries are (ip, mac) pairs; expose the boundaries of
            # both ranges.
            start_ip, start_mac = network["ipmac"][0]
            end_ip, end_mac = network["ipmac"][-1]
            net.update({
                "start": start_ip,
                "end": end_ip,
                "mac_start": start_mac,
                "mac_end": end_mac
            })
        # Fix: the original re-set "roles" here with the exact same value
        # already placed in the dict literal above; the redundant second
        # call to get_roles_as_list has been removed.
        nets.append(net)
    logger.debug(nets)
    return nets