def before_preintsall(self, env):
    """Prepare the nodes before the pre-install phase.

    Creates a veth pair when ``veth0`` is configured as the external
    interface, then bind-mounts the docker volume directory and (on
    compute nodes) the nova state directory onto /tmp so that bulky
    data lands on the larger /tmp partition.

    :param env: environment dict; reads ``env['rsc']`` (role -> nodes)
                and ``env['eths'][EXTERNAL_IFACE]``.
    """
    # Materialize as a list: the sequence is passed to several
    # _exec_command_on_nodes calls, and a lazy map() iterator would be
    # exhausted after the first pass (a real bug under Python 3).
    nodes = [EX.Host(n.address) for n in sum(env['rsc'].values(), [])]
    if env['eths'][EXTERNAL_IFACE] == 'veth0':
        self._exec_command_on_nodes(
            nodes,
            'ip link show veth0 || ip link add type veth peer',
            'Creating a veth')

    # Bind volumes of docker
    self._exec_command_on_nodes(
        nodes,
        'mkdir -p /tmp/docker/volumes ; mkdir -p /var/lib/docker/volumes',
        'Creating docker volumes directory in /tmp')
    self._exec_command_on_nodes(
        nodes,
        '(mount | grep /tmp/docker/volumes) || mount --bind /tmp/docker/volumes /var/lib/docker/volumes',
        'Bind mount')

    # Bind nova local storage if there is any nova compute
    #
    # FIXME: This assumes nova is installed on the 'compute' nodes,
    # but that is not necessarily true: nova could be installed on
    # whatever group the user chose.  A better strategy would be to
    # parse the inventory file.
    computes = [EX.Host(n.address) for n in env['rsc'].get('compute', [])]
    self._exec_command_on_nodes(
        computes,
        'mkdir -p /tmp/nova ; mkdir -p /var/lib/nova',
        'Creating nova directory in /tmp')
    self._exec_command_on_nodes(
        computes,
        '(mount | grep /tmp/nova) || mount --bind /tmp/nova /var/lib/nova',
        'Bind mount')
def _deploy(self):
    """Deploy the OS environment on the reserved nodes.

    Puts the nodes in the primary vlan, runs the kadeploy-based
    deployment, logs any failures, then rewrites ``self.nodes`` and
    ``self.deployed_nodes`` with their vlan-translated names and
    checks them against the resource distribution policy.

    :return: the ``(deployed, undeployed)`` hostname sets reported by
             ``EX5.deploy``.
    """
    # we put the nodes in the first vlan we have
    vlan = self._get_primary_vlan()
    # Deploy all the nodes
    logging.info("Deploying %s on %d nodes %s" % (
        self.config['env_name'],
        len(self.nodes),
        '(forced)' if self.force_deploy else ''))
    deployed, undeployed = EX5.deploy(EX5.Deployment(
        self.nodes,
        env_name=self.config['env_name'],
        vlan=vlan[1]), check_deployed_command=not self.force_deploy)

    # Check the deployment
    if len(undeployed) > 0:
        # message fixed: "where" -> "were"
        logging.error("%d nodes were not deployed correctly:"
                      % len(undeployed))
        for n in undeployed:
            logging.error(n)

    # Updating nodes names with vlans
    self.nodes = sorted(self._translate_to_vlan(self.nodes, vlan[1]),
                        key=lambda n: n.address)
    logging.info(self.nodes)
    # List comprehension instead of map(): explicit and safe to reuse.
    self.deployed_nodes = sorted(
        self._translate_to_vlan([EX.Host(n) for n in deployed], vlan[1]),
        key=lambda n: n.address)
    logging.info(self.deployed_nodes)
    self._check_nodes(nodes=self.deployed_nodes,
                      resources=self.config['resources'],
                      mode=self.config['role_distribution'])
    return deployed, undeployed
def _deploy(self, conf, nodes, vlans, force_deploy=False):
    """Deploy the configured environment on *nodes* inside the primary vlan.

    :param conf: full configuration dict; reads ``conf['provider']``
                 (``env_name``, ``role_distribution``) and
                 ``conf['resources']``.
    :param nodes: nodes to deploy (pre-vlan names).
    :param vlans: available vlans; the first one is used.
    :param force_deploy: when True, redeploy even if nodes already
                         run the environment.
    :return: ``(deployed, deployed_nodes_vlan)`` — raw deployed
             hostnames and the sorted vlan-translated Host list.
    """
    provider_conf = conf['provider']
    # we put the nodes in the first vlan we have
    vlan = self._get_primary_vlan(vlans)
    # Deploy all the nodes
    logging.info("Deploying %s on %d nodes %s" % (
        provider_conf['env_name'],
        len(nodes),
        '(forced)' if force_deploy else ''))
    deployed, undeployed = EX5.deploy(EX5.Deployment(
        nodes,
        env_name=provider_conf['env_name'],
        vlan=vlan[1]), check_deployed_command=not force_deploy)

    # Check the deployment
    if len(undeployed) > 0:
        # message fixed: "where" -> "were"
        logging.error("%d nodes were not deployed correctly:"
                      % len(undeployed))
        for n in undeployed:
            logging.error(n)

    # List comprehension instead of map(): explicit and safe to reuse.
    deployed_nodes_vlan = sorted(
        self._translate_to_vlan([EX.Host(n) for n in deployed], vlan[1]),
        key=lambda n: n.address)
    logging.info(deployed_nodes_vlan)
    # Checking the deployed nodes according to the
    # resource distribution policy
    self._check_nodes(nodes=deployed_nodes_vlan,
                      resources=conf['resources'],
                      mode=conf['provider']['role_distribution'])
    return deployed, deployed_nodes_vlan
def host(self):
    """Try to convert the resource into an execo host.

    Uses the properties: gateway, user, keyfile and port.  The
    gateway defaults to localhost; user, keyfile and port default to
    False (execo's "unset" marker).

    :return: an execo Host
    :rtype: execo.Host
    """
    # dict.get() replaces four repetitive "in"/else ladders; the
    # defaults are exactly what the original branches produced.
    address = self.properties.get("gateway", "localhost")
    user = self.properties.get("user", False)
    keyfile = self.properties.get("keyfile", False)
    port = int(self.properties["port"]) if "port" in self.properties else False
    return execo.Host(address, user, keyfile, port)
def canonical_host_name(host):
    """Strip the kavlan part from a host name, if any.

    Accepts either an ``execo.Host`` or a plain string and returns the
    canonical name in the same form as the input.  Works with short or
    fqdn forms of hostnames.
    """
    canonical = execo.Host(host)
    canonical.address = __canonical_host_name_regex.sub(
        __canonical_sub_func, canonical.address)
    if isinstance(host, execo.Host):
        return canonical
    return canonical.address
def _mount_cluster_nics(self, conf, cluster, nodes, kavlan_nodes, vlans):
    """Get the NIC devices of the reserved cluster.

    Queries the Grid'5000 reference API for the cluster's mountable
    Ethernet adapters, picks the first one as the network interface
    and — when a second NIC is usable — binds it to the primary vlan
    and brings it up on the nodes.  Otherwise falls back to a veth
    pair named veth0.

    :param conf: configuration dict; reads conf['provider']
                 ('single_interface') and conf['resources'].
    :param cluster: cluster name used to query the API.
    :param nodes: List of hostnames unmodified by the vlan
    :param kavlan_nodes: vlan-translated nodes on which the remote
                         commands are run.
    :param vlans: available vlans; the primary one is used.
    :return: (network_interface, external_interface) device names.
    """
    provider_conf = conf['provider']
    # XXX: this only works if all nodes are on the same cluster,
    # or if nodes from different clusters have the same devices
    site = EX5.get_cluster_site(cluster)
    # First node's adapter list is taken as representative for the
    # whole cluster (see XXX above).
    nics = EX5.get_resource_attributes(
        "/sites/%s/clusters/%s/nodes" % (site, cluster)
    )['items'][0]['network_adapters']

    # Keep only mountable Ethernet devices (skips IB/management NICs).
    interfaces = [nic['device'] for nic in nics
                  if nic['mountable'] and
                  nic['interface'] == 'Ethernet']

    network_interface = str(interfaces[0])
    external_interface = None
    if len(interfaces) > 1 and not provider_conf['single_interface']:
        # A second physical NIC exists: attach it to the primary vlan
        # and DHCP it up on every node.
        external_interface = str(interfaces[1])
        _, vlan = self._get_primary_vlan(vlans)
        api.set_nodes_vlan(site,
                           map(lambda d: EX.Host(d), nodes),
                           external_interface,
                           vlan)
        # -nw: dhclient returns immediately and acquires the lease in
        # the background.
        self._exec_command_on_nodes(
            kavlan_nodes,
            "ifconfig %s up && dhclient -nw %s" % (
                external_interface, external_interface),
            'mounting secondary interface')
    else:
        # TODO(msimonin) fix the network in this case as well.
        external_interface = 'veth0'
        if provider_conf['single_interface']:
            logging.warning("Forcing the use of a one network interface")
        else:
            # NOTE: conf['resources'].keys()[0] indexes dict keys
            # directly — Python 2 only.
            logging.warning("%s has only one NIC. The same interface "
                            "will be used for network_interface and "
                            "neutron_external_interface."
                            % conf['resources'].keys()[0])
        # Idempotent: only create the veth pair if veth0 is missing.
        self._exec_command_on_nodes(
            kavlan_nodes,
            'ip link show veth0 || ip link add type veth peer',
            'Creating a veth')
    return (network_interface, external_interface)
def _deploy(self, conf, nodes, vlans, force_deploy=False):
    """Deploy an environment (by name or by env file) on *nodes*.

    ``env_file`` takes precedence over ``env_name``: when both are
    configured, ``env_name`` is dropped so only one of them reaches
    ``EX5.Deployment``.

    :param conf: full configuration dict; reads ``conf['provider']``
                 (``env_file``/``env_name``, ``role_distribution``)
                 and ``conf['resources']``.  NOTE(review): the
                 provider section is mutated in place (env_name is
                 popped) — confirm callers do not rely on it.
    :param nodes: nodes to deploy (pre-vlan names).
    :param vlans: available vlans; the first one is used.
    :param force_deploy: when True, redeploy unconditionally.
    :return: ``(deployed, deployed_nodes_vlan)`` — raw deployed
             hostnames and the sorted vlan-translated Host list.
    """
    provider_conf = conf['provider']
    # we put the nodes in the first vlan we have
    vlan = self._get_primary_vlan(vlans)
    kw = {
        'hosts': nodes,
        'vlan': vlan[1],
    }
    if provider_conf.get('env_file'):
        kw.update({'env_file': provider_conf.get('env_file')})
        # Default of None avoids a KeyError when the config provides
        # env_file but never declared env_name.
        provider_conf.pop('env_name', None)
    if provider_conf.get('env_name'):
        kw.update({'env_name': provider_conf.get('env_name')})

    logging.info("%s deploying %s nodes with %s" % (
        '(forced)' if force_deploy else '',
        len(nodes),
        kw))

    deployed, undeployed = EX5.deploy(
        EX5.Deployment(**kw), check_deployed_command=not force_deploy)

    # Check the deployment
    if len(undeployed) > 0:
        # message fixed: "where" -> "were"
        logging.error("%d nodes were not deployed correctly:"
                      % len(undeployed))
        for n in undeployed:
            logging.error(n)

    # List comprehension instead of map(): explicit and safe to reuse.
    deployed_nodes_vlan = sorted(
        self._translate_to_vlan([EX.Host(n) for n in deployed], vlan[1]),
        key=lambda n: n.address)
    logging.info(deployed_nodes_vlan)
    # Checking the deployed nodes according to the
    # resource distribution policy
    self._check_nodes(nodes=deployed_nodes_vlan,
                      resources=conf['resources'],
                      mode=conf['provider']['role_distribution'])
    return deployed, deployed_nodes_vlan
def translate(node):
    """Return a Host whose short name carries the kavlan suffix.

    Only the first label of the FQDN is rewritten; the domain part
    is left untouched.
    """
    labels = node.address.split(".")
    labels[0] = "%s-kavlan-%s" % (labels[0], vlan_id)
    return EX.Host(".".join(labels))
def wait_until_vm_ready(self):
    """Wait for the VMs to answer and keep the reachable ones in self.vm."""
    prospective_vms = []
    for ip in self.vm_ips:
        prospective_vms.append(execo.Host(ip, user='******'))
    logger.debug('Waiting for {} VMs to become reachable...'.format(
        len(prospective_vms)))
    self.vm = check_hosts_up(prospective_vms, timeout=60)
    logger.debug('Result: {} VMs are reachable.'.format(len(self.vm)))