def execute(self):
    """
    Open an SFTP session to the cluster's frontend node.

    Loads the cluster from storage, resolves the node selected by
    ``--ssh-to``, and replaces the current process with an ``sftp``
    client via `os.execlp` — on success this method never returns.
    """
    creator = make_creator(self.params.config,
                           storage_path=self.params.storage)
    cluster_name = self.params.cluster
    try:
        cluster = creator.load_cluster(cluster_name)
        cluster.update()
    except (ClusterNotFound, ConfigurationError) as ex:
        log.error("Setting up cluster %s: %s", cluster_name, ex)
        return

    # XXX: the default value of `self.params.ssh_to` should = the
    # default value for `ssh_to` in `Cluster.get_ssh_to_node()`
    frontend = cluster.get_ssh_to_node(self.params.ssh_to)
    host = frontend.connection_ip()
    if not host:
        log.error("No IP address known for node %s", frontend.name)
        sys.exit(1)
    addr, port = parse_ip_address_and_port(host)
    username = frontend.image_user
    knownhostsfile = (cluster.known_hosts_file
                      if cluster.known_hosts_file else '/dev/null')
    sftp_cmdline = [
        "sftp",
        # pass the port as `-o Port=...` instead of `-P <port>`: older
        # OpenSSH releases interpret `sftp -P` as the path to the remote
        # `sftp-server` binary, so the `-o` form is portable (and it
        # matches the other ssh/sftp invocations in this file)
        "-o", "Port={0:d}".format(port),
        "-o", "UserKnownHostsFile={0}".format(knownhostsfile),
        "-o", "StrictHostKeyChecking=yes",
        "-o", "IdentityFile={0}".format(frontend.user_key_private),
    ]
    sftp_cmdline.extend(self.params.sftp_args)
    sftp_cmdline.append('{0}@{1}'.format(username, addr))
    os.execlp("sftp", *sftp_cmdline)
def connect(self, keyfile=None):
    """
    Connect to the node via ssh using the paramiko library.

    Tries `preferred_ip` first (if set), then the other known IP
    addresses; the first address that accepts the connection is stored
    back into `self.preferred_ip` for future calls.

    :param keyfile: optional path to a "known hosts" file whose host
        keys are pre-loaded into the SSH client.
    :return: :py:class:`paramiko.SSHClient` - ssh connection or None on
             failure
    """
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    if keyfile and os.path.exists(keyfile):
        ssh.load_host_keys(keyfile)

    # Try connecting using the `preferred_ip`, if
    # present. Otherwise, try all of them and set `preferred_ip`
    # using the first that is working.
    ips = self.ips[:]
    # This is done in order to "sort" the IPs and put the preferred_ip first.
    if self.preferred_ip:
        if self.preferred_ip in ips:
            ips.remove(self.preferred_ip)
        else:
            # Preferred is changed?
            log.debug("IP %s does not seem to belong to %s anymore. Ignoring!",
                      self.preferred_ip, self.name)
            self.preferred_ip = ips[0]

    for ip in itertools.chain([self.preferred_ip], ips):
        if not ip:
            continue
        try:
            log.debug("Trying to connect to host %s (%s)", self.name, ip)
            addr, port = parse_ip_address_and_port(ip, SSH_PORT)
            ssh.connect(str(addr),
                        username=self.image_user,
                        allow_agent=True,
                        key_filename=self.user_key_private,
                        timeout=Node.connection_timeout,
                        port=port)
            log.debug("Connection to %s succeeded on port %d!", ip, port)
            if ip != self.preferred_ip:
                log.debug("Setting `preferred_ip` to %s", ip)
                self.preferred_ip = ip
                # NOTE: a local `cluster_changed = True` assignment used
                # to live here, but the value was never read -- the
                # local had no effect, so it was removed.  Persisting
                # the updated `preferred_ip` is the caller's job (e.g.
                # via `cluster.repository.save_or_update`).
            # Connection successful.
            return ssh
        except socket.error as ex:
            log.debug("Host %s (%s) not reachable: %s.", self.name, ip, ex)
        except paramiko.BadHostKeyException:
            log.error("Invalid host key: host %s (%s); check keyfile: %s",
                      self.name, ip, keyfile)
        except paramiko.SSHException as ex:
            log.debug("Ignoring error %s connecting to %s",
                      str(ex), self.name)
    return None
def execute(self):
    """
    Run an SSH-based command (`self.command`) against the cluster's
    frontend node, replacing the current process via `os.execlp`.
    """
    creator = make_creator(self.params.config,
                           storage_path=self.params.storage)
    cluster_name = self.params.cluster
    try:
        cluster = creator.load_cluster(cluster_name)
    except (ClusterNotFound, ConfigurationError) as ex:
        log.error("Setting up cluster %s: %s", cluster_name, ex)
        return

    # XXX: the default value of `self.params.ssh_to` should = the
    # default value for `ssh_to` in `Cluster.get_ssh_to_node()`
    frontend = cluster.get_ssh_to_node(self.params.ssh_to)

    # ensure we can connect to the host
    try:
        if not frontend.connection_ip():
            log.info("No connection address known for node `%s`,"
                     " updating list of IP addresses ...", frontend.name)
            frontend.update_ips()
        log.debug("Checking that SSH connection to node `%s` works..",
                  frontend.name)
        # Probe the node and persist any newly-discovered `preferred_ip`
        ssh = frontend.connect(keyfile=cluster.known_hosts_file)
        if ssh:
            ssh.close()
        cluster.repository.save_or_update(cluster)
    except Exception as err:
        log.error("Unable to connect to the frontend node: %s", err)
        sys.exit(os.EX_TEMPFAIL)

    # now delegate real connection to `ssh`
    target = frontend.connection_ip()
    if not target:
        log.error("No IP address known for node %s", frontend.name)
        sys.exit(1)
    addr, port = parse_ip_address_and_port(target)
    user = frontend.image_user
    if cluster.known_hosts_file:
        known_hosts = cluster.known_hosts_file
    else:
        known_hosts = '/dev/null'
    argv = [
        self.command,
        "-i", frontend.user_key_private,
        "-o", "UserKnownHostsFile={0}".format(known_hosts),
        "-o", "StrictHostKeyChecking=yes",
        "-o", "Port={0:d}".format(port),
        '%s@%s' % (user, addr),
    ]
    if cluster.ssh_proxy_command:
        proxy = expand_ssh_proxy_command(
            cluster.ssh_proxy_command, user, addr, port)
        # splice the proxy option in right after the command name
        argv.insert(1, 'ProxyCommand=' + proxy)
        argv.insert(1, '-o')
    argv.extend(self.params.cmds)
    log.debug("Running command `%s`", ' '.join(argv))
    os.execlp(self.command, *argv)
def connect(self, keyfile=None):
    """
    Open an SSH connection to this node with paramiko.

    The address stored in `preferred_ip` (if any) is attempted first;
    whichever address succeeds is remembered in `preferred_ip`.

    :return: :py:class:`paramiko.SSHClient` - ssh connection or None on
             failure
    """
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    if keyfile and os.path.exists(keyfile):
        client.load_host_keys(keyfile)

    # Build the candidate list so that `preferred_ip` (if still valid)
    # comes first; otherwise just reset it to the first known address.
    candidates = list(self.ips)
    if self.preferred_ip:
        if self.preferred_ip in candidates:
            candidates.remove(self.preferred_ip)
        else:
            # Preferred is changed?
            log.debug("IP %s does not seem to belong to %s anymore. Ignoring!",
                      self.preferred_ip, self.name)
            self.preferred_ip = candidates[0]

    for ip in itertools.chain([self.preferred_ip], candidates):
        if not ip:
            continue
        try:
            log.debug("Trying to connect to host %s (%s)", self.name, ip)
            addr, port = parse_ip_address_and_port(ip, SSH_PORT)
            client.connect(str(addr),
                           username=self.image_user,
                           allow_agent=True,
                           key_filename=self.user_key_private,
                           timeout=Node.connection_timeout,
                           port=port)
            log.debug("Connection to %s succeeded on port %d!", ip, port)
            if ip != self.preferred_ip:
                log.debug("Setting `preferred_ip` to %s", ip)
                self.preferred_ip = ip
            # Connection successful.
            return client
        except socket.error as ex:
            log.debug("Host %s (%s) not reachable: %s.", self.name, ip, ex)
        except paramiko.BadHostKeyException as ex:
            log.error("Invalid host key: host %s (%s); check keyfile: %s",
                      self.name, ip, keyfile)
        except paramiko.SSHException as ex:
            log.debug("Ignoring error %s connecting to %s",
                      str(ex), self.name)
    return None
def execute(self):
    """
    Exec the configured SSH-based command (`self.command`) towards the
    cluster frontend node; the current process is replaced.
    """
    creator = make_creator(self.params.config,
                           storage_path=self.params.storage)
    cluster_name = self.params.cluster
    try:
        cluster = creator.load_cluster(cluster_name)
    except (ClusterNotFound, ConfigurationError) as ex:
        log.error("Setting up cluster %s: %s", cluster_name, ex)
        return

    # XXX: the default value of `self.params.ssh_to` should = the
    # default value for `ssh_to` in `Cluster.get_ssh_to_node()`
    frontend = cluster.get_ssh_to_node(self.params.ssh_to)
    log.debug("Updating the ip addresses of `%s`.", frontend.name)
    frontend.update_ips()

    # ensure we can connect to the host
    try:
        if not frontend.preferred_ip:
            # Probe the node for a working address and persist the
            # resulting `preferred_ip`
            ssh = frontend.connect(keyfile=cluster.known_hosts_file)
            if ssh:
                ssh.close()
            cluster.repository.save_or_update(cluster)
    except NodeNotFound as ex:
        log.error("Unable to connect to the frontend node: %s", ex)
        sys.exit(1)

    # now delegate real connection to `ssh`
    host = frontend.connection_ip()
    if not host:
        log.error("No IP address known for node %s", frontend.name)
        sys.exit(1)
    addr, port = parse_ip_address_and_port(host)
    username = frontend.image_user
    if cluster.known_hosts_file:
        known_hosts = cluster.known_hosts_file
    else:
        known_hosts = '/dev/null'
    argv = [self.command,
            "-i", frontend.user_key_private,
            "-o", "UserKnownHostsFile={0}".format(known_hosts),
            "-o", "StrictHostKeyChecking=yes",
            "-o", "Port={0:d}".format(port),
            '%s@%s' % (username, addr)]
    if cluster.ssh_proxy_command:
        proxy = expand_ssh_proxy_command(
            cluster.ssh_proxy_command, username, addr, port)
        # splice the proxy option in right after the command name
        argv.insert(1, 'ProxyCommand=' + proxy)
        argv.insert(1, '-o')
    argv.extend(self.params.cmds)
    log.debug("Running command `%s`", ' '.join(argv))
    os.execlp(self.command, *argv)
def execute(self):
    """
    Exec the ``ssh`` client to open a session on the cluster's frontend
    node; on success the current process is replaced and never returns.
    """
    creator = make_creator(self.params.config,
                           storage_path=self.params.storage)
    cluster_name = self.params.clustername
    try:
        cluster = creator.load_cluster(cluster_name)
        cluster.update()
    except (ClusterNotFound, ConfigurationError) as ex:
        log.error("Setting up cluster %s: %s", cluster_name, ex)
        return

    # XXX: the default value of `self.params.ssh_to` should = the
    # default value for `ssh_to` in `Cluster.get_ssh_to_node()`
    frontend = cluster.get_ssh_to_node(self.params.ssh_to)

    # ensure we can connect to the host
    try:
        if not frontend.preferred_ip:
            # Probe the node for a working address and remember it in
            # `preferred_ip`
            ssh = frontend.connect(keyfile=cluster.known_hosts_file)
            if ssh:
                ssh.close()
            cluster.repository.save_or_update(cluster)
    except NodeNotFound as ex:
        log.error("Unable to connect to the frontend node: %s", ex)
        sys.exit(1)

    # now delegate real connection to `ssh`
    host = frontend.connection_ip()
    if not host:
        log.error("No IP address known for node %s", frontend.name)
        sys.exit(1)
    addr, port = parse_ip_address_and_port(host)
    username = frontend.image_user
    if cluster.known_hosts_file:
        known_hosts = cluster.known_hosts_file
    else:
        known_hosts = '/dev/null'
    argv = ["ssh",
            "-i", frontend.user_key_private,
            "-o", "UserKnownHostsFile={0}".format(known_hosts),
            "-o", "StrictHostKeyChecking=no",
            "-p", "{0:d}".format(port),
            '%s@%s' % (username, addr)]
    argv.extend(self.params.ssh_args)
    log.debug("Running command `%s`", ' '.join(argv))
    os.execlp("ssh", *argv)
def execute(self):
    """
    Replace the current process with an ``sftp`` session towards the
    cluster's frontend node.
    """
    creator = make_creator(self.params.config,
                           storage_path=self.params.storage)
    cluster_name = self.params.clustername
    try:
        cluster = creator.load_cluster(cluster_name)
        cluster.update()
    except (ClusterNotFound, ConfigurationError) as ex:
        log.error("Setting up cluster %s: %s", cluster_name, ex)
        return

    # XXX: the default value of `self.params.ssh_to` should = the
    # default value for `ssh_to` in `Cluster.get_ssh_to_node()`
    frontend = cluster.get_ssh_to_node(self.params.ssh_to)
    host = frontend.connection_ip()
    if not host:
        log.error("No IP address known for node %s", frontend.name)
        sys.exit(1)
    addr, port = parse_ip_address_and_port(host)
    username = frontend.image_user
    if cluster.known_hosts_file:
        known_hosts = cluster.known_hosts_file
    else:
        known_hosts = '/dev/null'
    # NOTE: the port is passed via `-o Port=...` rather than `-P` for
    # compatibility across OpenSSH releases
    argv = ["sftp"]
    argv += ["-o", "Port={0}".format(port)]
    argv += ["-o", "UserKnownHostsFile={0}".format(known_hosts)]
    argv += ["-o", "StrictHostKeyChecking=no"]
    argv += ["-o", "IdentityFile={0}".format(frontend.user_key_private)]
    argv.extend(self.params.sftp_args)
    argv.append('{0}@{1}'.format(username, addr))
    os.execlp("sftp", *argv)
def _build_inventory(self, cluster):
    """
    Builds the inventory for the given cluster and returns its path

    :param cluster: cluster to build inventory for
    :type cluster: :py:class:`elasticluster.cluster.Cluster`
    :return: path of the inventory file written, or ``None`` if no
        node produced an inventory entry
    """
    inventory_data = defaultdict(list)

    for node in cluster.get_all_nodes():
        if node.preferred_ip is None:
            log.warning("Ignoring node `{0}`: No IP address."
                        .format(node.name))
            continue
        if node.kind not in self.groups:
            # FIXME: should this raise a `ConfigurationError` instead?
            log.warning("Ignoring node `{0}`:"
                        " Node kind `{1}` not defined in cluster!"
                        .format(node.name, node.kind))
            continue

        extra_vars = ['ansible_user=%s' % node.image_user]

        ip_addr, port = parse_ip_address_and_port(node.preferred_ip)
        if port != 22:
            extra_vars.append('ansible_port=%s' % port)

        # write additional `ansible_*` variables to inventory;
        # `ansible_python_interpreter` gets special treatment
        # since we need to tell script `install-py2.sh` that
        # it should create a wrapper script for running `eatmydata python`
        extra_conf = self.extra_conf.copy()
        ansible_python_interpreter = extra_conf.pop(
            'ansible_python_interpreter', '/usr/bin/python')
        extra_vars.append(
            'ansible_python_interpreter={python}{eatmydata}'.format(
                python=ansible_python_interpreter,
                eatmydata=('+eatmydata' if self.use_eatmydata else '')))
        # abuse Python's %r format to provide quotes around the value,
        # and \-escape any embedded quote chars: without quoting, a
        # value containing whitespace would be split into separate
        # inventory tokens and corrupt the host line
        extra_vars.extend('%s=%r' % (k, str(v))
                          for k, v in extra_conf.items()
                          if k.startswith('ansible_'))

        if node.kind in self.environment:
            # quote these values too (see note above)
            extra_vars.extend('%s=%r' % (k, str(v))
                              for k, v in self.environment[node.kind].items())

        for group in self.groups[node.kind]:
            inventory_data[group].append(
                (node.name, ip_addr, ' '.join(extra_vars)))

    if not inventory_data:
        log.info("No inventory file was created.")
        return None

    # create a temporary file to pass to ansible, since the
    # api is not stable yet...
    if self._storage_path_tmp:
        if not self._storage_path:
            self._storage_path = tempfile.mkdtemp()
        elasticluster.log.warning("Writing inventory file to tmp dir `%s`",
                                  self._storage_path)
    inventory_path = os.path.join(self._storage_path,
                                  (cluster.name + '.inventory'))
    log.debug("Writing Ansible inventory to file `%s` ...", inventory_path)
    with open(inventory_path, 'w+') as inventory_file:
        for section, hosts in inventory_data.items():
            # Ansible throws an error "argument of type 'NoneType' is not
            # iterable" if a section is empty, so ensure we have something
            # to write in there
            if hosts:
                inventory_file.write("\n[" + section + "]\n")
                for host in hosts:
                    hostline = "{0} ansible_host={1} {2}\n".format(*host)
                    inventory_file.write(hostline)
    return inventory_path
def _build_inventory(self, cluster):
    """
    Write an Ansible inventory file for `cluster` and return its path.

    :param cluster: cluster to build inventory for
    :type cluster: :py:class:`elasticluster.cluster.Cluster`
    """
    inventory = defaultdict(list)
    for node in cluster.get_all_nodes():
        if node.preferred_ip is None:
            log.warning(
                "Ignoring node `{0}`: No IP address."
                .format(node.name))
            continue
        if node.kind not in self.groups:
            # FIXME: should this raise a `ConfigurationError` instead?
            log.warning(
                "Ignoring node `{0}`:"
                " Node kind `{1}` not defined in cluster!"
                .format(node.name, node.kind))
            continue
        host_vars = ['ansible_user=%s' % node.image_user]
        ip_addr, port = parse_ip_address_and_port(node.preferred_ip)
        if port != 22:
            host_vars.append('ansible_port=%s' % port)
        if node.kind in self.environment:
            for key, value in self.environment[node.kind].items():
                host_vars.append('%s=%s' % (key, value))
        entry = (node.name, ip_addr, ' '.join(host_vars))
        for group in self.groups[node.kind]:
            inventory[group].append(entry)

    if not inventory:
        log.info("No inventory file was created.")
        return None

    # write the inventory to a temporary file to pass to ansible,
    # since its API is not stable yet
    if self._storage_path_tmp:
        if not self._storage_path:
            self._storage_path = tempfile.mkdtemp()
        elasticluster.log.warning(
            "Writing inventory file to tmp dir `%s`", self._storage_path)
    inventory_path = os.path.join(
        self._storage_path, cluster.name + '.inventory')
    log.debug("Writing Ansible inventory to file `%s` ...", inventory_path)
    with open(inventory_path, 'w+') as inventory_file:
        for section, hosts in inventory.items():
            # skip empty sections: Ansible errors out ("argument of
            # type 'NoneType' is not iterable") on an empty section
            if not hosts:
                continue
            inventory_file.write("\n[" + section + "]\n")
            for host in hosts:
                inventory_file.write(
                    "{0} ansible_host={1} {2}\n".format(*host))
    return inventory_path
def connect(self, keyfile=None, timeout=5):
    """
    Connect to the node via SSH.

    Tries `preferred_ip` first (if set), then every other known IP
    address; the first address that accepts a connection is stored back
    into `preferred_ip` for future calls.

    :param keyfile: Path to the SSH host key.
    :param timeout: Maximum time to wait (in seconds) for the TCP
        connection to be established.
    :return: :py:class:`paramiko.SSHClient` - ssh connection or None on
             failure
    """
    # hoisted out of the retry loop below: the import is loop-invariant
    # (paramiko itself is already imported at module level)
    from paramiko.proxy import ProxyCommand

    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    if keyfile and os.path.exists(keyfile):
        ssh.load_host_keys(keyfile)

    # Try connecting using the `preferred_ip`, if
    # present. Otherwise, try all of them and set `preferred_ip`
    # using the first that is working.
    ips = self.ips[:]
    # This is done in order to "sort" the IPs and put the preferred_ip first.
    if self.preferred_ip:
        if self.preferred_ip in ips:
            ips.remove(self.preferred_ip)
        else:
            # Preferred is changed?
            log.debug(
                "IP address %s does not seem to belong to %s anymore."
                " Ignoring it.", self.preferred_ip, self.name)
            self.preferred_ip = ips[0]

    for ip in itertools.chain([self.preferred_ip], ips):
        if not ip:
            continue
        log.debug("Trying to connect to host %s (%s) ...", self.name, ip)
        try:
            addr, port = parse_ip_address_and_port(ip, SSH_PORT)
            extra = {
                'allow_agent': True,
                'key_filename': self.user_key_private,
                'look_for_keys': False,
                'timeout': timeout,
                'username': self.image_user,
            }
            if self.ssh_proxy_command:
                proxy_command = expand_ssh_proxy_command(
                    self.ssh_proxy_command, self.image_user, addr, port)
                extra['sock'] = ProxyCommand(proxy_command)
                log.debug("Using proxy command `%s`.", proxy_command)
            ssh.connect(str(addr), port=port, **extra)
            log.debug(
                "Connection to %s succeeded on port %d,"
                " will use this IP address for future connections.",
                ip, port)
            if ip != self.preferred_ip:
                self.preferred_ip = ip
            # Connection successful.
            return ssh
        except socket.error as ex:
            log.debug(
                "Host %s (%s) not reachable within %d seconds: %s -- %r",
                self.name, ip, timeout, ex, type(ex))
        except paramiko.BadHostKeyException as ex:
            log.error(
                "Invalid SSH host key for %s (%s): %s.",
                self.name, ip, ex)
        except paramiko.SSHException as ex:
            log.debug(
                "Ignoring error connecting to %s: %s -- %r",
                self.name, ex, type(ex))
    return None
def _build_inventory(self, cluster):
    """
    Write an Ansible inventory file for `cluster` and return its path.

    :param cluster: cluster to build inventory for
    :type cluster: :py:class:`elasticluster.cluster.Cluster`
    """
    inventory = defaultdict(list)
    for node in cluster.get_all_nodes():
        if node.preferred_ip is None:
            log.warning(
                "Ignoring node `{0}`: No IP address."
                .format(node.name))
            continue
        if node.kind not in self.groups:
            # FIXME: should this raise a `ConfigurationError` instead?
            log.warning(
                "Ignoring node `{0}`:"
                " Node kind `{1}` not defined in cluster!"
                .format(node.name, node.kind))
            continue

        host_vars = ['ansible_user=%s' % node.image_user]
        ip_addr, port = parse_ip_address_and_port(node.preferred_ip)
        if port != 22:
            host_vars.append('ansible_port=%s' % port)

        # Copy the extra configuration and pull out
        # `ansible_python_interpreter`, which needs special handling:
        # script `install-py2.sh` must be told to create a wrapper
        # script for running `eatmydata python`.
        conf = self.extra_conf.copy()
        interpreter = conf.pop('ansible_python_interpreter',
                               '/usr/bin/python')
        host_vars.append(
            'ansible_python_interpreter={python}{eatmydata}'.format(
                python=interpreter,
                eatmydata=('+eatmydata' if self.use_eatmydata else '')))
        # Python's %r formatting is abused below to wrap each value in
        # quotes and \-escape any quote chars embedded in it.
        for key, value in conf.items():
            if key.startswith('ansible_'):
                host_vars.append('%s=%r' % (key, str(value)))
        if node.kind in self.environment:
            for key, value in self.environment[node.kind].items():
                host_vars.append('%s=%r' % (key, str(value)))

        entry = (node.name, ip_addr, ' '.join(host_vars))
        for group in self.groups[node.kind]:
            inventory[group].append(entry)

    if not inventory:
        log.info("No inventory file was created.")
        return None

    # write the inventory to a temporary file to pass to ansible,
    # since its API is not stable yet
    if self._storage_path_tmp:
        if not self._storage_path:
            self._storage_path = tempfile.mkdtemp()
        elasticluster.log.warning(
            "Writing inventory file to tmp dir `%s`", self._storage_path)
    inventory_path = os.path.join(
        self._storage_path, cluster.name + '.inventory')
    log.debug("Writing Ansible inventory to file `%s` ...", inventory_path)
    with open(inventory_path, 'w+') as inventory_file:
        for section, hosts in inventory.items():
            # skip empty sections: Ansible errors out ("argument of
            # type 'NoneType' is not iterable") on an empty section
            if not hosts:
                continue
            inventory_file.write("\n[" + section + "]\n")
            for host in hosts:
                inventory_file.write(
                    "{0} ansible_host={1} {2}\n".format(*host))
    return inventory_path
def _build_inventory(self, cluster):
    """
    Generate the Ansible inventory file for `cluster`; return its path.

    :param cluster: cluster to build inventory for
    :type cluster: :py:class:`elasticluster.cluster.Cluster`
    """
    sections = defaultdict(list)
    for node in cluster.get_all_nodes():
        if node.preferred_ip is None:
            log.warning(
                "Ignoring node `{0}`: No IP address."
                .format(node.name))
            continue
        if node.kind not in self.groups:
            # FIXME: should this raise a `ConfigurationError` instead?
            log.warning(
                "Ignoring node `{0}`:"
                " Node kind `{1}` not defined in cluster!"
                .format(node.name, node.kind))
            continue
        ip_addr, port = parse_ip_address_and_port(node.preferred_ip)
        extra_vars = ['ansible_user=%s' % node.image_user]
        if port != 22:
            extra_vars.append('ansible_port=%s' % port)
        if node.kind in self.environment:
            extra_vars += [
                '%s=%s' % (k, v)
                for k, v in self.environment[node.kind].items()
            ]
        for group in self.groups[node.kind]:
            sections[group].append(
                (node.name, ip_addr, ' '.join(extra_vars)))

    if not sections:
        log.info("No inventory file was created.")
        return None

    # write to a temporary file to pass to ansible, since its API is
    # not stable yet
    if self._storage_path_tmp:
        if not self._storage_path:
            self._storage_path = tempfile.mkdtemp()
        elasticluster.log.warning(
            "Writing inventory file to tmp dir `%s`", self._storage_path)
    inventory_path = os.path.join(
        self._storage_path, cluster.name + '.inventory')
    log.debug("Writing Ansible inventory to file `%s` ...", inventory_path)
    with open(inventory_path, 'w+') as fd:
        for section, hosts in sections.items():
            # Ansible raises "argument of type 'NoneType' is not
            # iterable" on an empty section, so only write sections
            # that actually contain hosts
            if hosts:
                fd.write("\n[" + section + "]\n")
                for host in hosts:
                    fd.write("{0} ansible_host={1} {2}\n".format(*host))
    return inventory_path