def wait(predicate, interval=5, timeout=60, timeout_msg="Waiting timed out"):
    """Wait until predicate will become True.

    Returns number of seconds that is left or 0 if timeout is None.

    Options:

    interval - seconds between checks.
    timeout - raise error.TimeoutError if predicate won't become True after
              this amount of seconds. 'None' disables timeout.
    timeout_msg - text of the error.TimeoutError
    """
    start_time = time.time()
    if not timeout:
        # Timeout disabled: single check, return the predicate result as-is.
        return predicate()
    while not predicate():
        if start_time + timeout < time.time():
            # BUG FIX: `predicate.func_name` is Python-2-only; use
            # `__name__` with a repr() fallback so this also works on
            # Python 3 and for callables without __name__ (e.g. partial).
            msg = ("{msg}\nWaited for pass {cmd}: {spent:0.3f} seconds."
                   "".format(msg=timeout_msg,
                             cmd=getattr(predicate, '__name__',
                                         repr(predicate)),
                             spent=time.time() - start_time))
            logger.debug(msg)
            raise error.TimeoutError(timeout_msg)

        # Never sleep past the deadline (and never a negative amount).
        seconds_to_sleep = max(
            0,
            min(interval, start_time + timeout - time.time()))
        time.sleep(seconds_to_sleep)
    return timeout + start_time - time.time()
def execute(self, command, verbose=False, timeout=None, **kwargs):
    """Execute command and wait for return code

    :type command: str
    :type verbose: bool
    :type timeout: int
    :rtype: ExecResult
    :raises: TimeoutError
    """
    chan, _, stderr, stdout = self.execute_async(command, **kwargs)
    result = self.__exec_command(
        command, chan, stdout, stderr, timeout, verbose=verbose)

    message = ('\n{cmd!r} execution results: Exit code: {code!s}'.format(
        cmd=command, code=result.exit_code))
    # Verbose runs get the summary at INFO level, others at DEBUG.
    log = logger.info if verbose else logger.debug
    log(message)
    return result
def wait_peer(self, interval=8, timeout=600):
    """Wait until an ntpd peer becomes a usable, synchronized system peer.

    :param interval: seconds to sleep between polls
    :param timeout: overall deadline in seconds
    :return: bool, final value of self.is_connected
    """
    # NOTE(review): self.peers is read but never refreshed inside the loop;
    # presumably it is a property that re-runs `ntpq` on every access --
    # confirm, otherwise this loop re-checks stale data.
    self.is_connected = False
    start_time = time.time()
    while start_time + timeout > time.time():
        # peer = `ntpq -pn 127.0.0.1`
        logger.debug("Node: {0}, ntpd peers: {1}".format(
            self.node_name, self.peers))

        for peer in self.peers:
            p = peer.split()
            remote = str(p[0])
            reach = int(p[6], 8)  # From octal to int
            offset = float(p[8])
            jitter = float(p[9])

            # 1. offset and jitter should not be higher than 500
            # Otherwise, time should be re-set.
            if (abs(offset) > 500) or (abs(jitter) > 500):
                return self.is_connected

            # 2. remote should be marked with tally '*'
            if remote[0] != '*':
                continue

            # 3. reachability bit array should have '1' at least in
            # two lower bits as the last two successful checks
            if reach & 3 == 3:
                self.is_connected = True
                return self.is_connected
        time.sleep(interval)
    return self.is_connected
def _get_sync_complete(self):
    """Return True if at least one ntpd peer qualifies as a sync source."""
    peers = self._get_ntpq()
    logger.debug("Node: {0}, ntpd peers: {1}".format(
        self.node_name, peers))

    for peer in peers:
        fields = peer.split()
        remote = str(fields[0])
        reach = int(fields[6], 8)  # octal reachability register -> int
        offset = float(fields[8])
        jitter = float(fields[9])

        # A good peer must satisfy all three conditions:
        # 1. offset and jitter within +/-500 (otherwise time must be
        #    re-set);
        # 2. marked as the system peer (tally '*');
        # 3. the two lowest reachability bits set, i.e. the last two
        #    polls succeeded.
        if (abs(offset) <= 500 and abs(jitter) <= 500
                and remote[0] == '*' and reach & 3 == 3):
            return True
    return False
def _create_boot_menu(self, interface='eth0',
                      ks_script='tftpboot/fuel/ks.cfg'):
    """Write the PXE boot menu for installing Fuel over the network.

    :param interface: NIC name passed to the kickstart 'ks.device' option
    :param ks_script: kickstart file path relative to the driver root
    :return: True on success
    """
    LOGGER.debug('Create PXE boot menu for booting from network')
    menu_boot = ("DEFAULT menu.c32\n"
                 "prompt 0\n"
                 "MENU TITLE Fuel Installer\n"
                 "TIMEOUT 20\n"
                 "LABEL LABEL fuel\n"
                 "MENU LABEL Install ^FUEL\n"
                 "MENU DEFAULT\n"
                 "KERNEL /fuel/isolinux/vmlinuz\n"
                 "INITRD /fuel/isolinux/initrd.img\n"
                 "APPEND ks=nfs:{0}:{1}/{4} "
                 "ks.device='{3}' "
                 "repo=nfs:{0}:{1}/tftpboot/fuel/ ip={2} "
                 "netmask=255.255.255.0 dns1={0} "
                 "hostname=fuel.mirantis.com ".format(
                     self.ip_install_server,
                     self.ipmi_driver_root_dir,
                     self.ip_node_admin,
                     interface,
                     ks_script))
    # BUG FIX: 'w' was previously passed to str.format() instead of
    # open(), so the file was opened in the default read mode and the
    # write failed.
    with open('{0}/tftpboot/pxelinux.cfg/default'.format(
            self.ipmi_driver_root_dir), 'w') as f:
        f.write(menu_boot)
    return True
def _create_boot_menu(self, interface='eth0',
                      ks_script='tftpboot/fuel/ks.cfg'):
    """Write the PXE boot menu for installing Fuel over the network.

    :param interface: NIC name passed to the kickstart 'ks.device' option
    :param ks_script: kickstart file path relative to the driver root
    :return: True on success
    """
    LOGGER.debug('Create PXE boot menu for booting from network')
    menu_boot = ("DEFAULT menu.c32\n"
                 "prompt 0\n"
                 "MENU TITLE Fuel Installer\n"
                 "TIMEOUT 20\n"
                 "LABEL LABEL fuel\n"
                 "MENU LABEL Install ^FUEL\n"
                 "MENU DEFAULT\n"
                 "KERNEL /fuel/isolinux/vmlinuz\n"
                 "INITRD /fuel/isolinux/initrd.img\n"
                 "APPEND ks=nfs:{0}:{1}/{4} "
                 "ks.device='{3}' "
                 "repo=nfs:{0}:{1}/tftpboot/fuel/ ip={2} "
                 "netmask=255.255.255.0 dns1={0} "
                 "hostname=fuel.mirantis.com ".
                 format(self.ip_install_server, self.ipmi_driver_root_dir,
                        self.ip_node_admin, interface, ks_script))
    # BUG FIX: 'w' was previously passed to str.format() instead of
    # open(), so the file was opened in the default read mode and the
    # write failed.
    with open('{0}/tftpboot/pxelinux.cfg/default'.format(
            self.ipmi_driver_root_dir), 'w') as f:
        f.write(menu_boot)
    return True
def _get_sync_complete(self):
    """Return True if any ntpd peer looks like a usable sync source."""
    peers = self._get_ntpq()
    logger.debug("Node: {0}, ntpd peers: {1}".format(
        self.node_name, peers))

    for peer in peers:
        fields = peer.split()
        remote = str(fields[0])
        offset = float(fields[8])
        jitter = float(fields[9])

        # Offset or jitter above 500 means the time must be re-set;
        # this peer cannot be trusted.
        if abs(offset) > 500 or abs(jitter) > 500:
            continue
        # Only the system peer (tally mark '*') counts.
        if remote[0] != '*':
            continue
        # The reachability check (reach & 3 == 3) is intentionally
        # disabled here:
        # TODO(sbog): we should improve this, as unstable reference
        # is still a reference and can be used for time sync. Moreover,
        # if there are other servers which can easily be a reference
        # one, that reachability of current one is not a problem at all.
        return True
    return False
def wait_peer(self, interval=8, timeout=600):
    """Poll ntpd peers until one becomes a synchronized system peer.

    :param interval: seconds between polls
    :param timeout: overall deadline in seconds
    :return: bool, final value of self.is_connected
    """
    self.is_connected = False
    deadline = time.time() + timeout

    while time.time() < deadline:
        # peer = `ntpq -pn 127.0.0.1`
        self.peers = self.get_peers()[2:]  # skip the header lines

        logger.debug("Node: {0}, ntpd peers: {1}".format(
            self.node_name, self.peers))

        for peer in self.peers:
            fields = peer.split()
            remote = str(fields[0])
            reach = int(fields[6], 8)  # octal reachability register -> int
            offset = float(fields[8])
            jitter = float(fields[9])

            # 1. Offset/jitter above 500 means the time must be re-set:
            #    give up immediately.
            if abs(offset) > 500 or abs(jitter) > 500:
                return self.is_connected

            # 2. Only the system peer (tally mark '*') counts.
            if remote[0] != '*':
                continue

            # 3. The two lowest reachability bits must be set: the last
            #    two polls were successful.
            if reach & 3 == 3:
                self.is_connected = True
                return self.is_connected
        time.sleep(interval)
    return self.is_connected
def upload(self, source, target):
    """Upload a local file or directory tree to the remote via SFTP.

    :param source: local file or directory path
    :param target: remote destination path
    """
    logger.debug("Copying '%s' -> '%s'", source, target)

    # Copying into an existing remote directory keeps the source name.
    if self.isdir(target):
        target = posixpath.join(target, os.path.basename(source))

    source = os.path.expanduser(source)
    if not os.path.isdir(source):
        # Single file: a plain put is all that is needed.
        self._sftp.put(source, target)
        return

    # Directory: mirror the local layout remotely, file by file.
    for local_dir, _, filenames in os.walk(source):
        remote_dir = os.path.normpath(
            os.path.join(
                target,
                os.path.relpath(local_dir, source))).replace("\\", "/")
        self.mkdir(remote_dir)
        for filename in filenames:
            local_path = os.path.join(local_dir, filename)
            remote_path = posixpath.join(remote_dir, filename)
            # Overwrite semantics: remove any existing remote file first.
            if self.exists(remote_path):
                self._sftp.unlink(remote_path)
            self._sftp.put(local_path, remote_path)
def _safe_create_network(cls, name, pool, environment, **params):
    """Create an address pool record from the first free network in *pool*.

    Tries candidate networks one by one inside a transaction so that a
    concurrent creator loses cleanly with IntegrityError instead of
    leaving partial rows behind.

    :raises error.DevopsError: if the name is already taken or the pool
        is exhausted
    """
    for ip_network in pool:
        # Skip networks already claimed by another address pool.
        if cls.objects.filter(net=str(ip_network)).exists():
            continue

        # Copy so the caller's params dict is never mutated.
        new_params = deepcopy(params)
        new_params['net'] = ip_network

        try:
            with transaction.atomic():
                return cls.objects.create(
                    environment=environment,
                    name=name,
                    **new_params
                )
        except IntegrityError as e:
            logger.debug(e)
            # A duplicate name is fatal; any other integrity clash just
            # means we raced with another creator -- try the next network.
            if 'name' in str(e):
                raise error.DevopsError(
                    'AddressPool with name "{}" already exists'
                    ''.format(name))
            continue

    raise error.DevopsError(
        "There is no network pool available for creating "
        "address pool {}".format(name))
def rm_rf(self, path):
    """run 'rm -rf path' on remote

    :type path: str
    """
    # CONSISTENCY FIX: escape the path before embedding it into the shell
    # command line, like the sibling rm_rf/mkdir implementations do; an
    # unescaped path containing spaces or shell metacharacters would
    # break (or dangerously widen) the command.
    logger.debug("rm -rf {}".format(self._path_esc(path)))
    # noinspection PyTypeChecker
    self.execute("rm -rf {}".format(self._path_esc(path)))
def rm_rf(self, path):
    """run 'rm -rf path' on remote

    :type path: str
    """
    # Escape once, reuse the identical command text for log and shell.
    command = "rm -rf {}".format(self._path_esc(path))
    logger.debug(command)
    # noinspection PyTypeChecker
    self.execute(command)
def conn(self):
    """Connection to ironic api"""
    logger.debug("Ironic client is connecting to {0}".format(
        self.ironic_url))
    # API version 1 client, authenticated with the stored token.
    return client.get_client(
        1,
        os_auth_token=self.os_auth_token,
        ironic_url=self.ironic_url)
def main(args=None):
    """CLI entry point: run the Shell, turning DevopsError into exit text."""
    if args is None:
        args = sys.argv[1:]
    try:
        Shell(args).execute()
    except error.DevopsError as exc:
        # Full traceback only at DEBUG; the user sees a one-line error.
        logger.debug(exc, exc_info=True)
        sys.exit('Error: {}'.format(exc))
def mkdir(self, path):
    """run 'mkdir -p path' on remote

    :type path: str
    """
    if self.exists(path):
        return  # already present, nothing to do

    escaped = self._path_esc(path)
    logger.debug("Creating directory: {}".format(escaped))
    # noinspection PyTypeChecker
    self.execute("mkdir -p {}\n".format(escaped))
def clear_cache(mcs):
    """Clear cached connections for initialize new instance on next call"""
    # Expected refcount of a cached-but-otherwise-unused client:
    # PY3: cache, ssh, temporary
    # PY2: cache, values mapping, ssh, temporary
    # (the original comment said "PY4" -- a typo for PY2)
    n_count = 3 if six.PY3 else 4
    for ssh in mcs.__cache.values():
        if sys.getrefcount(ssh) == n_count:
            # Nobody else references this client: close before dropping.
            logger.debug('Closing {} as unused'.format(ssh))
            ssh.close()
    mcs.__cache = {}
def _close_remotes(self):
    """Close cached ssh connections for the current node."""
    for network_name in {'admin', 'public', 'internal'}:
        try:
            address = self.get_ip_address_by_network_name(network_name)
            SSHClient.close_connections(hostname=address)
        except BaseException:
            # Best effort: the node may simply have no address in this
            # network; record the failure and keep going.
            logger.debug(
                '{0}._close_remotes for {1} failed'.format(
                    self.name, network_name))
def mkdir(self, path):
    """run 'mkdir -p path' on remote

    :type path: str
    """
    if self.exists(path):
        return
    # CONSISTENCY FIX: escape the path before embedding it into the
    # shell command line, like the sibling mkdir/rm_rf implementations;
    # an unescaped path with spaces or metacharacters breaks the command.
    logger.debug("Creating directory: {}".format(self._path_esc(path)))
    # noinspection PyTypeChecker
    self.execute("mkdir -p {}\n".format(self._path_esc(path)))
def _stop_nfs(self):
    """Stop the NFS server and restore the original /etc/exports.

    :return: True on success
    """
    if os.path.isfile('/etc/exports-devops-last'):
        # Restore the exports file that was backed up at start time.
        cmd = ['sudo', 'mv', '/etc/exports-devops-last', '/etc/exports']
        subprocess.call(cmd)

    # BUG FIX: `self.system_init.find('systemd') == 1` only matched when
    # 'systemd' happened to start at index 1; use a substring test to
    # detect systemd-based init.
    if 'systemd' in self.system_init:
        cmd = ['sudo', 'systemctl', 'stop', 'nfsd']
    else:
        cmd = ['sudo', 'service', 'nfs-kernel-server', 'stop']

    output = subprocess.check_output(cmd)
    LOGGER.debug('NFS server stopped, output is {0}'.format(output))
    return True
def ip_range_end(self, range_name):
    """Return the IP address of end the IP range 'range_name'

    :return: str(IP) or None
    """
    if range_name in self.ip_ranges:
        # The stored range is a (start, end) pair; take the end element.
        return str(self.ip_ranges[range_name][1])
    logger.debug("IP range '{0}' not found in the "
                 "address pool {1}".format(range_name, self.name))
    return None
def ext(self):
    """Load the NodeExtension matching this node's role ('default' if unset).

    :return: NodeExtension instance, or None when no extension exists
    """
    ext_name = self.role or 'default'
    try:
        # noinspection PyPep8Naming
        ExtCls = loader.load_class(
            'devops.models.node_ext.{ext_name}:NodeExtension'
            ''.format(ext_name=ext_name))
    except ImportError:
        logger.debug('NodeExtension is not found for role: {!r}'
                     ''.format(self.role))
        return None
    return ExtCls(node=self)
def set_node_boot(self, device):
    """
    Valid are: pxe, disk,
    """
    LOGGER.debug('Set boot device to %s' % device)
    # Delegate to ipmitool: `... chassis bootdev <device>`.
    output = subprocess.check_output(
        self.ipmi_cmd + ['chassis', 'bootdev', device])
    LOGGER.debug('Set boot server output: {0}'.format(output))
    return True
def set_actual_time(self, timeout=600):
    """Set node time from self.server via ntpdate and persist it.

    :param timeout: seconds to keep retrying ntpdate
    :return: bool, whether synchronization succeeded
    """
    # Waiting for parent server until it starts providing the time
    cmd = "ntpdate -p 4 -t 0.2 -bu {0}".format(self.server)
    self.is_synchronized = False
    try:
        # BUG FIX: `timeout` was passed positionally and landed in the
        # `interval` parameter of wait() (signature:
        # wait(predicate, interval=5, timeout=60, ...)); pass by keyword.
        wait(lambda: not self.remote.execute(cmd)['exit_code'],
             timeout=timeout)
        self.remote.execute('hwclock -w')  # persist to the hardware clock
        self.is_synchronized = True
    except TimeoutError as e:
        logger.debug('Time sync failed with {}'.format(e))
    return self.is_synchronized
def __init__(
        self,
        host, port=22,
        username=None, password=None, private_keys=None,
        auth=None,
        verbose=True
):
    """SSHClient helper

    :type host: str
    :type port: int
    :type username: str
    :type password: str
    :type private_keys: list
    :type auth: SSHAuth
    :type verbose: bool, show additional error/warning messages
    """
    # Lock serializes operations on this client across threads.
    self.__lock = threading.RLock()

    self.__hostname = host
    self.__port = port

    self.sudo_mode = False
    self.__ssh = paramiko.SSHClient()
    self.__ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    self.__sftp = None

    # Copy the auth object so later mutations by the caller cannot
    # affect this client.
    self.__auth = auth if auth is None else auth.copy()

    self.__verbose = verbose

    if auth is None:
        # Legacy path: build an SSHAuth from the separate credential
        # arguments and warn the caller to migrate.
        msg = (
            'SSHClient(host={host}, port={port}, username={username}): '
            'initialization by username/password/private_keys '
            'is deprecated in favor of SSHAuth usage. '
            'Please update your code'.format(
                host=host, port=port, username=username
            ))
        warnings.warn(msg, DeprecationWarning)
        logger.debug(msg)

        self.__auth = SSHAuth(
            username=username,
            password=password,
            keys=private_keys
        )

    self.__connect()
    # Register this instance in the memoizing cache.
    _MemorizedSSH.record(ssh=self)
    if auth is None:
        logger.info(
            '{0}:{1}> SSHAuth was made from old style creds: '
            '{2}'.format(self.hostname, self.port, self.auth))
def get_ip(self, ip_name):
    """Return the reserved IP

    For example, 'gateway' is one of the common reserved IPs

    :return: str(IP) or None
    """
    if ip_name in self.ip_reserved:
        return str(self.ip_reserved[ip_name])
    logger.debug("Reserved IP '{0}' not found in the "
                 "address pool {1}".format(ip_name, self.name))
    return None
def _sftp(self):
    """SFTP channel access for inheritance

    :rtype: paramiko.sftp_client.SFTPClient
    :raises: paramiko.SSHException
    """
    if self.__sftp is None:
        # Lazy (re)connect: the channel may have dropped or never opened.
        logger.debug('SFTP is not connected, try to connect...')
        self.__connect_sftp()
    if self.__sftp is None:
        raise paramiko.SSHException('SFTP connection failed')
    return self.__sftp
def get_ip_from_json(js, mac):
    """Find the node IP in Nailgun JSON for the interface with *mac*.

    :raises DevopsError: when no interface matches
    """
    def poor_mac(mac_addr):
        # Keep hex digits only, lowercased, so comparison ignores
        # separators and case.
        return \
            [m.lower()
             for m in mac_addr
             if m.lower() in '01234546789abcdef']

    wanted = poor_mac(mac)
    for node in js:
        for interface in node['meta']['interfaces']:
            if poor_mac(interface['mac']) != wanted:
                continue
            logger.debug("For mac {0} found ip {1}".format(
                mac, node['ip']))
            return node['ip']
    raise DevopsError(
        'There is no match between MAC {0} and Nailgun MACs'.format(mac))
def _stop_dhcp_tftp(self):
    """Kill the dnsmasq instance recorded in <driver root>/dnsmasq.pid."""
    pid_path = '{0}/dnsmasq.pid'.format(self.ipmi_driver_root_dir)
    if os.path.isfile(pid_path):
        try:
            # `with` guarantees the pid file is closed even on error
            # (the original leaked the handle when an exception fired).
            with open(pid_path, 'r') as pid_file:
                for line in pid_file:
                    pid = line.strip().lower()
                    subprocess.call(['sudo', 'kill', pid])
            LOGGER.debug('dnsmasq killed')
        # BUG FIX: `except E, e` is Python-2-only syntax (invalid on
        # Python 3); `except E as e` works on both.
        except subprocess.CalledProcessError as e:
            LOGGER.warning("Can't stop dnsmasq: {0}".format(e.output))
def do_sync_time(self, ntps):
    """Synchronize time on all the given NTP node wrappers.

    :param ntps: list of NTP helpers; must not be empty
    :raises ValueError: if no servers were provided
    :raises TimeoutError: if time could not be set or peers never synced
    """
    # 0. 'ntps' can be filled by __init__() or outside the class
    if not ntps:
        raise ValueError("No servers were provided to synchronize "
                         "the time in self.ntps")

    # 1. Stop NTPD service on nodes
    logger.debug("Stop NTPD service on nodes {0}".format(
        self.report_node_names(ntps)))
    # IDIOM FIX: plain loops instead of side-effect list comprehensions.
    for ntp in ntps:
        ntp.stop()

    # 2. Set actual time on all nodes via 'ntpdate'
    logger.debug(
        "Set actual time on all nodes via 'ntpdate' on nodes {0}".format(
            self.report_node_names(ntps)))
    for ntp in ntps:
        ntp.set_actual_time()

    if not self.is_synchronized(ntps):
        raise TimeoutError(
            "Time on nodes was not set with 'ntpdate':\n{0}".format(
                self.report_not_synchronized(ntps)))

    # 3. Start NTPD service on nodes
    logger.debug("Start NTPD service on nodes {0}".format(
        self.report_node_names(ntps)))
    for ntp in ntps:
        ntp.start()

    # 4. Wait for established peers
    logger.debug("Wait for established peers on nodes {0}".format(
        self.report_node_names(ntps)))
    for ntp in ntps:
        ntp.wait_peer()

    if not self.is_connected(ntps):
        raise TimeoutError("NTPD on nodes was not synchronized:\n"
                           "{0}".format(self.report_not_connected(ntps)))
def do_sync_time(self, ntps):
    """Run the four time-sync phases on every given NTP node wrapper:
    stop ntpd, set time via ntpdate, restart ntpd, wait for peers.
    """
    phases = (
        ("Stop NTPD service on nodes {0}",
         lambda ntp: ntp.stop()),
        ("Set actual time on all nodes via 'ntpdate' on nodes {0}",
         lambda ntp: ntp.set_actual_time()),
        ("Start NTPD service on nodes {0}",
         lambda ntp: ntp.start()),
        ("Wait for established peers on nodes {0}",
         lambda ntp: ntp.wait_peer()),
    )
    for message, action in phases:
        logger.debug(message.format(self.report_node_names(ntps)))
        for ntp in ntps:
            action(ntp)
def do_sync_time(self, ntps):
    """Synchronize time on all the given NTP node wrappers.

    :param ntps: list of NTP helpers; must not be empty
    :raises ValueError: if no servers were provided
    :raises TimeoutError: if time could not be set or peers never synced
    """
    # 0. 'ntps' can be filled by __init__() or outside the class
    if not ntps:
        raise ValueError("No servers were provided to synchronize "
                         "the time in self.ntps")

    # 1. Stop NTPD service on nodes
    logger.debug("Stop NTPD service on nodes {0}"
                 .format(self.report_node_names(ntps)))
    # IDIOM FIX: plain loops instead of side-effect list comprehensions.
    for ntp in ntps:
        ntp.stop()

    # 2. Set actual time on all nodes via 'ntpdate'
    logger.debug("Set actual time on all nodes via 'ntpdate' on nodes {0}"
                 .format(self.report_node_names(ntps)))
    for ntp in ntps:
        ntp.set_actual_time()

    if not self.is_synchronized(ntps):
        raise TimeoutError("Time on nodes was not set with 'ntpdate':\n{0}"
                           .format(self.report_not_synchronized(ntps)))

    # 3. Start NTPD service on nodes
    logger.debug("Start NTPD service on nodes {0}"
                 .format(self.report_node_names(ntps)))
    for ntp in ntps:
        ntp.start()

    # 4. Wait for established peers
    logger.debug("Wait for established peers on nodes {0}"
                 .format(self.report_node_names(ntps)))
    for ntp in ntps:
        ntp.wait_peer()

    if not self.is_connected(ntps):
        raise TimeoutError("NTPD on nodes was not synchronized:\n"
                           "{0}".format(self.report_not_connected(ntps)))
def get_slave_ip_by_mac(self, mac):
    """Return the Nailgun-reported IP of the node owning the given MAC.

    :raises error.DevopsError: when no interface matches
    """
    nodes = self.get_nodes_json()

    def poor_mac(mac_addr):
        # Lowercased hex digits only -- separator- and case-insensitive.
        return [symbol.lower()
                for symbol in mac_addr
                if symbol.lower() in '01234546789abcdef']

    wanted = poor_mac(mac)
    for node in nodes:
        for interface in node['meta']['interfaces']:
            if poor_mac(interface['mac']) != wanted:
                continue
            logger.debug('For mac {0} found ip {1}'
                         .format(mac, node['ip']))
            return node['ip']
    raise error.DevopsError(
        'There is no match between MAC {0} and Nailgun MACs'.format(mac))
def wait_pass(raising_predicate, expected=Exception, interval=5, timeout=60,
              timeout_msg="Waiting timed out",
              predicate_args=None, predicate_kwargs=None):
    """Wait for successful return from predicate ignoring expected exception

    Options:

    :param interval: - seconds between checks.
    :param timeout: - raise TimeoutError if predicate still throwing
        expected exception after this amount of seconds.
    :param timeout_msg: - text of the TimeoutError
    :param predicate_args: - positional arguments for given predicate
        wrapped in list or tuple
    :param predicate_kwargs: - dict with named arguments for the predicate
    :param expected: Exception that can be ignored while waiting (it is
        possible to pass several using list/tuple)
    """
    predicate_args = predicate_args or []
    predicate_kwargs = predicate_kwargs or {}
    _check_wait_args(raising_predicate, predicate_args, predicate_kwargs,
                     interval, timeout)
    # Partial template: '{spent}' is kept as a placeholder and filled in
    # only when the timeout actually fires.
    msg = ("{msg}\nWaited for pass {cmd}: {spent} seconds."
           "".format(msg=timeout_msg,
                     cmd=repr(raising_predicate),
                     spent="{spent:0.3f}"))

    start_time = time.time()
    # RunLimit acts as a hard stop in case a single predicate call hangs
    # longer than the whole timeout.
    with RunLimit(timeout, msg):
        while True:
            try:
                result = raising_predicate(*predicate_args,
                                           **predicate_kwargs)
                logger.debug(
                    "wait_pass() completed with result='{0}'".format(result))
                return result
            except expected as e:
                # Deadline check happens only after a failed attempt.
                if start_time + timeout < time.time():
                    err_msg = msg.format(spent=time.time() - start_time)
                    logger.error(err_msg)
                    raise error.TimeoutError(err_msg)
                logger.debug(
                    "Got expected exception {!r}, continue".format(e))
                time.sleep(interval)
def execute_async(self, command):
    """Start *command* remotely and return the channel with its IO streams.

    :return: tuple(channel, stdin, stderr, stdout)
    """
    logger.debug("Executing command: '{}'".format(command.rstrip()))

    chan = self._ssh.get_transport().open_session()
    stdin = chan.makefile('wb')
    stdout = chan.makefile('rb')
    stderr = chan.makefile_stderr('rb')

    cmd = "%s\n" % command
    if not self.sudo_mode:
        chan.exec_command(cmd)
    else:
        # Wrap in `sudo -S bash -c`, escaping embedded double quotes.
        chan.exec_command('sudo -S bash -c "%s"' % cmd.replace('"', '\\"'))
        if stdout.channel.closed is False:
            # sudo prompts for the password on stdin.
            stdin.write('%s\n' % self.password)
            stdin.flush()
    return chan, stdin, stderr, stdout
def get_slave_ip_by_mac(self, mac):
    """Look up the node IP that Nailgun reports for the given MAC.

    :raises error.DevopsError: when no interface matches
    """
    nodes = self.get_nodes_json()

    def poor_mac(mac_addr):
        # Normalize a MAC: keep hex digits only, lowercased.
        return [
            c.lower()
            for c in mac_addr
            if c.lower() in '01234546789abcdef'
        ]

    needle = poor_mac(mac)
    for node in nodes:
        for interface in node['meta']['interfaces']:
            if poor_mac(interface['mac']) == needle:
                logger.debug('For mac {0} found ip {1}'.format(
                    mac, node['ip']))
                return node['ip']
    raise error.DevopsError(
        'There is no match between MAC {0} and Nailgun MACs'.format(mac))
def wait_pass(raising_predicate, expected=Exception, interval=5, timeout=60,
              timeout_msg="Waiting timed out",
              predicate_args=None, predicate_kwargs=None):
    """Retry the predicate until it returns without the expected exception.

    :param interval: seconds between attempts
    :param timeout: raise TimeoutError if the predicate is still raising
        the expected exception after this many seconds
    :param timeout_msg: text of the TimeoutError
    :param predicate_args: positional arguments for the predicate
        (list or tuple)
    :param predicate_kwargs: dict with named arguments for the predicate
    :param expected: exception type(s) that may be ignored while waiting
    """
    args = predicate_args or []
    kwargs = predicate_kwargs or {}
    _check_wait_args(raising_predicate, args, kwargs, interval, timeout)

    msg = (
        "{msg}\nWaited for pass {cmd}: {spent} seconds."
        "".format(
            msg=timeout_msg, cmd=repr(raising_predicate),
            spent="{spent:0.3f}"
        ))

    start_time = time.time()
    deadline = start_time + timeout
    # RunLimit is the hard stop for a single hanging predicate call.
    with RunLimit(timeout, msg):
        while True:
            try:
                result = raising_predicate(*args, **kwargs)
            except expected as e:
                if deadline < time.time():
                    err_msg = msg.format(spent=time.time() - start_time)
                    logger.error(err_msg)
                    raise error.TimeoutError(err_msg)
                logger.debug(
                    "Got expected exception {!r}, continue".format(e))
                time.sleep(interval)
            else:
                logger.debug("wait_pass() completed with result='{0}'"
                             .format(result))
                return result
def connect(self, client, hostname=None, port=22, log=True):
    """Connect SSH client object using credentials

    :type client:
        paramiko.client.SSHClient
        paramiko.transport.Transport
    :type log: bool
    :raises paramiko.AuthenticationException
    """
    kwargs = {
        'username': self.username, 'password': self.__password}
    if hostname is not None:
        kwargs['hostname'] = hostname
        kwargs['port'] = port

    # Try the last-known-good key first, then all remaining keys.
    keys = [self.__key]
    keys.extend([k for k in self.__keys if k != self.__key])

    for key in keys:
        kwargs['pkey'] = key
        try:
            client.connect(**kwargs)
            if self.__key != key:
                # Remember which key actually worked for future connects.
                self.__key = key
                logger.debug(
                    'Main key has been updated, public key is: \n'
                    '{}'.format(self.public_key))
            return
        except paramiko.PasswordRequiredException:
            # An encrypted key needed a passphrase/password we don't have
            # (or the stored one unexpectedly failed) -- not retryable.
            if self.__password is None:
                logger.exception('No password has been set!')
                raise
            else:
                logger.critical(
                    'Unexpected PasswordRequiredException, '
                    'when password is set!')
                raise
        except (paramiko.AuthenticationException,
                paramiko.BadHostKeyException):
            # This key was rejected -- try the next one.
            continue
    msg = 'Connection using stored authentication info failed!'
    if log:
        logger.exception(
            'Connection using stored authentication info failed!')
    raise paramiko.AuthenticationException(msg)
def __call__(cls, host, port=22,
             username=None, password=None, private_keys=None,
             auth=None):
    """Main memorize method: check for cached instance and return it

    :type host: str
    :type port: int
    :type username: str
    :type password: str
    :type private_keys: list
    :type auth: SSHAuth
    :rtype: SSHClient
    """
    if (host, port) in cls.__cache:
        key = host, port
        if auth is None:
            # Normalize legacy credentials so hashes are comparable.
            auth = SSHAuth(
                username=username, password=password, keys=private_keys)
        if hash((cls, host, port, auth)) == hash(cls.__cache[key]):
            # Same endpoint and same credentials: reuse the cached client,
            # reviving it if the connection has gone stale.
            ssh = cls.__cache[key]
            # noinspection PyBroadException
            try:
                ssh.execute('cd ~', timeout=5)
            except BaseException:  # Note: Do not change to lower level!
                logger.debug('Reconnect {}'.format(ssh))
                ssh.reconnect()
            return ssh
        if sys.getrefcount(cls.__cache[key]) == 2:
            # If we have only cache reference and temporary getrefcount
            # reference: close connection before deletion
            logger.debug('Closing {} as unused'.format(cls.__cache[key]))
            cls.__cache[key].close()
        # Credentials changed: evict the stale entry and build a new one.
        del cls.__cache[key]
    # noinspection PyArgumentList
    return super(_MemorizedSSH, cls).__call__(
        host=host, port=port,
        username=username, password=password,
        private_keys=private_keys, auth=auth)
def execute_async(self, command, get_pty=False):
    """Execute command in async mode and return channel with IO objects

    :type command: str
    :type get_pty: bool
    :rtype: tuple(
        paramiko.Channel,
        paramiko.ChannelFile,
        paramiko.ChannelFile,
        paramiko.ChannelFile
    )
    """
    message = log_templates.CMD_EXEC.format(cmd=command.rstrip())
    logger.debug(message)

    chan = self._ssh.get_transport().open_session()

    if get_pty:
        # Open PTY
        chan.get_pty(
            term='vt100',
            width=80, height=24,
            width_pixels=0, height_pixels=0
        )

    stdin = chan.makefile('wb')
    stdout = chan.makefile('rb')
    stderr = chan.makefile_stderr('rb')
    cmd = "{}\n".format(command)
    if self.sudo_mode:
        # Base64-encode the command so quotes and shell metacharacters
        # inside it survive the `sudo -S bash -c` wrapping; the remote
        # bash decodes and eval's the original command text.
        encoded_cmd = base64.b64encode(cmd.encode('utf-8')).decode('utf-8')
        cmd = ("sudo -S bash -c 'eval \"$(base64 -d "
               "<(echo \"{0}\"))\"'").format(
            encoded_cmd
        )
        chan.exec_command(cmd)
        if stdout.channel.closed is False:
            # sudo may prompt for the password on stdin.
            self.auth.enter_password(stdin)
            stdin.flush()
    else:
        chan.exec_command(cmd)
    return chan, stdin, stderr, stdout
def wait(predicate, interval=5, timeout=60, timeout_msg="Waiting timed out",
         predicate_args=None, predicate_kwargs=None):
    """Poll the predicate until it returns a truthy value.

    :param interval: seconds between checks
    :param timeout: raise TimeoutError if the predicate is still falsy
        after this many seconds
    :param timeout_msg: text of the TimeoutError
    :param predicate_args: positional arguments for the predicate
        (list or tuple)
    :param predicate_kwargs: dict with named arguments for the predicate
    """
    args = predicate_args or []
    kwargs = predicate_kwargs or {}
    _check_wait_args(predicate, args, kwargs, interval, timeout)

    msg = (
        "{msg}\nWaited for pass {cmd}: {spent} seconds."
        "".format(
            msg=timeout_msg, cmd=repr(predicate), spent="{spent:0.3f}"
        ))

    start_time = time.time()
    deadline = start_time + timeout
    # RunLimit is the hard stop for a single hanging predicate call.
    with RunLimit(timeout, msg):
        while True:
            result = predicate(*args, **kwargs)
            if result:
                logger.debug("wait() completed with result='{0}'"
                             .format(result))
                return result
            if time.time() > deadline:
                err_msg = msg.format(spent=time.time() - start_time)
                logger.error(err_msg)
                raise error.TimeoutError(err_msg)
            time.sleep(interval)
def __call__(
        cls,
        host, port=22,
        username=None, password=None, private_keys=None,
        auth=None,
        verbose=True
):
    """Main memorize method: check for cached instance and return it

    :type host: str
    :type port: int
    :type username: str
    :type password: str
    :type private_keys: list
    :type auth: SSHAuth
    :rtype: SSHClient
    """
    if (host, port) in cls.__cache:
        key = host, port
        if auth is None:
            # Normalize legacy credentials so hashes are comparable.
            auth = SSHAuth(
                username=username, password=password, keys=private_keys)
        if hash((cls, host, port, auth)) == hash(cls.__cache[key]):
            # Same endpoint and same credentials: reuse the cached client,
            # reviving it if the connection has gone stale.
            ssh = cls.__cache[key]
            # noinspection PyBroadException
            try:
                ssh.execute('cd ~', timeout=5)
            except BaseException:  # Note: Do not change to lower level!
                logger.debug('Reconnect {}'.format(ssh))
                ssh.reconnect()
            return ssh
        if sys.getrefcount(cls.__cache[key]) == 2:
            # If we have only cache reference and temporary getrefcount
            # reference: close connection before deletion
            logger.debug('Closing {} as unused'.format(cls.__cache[key]))
            cls.__cache[key].close()
        # Credentials changed: evict the stale entry and build a new one.
        del cls.__cache[key]
    # noinspection PyArgumentList
    return super(
        _MemorizedSSH, cls).__call__(
        host=host, port=port,
        username=username, password=password,
        private_keys=private_keys, auth=auth,
        verbose=verbose)
def connect(self):
    """Establish the SSH connection, trying each private key in turn and
    falling back to password-only authentication.

    Side effect: remembers the key that worked in self.__actual_pkey
    (None when password auth was used).
    """
    # SECURITY FIX: the previous message logged the plaintext password;
    # log only host, port and username.
    logger.debug(
        "Connect to '{0}:{1}' as '{2}'".format(
            self.host, self.port, self.username))
    for private_key in self.private_keys:
        try:
            self._ssh.connect(
                self.host, port=self.port, username=self.username,
                password=self.password, pkey=private_key)
            self.__actual_pkey = private_key
            return
        except paramiko.AuthenticationException:
            continue  # this key was rejected, try the next one
    if self.private_keys:
        logger.error("Authentication with keys failed")

    # Last resort: password-only authentication.
    self.__actual_pkey = None
    self._ssh.connect(
        self.host, port=self.port, username=self.username,
        password=self.password)