def _pipes_isauthenticated(self, pipes_session: "PopenBytes") -> bool:
    """
    Private method to check initial authentication when using subprocess.Popen

    Since we always run ssh with `-v` we can simply check the stderr (where verbose output
    goes) to see if `Authenticated to [our host]` is in the output.

    Args:
        pipes_session: Popen pipes session object

    Returns:
        bool: True/False session was authenticated

    Raises:
        ScrapliTimeout: if stderr cannot be read, or if `Operation timed out` is seen in the
            stderr output

    """
    if pipes_session.stderr is None:
        raise ScrapliTimeout(f"Could not read stderr while connecting to host {self.host}")

    output = b""
    while True:
        output += pipes_session.stderr.read(65535)
        if f"Authenticated to {self.host}".encode() in output:
            self._isauthenticated = True
            return True
        if b"Operation timed out" in output:
            raise ScrapliTimeout(f"Timed out opening connection to host {self.host}")
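# The following is a minimal standalone sketch (not scrapli code) of the pattern the method
# above relies on: launch `ssh -v` via subprocess.Popen and scan stderr, where OpenSSH writes
# its verbose output, for the "Authenticated to <host>" banner. The host value is hypothetical;
# note that `.read(n)` on a buffered pipe may block until `n` bytes or EOF, which is one reason
# the real implementation pairs this loop with a timeout decorator.
import subprocess

sketch_host = "203.0.113.1"  # hypothetical host (TEST-NET-3 documentation address)
sketch_session = subprocess.Popen(
    ["ssh", sketch_host, "-v"],
    stdin=subprocess.PIPE,
    stdout=subprocess.PIPE,
    stderr=subprocess.PIPE,
)
sketch_output = b""
while sketch_session.stderr is not None:
    sketch_output += sketch_session.stderr.read(65535)
    if f"Authenticated to {sketch_host}".encode() in sketch_output:
        print("authenticated!")
        break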
def _handle_timeout(self) -> None:
    """
    Timeout handler method to release locks/raise exception consistently between timeout methods

    Args:
        N/A

    Returns:
        N/A  # noqa: DAR202

    Raises:
        ScrapliTimeout: always, if we hit this method we have already timed out!

    """
    from scrapli.channel import AsyncChannel, Channel  # pylint: disable=C0415

    if self.timeout_exit:
        self.scrapli_obj.logger.info("timeout_exit is True, closing transport")
        if self.session_lock.locked():
            self.session_lock.release()
        self.close()
    if not isinstance(self.scrapli_obj, (AsyncChannel, Channel)):
        # if system transport is timing out we can encounter a condition where the timeout
        # happens in system transport and we close the transport, however we then still have
        # to deal with unlocking the lock in `_send_input` (for example) -- this causes a
        # RuntimeError if the lock was already unlocked during closing of the session. so,
        # re-acquire the lock if this timeout occurred in something that is NOT a channel
        # object. there needs to be a big overhaul to all the lock handling, but that will be
        # a fairly significant project that has to touch all the scrapli libraries
        self.session_lock.acquire()
    raise ScrapliTimeout(self.message)
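# A small sketch illustrating why the method above re-acquires the lock before raising:
# releasing a threading.Lock that is not held raises RuntimeError, so if the close path
# already released the session lock, a later `release()` in `_send_input` would blow up
# unless the lock is re-acquired first. This is plain stdlib behavior, not scrapli code.
import threading

demo_lock = threading.Lock()
demo_lock.acquire()
demo_lock.release()
try:
    demo_lock.release()  # second release on an unlocked lock
except RuntimeError as exc:
    print(f"RuntimeError: {exc}")  # "release unlocked lock"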
def socket_open(self) -> None:
    """
    Open underlying socket

    Args:
        N/A

    Returns:
        N/A  # noqa: DAR202

    Raises:
        ConnectionRefusedError: if socket refuses connection
        ScrapliTimeout: if socket connection times out

    """
    if not self.socket_isalive():
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        self.sock.settimeout(self.timeout)
        try:
            self.sock.connect((self.host, self.port))
        except ConnectionRefusedError:
            LOG.critical(
                f"Connection refused trying to open socket to {self.host} on port {self.port}"
            )
            raise ConnectionRefusedError(
                f"Connection refused trying to open socket to {self.host} on port {self.port}"
            )
        except socket.timeout:
            LOG.critical(f"Timed out trying to open socket to {self.host} on port {self.port}")
            raise ScrapliTimeout(
                f"Timed out trying to open socket to {self.host} on port {self.port}"
            )
        LOG.debug(f"Socket to host {self.host} opened")
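# Minimal sketch (assumed host/port, not scrapli code) of the connect-with-timeout pattern
# used above: settimeout() bounds the blocking connect() call, which raises socket.timeout
# when the deadline passes and ConnectionRefusedError when the port actively refuses.
import socket

demo_sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
demo_sock.settimeout(5.0)
try:
    demo_sock.connect(("203.0.113.1", 22))  # hypothetical host
except socket.timeout:
    print("timed out opening socket")
except ConnectionRefusedError:
    print("connection refused")
finally:
    demo_sock.close()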
async def _authenticate_password(self, common_args: Dict[str, Any]) -> bool:
    """
    Attempt to authenticate with password/kbd-interactive authentication

    Args:
        common_args: Dict of kwargs that are common between asyncssh auth/open methods

    Returns:
        bool: True if authentication succeeds, otherwise False

    Raises:
        ScrapliTimeout: if authentication times out
        Exception: if unknown (i.e. not auth failed) exception occurs

    """
    try:
        self.session = await asyncio.wait_for(
            connect(password=self.auth_password, **common_args), timeout=self.timeout_socket
        )
        return True
    except asyncio.TimeoutError:
        msg = f"Password authentication with host {self.host} failed. Authentication Timed Out."
        self.logger.exception(msg)
        raise ScrapliTimeout(msg)
    except PermissionDenied:
        self.logger.critical(
            f"Password authentication with host {self.host} failed. Authentication Error."
        )
        return False
    except Exception as exc:
        self.logger.critical(
            f"Password authentication with host {self.host} failed. Exception: {exc}."
        )
        raise exc
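# Sketch of the asyncio.wait_for() pattern the coroutine above uses to bound authentication:
# wait_for cancels the awaited coroutine and raises asyncio.TimeoutError once the timeout
# elapses. slow_connect() here is a hypothetical stand-in for the asyncssh connect() call.
import asyncio


async def slow_connect() -> str:
    await asyncio.sleep(10)  # pretend authentication takes 10 seconds
    return "session"


async def demo_auth_timeout() -> None:
    try:
        await asyncio.wait_for(slow_connect(), timeout=1.0)
    except asyncio.TimeoutError:
        print("authentication timed out")


asyncio.run(demo_auth_timeout())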
async def read(self) -> bytes:
    """
    Read data from the channel

    Args:
        N/A

    Returns:
        bytes: bytes read from the telnet channel

    Raises:
        ScrapliTimeout: if async read does not complete within timeout_transport interval

    """
    read_timeout = self.timeout_transport or None

    try:
        output = await asyncio.wait_for(self.stdout.read(65535), timeout=read_timeout)
    except asyncio.TimeoutError as exc:
        msg = f"Timed out reading from transport, transport timeout: {self.timeout_transport}"
        self.logger.exception(msg)
        raise ScrapliTimeout(msg) from exc

    if self._stdout_binary_transmission:
        output = output.replace(b"\x00", b"")

    return output
def _pipes_isauthenticated(self, pipes_session: "PopenBytes") -> bool:
    """
    Private method to check initial authentication when using subprocess.Popen

    Since we always run ssh with `-v` we can simply check the stderr (where verbose output
    goes) to see if `Authenticated to [our host]` is in the output.

    Args:
        pipes_session: Popen pipes session object

    Returns:
        bool: True/False session was authenticated

    Raises:
        ScrapliTimeout: if we can't read from stderr of the session

    """
    if pipes_session.stderr is None:
        raise ScrapliTimeout(f"Could not read stderr while connecting to host {self.host}")

    output = b""
    while True:
        output += pipes_session.stderr.read(65535)
        if f"authenticated to {self.host}".encode() in output.lower():
            self._isauthenticated = True
            return True
        if (
            b"next authentication method: keyboard-interactive" in output.lower()
            or b"next authentication method: password" in output.lower()
        ):
            return False
        self._ssh_message_handler(output=output)
def socket_open(self) -> None:
    """
    Open underlying socket

    Args:
        N/A

    Returns:
        N/A  # noqa: DAR202

    Raises:
        ConnectionNotOpened: if we can't fetch socket addr info
        ConnectionRefusedError: if socket refuses connection
        ScrapliTimeout: if socket connection times out

    """
    sock_info = None
    try:
        sock_info = socket.getaddrinfo(self.host, self.port)
        if sock_info:
            socket_af = sock_info[0][0]
    except socket.gaierror:
        pass

    if not sock_info:
        # this will likely need to be clearer, we just don't know what failure scenarios
        # exist for this yet...
        raise ConnectionNotOpened("Failed to determine socket address family for host")

    if not self.socket_isalive():
        self.sock = socket.socket(socket_af, socket.SOCK_STREAM)
        self.sock.settimeout(self.timeout)
        try:
            self.sock.connect((self.host, self.port))
        except ConnectionRefusedError as exc:
            self.logger.critical(
                f"Connection refused trying to open socket to {self.host} on port {self.port}"
            )
            raise ConnectionRefusedError(
                f"Connection refused trying to open socket to {self.host} on port {self.port}"
            ) from exc
        except socket.timeout as exc:
            self.logger.critical(
                f"Timed out trying to open socket to {self.host} on port {self.port}"
            )
            raise ScrapliTimeout(
                f"Timed out trying to open socket to {self.host} on port {self.port}"
            ) from exc
        self.logger.debug(f"Socket to host {self.host} opened")
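# Sketch (stdlib only) of the address-family detection used above: getaddrinfo() returns a
# list of 5-tuples whose first element is the address family, so indexing [0][0] yields
# AF_INET for an IPv4 host or AF_INET6 for an IPv6 host, letting the socket be created with
# the correct family instead of hardcoding AF_INET.
import socket

demo_info = socket.getaddrinfo("localhost", 22)
demo_af = demo_info[0][0]
print(demo_af)  # e.g. AddressFamily.AF_INET or AddressFamily.AF_INET6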
def _pipes_isauthenticated(self, pipes_session: "PopenBytes") -> bool:
    """
    Private method to check initial authentication when using subprocess.Popen

    Since we always run ssh with `-v` we can simply check the stderr (where verbose output
    goes) to see if `Authenticated to [our host]` is in the output.

    Args:
        pipes_session: Popen pipes session object

    Returns:
        bool: True/False session was authenticated

    Raises:
        ScrapliTimeout: if `Operation timed out` in stderr output
        ScrapliAuthenticationFailed: if private key permissions are too open

    """
    if pipes_session.stderr is None:
        raise ScrapliTimeout(f"Could not read stderr while connecting to host {self.host}")

    output = b""
    while True:
        output += pipes_session.stderr.read(65535)
        if f"Authenticated to {self.host}".encode() in output:
            self._isauthenticated = True
            return True
        if b"Operation timed out" in output:
            msg = f"Timed out opening connection to host {self.host}"
            raise ScrapliTimeout(msg)
        if b"WARNING: UNPROTECTED PRIVATE KEY FILE!" in output:
            msg = (
                f"Permissions for private key `{self.auth_private_key}` are too open, "
                "authentication failed!"
            )
            raise ScrapliAuthenticationFailed(msg)
def timeout_wrapper(
    channel_or_transport: Union["Channel", "Transport"],
    *args: Any,
    **kwargs: Dict[str, Union[str, int]],
) -> Any:
    # import here to avoid circular dependency
    from scrapli.channel import AsyncChannel, Channel  # pylint: disable=C0415

    timeout_duration = getattr(channel_or_transport, attribute, None)
    if not timeout_duration:
        channel_or_transport.logger.info(
            f"Could not find {attribute} value of {channel_or_transport}, continuing "
            "without timeout decorator"
        )
        return wrapped_func(channel_or_transport, *args, **kwargs)

    # as this can be called from transport or channel get the appropriate objects
    # to unlock and close the session. we need to unlock as the close will block
    # forever if the session is locked, and the session very likely is locked while
    # waiting for output from the device
    if isinstance(channel_or_transport, (AsyncChannel, Channel)):
        timeout_exit = channel_or_transport.transport.timeout_exit
        session_lock = channel_or_transport.transport.session_lock
        close = channel_or_transport.transport.close
    else:
        timeout_exit = channel_or_transport.timeout_exit
        session_lock = channel_or_transport.session_lock
        close = channel_or_transport.close

    pool = multiprocessing.pool.ThreadPool(processes=1)
    func_args = [channel_or_transport, *args]
    future = pool.apply_async(wrapped_func, func_args, kwargs)
    try:
        result = future.get(timeout=timeout_duration)
        pool.terminate()
        return result
    except multiprocessing.context.TimeoutError:
        pool.terminate()
        channel_or_transport.logger.info(message)
        if timeout_exit:
            channel_or_transport.logger.info("timeout_exit is True, closing transport")
            if session_lock.locked():
                session_lock.release()
            close()
        raise ScrapliTimeout(message)
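# Standalone sketch of the ThreadPool timeout pattern above: run the wrapped function in a
# single-worker pool, then bound the result fetch with future.get(timeout=...), which raises
# multiprocessing.context.TimeoutError when the work does not finish in time. Values here
# are illustrative, not scrapli defaults.
import multiprocessing.context
import multiprocessing.pool
import time


def demo_slow_operation() -> str:
    time.sleep(10)  # pretend the device never sends our prompt back
    return "done"


demo_pool = multiprocessing.pool.ThreadPool(processes=1)
demo_future = demo_pool.apply_async(demo_slow_operation)
try:
    demo_result = demo_future.get(timeout=1.0)
    demo_pool.terminate()
except multiprocessing.context.TimeoutError:
    demo_pool.terminate()
    print("operation timed out")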
def _handle_timeout(self) -> None:
    """
    Timeout handler method to close connections and raise ScrapliTimeout

    Args:
        N/A

    Returns:
        None

    Raises:
        ScrapliTimeout: always, if we hit this method we have already timed out!

    """
    self.channel_logger.critical("channel operation timed out, closing transport")
    self.transport_instance.close()
    raise ScrapliTimeout(self.message)
def _handle_timeout(self) -> None:
    """
    Timeout handler method to close connections and raise ScrapliTimeout

    Args:
        N/A

    Returns:
        N/A  # noqa: DAR202

    Raises:
        ScrapliTimeout: always, if we hit this method we have already timed out!

    """
    if self.timeout_exit:
        self.scrapli_obj.logger.info("timeout_exit is True, closing transport")
        self.close()
    raise ScrapliTimeout(self.message)
def _handle_timeout(self) -> None:
    """
    Timeout handler method to release locks/raise exception consistently between timeout methods

    Args:
        N/A

    Returns:
        N/A  # noqa: DAR202

    Raises:
        ScrapliTimeout: always, if we hit this method we have already timed out!

    """
    if self.timeout_exit:
        self.scrapli_obj.logger.info("timeout_exit is True, closing transport")
        self.close()
    raise ScrapliTimeout(self.message)
async def timeout_wrapper(
    channel_or_transport: Union["AsyncChannel", "Transport"],
    *args: Any,
    **kwargs: Dict[str, Union[str, int]],
) -> Any:
    # import here to avoid circular dependency
    from scrapli.channel import AsyncChannel  # pylint: disable=C0415

    timeout_duration = getattr(channel_or_transport, attribute, None)
    if not timeout_duration:
        channel_or_transport.logger.info(
            f"Could not find {attribute} value of {channel_or_transport}, continuing "
            "without timeout decorator"
        )
        return await wrapped_func(channel_or_transport, *args, **kwargs)

    # as this can be called from transport or channel get the appropriate objects
    # to unlock and close the session. we need to unlock as the close will block
    # forever if the session is locked, and the session very likely is locked while
    # waiting for output from the device
    if isinstance(channel_or_transport, AsyncChannel):
        timeout_exit = channel_or_transport.transport.timeout_exit
        session_lock = channel_or_transport.transport.session_lock
        close = channel_or_transport.transport.close
    else:
        timeout_exit = channel_or_transport.timeout_exit
        session_lock = channel_or_transport.session_lock
        close = channel_or_transport.close

    try:
        return await asyncio.wait_for(
            wrapped_func(channel_or_transport, *args, **kwargs), timeout=timeout_duration
        )
    except asyncio.TimeoutError:
        channel_or_transport.logger.info(message)
        if timeout_exit:
            channel_or_transport.logger.info("timeout_exit is True, closing transport")
            if session_lock.locked():
                session_lock.release()
            close()
        raise ScrapliTimeout(message)
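# Simplified, hypothetical sketch of the decorator shape the async wrapper above implements:
# wrap a coroutine function so every call is bounded by asyncio.wait_for(), translating
# asyncio.TimeoutError into a domain exception. The fixed `seconds` argument stands in for
# the attribute lookup and lock handling that the real scrapli decorator performs.
import asyncio
import functools
from typing import Any, Awaitable, Callable


def async_operation_timeout(seconds: float) -> Callable[..., Any]:
    def decorator(func: Callable[..., Awaitable[Any]]) -> Callable[..., Awaitable[Any]]:
        @functools.wraps(func)
        async def wrapper(*args: Any, **kwargs: Any) -> Any:
            try:
                return await asyncio.wait_for(func(*args, **kwargs), timeout=seconds)
            except asyncio.TimeoutError as exc:
                raise TimeoutError("operation timed out") from exc  # stand-in exception

        return wrapper

    return decorator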
async def read(self) -> bytes:
    """
    Read data from the channel

    Args:
        N/A

    Returns:
        bytes: bytes output as read from channel

    Raises:
        ScrapliTimeout: if async read does not complete within timeout_transport interval

    """
    try:
        output: bytes = await asyncio.wait_for(
            self.stdout.read(65535), timeout=self.timeout_transport
        )
        return output
    except asyncio.TimeoutError:
        msg = f"Timed out reading from transport, transport timeout: {self.timeout_transport}"
        self.logger.exception(msg)
        raise ScrapliTimeout(msg)
async def _authenticate_private_key(self, common_args: Dict[str, Any]) -> bool:
    """
    Attempt to authenticate with key based authentication

    Args:
        common_args: Dict of kwargs that are common between asyncssh auth/open methods

    Returns:
        bool: True if authentication succeeds, otherwise False

    Raises:
        ScrapliTimeout: if authentication times out
        Exception: if unknown (i.e. not auth failed) exception occurs

    """
    try:
        self.session = await asyncio.wait_for(
            connect(
                client_keys=self.auth_private_key,
                preferred_auth=("publickey",),
                **common_args,
            ),
            timeout=self.timeout_socket,
        )
        return True
    except asyncio.TimeoutError as exc:
        msg = (
            f"Private key authentication with host {self.host} failed. "
            "Authentication Timed Out."
        )
        self.logger.exception(msg)
        raise ScrapliTimeout(msg) from exc
    except PermissionDenied:
        self.logger.critical(
            f"Private key authentication with host {self.host} failed. Authentication Error."
        )
        return False
    except Exception as exc:
        self.logger.critical(
            f"Private key authentication with host {self.host} failed. Exception: {exc}."
        )
        raise exc
def send_input_netconf(self, channel_input: str) -> bytes:
    """
    Send inputs to netconf server

    Args:
        channel_input: string of the base xml message to send to netconf server

    Returns:
        bytes: bytes result of message sent to netconf server

    Raises:
        ScrapliTimeout: re-raises channel timeouts with additional message if channel input
            may be big enough to require setting `use_compressed_parser` to false -- note
            that this has only been seen as an issue with NXOS so far.

    """
    bytes_final_channel_input = channel_input.encode()

    buf: bytes
    buf, _ = super().send_input(channel_input=channel_input, strip_prompt=False, eager=True)

    if bytes_final_channel_input in buf:
        # if we got the input AND the rpc-reply we can strip out our inputs so we just have
        # the reply remaining
        buf = buf.split(bytes_final_channel_input)[1]

    try:
        buf = self._read_until_prompt(buf=buf)
    except ScrapliTimeout as exc:
        if len(channel_input) >= 4096:
            msg = (
                "timed out finding prompt after sending input, input is greater than 4096 "
                "chars, try setting 'use_compressed_parser' to False"
            )
            self.logger.info(msg)
            raise ScrapliTimeout(msg) from exc
        raise ScrapliTimeout from exc

    if self._server_echo is None:
        # At least per early drafts of the netconf over ssh rfcs, netconf servers MUST NOT
        # echo the input commands back to the client. In the case of "normal" scrapli netconf
        # with the system transport this happens anyway because we combine the stdin and
        # stdout fds into a single pty, however for other transports we have an actual stdin
        # and stdout fd to read/write. It seems that at the very least IOSXE with NETCONF 1.1
        # wants to echo inputs back onto the stdout for the channel. This is totally ok and
        # we can deal with it, we just need to *know* that it is happening; so, while the
        # _server_echo attribute is still `None`, we can go ahead and see if the input we
        # sent is in the output we read off the channel. If it is *not*, we know the server
        # does *not* echo and we can move on. If it *is* in the output, we know the server
        # echoes, and we also have one additional step in that we need to read "until prompt"
        # again in order to capture the reply to our rpc.
        #
        # See: https://tools.ietf.org/html/draft-ietf-netconf-ssh-02 (search for "echo")
        self.logger.debug("server echo is unset, determining if server echoes inputs now")

        if bytes_final_channel_input in buf:
            self.logger.debug("server echoes inputs, setting _server_echo to 'true'")
            self._server_echo = True
            # since echo is True and we only read until our input (because our inputs always
            # end with a "prompt" that we read until) we need to once again read until
            # prompt; this read will read all the way up through the *reply* to the prompt
            # at the end of the reply message
            buf = self._read_until_prompt(buf=b"")
        else:
            self.logger.debug("server does *not* echo inputs, setting _server_echo to 'false'")
            self._server_echo = False

    if self._netconf_base_channel_args.netconf_version == NetconfVersion.VERSION_1_1:
        # netconf 1.1 with "chunking" style message format needs an extra return char here
        self.send_return()

    return buf
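# Tiny sketch of the echo detection/stripping above, using canned bytes rather than a real
# netconf session: finding our input in the read buffer at all is what flips the server-echo
# flag, and splitting the buffer on the input leaves only the reply portion.
demo_input = b"<rpc><get-config/></rpc>"
demo_buf = demo_input + b"\n<rpc-reply>...</rpc-reply>"

demo_server_echo = demo_input in demo_buf  # True -> server echoes inputs
if demo_server_echo:
    demo_buf = demo_buf.split(demo_input)[1]
print(demo_server_echo, demo_buf)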
def read_callback(
    self,
    callbacks: List["ReadCallback"],
    initial_input: Optional[str] = None,
    read_output: bytes = b"",
    read_delay: float = 0.1,
    read_timeout: float = -1.0,
) -> "ReadCallbackReturnable":
    r"""
    Read from a channel and react to the output with some callback.

    This method is kind of like an "advanced" send_interactive -- the idea is simple: send
    some "stuff" to the channel (optionally), and then read from the channel. Based on the
    output, do something.

    The callbacks argument is a list of `ReadCallback` objects, each containing the actual
    callback to execute, some info about when to trigger that callback (also when *not* to
    trigger that callback), as well as some attributes to control the next (if desired)
    iteration of read_callback.

    You could in theory do basically everything with this method by chaining callbacks
    forever, but you probably don't want to do that for real!

    Example usage:

    ```
    from scrapli.driver.core import IOSXEDriver
    from scrapli.driver.generic.base_driver import ReadCallback
    from scrapli.driver.generic.sync_driver import GenericDriver

    device = {
        "host": "rtr1",
        "auth_strict_key": False,
        "ssh_config_file": True,
    }

    def callback_one(cls: GenericDriver, read_output: str):
        cls.acquire_priv("configuration")
        cls.channel.send_return()

    def callback_two(cls: GenericDriver, read_output: str):
        print(f"previous read output : {read_output}")
        r = cls.send_command("show run | i hostname")
        print(f"result: {r.result}")

    with IOSXEDriver(**device) as conn:
        callbacks = [
            ReadCallback(
                contains="rtr1#",
                callback=callback_one,
                name="call1",
                case_insensitive=False,
            ),
            ReadCallback(
                contains_re=r"^rtr1\(config\)#",
                callback=callback_two,
                complete=True,
            ),
        ]
        conn.read_callback(callbacks=callbacks, initial_input="show run | i hostname")
    ```

    Args:
        callbacks: a list of ReadCallback objects
        initial_input: optional string to send to "kick off" the read_callback method
        read_output: optional bytes to append any new reads to
        read_delay: sleep interval between reads
        read_timeout: value to set the `transport_timeout` to for the duration of the
            reading portion of this method. If left default (-1.0) or set to anything below
            0, the transport timeout value will be left alone (whatever the
            timeout_transport value is); otherwise, the provided value will be temporarily
            set as the timeout_transport for the duration of the reading.

    Returns:
        ReadCallbackReturnable: either None or call to read_callback again

    Raises:
        ScrapliTimeout: if the read operation times out (based on the read_timeout value)
            during the read callback check.

""" if initial_input is not None: self.channel.write( channel_input=f"{initial_input}{self.comms_return_char}") return self.read_callback(callbacks=callbacks, initial_input=None) original_transport_timeout = self.timeout_transport # if the read_timeout value is -1.0 or just less than 0, that indicates we should use # the "normal" transport timeout and not modify anything self.timeout_transport = read_timeout if read_timeout >= 0 else self.timeout_transport _read_delay = 0.1 if read_delay <= 0 else read_delay while True: try: read_output += self.channel.read() except ScrapliTimeout as exc: self.timeout_transport = original_transport_timeout raise ScrapliTimeout( "timeout during read in read_callback operation") from exc for callback in callbacks: _run_callback = callback.check(read_output=read_output) if (callback.only_once is True and callback._triggered is True # pylint: disable=W0212 ): self.logger.warning( f"callback {callback.name} matches but is set to 'only_once', " "skipping this callback") continue if _run_callback is True: self.logger.info( f"callback {callback.name} matched, executing") self.timeout_transport = original_transport_timeout callback.run(driver=self) if callback.complete: self.logger.debug( "callback complete is true, done with read_callback" ) return None if callback.reset_output: read_output = b"" return self.read_callback( callbacks=callbacks, initial_input=None, read_output=read_output, read_delay=callback.next_delay, read_timeout=callback.next_timeout, ) time.sleep(_read_delay)