def _build_with_defaults(self, default_type: str = "report-all") -> Element:
    """
    Create with-defaults element for a given operation

    Args:
        default_type: enumeration of with-defaults;
            report-all|trim|explicit|report-all-tagged

    Returns:
        Element: lxml with-defaults element to use for netconf operation

    Raises:
        CapabilityNotSupported: if default_type provided but not supported by device
        ScrapliValueError: if default_type is not one of
            report-all|trim|explicit|report-all-tagged

    """
    valid_default_types = ("report-all", "trim", "explicit", "report-all-tagged")

    # guard clause: reject unknown enumeration values before touching capabilities
    if default_type not in valid_default_types:
        raise ScrapliValueError(
            "'default_type' should be one of report-all|trim|explicit|report-all-tagged, "
            f"got '{default_type}'"
        )

    if "urn:ietf:params:netconf:capability:with-defaults:1.0" not in self.server_capabilities:
        msg = "with-defaults requested, but is not supported by the server"
        self.logger.exception(msg)
        raise CapabilityNotSupported(msg)

    return etree.fromstring(
        NetconfBaseOperations.WITH_DEFAULTS_SUBTREE.value.format(default_type=default_type),
        parser=PARSER,
    )
def __new__(  # pylint: disable=R0914
    cls,
    platform: str,
    host: str,
    privilege_levels: Optional[Dict[str, PrivilegeLevel]] = None,
    default_desired_privilege_level: Optional[str] = None,
    port: Optional[int] = None,
    auth_username: Optional[str] = None,
    auth_password: Optional[str] = None,
    auth_private_key: Optional[str] = None,
    auth_private_key_passphrase: Optional[str] = None,
    auth_strict_key: Optional[bool] = None,
    auth_bypass: Optional[bool] = None,
    timeout_socket: Optional[float] = None,
    timeout_transport: Optional[float] = None,
    timeout_ops: Optional[float] = None,
    comms_return_char: Optional[str] = None,
    ssh_config_file: Optional[Union[str, bool]] = None,
    ssh_known_hosts_file: Optional[Union[str, bool]] = None,
    on_init: Optional[Callable[..., Any]] = None,
    on_open: Optional[Callable[..., Any]] = None,
    on_close: Optional[Callable[..., Any]] = None,
    transport: Optional[str] = None,
    transport_options: Optional[Dict[str, Any]] = None,
    channel_log: Optional[Union[str, bool, BytesIO]] = None,
    channel_log_mode: Optional[str] = None,
    channel_lock: Optional[bool] = None,
    logging_uid: Optional[str] = None,
    auth_secondary: Optional[str] = None,
    failed_when_contains: Optional[List[str]] = None,
    textfsm_platform: Optional[str] = None,
    genie_platform: Optional[str] = None,
    variant: Optional[str] = None,
    **kwargs: Dict[Any, Any],
) -> "AsyncScrapli":
    r"""
    Scrapli Factory method for asynchronous drivers

    Args:
        platform: name of the scrapli platform to return a connection object for; should be
            one of the "core" platforms or a valid community platform name
        host: host ip/name to connect to
        port: port to connect to
        auth_username: username for authentication
        auth_private_key: path to private key for authentication
        auth_private_key_passphrase: passphrase for decrypting ssh key if necessary
        auth_password: password for authentication
        auth_strict_key: strict host checking or not
        auth_bypass: bypass "in channel" authentication -- only supported with telnet,
            asynctelnet, and system transport plugins
        timeout_socket: timeout for establishing socket/initial connection in seconds
        timeout_transport: timeout for ssh|telnet transport in seconds
        timeout_ops: timeout for ssh channel operations
        comms_return_char: character to use to send returns to host
        ssh_config_file: string to path for ssh config file, True to use default ssh config
            file or False to ignore default ssh config file
        ssh_known_hosts_file: string to path for ssh known hosts file, True to use default
            known file locations. Only applicable/needed if `auth_strict_key` is set to True
        on_init: callable that accepts the class instance as its only argument. this
            callable, if provided, is executed as the last step of object instantiation --
            its purpose is primarily to provide a mechanism for scrapli community platforms
            to have an easy way to modify initialization arguments/object attributes without
            needing to create a class that extends the driver, instead allowing the
            community platforms to simply build from the GenericDriver or NetworkDriver
            classes, and pass this callable to do things such as appending to a username
            (looking at you RouterOS!!). Note that this is *always* a synchronous function
            (even for asyncio drivers)!
        on_open: callable that accepts the class instance as its only argument. this
            callable, if provided, is executed immediately after authentication is
            completed. Common use cases for this callable would be to disable paging or
            accept any kind of banner message that prompts a user upon connection
        on_close: callable that accepts the class instance as its only argument. this
            callable, if provided, is executed immediately prior to closing the underlying
            transport. Common use cases for this callable would be to save configurations
            prior to exiting, or to logout properly to free up vtys or similar
        transport: name of the transport plugin to use for the actual telnet/ssh/netconf
            connection. Available "core" transports are:
                - system
                - telnet
                - asynctelnet
                - ssh2
                - paramiko
                - asyncssh
            Please see relevant transport plugin section for details. Additionally third
            party transport plugins may be available.
        transport_options: dictionary of options to pass to selected transport class; see
            docs for given transport class for details of what to pass here
        channel_lock: True/False to lock the channel (threading.Lock/asyncio.Lock) during
            any channel operations, defaults to False
        channel_log: True/False or a string path to a file of where to write out channel
            logs -- these are not "logs" in the normal logging module sense, but only the
            output that is read from the channel. In other words, the output of the channel
            log should look similar to what you would see as a human connecting to a device
        channel_log_mode: "write"|"append", all other values will raise ValueError, does
            what it sounds like it should by setting the channel log to the provided mode
        logging_uid: unique identifier (string) to associate to log messages; useful if you
            have multiple connections to the same device (i.e. one console, one ssh, or one
            to each supervisor module, etc.)
        failed_when_contains: list of strings that indicate a command/config has failed
        textfsm_platform: string to use to fetch ntc-templates templates for textfsm parsing
        genie_platform: string to use to fetch genie parser templates
        privilege_levels: optional user provided privilege levels, if left None will default
            to scrapli standard privilege levels
        default_desired_privilege_level: string of name of default desired priv, this is
            the priv level that is generally used to disable paging/set terminal width and
            things like that upon first login, and is also the priv level scrapli will try
            to acquire for normal "command" operations (`send_command`, `send_commands`)
        auth_secondary: password to use for secondary authentication (enable)
        variant: name of the community platform variant if desired
        **kwargs: should be unused, but here to accept any additional kwargs from users

    Returns:
        final_driver: asynchronous driver class for provided driver

    Raises:
        ScrapliValueError: if provided transport is not an asyncio transport
        ScrapliTypeError: if `platform` not in keyword arguments

    """
    logger.debug("AsyncScrapli factory initialized")

    # async factory only builds asyncio-transport connections; anything else belongs to
    # the synchronous `Scrapli` factory
    if transport not in ASYNCIO_TRANSPORTS:
        raise ScrapliValueError("Use 'Scrapli' if using a synchronous transport!")

    if not isinstance(platform, str):
        raise ScrapliTypeError(f"Argument 'platform' must be 'str' got '{type(platform)}'")

    # collect only the kwargs the user actually provided so platform defaults can fill
    # in the rest
    provided_kwargs = _build_provided_kwargs_dict(
        host=host,
        port=port,
        auth_username=auth_username,
        auth_password=auth_password,
        auth_private_key=auth_private_key,
        auth_private_key_passphrase=auth_private_key_passphrase,
        auth_strict_key=auth_strict_key,
        auth_bypass=auth_bypass,
        timeout_socket=timeout_socket,
        timeout_transport=timeout_transport,
        timeout_ops=timeout_ops,
        comms_return_char=comms_return_char,
        ssh_config_file=ssh_config_file,
        ssh_known_hosts_file=ssh_known_hosts_file,
        on_init=on_init,
        on_open=on_open,
        on_close=on_close,
        transport=transport,
        transport_options=transport_options,
        channel_log=channel_log,
        channel_log_mode=channel_log_mode,
        channel_lock=channel_lock,
        logging_uid=logging_uid,
        privilege_levels=privilege_levels,
        default_desired_privilege_level=default_desired_privilege_level,
        auth_secondary=auth_secondary,
        failed_when_contains=failed_when_contains,
        textfsm_platform=textfsm_platform,
        genie_platform=genie_platform,
        **kwargs,
    )

    final_driver, additional_kwargs = cls._get_driver(platform=platform, variant=variant)

    # at this point will need to merge the additional kwargs in (for community drivers),
    # ensure that kwargs passed by user supersede the ones coming from community platform
    if additional_kwargs:
        final_kwargs = {**additional_kwargs, **provided_kwargs}
    else:
        final_kwargs = provided_kwargs

    final_conn = final_driver(**final_kwargs)
    # cast the final conn to type AsyncScrapli to appease mypy -- we know it will be a
    # NetworkDriver or GenericDriver, but thats ok =)
    final_conn = cast(AsyncScrapli, final_conn)
    return final_conn
def _pre_commit(
    self,
    confirmed: bool = False,
    timeout: Optional[int] = None,
    persist: Optional[Union[int, str]] = None,
    persist_id: Optional[Union[int, str]] = None,
) -> NetconfResponse:
    """
    Handle pre "commit" tasks for consistency between sync/async versions

    Args:
        confirmed: whether this is a confirmed commit
        timeout: specifies the confirm timeout in seconds
        persist: make the confirmed commit survive a session termination, and set a token
            on the ongoing confirmed commit
        persist_id: value must be equal to the value given in the <persist> parameter to
            the original <commit> operation.

    Returns:
        NetconfResponse: scrapli_netconf NetconfResponse object containing all the
            necessary channel inputs (string and xml)

    Raises:
        ScrapliValueError: if persist and persist_id are provided (cannot combine)
        ScrapliValueError: if confirmed and persist_id are provided (cannot combine)
        CapabilityNotSupported: if device does not have confirmed-commit capability

    """
    self.logger.debug("Building payload for 'commit' operation")

    xml_request = self._build_base_elem()
    xml_commit_element = etree.fromstring(
        NetconfBaseOperations.COMMIT.value, parser=self.xml_parser
    )

    # persist-id refers back to a previous confirmed commit, so it cannot be combined
    # with either a new persist token or a new confirmed commit
    if persist and persist_id:
        raise ScrapliValueError(
            "Invalid combination - 'persist' cannot be present with 'persist-id'"
        )
    if confirmed and persist_id:
        raise ScrapliValueError(
            "Invalid combination - 'confirmed' cannot be present with 'persist-id'"
        )

    if confirmed or persist_id:
        confirmed_commit_capabilities = (
            "urn:ietf:params:netconf:capability:confirmed-commit:1.0",
            "urn:ietf:params:netconf:capability:confirmed-commit:1.1",
        )
        if not any(
            capability in self.server_capabilities
            for capability in confirmed_commit_capabilities
        ):
            msg = "confirmed-commit requested, but is not supported by the server"
            self.logger.exception(msg)
            raise CapabilityNotSupported(msg)

    if confirmed:
        xml_commit_element.append(
            etree.fromstring(NetconfBaseOperations.COMMIT_CONFIRMED.value, parser=self.xml_parser)
        )
        # a confirm timeout only makes sense for a confirmed commit
        if timeout is not None:
            xml_commit_element.append(
                etree.fromstring(
                    NetconfBaseOperations.COMMIT_CONFIRMED_TIMEOUT.value.format(timeout=timeout),
                    parser=self.xml_parser,
                )
            )

    if persist is not None:
        xml_commit_element.append(
            etree.fromstring(
                NetconfBaseOperations.COMMIT_CONFIRMED_PERSIST.value.format(persist=persist),
                parser=self.xml_parser,
            )
        )

    if persist_id is not None:
        xml_commit_element.append(
            etree.fromstring(
                NetconfBaseOperations.COMMIT_PERSIST_ID.value.format(persist_id=persist_id),
                parser=self.xml_parser,
            )
        )

    xml_request.insert(0, xml_commit_element)

    channel_input = self._finalize_channel_input(xml_request=xml_request)

    response = NetconfResponse(
        host=self.host,
        channel_input=channel_input.decode(),
        xml_input=xml_request,
        netconf_version=self.netconf_version,
        strip_namespaces=self.strip_namespaces,
    )
    self.logger.debug(
        f"Built payload for 'commit' operation. Payload: {channel_input.decode()}"
    )
    return response
def _build_filter(self, filter_: str, filter_type: str = "subtree") -> _Element:
    """
    Create filter element for a given rpc

    The `filter_` string may contain multiple xml elements at its "root" (subtree filters);
    we will simply place the payload into a temporary "tmp" outer tag so that when we cast
    it to an etree object the elements are all preserved; without this outer "tmp" tag,
    lxml will scoop up only the first element provided as it appears to be the root of the
    document presumably.

    An example valid (to scrapli netconf at least) xml filter would be:

    ```
    <interface-configurations xmlns="http://cisco.com/ns/yang/Cisco-IOS-XR-ifmgr-cfg">
        <interface-configuration>
            <active>act</active>
        </interface-configuration>
    </interface-configurations>
    <netconf-yang xmlns="http://cisco.com/ns/yang/Cisco-IOS-XR-man-netconf-cfg">
    </netconf-yang>
    ```

    Args:
        filter_: strings of filters to build into a filter element or (for subtree) a full
            filter string (in filter tags)
        filter_type: type of filter; subtree|xpath

    Returns:
        _Element: lxml filter element to use for netconf operation

    Raises:
        CapabilityNotSupported: if xpath selected and not supported on server
        ScrapliValueError: if filter_type is not one of subtree|xpath

    """
    if filter_type == "subtree":
        # tmp tags to place the users kinda not valid xml filter into
        _filter_ = f"<tmp>{filter_}</tmp>"
        # "validate" subtree filter by forcing it into xml, parser "flattens" it as well
        tmp_xml_filter_element = etree.fromstring(_filter_, parser=self.xml_parser)

        # index into the element directly rather than the deprecated `getchildren()`
        if tmp_xml_filter_element[0].tag == "filter":
            # if the user filter was already wrapped in filter tags we'll end up here, we
            # will blindly reuse the users filter but we'll make sure that the filter
            # "type" is set
            xml_filter_elem = tmp_xml_filter_element[0]
            xml_filter_elem.attrib["type"] = "subtree"
        else:
            xml_filter_elem = etree.fromstring(
                NetconfBaseOperations.FILTER_SUBTREE.value.format(filter_type=filter_type),
            )

            # iterate through the children inside the tmp tags and move *those* elements
            # into the actual final filter payload; `append` (not `insert(1, ...)`)
            # preserves the order the user supplied the sibling filter elements in --
            # repeatedly inserting at index 1 scrambles the order once there are three or
            # more root elements
            for xml_filter_element in tmp_xml_filter_element:
                xml_filter_elem.append(xml_filter_element)
    elif filter_type == "xpath":
        if "urn:ietf:params:netconf:capability:xpath:1.0" not in self.server_capabilities:
            msg = "xpath filter requested, but is not supported by the server"
            self.logger.exception(msg)
            raise CapabilityNotSupported(msg)
        xml_filter_elem = etree.fromstring(
            NetconfBaseOperations.FILTER_XPATH.value.format(
                filter_type=filter_type, xpath=filter_
            ),
            parser=self.xml_parser,
        )
    else:
        raise ScrapliValueError(
            f"'filter_type' should be one of subtree|xpath, got '{filter_type}'"
        )
    return xml_filter_elem
def spawn(
    cls: Type[PtyProcessType],
    spawn_command: List[str],
    echo: bool = True,
    rows: int = 80,
    cols: int = 256,
) -> PtyProcessType:
    """
    Start the given command in a child process in a pseudo terminal.

    This does all the fork/exec type of stuff for a pty, and returns an instance of
    PtyProcess. For some devices setting terminal width strictly in the operating system
    (the actual network operating system) does not seem to be sufficient by itself for
    setting terminal length or width -- so we have optional values for rows/cols that can
    be passed here as well.

    Args:
        spawn_command: command to execute with arguments (if applicable), as a list
        echo: enable/disable echo -- defaults to True, should be left as True for "normal"
            scrapli operations, optionally disable for scrapli_netconf operations.
        rows: integer number of rows for ptyprocess "window"
        cols: integer number of cols for ptyprocess "window"

    Returns:
        PtyProcessType: instantiated PtyProcess object

    Raises:
        ScrapliValueError: if no ssh binary found on PATH
        Exception: IOError - if unable to set window size of child process
        Exception: OSError - if unable to spawn command in child process
        IOError: failing to reset window size exception: if we get an exception decoding
            output

    """
    # Note that it is difficult for this method to fail.
    # You cannot detect if the child process cannot start.
    # So the only way you can tell if the child process started
    # or not is to try to read from the file descriptor. If you get
    # EOF immediately then it means that the child is already dead.
    # That may not necessarily be bad because you may have spawned a child
    # that performs some task; creates no stdout output; and then dies.

    # local imports keep these posix-only modules out of module import time
    import fcntl
    import pty
    import resource
    import termios
    from pty import CHILD, STDIN_FILENO

    # resolve the command (typically ssh) to an absolute path; execv does not search PATH
    spawn_executable = which(spawn_command[0])
    if spawn_executable is None:
        raise ScrapliValueError("ssh executable not found!")
    spawn_command[0] = spawn_executable

    # [issue #119] To prevent the case where exec fails and the user is
    # stuck interacting with a python child process instead of whatever
    # was expected, we implement the solution from
    # http://stackoverflow.com/a/3703179 to pass the exception to the
    # parent process

    # [issue #119] 1. Before forking, open a pipe in the parent process.
    exec_err_pipe_read, exec_err_pipe_write = os.pipe()

    pid, fd = pty.fork()

    # Some platforms must call setwinsize() and setecho() from the
    # child process, and others from the master process. We do both,
    # allowing IOError for either.
    if pid == CHILD:
        # child: stdin is now the slave end of the pty
        try:
            _setwinsize(fd=STDIN_FILENO, rows=rows, cols=cols)
        except IOError as err:
            # EINVAL/ENOTTY just mean this platform wants the parent to do it instead
            if err.args[0] not in (errno.EINVAL, errno.ENOTTY):
                raise

        # disable echo if requested
        if echo is False:
            try:
                _setecho(STDIN_FILENO, False)
            except (IOError, termios.error) as err:
                if err.args[0] not in (errno.EINVAL, errno.ENOTTY):
                    raise

        # [issue #119] 3. The child closes the reading end and sets the
        # close-on-exec flag for the writing end.
        os.close(exec_err_pipe_read)
        fcntl.fcntl(exec_err_pipe_write, fcntl.F_SETFD, fcntl.FD_CLOEXEC)

        # Do not allow child to inherit open file descriptors from parent,
        # with the exception of the exec_err_pipe_write of the pipe.
        # Impose ceiling on max_fd: AIX bugfix for users with unlimited
        # nofiles where resource.RLIMIT_NOFILE is 2^63-1 and os.closerange()
        # occasionally raises out of range error
        max_fd = min(1048576, resource.getrlimit(resource.RLIMIT_NOFILE)[0])
        pass_fds = sorted({exec_err_pipe_write})
        # close every fd above stderr except the error pipe, range by range
        for pair in zip([2] + pass_fds, pass_fds + [max_fd]):
            os.closerange(pair[0] + 1, pair[1])

        try:
            os.execv(spawn_executable, spawn_command)
        except OSError as err:
            # [issue #119] 5. If exec fails, the child writes the error
            # code back to the parent using the pipe, then exits.
            tosend = f"OSError:{err.errno}:{str(err)}".encode()
            os.write(exec_err_pipe_write, tosend)
            os.close(exec_err_pipe_write)
            os._exit(os.EX_OSERR)

    # Parent
    inst = cls(pid, fd)

    # [issue #119] 2. After forking, the parent closes the writing end
    # of the pipe and reads from the reading end.
    os.close(exec_err_pipe_write)
    exec_err_data = os.read(exec_err_pipe_read, 4096)
    os.close(exec_err_pipe_read)

    # [issue #119] 6. The parent reads eof (a zero-length read) if the
    # child successfully performed exec, since close-on-exec made
    # successful exec close the writing end of the pipe. Or, if exec
    # failed, the parent reads the error code and can proceed
    # accordingly. Either way, the parent blocks until the child calls
    # exec.
    if len(exec_err_data) != 0:
        try:
            # payload format written by the child: b"ExcClass:errno:message"
            errclass, errno_s, errmsg = exec_err_data.split(b":", 2)
            exctype = getattr(builtins, errclass.decode("ascii"), Exception)
            exception = exctype(errmsg.decode("utf-8", "replace"))
            if exctype is OSError:
                exception.errno = int(errno_s)
        except Exception:
            raise Exception("Subprocess failed, got bad error data: %r" % exec_err_data)
        else:
            # re-raise the child's exec failure in the parent process
            raise exception

    try:
        inst.setwinsize(rows=rows, cols=cols)
    except IOError as err:
        if err.args[0] not in (errno.EINVAL, errno.ENOTTY, errno.ENXIO):
            raise

    return inst
def send_interactive(
    self,
    interact_events: List[Tuple[str, str, Optional[bool]]],
    *,
    failed_when_contains: Optional[Union[str, List[str]]] = None,
    privilege_level: str = "",
    timeout_ops: Optional[float] = None,
) -> Response:
    """
    Interact with a device with changing prompts per input.

    Used to interact with devices where prompts change per input, and where inputs may be
    hidden such as in the case of a password input. This can be used to respond to
    challenges from devices such as the confirmation for the command "clear logging" on
    IOSXE devices for example. You may have as many elements in the "interact_events" list
    as needed, and each element of that list should be a tuple of two or three elements.
    The first element is always the input to send as a string, the second should be the
    expected response as a string, and the optional third a bool for whether or not the
    input is "hidden" (i.e. password input)

    An example where we need this sort of capability:

    ```
    3560CX#copy flash: scp:
    Source filename []? test1.txt
    Address or name of remote host []? 172.31.254.100
    Destination username [carl]?
    Writing test1.txt
    Password:

    Password:
     Sink: C0644 639 test1.txt
    !
    639 bytes copied in 12.066 secs (53 bytes/sec)
    3560CX#
    ```

    To accomplish this we can use the following:

    ```
    interact = conn.channel.send_inputs_interact(
        [
            ("copy flash: scp:", "Source filename []?", False),
            ("test1.txt", "Address or name of remote host []?", False),
            ("172.31.254.100", "Destination username [carl]?", False),
            ("carl", "Password:", False),
            ("super_secure_password", prompt, True),
        ]
    )
    ```

    If we needed to deal with more prompts we could simply continue adding tuples to the
    list of interact "events".

    Args:
        interact_events: list of tuples containing the "interactions" with the device
            each list element must have an input and an expected response, and may have an
            optional bool for the third and final element -- the optional bool specifies
            if the input that is sent to the device is "hidden" (ex: password), if the
            hidden param is not provided it is assumed the input is "normal" (not hidden)
        failed_when_contains: list of strings that, if present in final output, represent
            a failed command/interaction
        privilege_level: ignored in this base class; for LSP reasons for subclasses
        timeout_ops: timeout ops value for this operation; only sets the timeout_ops value
            for the duration of the operation, value is reset to initial value after
            operation is completed. Note that this is the timeout value PER COMMAND sent,
            not for the total of the commands being sent!

    Returns:
        Response: scrapli Response object

    Raises:
        ScrapliValueError: if _base_transport_args is None for some reason

    """
    # decorator cares about timeout_ops, but nothing else does, assign to _ to appease
    # linters
    _ = timeout_ops
    # privilege level only matters "up" in the network driver layer
    _ = privilege_level

    if not self._base_transport_args:
        # should not happen! :)
        raise ScrapliValueError("driver _base_transport_args not set for some reason")

    response = self._pre_send_interactive(
        host=self._base_transport_args.host,
        interact_events=interact_events,
        failed_when_contains=failed_when_contains,
    )
    raw_response, processed_response = self.channel.send_inputs_interact(
        interact_events=interact_events
    )
    return self._post_send_command(
        raw_response=raw_response,
        processed_response=processed_response,
        response=response,
    )