class Host:
    """A managed remote host with a lazily created, cached SSH connection."""

    def __init__(self, name):
        self.name = name      # hostname label used for display
        self.ip = None        # resolved address; set externally before transport()
        self.ssh_pkey = None  # paramiko private key; set externally before transport()
        self._ssh = None      # cached SSHClient, created on first transport() call

    def __str__(self):
        return self.name

    def transport(self):
        """Returns the ssh transport for this host.

        Lazily builds and connects an SSHClient on first use; subsequent
        calls return the cached client.
        """
        # fixed: identity comparison ('is None') instead of '== None'
        if self._ssh is None:
            self._ssh = SSHClient()
            # import keys using paramikos method
            self._ssh.load_system_host_keys()
            # import openssh system host keys
            host_keys_file = '/etc/ssh/ssh_known_hosts'
            if os.path.exists(host_keys_file):
                self._ssh.load_system_host_keys(host_keys_file)
            # import saved host keys from/for multiapt
            host_keys_file = os.getenv('HOME', '/') + '/.ssh/known_hosts_multiapt'
            if os.path.exists(host_keys_file):
                self._ssh.load_host_keys(host_keys_file)
            # now set our own filename for key save purposes
            self._ssh._host_keys_filename = host_keys_file
            # enable our own policy for host key problems
            self._ssh.set_missing_host_key_policy(SSHAskHostKeyPolicy())
            if Main.debug:
                # fixed: print() call form works on both Python 2 and Python 3
                print('D: ssh.connect()')
            self._ssh.connect(self.ip, username=config.remote_username,
                              pkey=self.ssh_pkey)
        return self._ssh
class Host:
    """Remote host wrapper; opens one SSH connection on demand and reuses it."""

    def __init__(self, name):
        self.name = name      # display name of the host
        self.ip = None        # address to connect to; assigned by the caller
        self.ssh_pkey = None  # paramiko key object for auth; assigned by the caller
        self._ssh = None      # memoized SSHClient instance

    def __str__(self):
        return self.name

    def transport(self):
        """Returns the ssh transport for this host."""
        # fixed: compare to None with 'is', not '=='
        if self._ssh is None:
            self._ssh = SSHClient()
            # import keys using paramikos method
            self._ssh.load_system_host_keys()
            # import openssh system host keys
            host_keys_file = '/etc/ssh/ssh_known_hosts'
            if os.path.exists(host_keys_file):
                self._ssh.load_system_host_keys(host_keys_file)
            # import saved host keys from/for multiapt
            host_keys_file = os.getenv('HOME', '/') + '/.ssh/known_hosts_multiapt'
            if os.path.exists(host_keys_file):
                self._ssh.load_host_keys(host_keys_file)
            # now set our own filename for key save purposes
            self._ssh._host_keys_filename = host_keys_file
            # enable our own policy for host key problems
            self._ssh.set_missing_host_key_policy(SSHAskHostKeyPolicy())
            if Main.debug:
                # fixed: parenthesized print works under Python 2 and 3 alike
                print('D: ssh.connect()')
            self._ssh.connect(self.ip, username=config.remote_username,
                              pkey=self.ssh_pkey)
        return self._ssh
class TransferAgent(object):
    """SFTP upload helper around a paramiko SSHClient.

    Supports uploading either under the source file's own name or under a
    randomized name, retrying on name collisions up to ``max_attempts``.
    """

    def __init__(self):
        self.client = SSHClient()
        self.client.load_system_host_keys()
        self.sftp_client = None  # created by connect()

    def load_host_keys(self, filepath):
        """Load additional known-host keys from *filepath*."""
        self.client.load_host_keys(filepath)

    def connect(self, host, username=None, port=22, private_key_file=None):
        """Open SSH + SFTP sessions to *host*; returns self for chaining."""
        self.client.connect(host, username=username, port=port,
                            key_filename=private_key_file)
        self.sftp_client = self.client.open_sftp()
        return self

    def close(self):
        """Close the SFTP session, then the underlying SSH connection."""
        self.sftp_client.close()
        self.client.close()

    def dest_file_exists(self, destpath):
        """Return True if *destpath* already exists on the remote server."""
        try:
            self.sftp_client.stat(destpath)
        # fixed: bare 'except' swallowed every exception (even KeyboardInterrupt);
        # paramiko's SFTP stat raises IOError subclasses when the path is missing
        except IOError:
            return False
        return True

    def transfer(self, srcpath, destdir, callback=None, randomize=True,
                 name_length=7, max_attempts=4):
        """Upload *srcpath* into *destdir*.

        With randomize=False the original basename is kept; otherwise a
        random name of *name_length* characters is generated, retrying on
        collision. Raises TransferError after *max_attempts* collisions.
        """
        if not randomize:
            destpath = self.transform_path(srcpath, destdir)
            return self.do_transfer(srcpath, destpath, callback=callback)
        for _ in range(max_attempts):
            name = generate_random_name(name_length)
            destpath = self.transform_path(srcpath, destdir, rename=name)
            if self.dest_file_exists(destpath):
                continue
            return self.do_transfer(srcpath, destpath, callback=callback)
        raise TransferError("Exceeded max transfer attempts")

    def do_transfer(self, src, dest, callback=None):
        """Perform the actual SFTP put; returns the destination path."""
        self.sftp_client.put(src, dest, callback=callback)
        return dest

    def transform_path(self, srcpath, destdir, rename=None):
        """Build the remote path: destdir/basename, optionally renamed
        (keeping the original extension). Uses posixpath since the remote
        side is POSIX."""
        basename = os.path.basename(srcpath)
        _, ext = os.path.splitext(basename)
        if rename:
            basename = rename + ext
        return posixpath.join(destdir, basename)
def run_ssh_command(self, command):
    """Run *command* (a string or list of strings) on the Dokku host via SSH.

    Streams stdout/stderr chunks through handle_data() into a per-task
    redis key and returns the accumulated output as a string.
    """
    key = task_key(self.request.id)
    redis.set(key, "")
    client = SSHClient()
    # fixed: pass a policy *instance*; the bare class only works on newer paramiko
    client.set_missing_host_key_policy(AutoAddPolicy())
    known_hosts = os.path.expanduser('~/.ssh/known_hosts')
    try:
        # So that we also save back the new host
        client.load_host_keys(known_hosts)
    except FileNotFoundError:
        # fixed: makedirs with exist_ok avoids a race with other processes
        os.makedirs(os.path.dirname(known_hosts), exist_ok=True)
        # so connect doesn't barf when trying to save
        # fixed: close the handle instead of leaking it ('w' creates the file)
        with open(known_hosts, "w"):
            pass
    # fixed: isinstance() instead of type() comparison
    commands = command if isinstance(command, list) else [command]
    # fixed: key loading and connect are loop-invariant; the original
    # re-connected once per command, leaking the previous transport each time.
    # NOTE(review): 'keyfile' is defined outside this function — TODO confirm.
    if os.path.exists(keyfile):
        pkey = RSAKey.from_private_key_file(keyfile)
    else:
        pkey = None
    client.connect(settings.DOKKU_HOST,
                   port=settings.DOKKU_SSH_PORT,
                   username="******",
                   pkey=pkey,
                   allow_agent=False,
                   look_for_keys=False)
    try:
        transport = client.get_transport()
        for c in commands:
            # One exec channel per command, as before
            channel = transport.open_session()
            channel.exec_command(c)
            # Drain stdout/stderr until the command exits
            while True:
                anything = False
                while channel.recv_ready():
                    data = channel.recv(1024)
                    handle_data(key, data)
                    anything = True
                while channel.recv_stderr_ready():
                    data = channel.recv_stderr(1024)
                    handle_data(key, data)
                    anything = True
                if not anything:
                    if channel.exit_status_ready():
                        break
                    time.sleep(0.1)
    finally:
        # fixed: close the connection (was never closed)
        client.close()
    return redis.get(key).decode("utf-8")
def connect(self, ssh_spec, client):
    """ create a connection to a server

    Returns a cached Connection for *ssh_spec* if one exists, otherwise
    parses "ssh://user@server[:port]", authenticates with a password from
    the system keyring, starts the remote dashboard and caches the new
    Connection. Returns None when no keyring password is available.
    """
    # Reuse an already-established connection for this spec.
    if ssh_spec in self.connections:
        connection = self.connections[ssh_spec]
        connection.open(client)
        return connection
    # Username may end in '$' (machine accounts); port is optional.
    username, server, port = re.match(
        r"ssh://([a-z_][a-z0-9_-]*\${0,1})@([^:]*):{0,1}(\d*){0,1}",
        ssh_spec).groups()
    password = keyring.get_password(server, username)
    if not password:
        return None
    ssh_client = SSHClient()
    ssh_client.load_system_host_keys()
    local_keys = os.path.expanduser('~/.ssh/known_hosts')
    if os.path.exists(local_keys):
        try:
            ssh_client.load_host_keys(local_keys)
        # fixed: bare 'except' also trapped KeyboardInterrupt/SystemExit;
        # loading extra host keys stays best-effort
        except Exception:
            pass
    ssh_client.connect(hostname=server,
                       port=int(port if port else 22),
                       username=username,
                       password=password)
    if self.setup(ssh_client):
        session, stdin, stdout, stderror = self.start_command(
            ssh_client, "~/.local/bin/dashboard --server")
        connection = Connection(self, ssh_client, session, stdin, stdout,
                                stderror)
        connection.open(client)
        self.connections[ssh_spec] = connection
        return connection
    raise Exception("Setup of remote dashboard failed")
class FDSConnection():
    """SFTP connection to the FDS server for pulling down report files."""

    def __init__(self):
        # Open the SSH session with the pinned host key and private key,
        # then start SFTP on top of it.
        self.client = SSHClient()
        self.client.load_host_keys(settings.FDS_HOST_KEY)
        self.client.connect(settings.FDS_HOST,
                            username=settings.FDS_USER,
                            key_filename=settings.FDS_PRIVATE_KEY,
                            port=settings.FDS_PORT)
        self.sftp = self.client.open_sftp()
        log.info("Connected to FDS")

    def get_files(self):
        """Fetch remote report files that are not yet present locally.

        A file is skipped when it (or a '<name>.processed' marker) already
        exists in the local data directory.
        """
        log.info("Receiving files from FDS...")
        fds_data_path = os.path.join(settings.BASE_DIR, settings.FDS_DATA_PATH)
        present_locally = os.listdir(fds_data_path)
        self.sftp.chdir('yellow-net-reports')
        for filename in self.sftp.listdir():
            already_known = (filename in present_locally
                             or filename + '.processed' in present_locally)
            if already_known:
                log.debug("Skipping already present file: {}".format(filename))
                continue
            log.info("Receiving {}".format(filename))
            self.sftp.get(filename, os.path.join(fds_data_path, filename))
            # self.sftp.remove(filename)
def handle(self, *labels, **options):
    """Management-command entry point: connect to each test worker over SSH
    and report its PhantomJS version (or the first stderr line on failure).

    Any exception is caught at this top-level boundary and logged critical.
    """
    init_logging(logger, 3)
    try:
        # Make sure the directory and the file exist
        dirname = os.path.dirname(settings.SSH_KNOWN_HOSTS)
        os.makedirs(dirname, exist_ok=True)
        # fixed: close the handle instead of leaking it (mode 'a' still
        # creates the file if missing without truncating it)
        with open(settings.SSH_KNOWN_HOSTS, 'a'):
            pass
        client = SSHClient()
        client.load_host_keys(settings.SSH_KNOWN_HOSTS)
        client.set_missing_host_key_policy(AutoAddPolicy())
        client.set_log_channel('paramiko')
        # Filter Paramiko message to only show host key messages
        logging.getLogger('paramiko').addFilter(ParamikoLogFilter())
        hostnames = [
            settings.V4_HOST,
            settings.V6_HOST,
            settings.NAT64_HOST,
        ]
        for hostname in hostnames:
            logger.info("Connecting to {}".format(hostname))
            client.connect(hostname,
                           key_filename=settings.SSH_PRIVATE_KEY,
                           allow_agent=False,
                           look_for_keys=False)
            stdin, stdout, stderr = client.exec_command('phantomjs --version')
            stdout_lines = stdout.readlines()
            if stdout_lines:
                logger.info("PhantomJS version: {}".format(
                    stdout_lines[0].strip()))
            else:
                # Report only the first stderr line to keep the log terse
                logger.error(''.join(
                    [line.strip() for line in stderr.readlines()][:1]))
            client.close()
    except Exception as e:
        logger.critical(str(e))
#!/usr/bin/env python # -*- coding: utf-8 -*- "Not used" import os import time from paramiko.client import SSHClient, AutoAddPolicy, socket hosts_filename = os.path.expanduser("~/.ssh/paramiko_known_hosts") #print hosts_filename client = SSHClient() if os.path.isfile(hosts_filename): client.load_host_keys(hosts_filename) #'~/.ssh/known_hosts' client.set_missing_host_key_policy(AutoAddPolicy()) print client.get_host_keys() client.connect('localhost', 2200, username='******', password='******') client.save_host_keys(hosts_filename) #print client.get_host_keys() channel = client.invoke_shell() channel.settimeout(0) #time.sleep(1) def datas(data=''): while True: try: res = channel.recv(10000) if len(res) == 0: exit(0) data += res
def run_browser_tests(self):
    """Render the target URL with PhantomJS on the IPv4-only, IPv6-only and
    NAT64 worker hosts, then score resources and screenshots.

    Returns a bitmask of missing screenshots: bit 0 = IPv4-only,
    bit 1 = IPv6-only, bit 2 = NAT64. Saves the model at the end.
    """
    common_options = [
        'phantomjs',
        '--debug=true',
        '--ignore-ssl-errors=true',
        '--local-url-access=false',
        '--local-storage-path=/dev/null',
        '--offline-storage-path=/dev/null',
        '/dev/stdin',
    ]
    browser_command = ' '.join(common_options + [shlex.quote(self.idna_url)])

    # Read the private key
    private_key = RSAKey.from_private_key_file(settings.SSH_PRIVATE_KEY)

    def _start_test(host):
        # Connect to one worker and start the browser test there.
        client = SSHClient()
        client.load_host_keys(settings.SSH_KNOWN_HOSTS)
        client.connect(host, username=settings.SSH_USERNAME,
                       pkey=private_key,
                       allow_agent=False, look_for_keys=False)
        # fixed: log the host actually used; the original logged
        # settings.V4_HOST for all three runs (copy-paste bug)
        logger.debug("Running '{}' on {}".format(browser_command, host))
        stdin, stdout, stderr = client.exec_command(browser_command,
                                                    timeout=120)
        return client, stdin, stdout, stderr

    # Do the v4-only, v6-only and the NAT64 request in parallel
    v4only_client, v4only_stdin, v4only_stdout, v4only_stderr = \
        _start_test(settings.V4_HOST)
    if self.ipv6_dns_results:
        v6only_client, v6only_stdin, v6only_stdout, v6only_stderr = \
            _start_test(settings.V6_HOST)
    else:
        v6only_client = v6only_stdin = v6only_stdout = v6only_stderr = None
    nat64_client, nat64_stdin, nat64_stdout, nat64_stderr = \
        _start_test(settings.NAT64_HOST)

    # Placeholders
    v4only_img = None
    v4only_img_bytes = None
    v6only_img = None
    v6only_img_bytes = None
    nat64_img = None
    nat64_img_bytes = None
    self.v4only_data = {}
    self.v6only_data = {}
    self.nat64_data = {}

    # Push the test script to the workers
    script_filename = os.path.realpath(
        os.path.join(os.path.dirname(__file__), 'render_page.js'))
    # fixed: close the script file instead of leaking the handle
    with open(script_filename, 'rb') as script_file:
        script = script_file.read()

    for stdin in (v4only_stdin, v6only_stdin, nat64_stdin):
        if stdin is not None:
            stdin.write(script)
            stdin.close()
            stdin.channel.shutdown_write()

    def _read_results(stdout, stderr):
        # Collect JSON payload, debug output, exit code and the optional
        # base64 screenshot from one worker. Same order as before: stdout,
        # stderr, then the exit status.
        payload = stdout.read()
        debug_bytes = stderr.read()
        exit_code = stdout.channel.recv_exit_status()
        data = json.loads(payload.decode('utf-8'),
                          object_pairs_hook=OrderedDict) if payload else {}
        debug = debug_bytes.decode('utf-8')
        data['exit_code'] = exit_code
        img = None
        img_bytes = None
        if 'image' in data:
            if data['image']:
                img_bytes = base64.decodebytes(data['image'].encode('ascii'))
                # noinspection PyTypeChecker
                img = skimage.io.imread(io.BytesIO(img_bytes))
            del data['image']
        return data, debug, img, img_bytes

    # Wait for tests to finish
    try:
        logger.debug("Receiving data from IPv4-only test")
        self.v4only_data, self.v4only_debug, v4only_img, v4only_img_bytes = \
            _read_results(v4only_stdout, v4only_stderr)
    except socket.timeout:
        logger.error("{}: IPv4-only load timed out".format(self.url))

    if v6only_client:
        try:
            logger.debug("Receiving data from IPv6-only test")
            self.v6only_data, self.v6only_debug, v6only_img, v6only_img_bytes = \
                _read_results(v6only_stdout, v6only_stderr)
        except socket.timeout:
            logger.error("{}: IPv6-only load timed out".format(self.url))
    else:
        logger.info("{}: Not running IPv6-only test".format(self.url))

    try:
        logger.debug("Receiving data from NAT64 test")
        self.nat64_data, self.nat64_debug, nat64_img, nat64_img_bytes = \
            _read_results(nat64_stdout, nat64_stderr)
    except socket.timeout:
        logger.error("{}: NAT64 load timed out".format(self.url))

    # Done talking to workers, close connections
    if v4only_client:
        v4only_client.close()
    if v6only_client:
        v6only_client.close()
    if nat64_client:
        nat64_client.close()

    # Calculate score based on resources
    v4only_resources_ok = self.v4only_resources[0]
    if v4only_resources_ok > 0:
        self.v6only_resource_score = min(
            self.v6only_resources[0] / v4only_resources_ok, 1)
        logger.info("{}: IPv6-only Resource Score = {:0.2f}".format(
            self.url, self.v6only_resource_score))
        self.nat64_resource_score = min(
            self.nat64_resources[0] / v4only_resources_ok, 1)
        logger.info("{}: NAT64 Resource Score = {:0.2f}".format(
            self.url, self.nat64_resource_score))
    else:
        logger.error(
            "{}: did not load over IPv4-only, unable to perform resource test"
            .format(self.url))

    return_value = 0
    if v4only_img_bytes:
        # Store the image
        self.v4only_image.save('v4.png', ContentFile(v4only_img_bytes),
                               save=False)
    else:
        return_value |= 1
    if v6only_img_bytes:
        # Store the image
        self.v6only_image.save('v6.png', ContentFile(v6only_img_bytes),
                               save=False)
    else:
        return_value |= 2
    if nat64_img_bytes:
        # Store the image
        self.nat64_image.save('nat64.png', ContentFile(nat64_img_bytes),
                              save=False)
    else:
        return_value |= 4

    if v4only_img is not None:
        logger.debug("{}: Loading IPv4-only screenshot".format(self.url))
        if v6only_img is not None:
            logger.debug("{}: Loading IPv6-only screenshot".format(
                self.url))
            # Suppress stupid warnings
            with warnings.catch_warnings(record=True):
                self.v6only_image_score = compare_ssim(v4only_img,
                                                       v6only_img,
                                                       multichannel=True)
            logger.info("{}: IPv6-only Image Score = {:0.2f}".format(
                self.url, self.v6only_image_score))
        else:
            logger.warning(
                "{}: did not load over IPv6-only, 0 score".format(
                    self.url))
            self.v6only_image_score = 0.0
        if nat64_img is not None:
            logger.debug("{}: Loading NAT64 screenshot".format(self.url))
            # Suppress stupid warnings
            with warnings.catch_warnings(record=True):
                self.nat64_image_score = compare_ssim(v4only_img,
                                                      nat64_img,
                                                      multichannel=True)
            logger.info("{}: NAT64 Image Score = {:0.2f}".format(
                self.url, self.nat64_image_score))
        else:
            logger.warning("{}: did not load over NAT64, 0 score".format(
                self.url))
            self.nat64_image_score = 0.0
    else:
        logger.error(
            "{}: did not load over IPv4-only, unable to perform image test"
            .format(self.url))

    self.save()
    return return_value
class Connection(Context): """ A connection to an SSH daemon, with methods for commands and file transfer. **Basics** This class inherits from Invoke's `~invoke.context.Context`, as it is a context within which commands, tasks etc can operate. It also encapsulates a Paramiko `~paramiko.client.SSHClient` instance, performing useful high level operations with that `~paramiko.client.SSHClient` and `~paramiko.channel.Channel` instances generated from it. .. _connect_kwargs: .. note:: Many SSH specific options -- such as specifying private keys and passphrases, timeouts, disabling SSH agents, etc -- are handled directly by Paramiko and should be specified via the :ref:`connect_kwargs argument <connect_kwargs-arg>` of the constructor. **Lifecycle** `.Connection` has a basic "`create <__init__>`, `connect/open <open>`, `do work <run>`, `disconnect/close <close>`" lifecycle: - `Instantiation <__init__>` imprints the object with its connection parameters (but does **not** actually initiate the network connection). - An alternate constructor exists for users :ref:`upgrading piecemeal from Fabric 1 <from-v1>`: `from_v1` - Methods like `run`, `get` etc automatically trigger a call to `open` if the connection is not active; users may of course call `open` manually if desired. - Connections do not always need to be explicitly closed; much of the time, Paramiko's garbage collection hooks or Python's own shutdown sequence will take care of things. **However**, should you encounter edge cases (for example, sessions hanging on exit) it's helpful to explicitly close connections when you're done with them. This can be accomplished by manually calling `close`, or by using the object as a contextmanager:: with Connection('host') as c: c.run('command') c.put('file') .. note:: This class rebinds `invoke.context.Context.run` to `.local` so both remote and local command execution can coexist. 
**Configuration** Most `.Connection` parameters honor :doc:`Invoke-style configuration </concepts/configuration>` as well as any applicable :ref:`SSH config file directives <connection-ssh-config>`. For example, to end up with a connection to ``admin@myhost``, one could: - Use any built-in config mechanism, such as ``/etc/fabric.yml``, ``~/.fabric.json``, collection-driven configuration, env vars, etc, stating ``user: admin`` (or ``{"user": "******"}``, depending on config format.) Then ``Connection('myhost')`` would implicitly have a ``user`` of ``admin``. - Use an SSH config file containing ``User admin`` within any applicable ``Host`` header (``Host myhost``, ``Host *``, etc.) Again, ``Connection('myhost')`` will default to an ``admin`` user. - Leverage host-parameter shorthand (described in `.Config.__init__`), i.e. ``Connection('admin@myhost')``. - Give the parameter directly: ``Connection('myhost', user='******')``. The same applies to agent forwarding, gateways, and so forth. .. versionadded:: 2.0 """ # NOTE: these are initialized here to hint to invoke.Config.__setattr__ # that they should be treated as real attributes instead of config proxies. # (Additionally, we're doing this instead of using invoke.Config._set() so # we can take advantage of Sphinx's attribute-doc-comment static analysis.) # Once an instance is created, these values will usually be non-None # because they default to the default config values. host = None original_host = None user = None port = None ssh_config = None gateway = None forward_agent = None connect_timeout = None connect_kwargs = None client = None transport = None _sftp = None _agent_handler = None default_host_key_policy = AutoAddPolicy @classmethod def from_v1(cls, env, **kwargs): """ Alternate constructor which uses Fabric 1's ``env`` dict for settings. All keyword arguments besides ``env`` are passed unmolested into the primary constructor. .. 
warning:: Because your own config overrides will win over data from ``env``, make sure you only set values you *intend* to change from your v1 environment! For details on exactly which ``env`` vars are imported and what they become in the new API, please see :ref:`v1-env-var-imports`. :param env: An explicit Fabric 1 ``env`` dict (technically, any ``fabric.utils._AttributeDict`` instance should work) to pull configuration from. .. versionadded:: 2.4 """ # TODO: import fabric.state.env (need good way to test it first...) # TODO: how to handle somebody accidentally calling this in a process # where 'fabric' is fabric 2, and there's no fabric 1? Probably just a # re-raise of ImportError?? # Our only requirement is a non-empty host_string if not env.host_string: raise InvalidV1Env( "Supplied v1 env has an empty `host_string` value! Please make sure you're calling Connection.from_v1 within a connected Fabric 1 session." # noqa ) # TODO: detect collisions with kwargs & except instead of overwriting? # (More Zen of Python compliant, but also, effort, and also, makes it # harder for users to intentionally overwrite!) connect_kwargs = kwargs.setdefault("connect_kwargs", {}) kwargs.setdefault("host", env.host_string) shorthand = derive_shorthand(env.host_string) # TODO: don't we need to do the below skipping for user too? kwargs.setdefault("user", env.user) # Skip port if host string seemed to have it; otherwise we hit our own # ambiguity clause in __init__. v1 would also have been doing this # anyways (host string wins over other settings). if not shorthand["port"]: # Run port through int(); v1 inexplicably has a string default... kwargs.setdefault("port", int(env.port)) # key_filename defaults to None in v1, but in v2, we expect it to be # either unset, or set to a list. Thus, we only pull it over if it is # not None. 
if env.key_filename is not None: connect_kwargs.setdefault("key_filename", env.key_filename) # Obtain config values, if not given, from its own from_v1 # NOTE: not using setdefault as we truly only want to call # Config.from_v1 when necessary. if "config" not in kwargs: kwargs["config"] = Config.from_v1(env) return cls(**kwargs) # TODO: should "reopening" an existing Connection object that has been # closed, be allowed? (See e.g. how v1 detects closed/semi-closed # connections & nukes them before creating a new client to the same host.) # TODO: push some of this into paramiko.client.Client? e.g. expand what # Client.exec_command does, it already allows configuring a subset of what # we do / will eventually do / did in 1.x. It's silly to have to do # .get_transport().open_session(). def __init__( self, host, user=None, port=None, config=None, gateway=None, forward_agent=None, connect_timeout=None, connect_kwargs=None, inline_ssh_env=None, ): """ Set up a new object representing a server connection. :param str host: the hostname (or IP address) of this connection. May include shorthand for the ``user`` and/or ``port`` parameters, of the form ``user@host``, ``host:port``, or ``user@host:port``. .. note:: Due to ambiguity, IPv6 host addresses are incompatible with the ``host:port`` shorthand (though ``user@host`` will still work OK). In other words, the presence of >1 ``:`` character will prevent any attempt to derive a shorthand port number; use the explicit ``port`` parameter instead. .. note:: If ``host`` matches a ``Host`` clause in loaded SSH config data, and that ``Host`` clause contains a ``Hostname`` directive, the resulting `.Connection` object will behave as if ``host`` is equal to that ``Hostname`` value. In all cases, the original value of ``host`` is preserved as the ``original_host`` attribute. 
Thus, given SSH config like so:: Host myalias Hostname realhostname a call like ``Connection(host='myalias')`` will result in an object whose ``host`` attribute is ``realhostname``, and whose ``original_host`` attribute is ``myalias``. :param str user: the login user for the remote connection. Defaults to ``config.user``. :param int port: the remote port. Defaults to ``config.port``. :param config: configuration settings to use when executing methods on this `.Connection` (e.g. default SSH port and so forth). Should be a `.Config` or an `invoke.config.Config` (which will be turned into a `.Config`). Default is an anonymous `.Config` object. :param gateway: An object to use as a proxy or gateway for this connection. This parameter accepts one of the following: - another `.Connection` (for a ``ProxyJump`` style gateway); - a shell command string (for a ``ProxyCommand`` style style gateway). Default: ``None``, meaning no gatewaying will occur (unless otherwise configured; if one wants to override a configured gateway at runtime, specify ``gateway=False``.) .. seealso:: :ref:`ssh-gateways` :param bool forward_agent: Whether to enable SSH agent forwarding. Default: ``config.forward_agent``. :param int connect_timeout: Connection timeout, in seconds. Default: ``config.timeouts.connect``. .. _connect_kwargs-arg: :param dict connect_kwargs: Keyword arguments handed verbatim to `SSHClient.connect <paramiko.client.SSHClient.connect>` (when `.open` is called). `.Connection` tries not to grow additional settings/kwargs of its own unless it is adding value of some kind; thus, ``connect_kwargs`` is currently the right place to hand in paramiko connection parameters such as ``pkey`` or ``key_filename``. For example:: c = Connection( host="hostname", user="******", connect_kwargs={ "key_filename": "/home/myuser/.ssh/private.key", }, ) Default: ``config.connect_kwargs``. 
:param bool inline_ssh_env: Whether to send environment variables "inline" as prefixes in front of command strings (``export VARNAME=value && mycommand here``), instead of trying to submit them through the SSH protocol itself (which is the default behavior). This is necessary if the remote server has a restricted ``AcceptEnv`` setting (which is the common default). The default value is the value of the ``inline_ssh_env`` :ref:`configuration value <default-values>` (which itself defaults to ``False``). .. warning:: This functionality does **not** currently perform any shell escaping on your behalf! Be careful when using nontrivial values, and note that you can put in your own quoting, backslashing etc if desired. Consider using a different approach (such as actual remote shell scripts) if you run into too many issues here. .. note:: When serializing into prefixed ``FOO=bar`` format, we apply the builtin `sorted` function to the env dictionary's keys, to remove what would otherwise be ambiguous/arbitrary ordering. .. note:: This setting has no bearing on *local* shell commands; it only affects remote commands, and thus, methods like `.run` and `.sudo`. :raises ValueError: if user or port values are given via both ``host`` shorthand *and* their own arguments. (We `refuse the temptation to guess`_). .. _refuse the temptation to guess: http://zen-of-python.info/ in-the-face-of-ambiguity-refuse-the-temptation-to-guess.html#12 .. versionchanged:: 2.3 Added the ``inline_ssh_env`` parameter. """ # NOTE: parent __init__ sets self._config; for now we simply overwrite # that below. If it's somehow problematic we would want to break parent # __init__ up in a manner that is more cleanly overrideable. super(Connection, self).__init__(config=config) #: The .Config object referenced when handling default values (for e.g. #: user or port, when not explicitly given) or deciding how to behave. 
if config is None: config = Config() # Handle 'vanilla' Invoke config objects, which need cloning 'into' one # of our own Configs (which grants the new defaults, etc, while not # squashing them if the Invoke-level config already accounted for them) elif not isinstance(config, Config): config = config.clone(into=Config) self._set(_config=config) # TODO: when/how to run load_files, merge, load_shell_env, etc? # TODO: i.e. what is the lib use case here (and honestly in invoke too) shorthand = self.derive_shorthand(host) host = shorthand["host"] err = "You supplied the {} via both shorthand and kwarg! Please pick one." # noqa if shorthand["user"] is not None: if user is not None: raise ValueError(err.format("user")) user = shorthand["user"] if shorthand["port"] is not None: if port is not None: raise ValueError(err.format("port")) port = shorthand["port"] # NOTE: we load SSH config data as early as possible as it has # potential to affect nearly every other attribute. #: The per-host SSH config data, if any. (See :ref:`ssh-config`.) self.ssh_config = self.config.base_ssh_config.lookup(host) self.original_host = host #: The hostname of the target server. self.host = host if "hostname" in self.ssh_config: # TODO: log that this occurred? self.host = self.ssh_config["hostname"] #: The username this connection will use to connect to the remote end. self.user = user or self.ssh_config.get("user", self.config.user) # TODO: is it _ever_ possible to give an empty user value (e.g. # user='')? E.g. do some SSH server specs allow for that? #: The network port to connect on. self.port = port or int(self.ssh_config.get("port", self.config.port)) # Gateway/proxy/bastion/jump setting: non-None values - string, # Connection, even eg False - get set directly; None triggers seek in # config/ssh_config #: The gateway `.Connection` or ``ProxyCommand`` string to be used, #: if any. 
self.gateway = gateway if gateway is not None else self.get_gateway() # NOTE: we use string above, vs ProxyCommand obj, to avoid spinning up # the ProxyCommand subprocess at init time, vs open() time. # TODO: make paramiko.proxy.ProxyCommand lazy instead? if forward_agent is None: # Default to config... forward_agent = self.config.forward_agent # But if ssh_config is present, it wins if "forwardagent" in self.ssh_config: # TODO: SSHConfig really, seriously needs some love here, god map_ = {"yes": True, "no": False} forward_agent = map_[self.ssh_config["forwardagent"]] #: Whether agent forwarding is enabled. self.forward_agent = forward_agent if connect_timeout is None: connect_timeout = self.ssh_config.get("connecttimeout", self.config.timeouts.connect) if connect_timeout is not None: connect_timeout = int(connect_timeout) #: Connection timeout self.connect_timeout = connect_timeout #: Keyword arguments given to `paramiko.client.SSHClient.connect` when #: `open` is called. self.connect_kwargs = self.resolve_connect_kwargs(connect_kwargs) #: The `paramiko.client.SSHClient` instance this connection wraps. self.client = SSHClient() self.setup_ssh_client() #: A convenience handle onto the return value of #: ``self.client.get_transport()``. self.transport = None if inline_ssh_env is None: inline_ssh_env = self.config.inline_ssh_env #: Whether to construct remote command lines with env vars prefixed #: inline. 
        # NOTE(review): tail end of __init__, whose signature sits above this
        # chunk. Stores the flag controlling how run() passes env vars.
        self.inline_ssh_env = inline_ssh_env

    def setup_ssh_client(self):
        """
        Apply host-key policy and load known-hosts files into ``self.client``.

        If ``self.default_host_key_policy`` is set, it is instantiated and
        installed as the missing-host-key policy. Then every existing file
        named by the ssh_config ``UserKnownHostsFile`` directive (default
        ``~/.ssh/known_hosts``) is loaded as a host-keys file.
        """
        if self.default_host_key_policy is not None:
            logging.debug("host key policy: %s", self.default_host_key_policy)
            # The attribute holds a policy *class*; instantiate it here.
            self.client.set_missing_host_key_policy(
                self.default_host_key_policy())
        # NOTE(review): ssh_config keys appear to be stored lowercased, hence
        # the .lower() on the directive name — confirm against loader.
        known_hosts = self.ssh_config.get("UserKnownHostsFile".lower(),
                                          "~/.ssh/known_hosts")
        logging.debug("loading host keys from %s", known_hosts)
        # multiple files, separated by whitespace, can be provided
        for filename in [os.path.expanduser(f) for f in known_hosts.split()]:
            if os.path.exists(filename):
                self.client.load_host_keys(filename)

    def resolve_connect_kwargs(self, connect_kwargs):
        """
        Merge explicitly-given ``connect_kwargs`` with config/ssh_config data.

        ``key_filename`` is special-cased so values merge (config first, then
        kwargs, then ssh_config ``identityfile`` entries) instead of
        overriding one another.

        :returns: the merged ``connect_kwargs`` dict.
        """
        # Grab connect_kwargs from config if not explicitly given.
        if connect_kwargs is None:
            # TODO: is it better to pre-empt conflicts w/ manually-handled
            # connect() kwargs (hostname, username, etc) here or in open()?
            # We're doing open() for now in case e.g. someone manually
            # modifies .connect_kwargs attributewise, but otherwise it feels
            # better to do it early instead of late.
            connect_kwargs = self.config.connect_kwargs
        # Special case: key_filename gets merged instead of overridden.
        # TODO: probably want some sorta smart merging generally, special
        # cases are bad.
        elif "key_filename" in self.config.connect_kwargs:
            kwarg_val = connect_kwargs.get("key_filename", [])
            conf_val = self.config.connect_kwargs["key_filename"]
            # Config value comes before kwarg value (because it may contain
            # CLI flag value.)
            connect_kwargs["key_filename"] = conf_val + kwarg_val
        # SSH config identityfile values come last in the key_filename
        # 'hierarchy'.
        if "identityfile" in self.ssh_config:
            connect_kwargs.setdefault("key_filename", [])
            connect_kwargs["key_filename"].extend(
                self.ssh_config["identityfile"])
        return connect_kwargs

    def get_gateway(self):
        """
        Return the gateway for this connection.

        :returns:
            A chain of `.Connection` objects (from a ``ProxyJump``
            directive), a proxy-command string (from ``ProxyCommand``), or
            the Invoke-config fallback value (which may be ``None``).
        """
        # SSH config wins over Invoke-style config
        if "proxyjump" in self.ssh_config:
            # Reverse hop1,hop2,hop3 style ProxyJump directive so we start
            # with the final (itself non-gatewayed) hop and work up to
            # the front (actual, supplied as our own gateway) hop
            hops = reversed(self.ssh_config["proxyjump"].split(","))
            prev_gw = None
            for hop in hops:
                # Short-circuit if we appear to be our own proxy, which would
                # be a RecursionError. Implies SSH config wildcards.
                # TODO: in an ideal world we'd check user/port too in case
                # they differ, but...seriously? They can file a PR with those
                # extra half dozen test cases in play, E_NOTIME
                if self.derive_shorthand(hop)["host"] == self.host:
                    return None
                # Happily, ProxyJump uses identical format to our host
                # shorthand...
                kwargs = dict(config=self.config.clone())
                if prev_gw is not None:
                    kwargs["gateway"] = prev_gw
                cxn = Connection(hop, **kwargs)
                prev_gw = cxn
            return prev_gw
        elif "proxycommand" in self.ssh_config:
            # Just a string, which we interpret as a proxy command..
            return self.ssh_config["proxycommand"]
        # Fallback: config value (may be None).
        return self.config.gateway

    def __repr__(self):
        # Host comes first as it's the most common differentiator by far
        bits = [("host", self.host)]
        # TODO: maybe always show user regardless? Explicit is good...
        if self.user != self.config.user:
            bits.append(("user", self.user))
        # TODO: harder to make case for 'always show port'; maybe if it's
        # non-22 (even if config has overridden the local default)?
        if self.port != self.config.port:
            bits.append(("port", self.port))
        # NOTE: sometimes self.gateway may be eg False if someone wants to
        # explicitly override a configured non-None value (as otherwise it's
        # impossible for __init__ to tell if a None means "nothing given" or
        # "seriously please no gatewaying". So, this must always be a vanilla
        # truth test and not eg "is not None".
        if self.gateway:
            # Displaying type because gw params would probs be too verbose
            val = "proxyjump"
            if isinstance(self.gateway, string_types):
                val = "proxycommand"
            bits.append(("gw", val))
        return "<Connection {}>".format(
            " ".join("{}={}".format(*x) for x in bits))

    def _identity(self):
        # TODO: consider including gateway and maybe even other init kwargs?
        # Whether two cxns w/ same user/host/port but different
        # gateway/keys/etc, should be considered "the same", is unclear.
        return (self.host, self.user, self.port)

    def __eq__(self, other):
        # Comparable only to other Connections, via (host, user, port).
        if not isinstance(other, Connection):
            return False
        return self._identity() == other._identity()

    def __lt__(self, other):
        # Tuple ordering on (host, user, port).
        return self._identity() < other._identity()

    def __hash__(self):
        # NOTE: this departs from Context/DataProxy, which is not usefully
        # hashable.
        return hash(self._identity())

    def derive_shorthand(self, host_string):
        # NOTE: used to be defined inline; preserving API call for both
        # backwards compatibility and because it seems plausible we may want
        # to modify behavior later, using eg config or other attributes.
        return derive_shorthand(host_string)

    @property
    def is_connected(self):
        """
        Whether or not this connection is actually open.

        .. versionadded:: 2.0
        """
        return self.transport.active if self.transport else False

    def open(self):
        """
        Initiate an SSH connection to the host/port this object is bound to.

        This may include activating the configured gateway connection, if one
        is set.

        Also saves a handle to the now-set Transport object for easier
        access.

        Various connect-time settings (and/or their corresponding :ref:`SSH
        config options <ssh-config>`) are utilized here in the call to
        `SSHClient.connect <paramiko.client.SSHClient.connect>`. (For
        details, see :doc:`the configuration docs
        </concepts/configuration>`.)

        :raises ValueError:
            if a reserved or doubly-specified key is found in
            ``connect_kwargs``.

        .. versionadded:: 2.0
        """
        # Short-circuit
        if self.is_connected:
            return
        err = "Refusing to be ambiguous: connect() kwarg '{}' was given both via regular arg and via connect_kwargs!"  # noqa
        # These may not be given, period
        for key in """ hostname port username """.split():
            if key in self.connect_kwargs:
                raise ValueError(err.format(key))
        # These may be given one way or the other, but not both
        if ("timeout" in self.connect_kwargs
                and self.connect_timeout is not None):
            raise ValueError(err.format("timeout"))
        # No conflicts -> merge 'em together
        kwargs = dict(
            self.connect_kwargs,
            username=self.user,
            hostname=self.host,
            port=self.port,
        )
        if self.gateway:
            kwargs["sock"] = self.open_gateway()
        if self.connect_timeout:
            kwargs["timeout"] = self.connect_timeout
        # Strip out empty defaults for less noisy debugging
        if "key_filename" in kwargs and not kwargs["key_filename"]:
            del kwargs["key_filename"]
        # Actually connect!
        self.client.connect(**kwargs)
        self.transport = self.client.get_transport()

    def open_gateway(self):
        """
        Obtain a socket-like object from `gateway`.

        :returns:
            A ``direct-tcpip`` `paramiko.channel.Channel`, if `gateway` was a
            `.Connection`; or a `~paramiko.proxy.ProxyCommand`, if `gateway`
            was a string.

        .. versionadded:: 2.0
        """
        # ProxyCommand is faster to set up, so do it first.
        if isinstance(self.gateway, string_types):
            # Leverage a dummy SSHConfig to ensure %h/%p/etc are parsed.
            # TODO: use real SSH config once loading one properly is
            # implemented.
            ssh_conf = SSHConfig()
            dummy = "Host {}\n ProxyCommand {}"
            ssh_conf.parse(StringIO(dummy.format(self.host, self.gateway)))
            return ProxyCommand(ssh_conf.lookup(self.host)["proxycommand"])
        # Handle inner-Connection gateway type here.
        # TODO: logging
        self.gateway.open()
        # TODO: expose the opened channel itself as an attribute? (another
        # possible argument for separating the two gateway types...) e.g. if
        # someone wanted to piggyback on it for other same-interpreter socket
        # needs...
        # TODO: and the inverse? allow users to supply their own socket/like
        # object they got via $WHEREEVER?
        # TODO: how best to expose timeout param? reuse general connection
        # timeout from config?
        return self.gateway.transport.open_channel(
            kind="direct-tcpip",
            dest_addr=(self.host, int(self.port)),
            # NOTE: src_addr needs to be 'empty but not None' values to
            # correctly encode into a network message. Theoretically Paramiko
            # could auto-interpret None sometime & save us the trouble.
            src_addr=("", 0),
        )

    def close(self):
        """
        Terminate the network connection to the remote end, if open.

        If no connection is open, this method does nothing.

        .. versionadded:: 2.0
        """
        if self.is_connected:
            self.client.close()
            if self.forward_agent and self._agent_handler is not None:
                self._agent_handler.close()

    def __enter__(self):
        return self

    def __exit__(self, *exc):
        # Always close on context exit, regardless of exceptions.
        self.close()

    @opens
    def create_session(self):
        # Open a fresh paramiko session channel, optionally wiring up agent
        # forwarding on it.
        channel = self.transport.open_session()
        if self.forward_agent:
            self._agent_handler = AgentRequestHandler(channel)
        return channel

    def _remote_runner(self):
        # Build the configured remote Runner, threading through the
        # inline-env flag given at construction time.
        return self.config.runners.remote(
            self, inline_env=self.inline_ssh_env)

    @opens
    def run(self, command, **kwargs):
        """
        Execute a shell command on the remote end of this connection.

        This method wraps an SSH-capable implementation of
        `invoke.runners.Runner.run`; see its documentation for details.

        .. warning::
            There are a few spots where Fabric departs from Invoke's default
            settings/behaviors; they are documented under
            `.Config.global_defaults`.

        .. versionadded:: 2.0
        """
        return self._run(self._remote_runner(), command, **kwargs)

    @opens
    def sudo(self, command, **kwargs):
        """
        Execute a shell command, via ``sudo``, on the remote end.

        This method is identical to `invoke.context.Context.sudo` in every
        way, except in that -- like `run` -- it honors
        per-host/per-connection configuration overrides in addition to the
        generic/global ones. Thus, for example, per-host sudo passwords may
        be configured.

        .. versionadded:: 2.0
        """
        return self._sudo(self._remote_runner(), command, **kwargs)

    def local(self, *args, **kwargs):
        """
        Execute a shell command on the local system.

        This method is effectively a wrapper of `invoke.run`; see its docs
        for details and call signature.

        .. versionadded:: 2.0
        """
        # Superclass run() uses runners.local, so we can literally just call
        # it straight.
        return super(Connection, self).run(*args, **kwargs)

    @opens
    def sftp(self):
        """
        Return a `~paramiko.sftp_client.SFTPClient` object.

        If called more than one time, memoizes the first result; thus, any
        given `.Connection` instance will only ever have a single SFTP
        client, and state (such as that managed by
        `~paramiko.sftp_client.SFTPClient.chdir`) will be preserved.

        .. versionadded:: 2.0
        """
        if self._sftp is None:
            self._sftp = self.client.open_sftp()
        return self._sftp

    def get(self, *args, **kwargs):
        """
        Get a remote file to the local filesystem or file-like object.

        Simply a wrapper for `.Transfer.get`. Please see its documentation
        for all details.

        .. versionadded:: 2.0
        """
        return Transfer(self).get(*args, **kwargs)

    def put(self, *args, **kwargs):
        """
        Put a remote file (or file-like object) to the remote filesystem.

        Simply a wrapper for `.Transfer.put`. Please see its documentation
        for all details.

        .. versionadded:: 2.0
        """
        return Transfer(self).put(*args, **kwargs)

    # TODO: yield the socket for advanced users? Other advanced use cases
    # (perhaps factor out socket creation itself)?
    # TODO: probably push some of this down into Paramiko
    @contextmanager
    @opens
    def forward_local(
        self,
        local_port,
        remote_port=None,
        remote_host="localhost",
        local_host="localhost",
    ):
        """
        Open a tunnel connecting ``local_port`` to the server's environment.

        For example, say you want to connect to a remote PostgreSQL database
        which is locked down and only accessible via the system it's running
        on. You have SSH access to this server, so you can temporarily make
        port 5432 on your local system act like port 5432 on the server::

            import psycopg2
            from fabric import Connection

            with Connection('my-db-server').forward_local(5432):
                db = psycopg2.connect(
                    host='localhost', port=5432, database='mydb'
                )
                # Do things with 'db' here

        This method is analogous to using the ``-L`` option of OpenSSH's
        ``ssh`` program.

        :param int local_port: The local port number on which to listen.

        :param int remote_port:
            The remote port number. Defaults to the same value as
            ``local_port``.

        :param str local_host:
            The local hostname/interface on which to listen. Default:
            ``localhost``.

        :param str remote_host:
            The remote hostname serving the forwarded remote port. Default:
            ``localhost`` (i.e., the host this `.Connection` is connected
            to.)

        :returns:
            Nothing; this method is only useful as a context manager
            affecting local operating system state.

        .. versionadded:: 2.0
        """
        if not remote_port:
            remote_port = local_port
        # TunnelManager does all of the work, sitting in the background (so
        # we can yield) and spawning threads every time somebody connects to
        # our local port.
        finished = Event()
        manager = TunnelManager(
            local_port=local_port,
            local_host=local_host,
            remote_port=remote_port,
            remote_host=remote_host,
            # TODO: not a huge fan of handing in our transport, but...?
            transport=self.transport,
            finished=finished,
        )
        manager.start()
        # Return control to caller now that things ought to be operational
        try:
            yield
        # Teardown once user exits block
        finally:
            # Signal to manager that it should close all open tunnels
            finished.set()
            # Then wait for it to do so
            manager.join()
            # Raise threading errors from within the manager, which would be
            # one of:
            # - an inner ThreadException, which was created by the manager on
            # behalf of its Tunnels; this gets directly raised.
            # - some other exception, which would thus have occurred in the
            # manager itself; we wrap this in a new ThreadException.
            # NOTE: in these cases, some of the metadata tracking in
            # ExceptionHandlingThread/ExceptionWrapper/ThreadException (which
            # is useful when dealing with multiple nearly-identical sibling
            # IO threads) is superfluous, but it doesn't feel worth breaking
            # things up further; we just ignore it for now.
            wrapper = manager.exception()
            if wrapper is not None:
                if wrapper.type is ThreadException:
                    raise wrapper.value
                else:
                    raise ThreadException([wrapper])

            # TODO: cancel port forward on transport? Does that even make
            # sense here (where we used direct-tcpip) vs the opposite method
            # (which is what uses forward-tcpip)?

    # TODO: probably push some of this down into Paramiko
    @contextmanager
    @opens
    def forward_remote(
        self,
        remote_port,
        local_port=None,
        remote_host="127.0.0.1",
        local_host="localhost",
    ):
        """
        Open a tunnel connecting ``remote_port`` to the local environment.

        For example, say you're running a daemon in development mode on your
        workstation at port 8080, and want to funnel traffic to it from a
        production or staging environment. In most situations this isn't
        possible as your office/home network probably blocks inbound traffic.
        But you have SSH access to this server, so you can temporarily make
        port 8080 on that server act like port 8080 on your workstation::

            from fabric import Connection

            c = Connection('my-remote-server')
            with c.forward_remote(8080):
                c.run("remote-data-writer --port 8080")
                # Assuming remote-data-writer runs until interrupted, this
                # will stay open until you Ctrl-C...

        This method is analogous to using the ``-R`` option of OpenSSH's
        ``ssh`` program.

        :param int remote_port: The remote port number on which to listen.

        :param int local_port:
            The local port number. Defaults to the same value as
            ``remote_port``.

        :param str local_host:
            The local hostname/interface the forwarded connection talks to.
            Default: ``localhost``.

        :param str remote_host:
            The remote interface address to listen on when forwarding
            connections. Default: ``127.0.0.1`` (i.e. only listen on the
            remote localhost).

        :returns:
            Nothing; this method is only useful as a context manager
            affecting local operating system state.

        .. versionadded:: 2.0
        """
        if not local_port:
            local_port = remote_port
        # Callback executes on each connection to the remote port and is
        # given a Channel hooked up to said port. (We don't actually care
        # about the source/dest host/port pairs at all; only whether the
        # channel has data to read and suchlike.)
        # We then pair that channel with a new 'outbound' socket connection
        # to the local host/port being forwarded, in a new Tunnel.
        # That Tunnel is then added to a shared data structure so we can
        # track & close them during shutdown.
        #
        # TODO: this approach is less than ideal because we have to share
        # state between ourselves & the callback handed into the transport's
        # own thread handling (which is roughly analogous to our
        # self-controlled TunnelManager for local forwarding). See if we can
        # use more of Paramiko's API (or improve it and then do so) so that
        # isn't necessary.
        tunnels = []

        def callback(channel, src_addr_tup, dst_addr_tup):
            sock = socket.socket()
            # TODO: handle connection failure such that channel, etc get
            # closed
            sock.connect((local_host, local_port))
            # TODO: we don't actually need to generate the Events at our
            # level, do we? Just let Tunnel.__init__ do it; all we do is
            # "press its button" on shutdown...
            tunnel = Tunnel(channel=channel, sock=sock, finished=Event())
            tunnel.start()
            # Communication between ourselves & the Paramiko handling
            # subthread
            tunnels.append(tunnel)

        # Ask Paramiko (really, the remote sshd) to call our callback
        # whenever connections are established on the remote iface/port.
        # transport.request_port_forward(remote_host, remote_port, callback)
        try:
            self.transport.request_port_forward(address=remote_host,
                                                port=remote_port,
                                                handler=callback)
            yield
        finally:
            # TODO: see above re: lack of a TunnelManager
            # TODO: and/or also refactor with TunnelManager re: shutdown
            # logic. E.g. maybe have a non-thread TunnelManager-alike with a
            # method that acts as the callback? At least then there's a tiny
            # bit more encapsulation...meh.
            for tunnel in tunnels:
                tunnel.finished.set()
                tunnel.join()
            self.transport.cancel_port_forward(address=remote_host,
                                               port=remote_port)