def _execute_command(self, command, env=None, cwd=None):
    """Run *command* on the remote host over the SSH connection.

    Parameters
    ----------
    command : list of str
        Command and its arguments; each element is shell-quoted before
        being joined into the remote command line.
    env : dict, optional
        Not supported yet; a truthy value raises ``NotImplementedError``.
    cwd : str, optional
        Not supported yet; a truthy value raises ``NotImplementedError``.

    Returns
    -------
    (str, str)
        Decoded stdout and stderr of the remote command.

    Raises
    ------
    CommandError
        If the remote command finishes with a non-zero exit status.
    """
    # Environment and working-directory forwarding are not implemented
    # for SSH sessions yet, so fail loudly instead of silently ignoring.
    if env:
        raise NotImplementedError("passing env variables to execution")
    if cwd:
        raise NotImplementedError("implement cwd support")

    # Quote each argument so the remote shell sees the intended argv.
    remote_cmdline = ' '.join(quote(part) for part in command)
    _stdin, out_file, err_file = self.ssh.exec_command(remote_cmdline)
    # recv_exit_status blocks until the remote command completes.
    status = out_file.channel.recv_exit_status()
    out_text = utils.to_unicode(out_file.read(), "utf-8")
    err_text = utils.to_unicode(err_file.read(), "utf-8")

    if status in (0, None):
        lgr.log(8, "Finished running %r with status %s", command, status)
        return (out_text, err_text)

    msg = "Failed to run %r. Exit code=%d. out=%s err=%s" \
          % (command, status, out_text, err_text)
    raise CommandError(str(command), msg, status, out_text, err_text)
def _run_dpkg_query(self, subfiles):
    """Run ``dpkg-query -S`` on *subfiles* and return its decoded output.

    A "no path found matching pattern" failure is tolerated (the partial
    stdout is used); any other ``CommandError`` propagates to the caller.
    """
    cmd = ['dpkg-query', '-S'] + subfiles
    # TODO: what should we do about those additional flags we have
    # in Runner but not yet in execute_command for all sessions
    #expect_stderr=True, expect_fail=True
    try:
        out, _err = self._session.execute_command(cmd)
    except CommandError as exc:
        err_text = utils.to_unicode(exc.stderr, "utf-8")
        if 'no path found matching pattern' not in err_text:
            raise  # some other fault -- handle it above
        # One file not found, so continue with whatever was reported
        out = exc.stdout
    return utils.to_unicode(out, "utf-8")
def _get_pkg_versions_and_sources(self, name, architecture):
    """Map available versions of a package to the APT sources providing them.

    Runs ``apt-cache policy`` for the package (optionally pinned to an
    architecture) and registers each providing source line under a short
    name in ``self._apt_sources`` / ``self._source_line_to_name_map``.

    Parameters
    ----------
    name : str
        Package name.
    architecture : str or None
        Optional architecture; when given the query becomes ``name:arch``.

    Returns
    -------
    dict or None
        ``{version: [source_name, ...]}`` for each known version, or
        ``None`` when ``apt-cache policy`` yields no parseable result.
    """
    query = name if not architecture \
        else "%s:%s" % (name, architecture)
    out, _ = self._session.execute_command(['apt-cache', 'policy', query])
    out = utils.to_unicode(out, "utf-8")
    # dpkg -s uses the same output as apt-cache show pkg
    ver = parse_apt_cache_policy_pkgs_output(out)
    if not ver:
        return None
    _, ver = ver.popitem()  # Pull out first (and only) result

    ver_dict = {}
    # Default to [] so a record without "versions"/"sources" keys is
    # skipped instead of raising TypeError on iteration.
    for v in ver.get("versions", []):
        key = v.get("version")
        ver_dict[key] = []
        for s in v.get("sources", []):
            s = s["source"]
            # If we haven't named the source yet, name it
            if s not in self._source_line_to_name_map:
                # Make sure we can find the source
                if s not in self._all_apt_sources:
                    # Lazy %-args: only formatted if the warning is emitted
                    lgr.warning("Cannot find source %s", s)
                    continue
                # Grab and name the source
                source = self._all_apt_sources[s]
                src_name = self._get_apt_source_name(source)
                source.name = src_name
                # Now add the source to our used sources
                self._apt_sources[src_name] = source
                # add the name for easy future lookup
                self._source_line_to_name_map[s] = src_name
            # Look up and add the short name for the source
            ver_dict[key].append(self._source_line_to_name_map[s])
    return ver_dict
def _parse_envvars_output(self, out):
    """Parse NUL-delimited ``key=value`` output into a dict.

    The previous docstring claimed this decoded JSON, which contradicted
    the implementation; the input is a NUL-separated list of
    ``key=value`` entries (presumably produced by something like
    ``env -0`` -- confirm against the caller).

    Parameters
    ----------
    out : string
        Raw output with entries separated by ``\\0`` bytes, each entry
        of the form ``key=value``.

    Returns
    -------
    dict
        Mapping of variable names to values.  Empty entries are skipped;
        entries without an ``=`` are skipped with a warning.
    """
    output = {}
    for line in to_unicode(out).split('\0'):
        if not line:
            continue
        # Split only on the first '=' so values may themselves contain '='
        split = line.split('=', 1)
        if len(split) != 2:
            lgr.warning(
                "Failed to split envvar definition into key=value. Got %s",
                line)
            continue
        output[split[0]] = split[1]
    return output
def _parse_envvars_output(self, out):
    """Deserialize JSON-encoded environment-variable output.

    Parameters
    ----------
    out : string
        JSON document to decode (bytes are converted via ``to_unicode``).

    Returns
    -------
    object
        The decoded JSON value.
    """
    text = to_unicode(out)
    return json.loads(text)
def _get_pkg_details(self, name, architecture, version):
    """Fetch detailed metadata for one exact package version.

    Uses ``apt-cache show name[:arch]=version``.

    Parameters
    ----------
    name : str
        Package name.
    architecture : str or None
        Optional architecture qualifier.
    version : str
        Exact version to query.

    Returns
    -------
    dict or None
        Parsed package record, or ``None`` when the command fails.
    """
    # Now use "apt-cache show pkg:arch=version" to get more detail
    query = "%s=%s" % (name, version) if not architecture \
        else "%s:%s=%s" % (name, architecture, version)
    try:
        out, _ = self._session.execute_command(
            ['apt-cache', 'show', query])
        out = utils.to_unicode(out, "utf-8")
        # dpkg -s uses the same output as apt-cache show pkg
        info = parse_apt_cache_show_pkgs_output(out)
        if info:
            _, info = info.popitem()  # Pull out first (and only) result
    except CommandError:  # dropped unused "as _" binding
        return None
    return info
def _get_pkg_arch_and_version(self, name, architecture):
    """Look up the installed architecture and version of a package.

    Uses ``dpkg -s name[:arch]``.

    Parameters
    ----------
    name : str
        Package name.
    architecture : str or None
        Optional architecture qualifier; when parsing succeeds it is
        replaced by the architecture dpkg actually reports.

    Returns
    -------
    (str or None, str or None)
        ``(architecture, version)``; ``(None, None)`` when the command
        fails, ``(architecture, None)`` when the output is unparseable.
    """
    # Use "dpkg -s pkg" to get the installed version and arch
    query = name if not architecture \
        else "%s:%s" % (name, architecture)
    try:
        out, _ = self._session.execute_command(['dpkg', '-s', query])
        out = utils.to_unicode(out, "utf-8")
        # dpkg -s uses the same output as apt-cache show pkg
        info = parse_apt_cache_show_pkgs_output(out)
        if info:
            _, info = info.popitem()  # Pull out first (and only) result
            architecture = info.get("Architecture")
            version = info.get("Version")
        else:
            version = None
    except CommandError:  # dropped unused "as _" binding
        return None, None
    return architecture, version
def _find_all_sources(self):
    """Populate ``self._all_apt_sources`` from ``apt-cache policy``.

    Parses the source listing reported by ``apt-cache policy`` and
    stores one ``APTSource`` per discovered source, keyed by name.
    """
    # Use apt-cache policy to get all sources
    out, _ = self._session.execute_command(['apt-cache', 'policy'])
    parsed = parse_apt_cache_policy_source_info(
        utils.to_unicode(out, "utf-8"))
    for src_name, src_vals in parsed.items():
        # Release-file date for this archive, if one can be retrieved
        release_date = self._get_date_from_release_file(
            src_vals.get("archive_uri"), src_vals.get("uri_suite"))
        self._all_apt_sources[src_name] = APTSource(
            name=src_name,
            component=src_vals.get("component"),
            codename=src_vals.get("codename"),
            archive=src_vals.get("archive"),
            architecture=src_vals.get("architecture"),
            origin=src_vals.get("origin"),
            label=src_vals.get("label"),
            site=src_vals.get("site"),
            date=release_date,
            archive_uri=src_vals.get("archive_uri"))
def _execute_command(self, command, env=None, cwd=None):
    """Execute *command* inside the Docker container.

    Parameters
    ----------
    command : list of str
        Command and arguments to run via the Docker exec API.
    env : dict, optional
        Not supported yet; a truthy value raises ``NotImplementedError``.
    cwd : str, optional
        Not supported yet; a truthy value only logs a warning.

    Returns
    -------
    (str, str)
        ``(output, '')`` -- the Docker exec stream combines stdout and
        stderr, so everything is reported as stdout.

    Raises
    ------
    CommandError
        On an rpc error line in the stream, or a non-zero exit code.
    """
    if env:
        raise NotImplementedError(
            "passing env variables to docker session execution")
    if cwd:
        # TODO: implement
        # raise NotImplementedError("handle cwd for docker")
        lgr.warning("cwd is not handled in docker yet")

    # The following call may throw the following exception:
    #    docker.errors.APIError - If the server returns an error.
    lgr.debug('Running command %r', command)
    execute = self.client.exec_create(container=self.container, cmd=command)
    # Accumulate chunks and join once -- avoids quadratic str += in the
    # streaming loop.  (Also removes a dead "out = []" assignment that
    # was immediately overwritten in the original.)
    chunks = []
    for i, line in enumerate(
            self.client.exec_start(exec_id=execute['Id'], stream=True)):
        if line.startswith(b'rpc error'):
            raise CommandError(cmd=command, msg="Docker error - %s" % line)
        chunks.append(utils.to_unicode(line, "utf-8"))
        lgr.debug("exec#%i: %s", i, line.rstrip())
    out = ''.join(chunks)

    exit_code = self.client.exec_inspect(execute['Id'])['ExitCode']
    if exit_code not in [0, None]:
        # NOTE(review): the combined stream is shown for both out= and
        # err= -- presumably deliberate since docker does not separate
        # them here; confirm before changing.
        msg = "Failed to run %r. Exit code=%d. out=%s err=%s" \
              % (command, exit_code, out, out)
        raise CommandError(str(command), msg, exit_code, '', out)
    else:
        lgr.log(8, "Finished running %r with status %s", command, exit_code)
        return (out, '')
def create(self):
    """ Create a baseline Docker image and run it to create the container.

    Returns
    -------
    dict : config parameters to capture in the inventory file
    """
    if self._container:
        raise ResourceError(
            "Container '{}' (ID {}) already exists in Docker".format(
                self.name, self.id))

    # image may be "repository:tag" -- pull() splits it when needed,
    # streaming progress records as JSON lines
    for raw_line in self._client.pull(repository=self.image, stream=True):
        progress = json.loads(utils.to_unicode(raw_line, "utf-8"))
        message = progress['status']
        if 'progress' in progress:
            message = message + ' ' + progress['progress']
        lgr.info(message)

    container_args = {
        'name': self.name,
        'image': self.image,
        'stdin_open': True,
        'tty': True,
        'command': '/bin/bash'
    }
    # Running the rztracer binary inside a container requires disabling
    # the kernel's seccomp facility at container creation.  That is a
    # security trade-off, so the default leaves it enabled.
    if self.seccomp_unconfined:
        container_args['host_config'] = {
            'SecurityOpt': ['seccomp:unconfined']}

    self._container = self._client.create_container(**container_args)
    self.id = self._container.get('Id')
    self._client.start(container=self.id)
    self.status = 'running'
    return {'id': self.id, 'status': self.status}
def _parse_envvars_output(self, out):
    """Decode JSON-formatted environment-variable output.

    Parameters
    ----------
    out : string
        JSON string to decode.

    Returns
    -------
    object
        The decoded representation of the JSON string.
    """
    decoded_text = to_unicode(out)
    return json.loads(decoded_text)