def _parse_envvars_output(self, out):
    """Parse NUL-delimited ``KEY=VALUE`` output into a dict.

    The input is expected to contain environment variable definitions
    separated by NUL (``\\0``) characters (e.g. the output of ``env -0``).
    The previous docstring incorrectly described this as JSON decoding.

    Parameters
    ----------
    out : str or bytes
        Raw command output with ``KEY=VALUE`` entries separated by NUL
        characters.

    Returns
    -------
    dict
        Mapping of environment variable names to their values.  Empty
        entries are skipped silently; entries that cannot be split on
        ``=`` are skipped with a warning.
    """
    output = {}
    for line in to_unicode(out).split('\0'):
        if not line:
            continue
        # Split only on the first '=' so values containing '=' survive intact
        split = line.split('=', 1)
        if len(split) != 2:
            lgr.warning(
                "Failed to split envvar definition into key=value. Got %s",
                line)
            continue
        output[split[0]] = split[1]
    return output
def _execute_command(self, command, env=None, cwd=None):
    """Execute a command inside the Docker container via the exec API.

    Parameters
    ----------
    command : list or str
        Command to run in the container.
    env : dict, optional
        Environment variables for the command.  Not implemented; any
        non-empty value raises ``NotImplementedError``.
    cwd : str, optional
        Working directory for the command.  Not yet handled for Docker;
        a warning is emitted and the value is ignored.

    Returns
    -------
    tuple of (str, str)
        ``(out, '')`` -- the Docker exec API used here does not separate
        stdout from stderr, so the combined stream is returned as the
        first element and the second element is always empty.

    Raises
    ------
    NotImplementedError
        If `env` is provided.
    CommandError
        If the daemon reports an RPC error in the stream, or the command
        exits with a non-zero status.
    """
    if env:
        raise NotImplementedError(
            "passing env variables to docker session execution")
    if cwd:
        # TODO: implement, e.g. by wrapping command in a shell 'cd && ...'
        lgr.warning("cwd is not handled in docker yet")

    # The following calls may throw docker.errors.APIError if the server
    # returns an error.
    lgr.debug('Running command %r', command)
    execute = self.client.exec_create(container=self.container, cmd=command)
    out = ''
    for i, line in enumerate(
            self.client.exec_start(exec_id=execute['Id'], stream=True)):
        # The daemon reports transport-level failures inline in the stream
        if line.startswith(b'rpc error'):
            raise CommandError(cmd=command, msg="Docker error - %s" % line)
        out += utils.to_unicode(line, "utf-8")
        lgr.debug("exec#%i: %s", i, line.rstrip())

    exit_code = self.client.exec_inspect(execute['Id'])['ExitCode']
    # None means the exec is still running / no status; treat like success
    if exit_code not in [0, None]:
        # stderr is merged into stdout by exec_start, so `out` is reported
        # for both fields
        msg = "Failed to run %r. Exit code=%d. out=%s err=%s" \
              % (command, exit_code, out, out)
        raise CommandError(str(command), msg, exit_code, '', out)
    lgr.log(8, "Finished running %r with status %s", command, exit_code)
    return (out, '')
def _find_all_sources(self):
    """Discover all APT sources configured on the system.

    Runs ``apt-cache policy`` in the session, parses the per-source
    information, and populates ``self._all_apt_sources`` with one
    ``APTSource`` per source, including the date extracted from the
    archive's release file.
    """
    # Use apt-cache policy to get all sources
    out, _ = self._session.execute_command(['apt-cache', 'policy'])
    out = utils.to_unicode(out, "utf-8")
    src_info = parse_apt_cache_policy_source_info(out)
    # Iterate items() directly instead of looking each key up again
    for src_name, src_vals in src_info.items():
        date = self._get_date_from_release_file(
            src_vals.get("archive_uri"), src_vals.get("uri_suite"))
        self._all_apt_sources[src_name] = APTSource(
            name=src_name,
            component=src_vals.get("component"),
            codename=src_vals.get("codename"),
            archive=src_vals.get("archive"),
            architecture=src_vals.get("architecture"),
            origin=src_vals.get("origin"),
            label=src_vals.get("label"),
            site=src_vals.get("site"),
            date=date,
            archive_uri=src_vals.get("archive_uri"))
def create(self):
    """Pull the base Docker image and start a container from it.

    Returns
    -------
    dict
        Config parameters to capture in the inventory file
        (``id`` and ``status``).

    Raises
    ------
    ResourceError
        If a container with this name/ID already exists in Docker.
    """
    if self._container:
        raise ResourceError(
            "Container '{}' (ID {}) already exists in Docker".format(
                self.name, self.id))

    # image might be of the form repository:tag -- pull would split them
    # if needed
    for line in self._client.pull(repository=self.image, stream=True):
        status = json.loads(utils.to_unicode(line, "utf-8"))
        message = status['status']
        if 'progress' in status:
            message = '{} {}'.format(message, status['progress'])
        lgr.info(message)

    create_kwargs = {
        'name': self.name,
        'image': self.image,
        'stdin_open': True,
        'tty': True,
        'command': '/bin/bash',
    }
    # When running the rztracer binary in a Docker container, it is
    # necessary to suspend the kernel's security facility when creating
    # the container.  Since it is a security issue, the default is to
    # *not* turn it off.
    if self.seccomp_unconfined:
        create_kwargs['host_config'] = {'SecurityOpt': ['seccomp:unconfined']}

    self._container = self._client.create_container(**create_kwargs)
    self.id = self._container.get('Id')
    self._client.start(container=self.id)
    self.status = 'running'
    return {'id': self.id, 'status': self.status}