def _service_profile_enabled(self, service):
    """Decide whether *service* should be acted on, given the active profiles.

    Compose profiles only exist from docker-compose 1.28.0 on; with an older
    docker-compose every service counts as enabled. Otherwise a service is
    enabled when it declares no profiles at all, or declares at least one of
    the profiles passed to this module.
    """
    profiles_supported = LooseVersion(compose_version) >= LooseVersion('1.28.0')
    if not profiles_supported:
        return True
    active_profiles = self.profiles or []
    return service.enabled_for_profiles(active_profiles)
def create_network(self):
    """Create the Docker network unless one already exists.

    No-op when ``self.existing_network`` is already set. Honors check mode:
    the action and ``changed`` flag are always recorded, but the Docker API
    is only called when not in check mode.
    """
    if not self.existing_network:
        params = dict(
            driver=self.parameters.driver,
            options=self.parameters.driver_options,
        )

        # Translate the ipam_config option into SDK pool objects. docker-py
        # (< 2.0.0) and the newer docker SDK expose different constructors.
        ipam_pools = []
        if self.parameters.ipam_config:
            for ipam_pool in self.parameters.ipam_config:
                if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
                    ipam_pools.append(IPAMPool(**ipam_pool))
                else:
                    ipam_pools.append(utils.create_ipam_pool(**ipam_pool))

        if self.parameters.ipam_driver or self.parameters.ipam_driver_options or ipam_pools:
            # Only add ipam parameter if a driver was specified or if IPAM parameters
            # were specified. Leaving this parameter away can significantly speed up
            # creation; on my machine creation with this option needs ~15 seconds,
            # and without just a few seconds.
            if LooseVersion(docker_version) >= LooseVersion('2.0.0'):
                params['ipam'] = IPAMConfig(
                    driver=self.parameters.ipam_driver,
                    pool_configs=ipam_pools,
                    options=self.parameters.ipam_driver_options)
            else:
                # NOTE: the old docker-py helper is called without the driver
                # options; only driver and pools are forwarded on this path.
                params['ipam'] = utils.create_ipam_config(
                    driver=self.parameters.ipam_driver,
                    pool_configs=ipam_pools)

        # Optional flags are forwarded only when the user set them explicitly.
        if self.parameters.enable_ipv6 is not None:
            params['enable_ipv6'] = self.parameters.enable_ipv6
        if self.parameters.internal is not None:
            params['internal'] = self.parameters.internal
        if self.parameters.scope is not None:
            params['scope'] = self.parameters.scope
        if self.parameters.attachable is not None:
            params['attachable'] = self.parameters.attachable
        if self.parameters.labels:
            params['labels'] = self.parameters.labels

        if not self.check_mode:
            resp = self.client.create_network(self.parameters.name, **params)
            self.client.report_warnings(resp, ['Warning'])
            # Re-read the freshly created network so later code sees its state.
            self.existing_network = self.client.get_network(network_id=resp['Id'])

        self.results['actions'].append("Created network %s with driver %s" % (self.parameters.name, self.parameters.driver))
        self.results['changed'] = True
def _get_minimal_versions(self, option_minimal_versions, ignore_params=None):
    """Record per-option minimal SDK/API version requirements and enforce them.

    Populates ``self.option_minimal_versions`` with one entry per module
    option (minus *ignore_params*), merged with *option_minimal_versions*.
    For every entry the installed Docker SDK for Python version and the
    daemon API version are checked; ``data['supported']`` stores the result.
    If an unsupported option is actually used by the caller, the module
    fails with a version-specific error message.
    """
    self.option_minimal_versions = dict()
    for option in self.module.argument_spec:
        if ignore_params is not None:
            if option in ignore_params:
                continue
        self.option_minimal_versions[option] = dict()
    self.option_minimal_versions.update(option_minimal_versions)

    for option, data in self.option_minimal_versions.items():
        # Test whether option is supported, and store result
        support_docker_py = True
        support_docker_api = True
        if 'docker_py_version' in data:
            support_docker_py = self.docker_py_version >= LooseVersion(data['docker_py_version'])
        if 'docker_api_version' in data:
            support_docker_api = self.docker_api_version >= LooseVersion(data['docker_api_version'])
        data['supported'] = support_docker_py and support_docker_api
        # Fail if option is not supported but used
        if not data['supported']:
            # Test whether option is specified
            if 'detect_usage' in data:
                # Caller supplied a custom predicate for "is this option used".
                used = data['detect_usage'](self)
            else:
                used = self.module.params.get(option) is not None
                if used and 'default' in self.module.argument_spec[option]:
                    # A value equal to the declared default does not count as usage.
                    used = self.module.params[option] != self.module.argument_spec[option]['default']
            if used:
                # If the option is used, compose error message.
                if 'usage_msg' in data:
                    usg = data['usage_msg']
                else:
                    usg = 'set %s option' % (option, )
                if not support_docker_api:
                    msg = 'Docker API version is %s. Minimum version required is %s to %s.'
                    msg = msg % (self.docker_api_version_str, data['docker_api_version'], usg)
                elif not support_docker_py:
                    msg = "Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s to %s. "
                    # Pick the upgrade recommendation matching the required and
                    # installed SDK generations (docker-py vs docker).
                    if LooseVersion(data['docker_py_version']) < LooseVersion('2.0.0'):
                        msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
                    elif self.docker_py_version < LooseVersion('2.0.0'):
                        msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
                    else:
                        msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
                    msg = msg % (docker_version, platform.node(), sys.executable, data['docker_py_version'], usg)
                else:
                    # should not happen
                    msg = 'Cannot %s with your configuration.' % (usg, )
                self.fail(msg)
def docker_version(self):
    """Return the docker CLI version string, probing it lazily on first use.

    A literal ``dev`` version is accepted with a warning; any real version
    below 1.3 is rejected because the connection type cannot work with it.
    """
    if self._version:
        return self._version

    self._set_docker_args()
    self._version = self._get_docker_version()

    if self._version == u'dev':
        # Development builds carry no usable version number.
        display.warning(
            u'Docker version number is "dev". Will assume latest version.'
        )
    elif LooseVersion(self._version) < LooseVersion(u'1.3'):
        raise AnsibleError(
            'docker connection type requires docker 1.3 or higher')
    return self._version
def get_connect_params(auth, fail_function):
    """Translate resolved auth settings into keyword arguments for the Docker client.

    Builds the base connection parameters, then attaches a TLS configuration
    when TLS is requested (with or without certificate verification), and the
    ``use_ssh_client`` flag when supported by the installed SDK.
    """
    if is_using_tls(auth):
        # TLS endpoints must be addressed via https, not tcp.
        auth['docker_host'] = auth['docker_host'].replace('tcp://', 'https://')

    params = dict(
        base_url=auth['docker_host'],
        version=auth['api_version'],
        timeout=auth['timeout'],
    )

    if auth['tls_verify']:
        # TLS with certificate verification.
        tls_kwargs = dict(
            verify=True,
            assert_hostname=auth['tls_hostname'],
            ssl_version=auth['ssl_version'],
            fail_function=fail_function,
        )
        if auth['cert_path'] and auth['key_path']:
            tls_kwargs['client_cert'] = (auth['cert_path'], auth['key_path'])
        if auth['cacert_path']:
            tls_kwargs['ca_cert'] = auth['cacert_path']
        params['tls'] = _get_tls_config(**tls_kwargs)
    elif auth['tls']:
        # TLS without certificate verification.
        tls_kwargs = dict(
            verify=False,
            ssl_version=auth['ssl_version'],
            fail_function=fail_function,
        )
        if auth['cert_path'] and auth['key_path']:
            tls_kwargs['client_cert'] = (auth['cert_path'], auth['key_path'])
        params['tls'] = _get_tls_config(**tls_kwargs)

    if auth.get('use_ssh_client'):
        if LooseVersion(docker_version) < LooseVersion('4.4.0'):
            fail_function("use_ssh_client=True requires Docker SDK for Python 4.4.0 or newer")
        params['use_ssh_client'] = True

    # When neither branch above matched, no TLS configuration is attached.
    return params
def __init__(self, min_docker_version=None, min_docker_api_version=None):
    """Validate the installed Docker SDK for Python and connect to the daemon.

    :param min_docker_version: minimal required SDK version; defaults to
        the module-level MIN_DOCKER_VERSION.
    :param min_docker_api_version: minimal required daemon API version, or
        None to skip that check.

    Fails the module on: conflicting docker-py/docker installs, missing SDK,
    SDK older than required, connection errors, or a too-old daemon API.
    """
    if min_docker_version is None:
        min_docker_version = MIN_DOCKER_VERSION
    NEEDS_DOCKER_PY2 = (LooseVersion(min_docker_version) >= LooseVersion('2.0.0'))

    self.docker_py_version = LooseVersion(docker_version)

    if HAS_DOCKER_MODELS and HAS_DOCKER_SSLADAPTER:
        self.fail("Cannot have both the docker-py and docker python modules (old and new version of Docker "
                  "SDK for Python) installed together as they use the same namespace and cause a corrupt "
                  "installation. Please uninstall both packages, and re-install only the docker-py or docker "
                  "python module (for %s's Python %s). It is recommended to install the docker module if no "
                  "support for Python 2.6 is required. Please note that simply uninstalling one of the modules "
                  "can leave the other module in a broken state." % (platform.node(), sys.executable))

    if not HAS_DOCKER_PY:
        # Recommend an installation command matching the required SDK generation.
        if NEEDS_DOCKER_PY2:
            msg = missing_required_lib("Docker SDK for Python: docker above 5.0.0 (Python >= 3.6) or "
                                       "docker before 5.0.0 (Python 2.7)")
            msg = msg + ", for example via `pip install docker` (Python >= 3.6) or " \
                + "`pip install docker==4.4.4` (Python 2.7). The error was: %s"
        else:
            msg = missing_required_lib("Docker SDK for Python: docker above 5.0.0 (Python >= 3.6) or "
                                       "docker before 5.0.0 (Python 2.7) or docker-py (Python 2.6)")
            msg = msg + ", for example via `pip install docker` (Python >= 3.6) or `pip install docker==4.4.4` (Python 2.7) " \
                + "or `pip install docker-py` (Python 2.6). The error was: %s"
        self.fail(msg % HAS_DOCKER_ERROR, exception=HAS_DOCKER_TRACEBACK)

    if self.docker_py_version < LooseVersion(min_docker_version):
        msg = "Error: Docker SDK for Python version is %s (%s's Python %s). Minimum version required is %s."
        if not NEEDS_DOCKER_PY2:
            # The minimal required version is < 2.0 (and the current version as well).
            # Advertise docker (instead of docker-py) for non-Python-2.6 users.
            msg += DOCKERPYUPGRADE_RECOMMEND_DOCKER
        elif self.docker_py_version < LooseVersion('2.0'):
            # FIX: compare the parsed LooseVersion (as elsewhere in this file)
            # instead of the raw docker_version string, which only compared
            # correctly via the reflected operator.
            msg += DOCKERPYUPGRADE_SWITCH_TO_DOCKER
        else:
            msg += DOCKERPYUPGRADE_UPGRADE_DOCKER
        self.fail(msg % (docker_version, platform.node(), sys.executable, min_docker_version))

    self._connect_params = get_connect_params(self.auth_params, fail_function=self.fail)

    try:
        super(AnsibleDockerClientBase, self).__init__(**self._connect_params)
        self.docker_api_version_str = self.version()['ApiVersion']
    except APIError as exc:
        self.fail("Docker API error: %s" % exc)
    except Exception as exc:
        self.fail("Error connecting: %s" % exc)

    self.docker_api_version = LooseVersion(self.docker_api_version_str)
    if min_docker_api_version is not None:
        if self.docker_api_version < LooseVersion(min_docker_api_version):
            self.fail('Docker API version is %s. Minimum version required is %s.' % (self.docker_api_version_str, min_docker_api_version))
def _get_actual_user(self):
    """Determine which user commands will effectively run as in the container.

    Returns the configured remote_user when the docker CLI supports setting
    an exec user, the container's default user when it does not (clearing
    ``self.remote_user``), or None when no user was requested — in which
    case the container default is still looked up at high verbosity so it
    is available for logging.
    """
    if self.remote_user is not None:
        # An explicit user is provided
        if self.docker_version == u'dev' or LooseVersion(
                self.docker_version) >= LooseVersion(u'1.7'):
            # Support for specifying the exec user was added in docker 1.7
            return self.remote_user
        else:
            # Older docker cannot honor remote_user: fall back to the
            # container's own default user, warning about the mismatch.
            self.remote_user = None
            actual_user = self._get_docker_remote_user()
            if actual_user != self.get_option('remote_user'):
                # NOTE(review): the warning reads self.actual_user rather than
                # the local actual_user just computed — this assumes that
                # attribute exists and is current; confirm which value the
                # warning is meant to display.
                display.warning(
                    u'docker {0} does not support remote_user, using container default: {1}'
                    .format(self.docker_version, self.actual_user or u'?'))
            return actual_user
    elif self._display.verbosity > 2:
        # Since we're not setting the actual_user, look it up so we have it for logging later
        # Only do this if display verbosity is high enough that we'll need the value
        # This saves overhead from calling into docker when we don't need to.
        return self._get_docker_remote_user()
    else:
        return None
def inspect_distribution(self, image, **kwargs):
    """Inspect an image's registry distribution data (digest information).

    Docker SDK for Python releases before 4.0.0 could not attach registry
    credentials to this endpoint, so for those versions the API is called
    directly with an explicit ``X-Registry-Auth`` header when credentials
    are available; in all other cases the SDK implementation is used.
    """
    if self.docker_py_version >= LooseVersion('4.0.0'):
        return super(AnsibleDockerClientBase, self).inspect_distribution(image, **kwargs)

    registry = auth.resolve_repository_name(image)[0]
    header = auth.get_config_header(self, registry)
    if not header:
        # No credentials for this registry; the stock SDK call suffices.
        return super(AnsibleDockerClientBase, self).inspect_distribution(image, **kwargs)

    response = self._get(
        self._url('/distribution/{0}/json', image),
        headers={'X-Registry-Auth': header}
    )
    return self._result(response, json=True)
def archive_image(self, name, tag):
    '''
    Archive an image to a .tar file. Called when archive_path is passed.

    :param name: name (or image ID) of the image. Type: str
    :param tag: tag of the image; falls back to "latest" when falsy. Type: str
    :return None
    '''
    if not tag:
        tag = "latest"

    if is_image_name_id(name):
        # The image was specified by ID, not by repository name.
        image = self.client.find_image_by_id(name, accept_missing_image=True)
        image_name = name
    else:
        image = self.client.find_image(name=name, tag=tag)
        image_name = "%s:%s" % (name, tag)

    if not image:
        # Nothing to archive; leave results unchanged.
        self.log("archive image: image %s not found" % image_name)
        return

    self.results['actions'].append('Archived image %s to %s' % (image_name, self.archive_path))
    self.results['changed'] = True
    if not self.check_mode:
        self.log("Getting archive of image %s" % image_name)
        try:
            saved_image = self.client.get_image(image_name)
        except Exception as exc:
            self.fail("Error getting image %s - %s" % (image_name, to_native(exc)))
        try:
            with open(self.archive_path, 'wb') as fd:
                if self.client.docker_py_version >= LooseVersion('3.0.0'):
                    # Docker SDK >= 3.0.0 returns an iterable of chunks directly.
                    for chunk in saved_image:
                        fd.write(chunk)
                else:
                    # Older docker-py returns a response object exposing stream().
                    for chunk in saved_image.stream(2048, decode_content=False):
                        fd.write(chunk)
        except Exception as exc:
            self.fail("Error writing image archive %s - %s" % (self.archive_path, to_native(exc)))

    if image:
        self.results['image'] = image
def cmd_up(self):
    """Implement the ``state: present`` (docker-compose up) behavior.

    Computes the convergence plan per service (to report actions and set
    ``changed``), optionally pulls/builds images and detects orphans first,
    runs ``project.up()`` when not in check mode, applies any requested
    stop/restart/scale follow-ups, and finally collects per-container facts
    into ``result['services']``.
    """
    start_deps = self.dependencies
    service_names = self.services
    detached = True
    result = dict(changed=False, actions=[], services=dict())

    # CLI-style options controlling the convergence strategy.
    up_options = {
        u'--no-recreate': False,
        u'--build': False,
        u'--no-build': False,
        u'--no-deps': False,
        u'--force-recreate': False,
    }

    if self.recreate == 'never':
        up_options[u'--no-recreate'] = True
    elif self.recreate == 'always':
        up_options[u'--force-recreate'] = True

    if self.remove_orphans:
        up_options[u'--remove-orphans'] = True

    converge = convergence_strategy_from_opts(up_options)
    self.log("convergence strategy: %s" % converge)

    if self.pull:
        pull_output = self.cmd_pull()
        result['changed'] |= pull_output['changed']
        result['actions'] += pull_output['actions']

    if self.build:
        build_output = self.cmd_build()
        result['changed'] |= build_output['changed']
        result['actions'] += build_output['actions']

    if self.remove_orphans:
        # Look for containers labeled for this project whose service is no
        # longer part of it; their removal counts as a change.
        containers = self.client.containers(
            filters={
                'label': [
                    '{0}={1}'.format(LABEL_PROJECT, self.project.name),
                    '{0}={1}'.format(LABEL_ONE_OFF, "False")
                ],
            }
        )

        orphans = []
        for container in containers:
            service_name = container.get('Labels', {}).get(LABEL_SERVICE)
            if service_name not in self.project.service_names:
                orphans.append(service_name)

        if orphans:
            result['changed'] = True

    # Record the planned action (create/recreate/start/...) for each service.
    for service in self.project.services:
        if not service_names or service.name in service_names:
            plan = service.convergence_plan(strategy=converge)
            if plan.action == 'start' and self.stopped:
                # In case the only action is starting, and the user requested
                # that the service should be stopped, ignore this service.
                continue
            if not self._service_profile_enabled(service):
                continue
            if plan.action != 'noop':
                result['changed'] = True
                result_action = dict(service=service.name)
                result_action[plan.action] = []
                for container in plan.containers:
                    result_action[plan.action].append(dict(
                        id=container.id,
                        name=container.name,
                        short_id=container.short_id,
                    ))
                result['actions'].append(result_action)

    if not self.check_mode and result['changed']:
        # docker-compose writes to stdout/stderr; capture both so module
        # output stays valid JSON and failures can include the real logs.
        out_redir_name, err_redir_name = make_redirection_tempfiles()
        try:
            with stdout_redirector(out_redir_name):
                with stderr_redirector(err_redir_name):
                    do_build = build_action_from_opts(up_options)
                    self.log('Setting do_build to %s' % do_build)
                    up_kwargs = {
                        'service_names': service_names,
                        'start_deps': start_deps,
                        'strategy': converge,
                        'do_build': do_build,
                        'detached': detached,
                        'remove_orphans': self.remove_orphans,
                        'timeout': self.timeout,
                    }

                    if LooseVersion(compose_version) >= LooseVersion('1.17.0'):
                        # 'start' keyword only exists from compose 1.17.0 on.
                        up_kwargs['start'] = not self.stopped
                    elif self.stopped:
                        self.client.module.warn(
                            "The 'stopped' option requires docker-compose version >= 1.17.0. " +
                            "This task was run with docker-compose version %s." % compose_version
                        )

                    self.project.up(**up_kwargs)
        except Exception as exc:
            fail_reason = get_failure_info(exc, out_redir_name, err_redir_name,
                                           msg_format="Error starting project %s")
            self.client.fail(**fail_reason)
        else:
            cleanup_redirection_tempfiles(out_redir_name, err_redir_name)

    if self.stopped:
        stop_output = self.cmd_stop(service_names)
        result['changed'] |= stop_output['changed']
        result['actions'] += stop_output['actions']

    if self.restarted:
        restart_output = self.cmd_restart(service_names)
        result['changed'] |= restart_output['changed']
        result['actions'] += restart_output['actions']

    if self.scale:
        scale_output = self.cmd_scale()
        result['changed'] |= scale_output['changed']
        result['actions'] += scale_output['actions']

    # Collect per-container facts for every service, including stopped ones.
    for service in self.project.services:
        service_facts = dict()
        result['services'][service.name] = service_facts
        for container in service.containers(stopped=True):
            inspection = container.inspect()
            # pare down the inspection data to the most useful bits
            facts = dict(
                cmd=[],
                labels=dict(),
                image=None,
                state=dict(
                    running=None,
                    status=None
                ),
                networks=dict()
            )

            if inspection['Config'].get('Cmd', None) is not None:
                facts['cmd'] = inspection['Config']['Cmd']
            if inspection['Config'].get('Labels', None) is not None:
                facts['labels'] = inspection['Config']['Labels']
            if inspection['Config'].get('Image', None) is not None:
                facts['image'] = inspection['Config']['Image']
            if inspection['State'].get('Running', None) is not None:
                facts['state']['running'] = inspection['State']['Running']
            if inspection['State'].get('Status', None) is not None:
                facts['state']['status'] = inspection['State']['Status']

            if inspection.get('NetworkSettings') and inspection['NetworkSettings'].get('Networks'):
                networks = inspection['NetworkSettings']['Networks']
                for key in networks:
                    facts['networks'][key] = dict(
                        aliases=[],
                        globalIPv6=None,
                        globalIPv6PrefixLen=0,
                        IPAddress=None,
                        IPPrefixLen=0,
                        links=None,
                        macAddress=None,
                    )
                    if networks[key].get('Aliases', None) is not None:
                        facts['networks'][key]['aliases'] = networks[key]['Aliases']
                    if networks[key].get('GlobalIPv6Address', None) is not None:
                        facts['networks'][key]['globalIPv6'] = networks[key]['GlobalIPv6Address']
                    if networks[key].get('GlobalIPv6PrefixLen', None) is not None:
                        facts['networks'][key]['globalIPv6PrefixLen'] = networks[key]['GlobalIPv6PrefixLen']
                    if networks[key].get('IPAddress', None) is not None:
                        facts['networks'][key]['IPAddress'] = networks[key]['IPAddress']
                    if networks[key].get('IPPrefixLen', None) is not None:
                        facts['networks'][key]['IPPrefixLen'] = networks[key]['IPPrefixLen']
                    if networks[key].get('Links', None) is not None:
                        facts['networks'][key]['links'] = networks[key]['Links']
                    if networks[key].get('MacAddress', None) is not None:
                        facts['networks'][key]['macAddress'] = networks[key]['MacAddress']

            service_facts[container.name] = facts

    return result
def __init__(self, client):
    """Validate module options and set up the docker-compose project.

    Copies all module parameters onto the instance, builds the CLI-style
    options dict for the compose library, materializes an inline definition
    into a temporary docker-compose.yml when given, and loads the project.
    Fails early on missing libraries, a too-old docker-compose, or
    conflicting options.
    """
    super(ContainerManager, self).__init__()

    self.client = client
    # Placeholders for module parameters; each is overwritten from
    # client.module.params in the loop below.
    self.project_src = None
    self.files = None
    self.project_name = None
    self.state = None
    self.definition = None
    self.hostname_check = None
    self.timeout = None
    self.remove_images = None
    self.remove_orphans = None
    self.remove_volumes = None
    self.stopped = None
    self.restarted = None
    self.recreate = None
    self.build = None
    self.dependencies = None
    self.services = None
    self.scale = None
    self.debug = None
    self.pull = None
    self.nocache = None

    for key, value in client.module.params.items():
        setattr(self, key, value)

    self.check_mode = client.check_mode

    if not self.debug:
        self.debug = client.module._debug

    # Translate module options into docker-compose CLI-style options.
    self.options = dict()
    self.options.update(self._get_auth_options())
    self.options[u'--skip-hostname-check'] = (not self.hostname_check)

    if self.project_name:
        self.options[u'--project-name'] = self.project_name

    if self.env_file:
        self.options[u'--env-file'] = self.env_file

    if self.files:
        self.options[u'--file'] = self.files

    if self.profiles:
        self.options[u'--profile'] = self.profiles

    if not HAS_COMPOSE:
        self.client.fail("Unable to load docker-compose. Try `pip install docker-compose`. Error: %s" %
                         to_native(HAS_COMPOSE_EXC))

    if LooseVersion(compose_version) < LooseVersion(MINIMUM_COMPOSE_VERSION):
        self.client.fail("Found docker-compose version %s. Minimum required version is %s. "
                         "Upgrade docker-compose to a min version of %s." %
                         (compose_version, MINIMUM_COMPOSE_VERSION, MINIMUM_COMPOSE_VERSION))

    if self.restarted and self.stopped:
        self.client.fail("Cannot use restarted and stopped at the same time.")

    self.log("options: ")
    self.log(self.options, pretty_print=True)

    if self.definition:
        if not HAS_YAML:
            self.client.fail("Unable to load yaml. Try `pip install PyYAML`. Error: %s" % to_native(HAS_YAML_EXC))

        if not self.project_name:
            self.client.fail("Parameter error - project_name required when providing definition.")

        # Write the inline definition to a temporary docker-compose.yml so
        # the compose library can consume it like a normal project directory.
        self.project_src = tempfile.mkdtemp(prefix="ansible")
        compose_file = os.path.join(self.project_src, "docker-compose.yml")
        try:
            self.log('writing: ')
            self.log(yaml.dump(self.definition, default_flow_style=False))
            with open(compose_file, 'w') as f:
                f.write(yaml.dump(self.definition, default_flow_style=False))
        except Exception as exc:
            self.client.fail("Error writing to %s - %s" % (compose_file, to_native(exc)))
    else:
        if not self.project_src:
            self.client.fail("Parameter error - project_src required.")

    try:
        self.log("project_src: %s" % self.project_src)
        self.project = project_from_options(self.project_src, self.options)
    except Exception as exc:
        self.client.fail("Configuration error - %s" % to_native(exc))
def load_image(self):
    '''
    Load an image from a .tar archive.

    Streams the archive to the daemon, parses any output the SDK returns to
    verify the expected image was actually loaded (failing otherwise, and
    warning on extra images), and returns the resulting image dict.

    :return: image dict
    '''
    # Load image(s) from file
    load_output = []
    has_output = False
    try:
        self.log("Opening image %s" % self.load_path)
        with open(self.load_path, 'rb') as image_tar:
            self.log("Loading image from %s" % self.load_path)
            output = self.client.load_image(image_tar)
            if output is not None:
                # Old versions of Docker SDK of Python (before version 2.5.0) do not return anything.
                # (See https://github.com/docker/docker-py/commit/7139e2d8f1ea82340417add02090bfaf7794f159)
                # Note that before that commit, something else than None was returned, but that was also
                # only introduced in a commit that first appeared in 2.5.0 (see
                # https://github.com/docker/docker-py/commit/9e793806ff79559c3bc591d8c52a3bbe3cdb7350).
                # So the above check works for every released version of Docker SDK for Python.
                has_output = True
                for line in output:
                    self.log(line, pretty_print=True)
                    self._extract_output_line(line, load_output)
            else:
                if LooseVersion(docker_version) < LooseVersion('2.5.0'):
                    self.client.module.warn(
                        'The installed version of the Docker SDK for Python does not return the loading results'
                        ' from the Docker daemon. Therefore, we cannot verify whether the expected image was'
                        ' loaded, whether multiple images where loaded, or whether the load actually succeeded.'
                        ' If you are not stuck with Python 2.6, *please* upgrade to a version newer than 2.5.0'
                        ' (2.5.0 was released in August 2017).')
                else:
                    self.client.module.warn(
                        'The API version of your Docker daemon is < 1.23, which does not return the image'
                        ' loading result from the Docker daemon. Therefore, we cannot verify whether the'
                        ' expected image was loaded, whether multiple images where loaded, or whether the load'
                        ' actually succeeded. You should consider upgrading your Docker daemon.'
                    )
    except EnvironmentError as exc:
        if exc.errno == errno.ENOENT:
            self.client.fail("Error opening image %s - %s" % (self.load_path, to_native(exc)))
        self.client.fail("Error loading image %s - %s" % (self.name, to_native(exc)), stdout='\n'.join(load_output))
    except Exception as exc:
        self.client.fail("Error loading image %s - %s" % (self.name, to_native(exc)), stdout='\n'.join(load_output))

    # Collect loaded images
    if has_output:
        # We can only do this when we actually got some output from Docker daemon
        loaded_images = set()
        loaded_image_ids = set()
        for line in load_output:
            if line.startswith('Loaded image:'):
                loaded_images.add(line[len('Loaded image:'):].strip())
            if line.startswith('Loaded image ID:'):
                loaded_image_ids.add(line[len('Loaded image ID:'):].strip().lower())

        if not loaded_images and not loaded_image_ids:
            self.client.fail(
                "Detected no loaded images. Archive potentially corrupt?",
                stdout='\n'.join(load_output))

        # Determine whether the expected image is among those loaded.
        # (Renamed from the misleadingly inverted 'found_image'.)
        if is_image_name_id(self.name):
            expected_image = self.name.lower()
            image_missing = expected_image not in loaded_image_ids
        else:
            expected_image = '%s:%s' % (self.name, self.tag)
            image_missing = expected_image not in loaded_images
        if image_missing:
            self.client.fail(
                "The archive did not contain image '%s'. Instead, found %s." % (
                    expected_image,
                    ', '.join(sorted(["'%s'" % image for image in loaded_images] + list(loaded_image_ids)))),
                stdout='\n'.join(load_output))
        # BUG FIX: remove the expected image from the collection it was
        # actually found in. Previously this always called
        # loaded_images.remove(), which raised KeyError when the image was
        # specified (and found) by ID, since IDs live in loaded_image_ids.
        if is_image_name_id(self.name):
            loaded_image_ids.remove(expected_image)
        else:
            loaded_images.remove(expected_image)
        if loaded_images:
            self.client.module.warn(
                "The archive contained more images than specified: %s" % (
                    ', '.join(sorted(["'%s'" % image for image in loaded_images] + list(loaded_image_ids))), ))

    if is_image_name_id(self.name):
        return self.client.find_image_by_id(self.name, accept_missing_image=True)
    else:
        return self.client.find_image(self.name, self.tag)
def build_image(self):
    '''
    Build an image

    :return: dict with the collected build log ("stdout") and the resulting
        image dict ("image")
    '''
    # Assemble keyword arguments for the SDK's build() call from module options.
    params = dict(
        path=self.build_path,
        tag=self.name,
        rm=self.rm,
        nocache=self.nocache,
        timeout=self.http_timeout,
        pull=self.pull,
        forcerm=self.rm,
        dockerfile=self.dockerfile,
        decode=True,
    )
    if self.client.docker_py_version < LooseVersion('3.0.0'):
        # Only Docker SDK < 3.0.0 accepts/needs the explicit stream flag.
        params['stream'] = True
    if self.tag:
        # Override the plain name with name:tag when a tag was supplied.
        params['tag'] = "%s:%s" % (self.name, self.tag)
    if self.container_limits:
        params['container_limits'] = self.container_limits
    if self.buildargs:
        # The API expects string values; convert whatever the user supplied.
        for key, value in self.buildargs.items():
            self.buildargs[key] = to_native(value)
        params['buildargs'] = self.buildargs
    if self.cache_from:
        params['cache_from'] = self.cache_from
    if self.network:
        params['network_mode'] = self.network
    if self.extra_hosts:
        params['extra_hosts'] = self.extra_hosts
    if self.use_config_proxy:
        params['use_config_proxy'] = self.use_config_proxy
        # Due to a bug in docker-py, it will crash if
        # use_config_proxy is True and buildargs is None
        if 'buildargs' not in params:
            params['buildargs'] = {}
    if self.target:
        params['target'] = self.target
    if self.build_platform is not None:
        params['platform'] = self.build_platform

    # Stream the build output, collecting log lines and failing on errors.
    build_output = []
    for line in self.client.build(**params):
        self.log(line, pretty_print=True)
        self._extract_output_line(line, build_output)

        if line.get('error'):
            if line.get('errorDetail'):
                errorDetail = line.get('errorDetail')
                self.fail(
                    "Error building %s - code: %s, message: %s, logs: %s" % (
                        self.name,
                        errorDetail.get('code'),
                        errorDetail.get('message'),
                        build_output))
            else:
                self.fail("Error building %s - message: %s, logs: %s" % (
                    self.name, line.get('error'), build_output))

    return {
        "stdout": "\n".join(build_output),
        "image": self.client.find_image(name=self.name, tag=self.tag)
    }
from ansible_collections.community.docker.plugins.module_utils.common import ( clean_dict_booleans_for_docker_api, docker_version, AnsibleDockerClient, DockerBaseClass, is_image_name_id, is_valid_tag, RequestException, ) from ansible.module_utils.common.text.converters import to_native from ansible_collections.community.docker.plugins.module_utils.version import LooseVersion if docker_version is not None: try: if LooseVersion(docker_version) >= LooseVersion('2.0.0'): from docker.auth import resolve_repository_name else: from docker.auth.auth import resolve_repository_name from docker.utils.utils import parse_repository_tag from docker.errors import DockerException, NotFound except ImportError: # missing Docker SDK for Python handled in module_utils.docker.common pass class ImageManager(DockerBaseClass): def __init__(self, client, results): super(ImageManager, self).__init__()
def main():
    """Entry point of the docker_prune module.

    Prunes containers, images, networks, volumes and/or the builder cache
    depending on which boolean options were enabled, collecting what was
    deleted and how much space was reclaimed into the module result.
    """
    argument_spec = dict(
        containers=dict(type='bool', default=False),
        containers_filters=dict(type='dict'),
        images=dict(type='bool', default=False),
        images_filters=dict(type='dict'),
        networks=dict(type='bool', default=False),
        networks_filters=dict(type='dict'),
        volumes=dict(type='bool', default=False),
        volumes_filters=dict(type='dict'),
        builder_cache=dict(type='bool', default=False),
    )

    client = AnsibleDockerClient(
        argument_spec=argument_spec,
        # supports_check_mode=True,
        min_docker_api_version='1.25',
        min_docker_version='2.1.0',
    )

    # Version checks
    cache_min_version = '3.3.0'
    if client.module.params['builder_cache'] and client.docker_py_version < LooseVersion(cache_min_version):
        msg = "Error: Docker SDK for Python's version is %s. Minimum version required for builds option is %s. Use `pip install --upgrade docker` to upgrade."
        client.fail(msg % (docker_version, cache_min_version))

    try:
        result = dict()

        if client.module.params['containers']:
            filters = clean_dict_booleans_for_docker_api(client.module.params.get('containers_filters'))
            res = client.prune_containers(filters=filters)
            result['containers'] = res.get('ContainersDeleted') or []
            result['containers_space_reclaimed'] = res['SpaceReclaimed']

        if client.module.params['images']:
            filters = clean_dict_booleans_for_docker_api(client.module.params.get('images_filters'))
            res = client.prune_images(filters=filters)
            result['images'] = res.get('ImagesDeleted') or []
            result['images_space_reclaimed'] = res['SpaceReclaimed']

        if client.module.params['networks']:
            filters = clean_dict_booleans_for_docker_api(client.module.params.get('networks_filters'))
            res = client.prune_networks(filters=filters)
            # Note: the networks prune endpoint reports no SpaceReclaimed.
            result['networks'] = res.get('NetworksDeleted') or []

        if client.module.params['volumes']:
            filters = clean_dict_booleans_for_docker_api(client.module.params.get('volumes_filters'))
            res = client.prune_volumes(filters=filters)
            result['volumes'] = res.get('VolumesDeleted') or []
            result['volumes_space_reclaimed'] = res['SpaceReclaimed']

        if client.module.params['builder_cache']:
            res = client.prune_builds()
            result['builder_cache_space_reclaimed'] = res['SpaceReclaimed']

        client.module.exit_json(**result)
    except DockerException as e:
        client.fail('An unexpected docker error occurred: {0}'.format(to_native(e)), exception=traceback.format_exc())
    except RequestException as e:
        client.fail(
            'An unexpected requests error occurred when docker-py tried to talk to the docker daemon: {0}'
            .format(to_native(e)), exception=traceback.format_exc())
def get_unlock_key(self):
    """Fetch the swarm unlock key, or None when the installed SDK is too old.

    The unlock-key endpoint wrapper only exists in Docker SDK for Python
    2.7.0 and newer.
    """
    if self.docker_py_version >= LooseVersion('2.7.0'):
        return super(AnsibleDockerSwarmClient, self).get_unlock_key()
    return None