def dc(self):
    # Lazily create and cache the Docker client on first access.
    if self._dc is not None:
        return self._dc
    docker_kwargs = self.docker_kwargs.copy()
    self._dc = docker.Client(version='auto', **docker_kwargs)
    return self._dc
def __init__(self, _docker=None):
    if _docker:
        self.docker = _docker
    else:
        import docker
        self.docker = docker.Client(base_url='unix://var/run/docker.sock')
def get_client():
    return docker.Client(base_url="unix:///run/podman/podman.sock")
import docker

c = docker.Client(base_url='unix://var/run/docker.sock')
c.pull(repository='nginx', tag='latest')
container_id = c.create_container(image='nginx:latest', ports=[80],
                                  volumes=['/data'], name='hello')
c.start(container_id,
        port_bindings={80: ('0.0.0.0', 80)},
        binds={'/data': {'bind': '/data', 'ro': False}})
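# A hedged follow-up sketch (not part of the original snippet): tearing the
# container back down with the same legacy docker-py Client API.
c.stop(container_id)                       # SIGTERM, then SIGKILL after the grace period
c.remove_container(container_id, v=True)   # v=True also removes anonymous volumes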
def __init__(self, name, ip, endpoint=None, docker_port=None,
             socket_path=None, api_version=None, timeout=None,
             ssh_tunnel=None, tls=None, tls_verify=False,
             tls_ca_cert=None, tls_cert=None, tls_key=None,
             ssl_version=None):
    """Instantiate a new ship.

    Args:
        name (string): the name of the ship.
        ip (string): the IP address or resolvable host name of the host.
        docker_port (int): the port the Docker daemon listens on.
        socket_path (string): the path to the unix socket the Docker
            daemon listens on.
        api_version (string): the API version of the Docker daemon.
        ssh_tunnel (dict): configuration for SSH tunneling to the remote
            Docker daemon.
    """
    Entity.__init__(self, name)
    self._ip = ip
    self._endpoint = endpoint or ip
    self._docker_port = int(docker_port or (
        self.DEFAULT_DOCKER_TLS_PORT if tls else self.DEFAULT_DOCKER_PORT))
    self._socket_path = os.path.realpath(socket_path) \
        if socket_path else None
    self._tunnel = None

    if ssh_tunnel:
        if 'user' not in ssh_tunnel:
            raise exceptions.EnvironmentConfigurationException(
                'Missing SSH user for ship {} tunnel configuration'.format(
                    self.name))
        if 'key' not in ssh_tunnel:
            raise exceptions.EnvironmentConfigurationException(
                'Missing SSH key for ship {} tunnel configuration'.format(
                    self.name))
        self._tunnel = bgtunnel.open(
            ssh_address=self._endpoint,
            ssh_user=ssh_tunnel['user'],
            ssh_port=int(ssh_tunnel.get('port', 22)),
            host_port=self._docker_port,
            silent=True,
            identity_file=ssh_tunnel['key'])

        # Make sure we use https through the tunnel, if tls is enabled
        proto = "https" if (tls or tls_verify) else "http"
        self._backend_url = '{:s}://localhost:{:d}'.format(
            proto, self._tunnel.bind_port)

        # Apparently bgtunnel isn't always ready right away and this
        # drastically cuts down on the timeouts
        time.sleep(1)
    elif self._socket_path is not None:
        self._backend_url = 'unix://{:s}'.format(self._socket_path)
    else:
        proto = "https" if (tls or tls_verify) else "http"
        self._backend_url = '{:s}://{:s}:{:d}'.format(
            proto, self._endpoint, self._docker_port)

    self._tls = docker.tls.TLSConfig(
        verify=tls_verify,
        client_cert=(tls_cert, tls_key),
        ca_cert=tls_ca_cert,
        ssl_version=ssl_version) if tls else None

    self._backend = docker.Client(
        base_url=self._backend_url,
        version=str(api_version or Ship.DEFAULT_API_VERSION),
        timeout=timeout or Ship.DEFAULT_DOCKER_TIMEOUT,
        tls=self._tls)
def collect(self):
    def print_metric(cc, name):
        data = cc.stats(name)
        metrics = json.loads(next(data))

        # Use locals throughout so concurrently running threads don't
        # clobber shared instance state.

        # memory metrics
        memory = self.flatten_dict(metrics['memory_stats'])
        for key, value in memory.items():
            self.publish_counter(name + ".memory." + key, value)

        # cpu metrics
        cpu = self.flatten_dict(metrics['cpu_stats'])
        for key, value in cpu.items():
            if isinstance(value, int):
                self.publish_counter(name + ".cpu." + key, value)
            # percpu_usage is a list: publish one metric per cpu
            if isinstance(value, list):
                for i in range(len(value)):
                    self.publish_counter(name + ".cpu." + key + str(i),
                                         value[i])

        # network metrics
        network = self.flatten_dict(metrics['network'])
        for key, value in network.items():
            self.publish_counter(name + ".network." + key, value)

        # blkio metrics
        blkio = self.flatten_dict(metrics['blkio_stats'])
        for key, value in blkio.items():
            self.publish_counter(name + ".blkio." + key, value)

    cc = docker.Client(base_url='unix://var/run/docker.sock', version='auto')

    dockernames = [i['Names'] for i in cc.containers()]
    running_containers = len(cc.containers())
    all_containers = len(cc.containers(all=True))
    stopped_containers = all_containers - running_containers
    image_count = len(set(cc.images(quiet=True)))
    dangling_image_count = len(
        set(cc.images(quiet=True, all=True, filters={'dangling': True})))

    self.publish('counts.running', running_containers)
    self.publish('counts.stopped', stopped_containers)
    self.publish('counts.all_containers', all_containers)
    self.publish('counts.images', image_count)
    self.publish('counts.dangling_images', dangling_image_count)

    # Collect per-container stats concurrently; container names come back
    # with a leading '/', which is stripped before publishing.
    threads = []
    for dname in dockernames:
        t = threading.Thread(target=print_metric, args=(cc, dname[0][1:]))
        threads.append(t)
        t.start()
    for thread in threads:
        thread.join()
def docker_client():
    """Docker API client. Set version to 'auto' to always use the server
    version."""
    return docker.Client(base_url='unix://var/run/docker.sock',
                         version='auto')
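# Hypothetical usage of docker_client() above (illustration only, not from
# the source): confirm the daemon is reachable before issuing further calls.
client = docker_client()
client.ping()                           # raises on connection failure
print(client.version()['ApiVersion'])   # API version negotiated via 'auto'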
import resource
print resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
"""

# CONSTANTS
socket = 'unix://var/run/docker.sock'
version = '1.12'
timeout = 10
# image='fedora'
image = 'docker-skimage'

# DEBUG
# print "Create Client"
import pdb

c = docker.Client(base_url=socket, version=version, timeout=timeout)

# removed parameter memswap_limit, present in docker-py README.
# ERROR - unexpected keyword argument
# TODO - verify
# the code in sample_code returns in kilobytes.
# mem_limit accepts values in bytes, for ex - this container has 5 MB
container = c.create_container(
    image,
    command='python',
    hostname=None,
    user=None,
    detach=False,
    stdin_open=True,
    tty=False,
import docker
from os import getenv
from multiprocessing import cpu_count
from random import sample as random_sample
from collections import Counter

from .. import settings

client = docker.Client(base_url=settings.DOCKER_HOST)


def find_container(name):
    # prepend / to name
    name = f'/{name}'
    containers = client.containers(all=True)
    containers = [c for c in containers if name in c['Names']]
    if not containers:
        return None
    return containers[0]


def list_containers(all=True):
    client = docker.Client()
    return client.containers(all=all)


def translate_host_basedir(path):
    # TODO: if container is created with a custom hostname this will not work
    # improve self id detection in the future.
    self_id = getenv('HOSTNAME')
    self_container = client.containers(filters={'id': self_id})[0]
def remediate(target_id, results_dir):
    # Class docker.Client was renamed to docker.APIClient in
    # python-docker-py 2.0.0.
    try:
        client = docker.APIClient()
    except AttributeError:
        client = docker.Client()

    try:
        client.ping()
    except requests.exceptions.ConnectionError as e:
        raise RuntimeError(
            "The Docker daemon does not appear to be running: {}.\n".format(e))

    print("Remediating target {}.".format(target_id))

    temp_dir = tempfile.mkdtemp()
    fix_script = os.path.join(results_dir, target_id, "fix.sh")
    try:
        shutil.copy(fix_script, temp_dir)
    except IOError as e:
        raise RuntimeError(
            "Can't find a remediation for given image: {}.\n".format(e))

    # Finds a platform CPE in the ARF results file and based on it selects
    # the proper package manager and its cleanup command. Applying the
    # cleanup command after the fix script produces smaller images after
    # remediation. In case a platform CPE is not found in the ARF results
    # file, the cleanup command is left empty.
    pkg_clean_cmd = ""
    platform_cpe = ""  # initialized so the checks below can't hit an unbound name
    arf_results = os.path.join(results_dir, target_id, "arf.xml")
    try:
        tree = ET.parse(arf_results)
        root = tree.getroot()
    except FileNotFoundError as e:
        raise RuntimeError(e)
    try:
        ns = "http://checklists.nist.gov/xccdf/1.2"
        platform_cpe = root.find(
            ".//{%s}TestResult/{%s}platform" % (ns, ns)).attrib['idref']
    except AttributeError:
        pass
    if "fedora" in platform_cpe:
        pkg_clean_cmd = "; dnf clean all"
    elif "redhat" in platform_cpe:
        try:
            distro_version = int(re.search(r"\d+", platform_cpe).group(0))
        except AttributeError:
            # In case it is not possible to extract the RHEL version, use yum.
            distro_version = 7
        if distro_version >= 8:
            pkg_clean_cmd = "; dnf clean all"
        else:
            pkg_clean_cmd = "; yum clean all"
    elif "debian" in platform_cpe:
        pkg_clean_cmd = "; apt-get clean; rm -rf /var/lib/apt/lists/*"
    elif "ubuntu" in platform_cpe:
        pkg_clean_cmd = "; apt-get clean; rm -rf /var/lib/apt/lists/*"

    try:
        dockerfile_path = os.path.join(temp_dir, "Dockerfile")
        with open(dockerfile_path, "w") as f:
            f.write("FROM " + target_id + "\n")
            f.write("COPY fix.sh /\n")
            # Let's ignore any errors from package cleanup.
            # It may fail if the system has no connectivity
            # or doesn't have a subscription.
            f.write("RUN chmod +x /fix.sh; /fix.sh {}; true\n".format(
                pkg_clean_cmd))
        try:
            build_output_generator = client.build(
                path=temp_dir,
                # don't use image cache to ensure that original image
                # is always remediated
                nocache=True,
                # remove intermediate containers spawned during build
                rm=True)
        except docker.errors.APIError as e:
            raise RuntimeError("Docker exception: {}\n".format(e))

        build_output = []
        for item in build_output_generator:
            item_dict = json.loads(item.decode("utf-8"))
            if "error" in item_dict:
                raise RuntimeError("Error during Docker build {}\n".format(
                    item_dict["error"]))
            try:
                sys.stdout.write(item_dict["stream"])
                build_output.append(item_dict["stream"])
            except KeyError:
                # Skip empty items of build_output_generator.
                pass
        image_id = build_output[-1].split()[-1]
        print("Successfully built remediated image {} from {}.\n".format(
            image_id, target_id))
    except RuntimeError as e:
        raise RuntimeError(
            "Cannot build remediated image from {}: {}\n".format(target_id, e))
    finally:
        shutil.rmtree(temp_dir)
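# A reusable variant of the try/except fallback at the top of remediate()
# (a sketch assuming both docker-py generations must be supported; the
# helper name get_api_client is hypothetical, not from the source):
def get_api_client(**kwargs):
    # docker-py >= 2.0 renamed Client to APIClient; prefer the new name.
    cls = getattr(docker, 'APIClient', None) or docker.Client
    return cls(**kwargs)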
# a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.

import datetime

import docker

expires = dict(hours=12)

client = docker.Client(base_url='unix://var/run/docker.sock',
                       version='auto', timeout=10)
containers = client.containers(all=True,
                               filters={'status': ['exited', 'dead']})
for cont in containers:
    insp = client.inspect_container(cont)
    finished_at = insp['State']['FinishedAt'].split(".")[0]
    if cont["Status"] == "Dead":
        print(insp['Id'])
        continue
    if finished_at.startswith("00"):
        # Containers that never ran report a zeroed timestamp; skip them.
        continue
    dt = datetime.datetime.strptime(finished_at, "%Y-%m-%dT%H:%M:%S")
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import sys

import docker
import docker.errors

from pycalico.ipam import IPAMClient
from pycalico.datastore import (ETCD_AUTHORITY_ENV,
                                ETCD_AUTHORITY_DEFAULT,
                                ETCD_SCHEME_ENV,
                                ETCD_SCHEME_DEFAULT,
                                ETCD_KEY_FILE_ENV,
                                ETCD_CERT_FILE_ENV,
                                ETCD_CA_CERT_FILE_ENV,
                                DataStoreError)
from utils import DOCKER_VERSION
from utils import print_paragraph
from pycalico.util import validate_hostname_port

try:
    client = IPAMClient()
except DataStoreError as e:
    print_paragraph(e.message)
    sys.exit(1)

DOCKER_URL = os.getenv("DOCKER_HOST", "unix://var/run/docker.sock")
docker_client = docker.Client(version=DOCKER_VERSION, base_url=DOCKER_URL)
def host_flood(count, tag, name, env_vars, limit, image, network_mode,
               criteria, rhsm_log_dir):
    client = docker.Client(version='1.22')  # docker.from_env()
    num = 1
    containers = deque()
    # create our base volume bind
    binds = {'/dev/log': {'bind': '/dev/log', 'mode': 'rw'}}
    # allow for local storage of rhsm logs
    if rhsm_log_dir:
        rhsm_log_dir = '' if rhsm_log_dir == '.' else rhsm_log_dir
        if not os.path.isabs(rhsm_log_dir):
            rhsm_log_dir = os.path.abspath(rhsm_log_dir)
        if not os.path.isdir(rhsm_log_dir):
            os.makedirs(rhsm_log_dir)

    while num < count or containers:
        if len(containers) < limit and num <= count:  # check if queue is full
            local_file = None
            if rhsm_log_dir:
                # create our log bind
                local_file = '{}/{}{}.log'.format(rhsm_log_dir, name, num)
                with open(local_file, 'w'):
                    pass
                binds[local_file] = {
                    'bind': '/var/log/rhsm/rhsm.log',
                    'mode': 'rw'
                }
            hostname = '{0}{1}'.format(name, num)
            container = client.create_container(
                image='{0}:{1}'.format(image, tag),
                hostname=hostname,
                detach=False,
                environment=env_vars,
                host_config=client.create_host_config(binds=binds),
            )
            # destroy the bind for this host, for the next one
            if binds.get(local_file or None):
                del binds[local_file]
            containers.append({'container': container, 'name': hostname})
            client.start(container=container, network_mode=network_mode)
            logging.info('Created: {0}'.format(hostname))
            num += 1

        logs = client.logs(containers[0]['container']['Id'])
        if criteria == 'reg':
            if 'system has been registered'.encode() in logs:
                rm_container(client, containers)
            elif 'no enabled repos'.encode() in logs:
                rm_container(
                    client, containers,
                    'No repos enabled. Check registration/subscription status.',
                )
        elif criteria == 'age':
            if 'Complete!'.encode() in logs:
                rm_container(client, containers)
            elif 'no enabled repos'.encode() in logs:
                rm_container(
                    client, containers,
                    'No repos enabled. Check registration/subscription status.',
                )
            elif 'No package katello-agent available'.encode() in logs:
                rm_container(client, containers, 'katello-agent not found.')
        else:
            if 'No package katello-agent available'.encode() in logs:
                rm_container(client, containers, 'katello-agent not found.')
            elif 'no enabled repos'.encode() in logs:
                rm_container(
                    client, containers,
                    'No repos enabled. Check registration/subscription status.',
                )
            elif time.time() - containers[0].get('delay', time.time()) >= criteria:
                rm_container(client, containers)
            elif not containers[0].get('delay', False) and 'Complete!'.encode() in logs:
                containers[0]['delay'] = time.time()
            elif (client.inspect_container(
                    containers[0]['container']['Id'])['State']['Status'] !=
                    u'running'):
                rm_container(client, containers)
def virt_flood(tag, limit, image, name, env_vars, network_mode, hypervisors,
               guests):
    virt_data, guest_list = gen_json(hypervisors, guests)
    with open('/tmp/temp.json', 'w') as f:
        json.dump(virt_data, f)
    client = docker.Client(version='1.22')
    temphost = 'meeseeks-{}'.format(str(uuid.uuid4()))
    logging.info(
        "Submitting virt-who report. Note: this will create a host: '{}'."
        .format(temphost))
    client.pull('jacobcallahan/genvirt')
    container = client.create_container(
        image='jacobcallahan/genvirt',
        hostname=temphost,
        detach=False,
        environment=env_vars,
        volumes='/tmp/temp.json',
        host_config=client.create_host_config(
            binds={'/tmp/temp.json': {
                'bind': '/tmp/temp.json',
                'mode': 'ro'
            }}),
    )
    client.start(container=container, network_mode=network_mode)
    while 'Done!'.encode() not in client.logs(container):
        time.sleep(2)
    client.remove_container(container, v=True, force=True)
    os.remove('/tmp/temp.json')

    if sys.version_info.major < 3:
        _ = raw_input(
            "Pausing for you to attach subscriptions to the new hypervisors.")
    else:
        _ = input(
            "Pausing for you to attach subscriptions to the new hypervisors.")

    logging.info("Starting guest creation.")
    active_hosts = []
    while guest_list or active_hosts:
        if guest_list and len(active_hosts) < limit:
            guest = guest_list.pop(0)
            hostname = '{}{}'.format(name, guest.split('-')[4])
            container = client.create_container(
                image='{0}:{1}'.format(image, tag),
                hostname=hostname,
                detach=False,
                environment=merge_dicts(env_vars, {'UUID': guest}),
            )
            active_hosts.append({'container': container, 'name': hostname})
            client.start(container=container, network_mode=network_mode)
            logging.info('Created Guest: {}. {} left in queue.'.format(
                hostname, len(guest_list)))

        logs = client.logs(active_hosts[0]['container']['Id'])
        # We'll wait for 30 seconds after attempting to auto-attach
        if 'no enabled repos'.encode() in logs:
            rm_container(client, active_hosts)
        elif 'No package katello-agent available'.encode() in logs:
            rm_container(client, active_hosts)
        elif time.time() - active_hosts[0].get('delay', time.time()) >= 30:
            rm_container(client, active_hosts)
        elif not active_hosts[0].get('delay', False) and 'auto-attach'.encode() in logs:
            active_hosts[0]['delay'] = time.time()
        elif (client.inspect_container(
                active_hosts[0]['container']['Id'])['State']['Status'] !=
                u'running'):
            rm_container(client, active_hosts)
def list_groups():
    hosts = setup()
    groups = defaultdict(list)
    hostvars = defaultdict(dict)

    for host in hosts:
        ssh_port = host.pop('private_ssh_port', None)
        default_ip = host.pop('default_ip', None)
        hostname = host.get('base_url')

        try:
            client = docker.Client(**host)
            containers = client.containers(all=True)
        except (HTTPError, ConnectionError) as e:
            write_stderr(e)
            sys.exit(1)

        for container in containers:
            id = container.get('Id')
            short_id = id[:13]
            try:
                name = container.get('Names', list()).pop(0).lstrip('/')
            except IndexError:
                name = short_id
            if not id:
                continue
            inspect = client.inspect_container(id)
            running = inspect.get('State', dict()).get('Running')
            groups[id].append(name)
            groups[name].append(name)
            if short_id not in groups.keys():
                groups[short_id].append(name)
            groups[hostname].append(name)

            if running is True:
                groups['running'].append(name)
            else:
                groups['stopped'].append(name)

            try:
                port = client.port(container, ssh_port)[0]
            except (IndexError, AttributeError, TypeError):
                port = dict()

            try:
                ip = default_ip if port['HostIp'] == '0.0.0.0' \
                    else port['HostIp']
            except KeyError:
                ip = ''

            container_info = dict(
                ansible_ssh_host=ip,
                ansible_ssh_port=port.get('HostPort', int()),
                docker_args=inspect.get('Args'),
                docker_config=inspect.get('Config'),
                docker_created=inspect.get('Created'),
                docker_driver=inspect.get('Driver'),
                docker_exec_driver=inspect.get('ExecDriver'),
                docker_host_config=inspect.get('HostConfig'),
                docker_hostname_path=inspect.get('HostnamePath'),
                docker_hosts_path=inspect.get('HostsPath'),
                docker_id=inspect.get('ID'),
                docker_image=inspect.get('Image'),
                docker_name=name,
                docker_network_settings=inspect.get('NetworkSettings'),
                docker_path=inspect.get('Path'),
                docker_resolv_conf_path=inspect.get('ResolvConfPath'),
                docker_state=inspect.get('State'),
                docker_volumes=inspect.get('Volumes'),
                docker_volumes_rw=inspect.get('VolumesRW'),
            )
            hostvars[name].update(container_info)

    groups['docker_hosts'] = [host.get('base_url') for host in hosts]
    groups['_meta'] = dict()
    groups['_meta']['hostvars'] = hostvars
    print(json.dumps(groups, sort_keys=True, indent=4))
    sys.exit(0)
def list_containers(all=True):
    client = docker.Client()
    return client.containers(all=all)
def main(args):
    parser = argparse.ArgumentParser(
        description=('Start a new Jenkins slave in a docker container. '
                     'Output from the slave jar is written to stdout and '
                     'input to the slave is received on stdin. '
                     'Messages related to managing the slave container are '
                     'written to stderr.'))
    parser.add_argument('--image',
                        help='Docker image to launch the slave in.',
                        required=True, type=decode_arg)
    parser.add_argument('--name',
                        help='Name of the job. This will become the docker '
                             'container name.',
                        required=True, type=decode_arg)
    parser.add_argument('--clean',
                        help=('Always create a new container. '
                              'The default is to reuse a previous job '
                              'container if it exists and the options are '
                              'the same.'),
                        action='store_true')
    parser.add_argument('--env', '-e',
                        help='Environment variable to set in the container.',
                        metavar='NAME=VALUE', action='append',
                        dest='environment', type=env_var, default=[])
    parser.add_argument('--volume', '-v',
                        help=('Bind a directory from the host machine into '
                              'the job container. Access can be "ro" or '
                              '"rw". Default is "ro".'),
                        metavar='/host:/container[:access]', action='append',
                        dest='volumes', type=volume, default=[])
    options = parser.parse_args(args)

    install_dir = os.path.dirname(os.path.abspath(__file__))
    slave_dir = os.path.join(install_dir, 'slave')
    with open(os.path.join(slave_dir, 'properties.sh')) as fh:
        slave_config = env_to_map(fh.readlines())
    server_address = slave_config['CONNECT_ADDRESS']
    server_port = int(slave_config['CONNECT_PORT'])

    container_name = encode_container_name(options.name)
    message('Creating slave container for job "{}" (container={})'.format(
        options.name, container_name))

    # TODO override docker url in configuration
    # TODO use minimum possible API version?
    docker_client = docker.Client(base_url='unix://var/run/docker.sock',
                                  version='1.15')

    # Pull the image so we have the latest version locally
    pull_job_image(docker_client, options.image)

    # Check if container exists or needs to be updated
    container_info = find_job_container(docker_client, container_name)

    # Append AWS_ACCOUNT_ID if not already set
    if not any(env.startswith('AWS_ACCOUNT_ID=')
               for env in options.environment):
        account_id = subprocess.getoutput(
            'aws sts get-caller-identity --query "Account" --output text')
        options.environment.append("AWS_ACCOUNT_ID={}".format(account_id))

    create_container = True
    create_opts = {
        'image': options.image,
        'name': container_name,
        # Include a hash of the init file in the command. The hash is not
        # actually used by the launch script, but only included to ensure the
        # command changes when the init script changes. This ensures that an
        # init script change will cause the container to be recreated.
        'command': ['/bin/bash', install_dir + '/launch_slave.sh',
                    hash_file(slave_dir + '/init_slave.sh')],
        'volumes': [install_dir] + [v['container'] for v in options.volumes],
        'environment': options.environment
    }
    start_opts = {
        'container': None,  # container id; set later
        'binds': dict(
            {v['host']: {'bind': v['container'], 'ro': v['ro']}
             for v in options.volumes},
            **{slave_dir: {'bind': install_dir, 'ro': True}})
    }

    if container_info is None:
        message('No existing container found. '
                'Will create new container for job "{}"'.format(options.name))
    else:
        create_container = options.clean or \
            container_changed(docker_client, container_info, create_opts)
        if create_container:
            message('Deleting old container {} for job "{}"'.format(
                container_info['Id'], options.name))
            docker_client.remove_container(container_info['Id'], v=True,
                                           force=True)
        else:
            message('Reusing existing container {} for job "{}"'.format(
                container_info['Id'], options.name))
            start_opts['container'] = container_info['Id']

    if create_container:
        message('Creating container: {}'.format(create_opts))
        create_result = docker_client.create_container(**create_opts)
        start_opts['container'] = create_result['Id']
        for warning in create_result.get('Warnings') or []:
            message('Warning: {}'.format(warning))
    else:
        # Kill the container, if it is currently running
        docker_client.kill(start_opts['container'])

    server = create_server(server_address, server_port)

    message('Starting container: {}'.format(start_opts))
    docker_client.start(**start_opts)

    try:
        run_server(server)
    finally:
        if options.clean:
            message('Deleting container {} for job "{}"'.format(
                start_opts['container'], options.name))
            docker_client.remove_container(start_opts['container'], v=True,
                                           force=True)
        else:
            message('Stopping container {} for job "{}"'.format(
                start_opts['container'], options.name))
            docker_client.kill(start_opts['container'])
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""

from __future__ import print_function

import docker
import json
import logging
import os
import re
import tarfile
import tempfile
import time

try:
    d = docker.Client(version="1.22")
except AttributeError:
    # docker-py >= 2.0 removed Client; fall back to APIClient.
    base_url = os.getenv('DOCKER_HOST', 'unix://var/run/docker.sock')
    d = docker.APIClient(base_url=base_url, version="1.22")


class ExecException(Exception):
    def __init__(self, message, output=None):
        super(ExecException, self).__init__(message)
        self.output = output


class Container(object):
    """
    Object representing a docker test container, it is used in tests
    """
def docker(self):
    if self._docker is None:
        self._docker = docker.Client(base_url=docker_daemon_url)
    return self._docker
def main(arguments):
    args = parse_arguments(arguments)
    setup_logging(args)
    run(args, docker.Client(version='1.14'))
def setUp(self):
    self.client = docker.Client(version="1.6")
    self.client.pull('busybox')
    self.tmp_imgs = []
    self.tmp_containers = []
def stop_container(container_id):
    print('stopping container with id {}'.format(container_id))
    dockerclient = docker.Client()
    dockerclient.stop(container_id)
    print('container stopped')
logging.debug("Start - debug") #logging.info("Start - info") #logging.error("Start - error") #client = docker.from_env(assert_hostname=False) #print (client.version()) # set paths to certs for tls # set base url tls_config = docker.tls.TLSConfig \ (client_cert= \ ('C:\\Users\\trota\\.docker\\machine\\certs\\cert.pem', \ 'C:\\Users\\trota\\.docker\\machine\\certs\\key.pem'), \ verify='C:\\Users\\trota\\.docker\\machine\\certs\\ca.pem' \ ) client = docker.Client(base_url='https://192.168.99.100:2376', tls=tls_config) # print some information about the environment info = client.info() pp = pprint.PrettyPrinter(indent=4) print(" ") print("****************Info********************") print(" ") pp.pprint(info) print(" ") #print(" ") #print("*************Images****************") #print(" ") #pp.pprint(client.images()) #print(" ")
def get_authenticated_cli(userId, registry, registry_creds=[]):
    global docker_cli_unauth, docker_clis

    logger.debug(
        "DOCKER CLI: entering auth cli create/fetch for input user/registry: "
        + str(userId) + " / " + str(registry))

    localconfig = anchore_engine.configuration.localconfig.get_config()

    if not userId:
        if not docker_cli_unauth:
            docker_cli_unauth = docker.Client(
                base_url=localconfig['docker_conn'],
                version='auto',
                timeout=int(localconfig['docker_conn_timeout']))
        logger.debug("DOCKER CLI: returning unauth client")
        return docker_cli_unauth

    if userId in docker_clis and registry in docker_clis[userId]:
        if ('registry_creds' in docker_clis[userId][registry] and
                registry_creds ==
                docker_clis[userId][registry]['registry_creds']):
            logger.debug("DOCKER CLI: found existing authenticated CLI")
            return docker_clis[userId][registry]['cli']
        else:
            logger.debug("DOCKER CLI: detected cred change, will refresh CLI")

    logger.debug("DOCKER CLI: making new auth CLI for user/registry: " +
                 str(userId) + " / " + str(registry))

    try:
        if userId not in docker_clis:
            docker_clis[userId] = {}
        if registry not in docker_clis[userId]:
            docker_clis[userId][registry] = {}

        user = pw = None
        for registry_record in registry_creds:
            if registry_record['registry'] == registry:
                user, pw = anchore_engine.auth.common.get_docker_registry_userpw(
                    registry_record)

        if not user or not pw:
            logger.debug("DOCKER CLI: making unauth CLI")
            docker_clis[userId][registry]['cli'] = docker.Client(
                base_url=localconfig['docker_conn'],
                version='auto',
                timeout=int(localconfig['docker_conn_timeout']))
            docker_clis[userId][registry]['registry_creds'] = []
        else:
            logger.debug("DOCKER CLI: making auth CLI")
            try:
                cli = docker.Client(
                    base_url=localconfig['docker_conn'],
                    version='auto',
                    timeout=int(localconfig['docker_conn_timeout']))
                rc = cli.login(user, password=pw, registry=registry,
                               reauth=False)
                docker_clis[userId][registry]['cli'] = cli
                docker_clis[userId][registry]['registry_creds'] = registry_creds
            except Exception as err:
                logger.error("DOCKER CLI auth err: " + str(err))
                raise err
    except Exception as err:
        logger.error("DOCKER CLI: unable to get docker cli - exception: " +
                     str(err))
        raise err

    if userId in docker_clis and registry in docker_clis[userId]:
        logger.debug("DOCKER CLI: returning auth client")
        return docker_clis[userId][registry]['cli']

    logger.error(
        "DOCKER CLI: unable to complete authenticated client create/fetch")
    raise Exception(
        "DOCKER CLI: unable to complete authenticated client create/fetch")
def setUp(self):
    self.client = docker.Client(base_url='unix://var/run/docker.sock',
                                version='1.12', timeout=10)
    self.addTypeEqualityFunc(dict, 'assertDictionariesSubset')
def delete_postgres_instance(container_id):
    c = docker.Client(base_url='unix://var/run/docker.sock', timeout=30,
                      version='auto')
    c.remove_container(container_id, force=True)
def docker():
    return libdocker.Client(version='auto')
consul = os.environ['CONSULT_ROOT']

# tls stays False (docker-py's default) unless DOCKER_TLS_VERIFY is set,
# so docker.Client below never sees an unbound tls_config.
tls_config = False
if os.environ.get('DOCKER_TLS_VERIFY') == "1":
    tls_config = docker.tls.TLSConfig(
        client_cert=(os.environ['DOCKER_CERT_PATH'] + '/cert.pem',
                     os.environ['DOCKER_CERT_PATH'] + '/key.pem'),
        verify=False
    )
    base_url = re.sub("^tcp", "https", os.environ['DOCKER_HOST'])
else:
    if "DOCKER_HOST" in os.environ.keys():
        base_url = re.sub("^tcp", "http", os.environ['DOCKER_HOST'])
    else:
        base_url = ""

client = docker.Client(base_url=base_url, tls=tls_config)

previous = []
while True:
    root = "test/" + str(time.time())
    kv = {}
    rules = {}
    nb_server = {}
    print("checking")
    for dock in client.containers(filters={"label": "traefik.backend"}):
        # traefik.enable=false: disable this container in traefik
        if "traefik.enable" in dock["Labels"].keys():
            if dock["Labels"]["traefik.enable"] == "false":
def __init__(self, config, state_factory=None, network=None,
             docker_client=None):
    self.config = config
    self.state_factory = state_factory or BlockadeStateFactory()
    self.network = network or BlockadeNetwork(config)
    self.docker_client = docker_client or docker.Client()
client.images()
client.close()
del client

assert len(w) == 0, \
    "No warnings produced: {0}".format(w[0].message)


####################
# REGRESSION TESTS #
####################

class TestRegressions(unittest.TestCase):
    def setUp(self):
        self.client = docker.client.Client(timeout=5)

    def test_443(self):
        dfile = io.BytesIO()
        with self.assertRaises(docker.errors.APIError) as exc:
            for line in self.client.build(fileobj=dfile, tag="a/b/c"):
                pass
        self.assertEqual(exc.exception.response.status_code, 500)
        dfile.close()


if __name__ == '__main__':
    c = docker.Client(base_url=DEFAULT_BASE_URL)
    c.pull('busybox')
    c.close()
    unittest.main()