def __init__(self):
    """Load config, connect to redis, and create the docker client.

    Reads settings via self._load_config() (defined elsewhere on the
    class) and seeds the redis 'next_client' counter on first run.
    """
    config = self._load_config()

    self.REDIS_HOST = config['redis_host']
    self.PYWB_HOST = config['pywb_host']
    self.C_EXPIRE_TIME = config['container_expire_secs']
    self.Q_EXPIRE_TIME = config['queue_expire_secs']
    self.REMOVE_EXP_TIME = config['remove_expired_secs']
    self.VERSION = config['api_version']
    self.VNC_PORT = config['vnc_port']
    self.CMD_PORT = config['cmd_port']
    self.image_prefix = config['image_prefix']
    self.browsers = config['browsers']

    self.redis = redis.StrictRedis(host=self.REDIS_HOST)
    # setnx: initialize the counter only if it does not already exist.
    self.redis.setnx('next_client', '1')

    if os.path.exists('/var/run/docker.sock'):
        # Local daemon via its unix socket.
        self.cli = Client(base_url='unix://var/run/docker.sock',
                          version=self.VERSION)
    else:
        # Remote daemon (docker-machine/boot2docker): settings come from
        # the DOCKER_* env vars.  Pass assert_hostname=False to
        # kwargs_from_env instead of poking kwargs['tls'] directly --
        # the 'tls' key is absent when DOCKER_TLS_VERIFY is unset, which
        # made the original raise KeyError.
        kwargs = kwargs_from_env(assert_hostname=False)
        kwargs['version'] = self.VERSION
        self.cli = Client(**kwargs)
def run(self, ssl_env_name='STUNNEL_SSL'):
    """ Run a container scan for a variable containing a certificate env dictionary

    Args:
        ssl_env_name(string): The string containing the certificate env
    """
    cli = Client(base_url='unix://var/run/docker.sock')
    for container in cli.containers():
        cid = container.get('Id')
        details = cli.inspect_container(cid)
        envs = details.get('Config').get('Env')
        matches = [entry for entry in envs if ssl_env_name in entry]
        if not matches:
            continue
        # Everything after the first '=' is the JSON payload holding the cert.
        payload = matches[0].split('=', 1)[1]
        raw_ssl = json.loads(payload).get('cert')
        cert = c.load_certificate(c.FILETYPE_PEM, raw_ssl)
        not_after = cert.get_notAfter()
        not_after_date = self.get_cert_time(not_after)
        has_expired = cert.has_expired()
        signature_algorithm = cert.get_signature_algorithm()
        self.logger.info("Found stunnel container envs", extra={
            'notAfter': '{}'.format(not_after),
            'notAfterDate': '{}'.format(not_after_date),
            'hasExpired': '{}'.format(has_expired),
            'containerId': '{}'.format(cid),
            'signatureAlgorithm': '{}'.format(signature_algorithm)
        })
def docker_client(args):
    """ Attempts to create a docker client.

    - args: The arguments parsed on the command line.
    - returns: a docker-py client
    """
    if _platform == 'linux' or _platform == 'linux2':
        # linux
        if "docker_url" in args:
            return Client(base_url=args.docker_url,
                          timeout=args.timeout,
                          version='auto')
        else:
            # TODO: test to see if this does the right thing by default.
            return Client(version='auto',
                          timeout=args.timeout,
                          **kwargs_from_env())
    elif _platform == 'darwin':
        # OS X - Assume boot2docker, and pull from that environment.
        kwargs = kwargs_from_env()
        if len(kwargs) == 0:
            logging.error('Could not correctly pull in docker environment. '
                          'Try running: eval "$(docker-machine env default)"')
            sys.exit(2)
        # FIX: 'tls' is only present when DOCKER_TLS_VERIFY is set; the
        # original indexed it unconditionally and raised KeyError for
        # plain (non-TLS) docker-machine setups.
        if not args.strict_docker_tls and 'tls' in kwargs:
            kwargs['tls'].assert_hostname = False
        return Client(version='auto', timeout=args.timeout, **kwargs)
    elif _platform == 'win32' or _platform == 'cygwin':
        # Windows.
        logging.fatal("Sorry, windows is not currently supported!")
        sys.exit(2)
def download_docker_image(docker_image, target_file, cache=None): try: from docker.client import Client from docker.utils import kwargs_from_env kwargs = kwargs_from_env() kwargs['tls'].assert_hostname = False docker_cli = Client(**kwargs) image = docker_cli.get_image(docker_image) image_tar = open(target_file,'w') image_tar.write(image.data) image_tar.close() except Exception as e: if cache is not None: cached_file = os.path.join(cache, docker_image.lower().replace('/','-').replace(':','-') + '.tgz') if os.path.isfile(cached_file): print 'using cached version of', docker_image urllib.urlretrieve(cached_file, target_file) return print >> sys.stderr, docker_image, 'not found in cache', cache sys.exit(1) if isinstance(e, KeyError): print >> sys.stderr, 'docker not configured on this machine (or environment variables are not properly set)' else: print >> sys.stderr, docker_image, 'not found on local machine' print >> sys.stderr, 'you must either pull the image, or download it and use the --docker-cache option' sys.exit(1)
def docker_client(args):
    """ Attempts to create a docker client.

    - args: The arguments parsed on the command line.
    - returns: a docker-py client
    """
    if _platform == 'linux' or _platform == 'linux2':
        # linux
        if "docker_url" in args:
            return Client(base_url=args.docker_url)
        else:
            # TODO: test to see if this does the right thing by default.
            return Client(**kwargs_from_env())
    elif _platform == 'darwin':
        # OS X - Assume boot2docker, and pull from that environment.
        kwargs = kwargs_from_env()
        # FIX: 'tls' is only present when DOCKER_TLS_VERIFY is set; the
        # original indexed it unconditionally and raised KeyError for
        # non-TLS environments.
        if not args.strict_docker_tls and 'tls' in kwargs:
            kwargs['tls'].assert_hostname = False
        return Client(**kwargs)
    elif _platform == 'win32' or _platform == 'cygwin':
        # Windows.
        logging.fatal("Sorry, windows is not currently supported!")
        sys.exit(2)
def docker_context():
    """Make a docker context"""
    host = os.environ.get('DOCKER_HOST')
    cert_path = os.environ.get('DOCKER_CERT_PATH')
    tls_verify = os.environ.get('DOCKER_TLS_VERIFY')

    options = {"timeout": 60}
    if host:
        # TLS verification requires the https scheme.
        options['base_url'] = host.replace('tcp://', 'https://') if tls_verify else host
    if tls_verify and cert_path:
        client_cert = (os.path.join(cert_path, 'cert.pem'),
                       os.path.join(cert_path, 'key.pem'))
        options['tls'] = docker.tls.TLSConfig(
            verify=True,
            ca_cert=os.path.join(cert_path, 'ca.pem'),
            client_cert=client_cert,
            ssl_version=ssl.PROTOCOL_TLSv1,
            assert_hostname=False,
        )

    client = DockerClient(**options)
    try:
        info = client.info()
        log.info("Connected to docker daemon\tdriver=%s\tkernel=%s",
                 info["Driver"], info["KernelVersion"])
    except (requests.exceptions.ConnectionError,
            requests.exceptions.Timeout) as error:
        raise BadDockerConnection(base_url=options['base_url'], error=error)
    return client
def docker_context():
    """Make a docker context"""
    # Connection settings come from the standard DOCKER_* env vars.
    host = os.environ.get('DOCKER_HOST')
    cert_path = os.environ.get('DOCKER_CERT_PATH')
    tls_verify = os.environ.get('DOCKER_TLS_VERIFY')
    options = {"timeout": 60}
    if host:
        # TLS verification requires the https scheme.
        options['base_url'] = (host.replace('tcp://', 'https://')
                               if tls_verify else host)
    if tls_verify and cert_path:
        # Client cert + CA are expected at the conventional filenames
        # inside DOCKER_CERT_PATH.
        options['tls'] = docker.tls.TLSConfig(
            verify=True,
            ca_cert=os.path.join(cert_path, 'ca.pem'),
            client_cert=(os.path.join(cert_path, 'cert.pem'),
                         os.path.join(cert_path, 'key.pem')),
            ssl_version=ssl.PROTOCOL_TLSv1,
            assert_hostname=False)
    client = DockerClient(**options)
    try:
        # A successful info() call proves connectivity.
        info = client.info()
        log.info("Connected to docker daemon\tdriver=%s\tkernel=%s",
                 info["Driver"], info["KernelVersion"])
    except (requests.exceptions.ConnectionError,
            requests.exceptions.Timeout) as error:
        # NOTE(review): options['base_url'] raises KeyError here when
        # DOCKER_HOST was unset -- presumably the env is always set; verify.
        raise BadDockerConnection(base_url=options['base_url'], error=error)
    return client
def download_docker_image(docker_image, target_file, cache=None): try: from docker.client import Client from docker.utils import kwargs_from_env kwargs = kwargs_from_env() kwargs['tls'] = False docker_cli = Client(**kwargs) image = docker_cli.get_image(docker_image) image_tar = open(target_file,'w') image_tar.write(image.data) image_tar.close() except Exception as e: if cache is not None: cached_file = os.path.join(cache, docker_image.lower().replace('/','-').replace(':','-') + '.tgz') if os.path.isfile(cached_file): print 'using cached version of', docker_image urllib.urlretrieve(cached_file, target_file) return print >> sys.stderr, docker_image, 'not found in cache', cache sys.exit(1) if isinstance(e, KeyError): print >> sys.stderr, 'docker not configured on this machine (or environment variables are not properly set)' else: print >> sys.stderr, docker_image, 'not found on local machine' print >> sys.stderr, 'you must either pull the image, or download it and use the --docker-cache option' sys.exit(1)
def test_update_headers(self):
    # update_headers should merge the client's configured HttpHeaders
    # into whatever headers the wrapped function receives.
    sample_headers = {'X-Docker-Locale': 'en-US'}

    def passthrough(self, headers=None):
        return headers

    client = Client()
    client._auth_configs = {}
    wrapped = update_headers(passthrough)

    # No configured headers: input passes through untouched.
    assert wrapped(client, headers=None) is None
    assert wrapped(client, headers={}) == {}
    assert wrapped(client, headers={'Content-type': 'application/json'}) == {
        'Content-type': 'application/json',
    }

    client._auth_configs = {'HttpHeaders': sample_headers}

    # Configured headers are merged into the caller's headers.
    assert wrapped(client, headers=None) == sample_headers
    assert wrapped(client, headers={}) == sample_headers
    assert wrapped(client, headers={'Content-type': 'application/json'}) == {
        'Content-type': 'application/json',
        'X-Docker-Locale': 'en-US',
    }
def test_update_headers(self):
    # update_headers must merge the client's configured HttpHeaders into
    # the headers handed to the wrapped function.
    sample_headers = {
        'X-Docker-Locale': 'en-US',
    }

    def f(self, headers=None):
        # Identity function: returns the headers it was given.
        return headers

    client = Client()
    client._auth_configs = {}
    g = update_headers(f)
    # No configured headers: input passes through untouched.
    assert g(client, headers=None) is None
    assert g(client, headers={}) == {}
    assert g(client, headers={'Content-type': 'application/json'}) == {
        'Content-type': 'application/json',
    }
    client._auth_configs = {'HttpHeaders': sample_headers}
    # Configured headers are merged into the caller-supplied dict.
    assert g(client, headers=None) == sample_headers
    assert g(client, headers={}) == sample_headers
    assert g(client, headers={'Content-type': 'application/json'}) == {
        'Content-type': 'application/json',
        'X-Docker-Locale': 'en-US',
    }
def run(self, ssl_env_name='STUNNEL_SSL'):
    """ Run a container scan for a variable containing a certificate env dictionary

    Args:
        ssl_env_name(string): The string containing the certificate env
    """
    cli = Client(base_url='unix://var/run/docker.sock')
    for container in cli.containers():
        container_details = cli.inspect_container(container.get('Id'))
        container_envs = container_details.get('Config').get('Env')
        # Keep env entries whose text contains the marker name.
        env_ssl = [
            env for env in container_envs if ssl_env_name in env]
        if len(env_ssl) > 0:
            # Everything after the first '=' is a JSON dict with the cert.
            env_cert = env_ssl[0].split('=', 1)[1]
            env_json = json.loads(env_cert)
            raw_ssl = env_json.get('cert')
            cert = c.load_certificate(c.FILETYPE_PEM, raw_ssl)
            not_after = cert.get_notAfter()
            not_after_date = self.get_cert_time(not_after)
            has_expired = cert.has_expired()
            signature_algorithm = cert.get_signature_algorithm()
            # Emit one structured log record per matching container.
            self.logger.info("Found stunnel container envs",
                             extra={'notAfter': '{}'.format(not_after),
                                    'notAfterDate': '{}'.format(not_after_date),
                                    'hasExpired': '{}'.format(has_expired),
                                    'containerId': '{}'.format(container.get('Id')),
                                    'signatureAlgorithm': '{}'.format(signature_algorithm)})
def __init__(self):
    # Load config, index browsers by path, seed redis state, and create
    # the docker client.
    config = self._load_config()

    # Redis/pywb hostnames: env vars override the compose defaults.
    self.LOCAL_REDIS_HOST = "netcapsule_redis_1"
    self.REDIS_HOST = os.environ.get("REDIS_HOST", self.LOCAL_REDIS_HOST)
    self.PYWB_HOST = os.environ.get("PYWB_HOST", "netcapsule_pywb_1")

    # Expiry, versioning, port, and sizing settings from config.
    self.C_EXPIRE_TIME = config["init_container_expire_secs"]
    self.Q_EXPIRE_TIME = config["queue_expire_secs"]
    self.REMOVE_EXP_TIME = config["remove_expired_secs"]
    self.VERSION = config["api_version"]
    self.VNC_PORT = config["vnc_port"]
    self.CMD_PORT = config["cmd_port"]
    self.MAX_CONT = config["max_containers"]
    self.image_prefix = config["image_prefix"]

    self.browser_list = config["browsers"]
    self.browser_paths = {}
    # Index browsers by URL path; a duplicate path is a config error.
    for browser in self.browser_list:
        path = browser["path"]
        if path in self.browser_paths:
            raise Exception("Already a browser for path {0}".format(path))
        self.browser_paths[path] = browser

    self.default_browser = config["default_browser"]
    self.redirect_paths = config["redirect_paths"]

    # Optional list of random start pages; a missing file is non-fatal.
    self.randompages = []
    try:
        with open(config["random_page_file"]) as fh:
            self.randompages = list([line.rstrip() for line in fh])
    except Exception as e:
        print(e)

    self.redis = redis.StrictRedis(host=self.REDIS_HOST)

    # setnx: seed counters/limits only if they do not already exist.
    self.redis.setnx("next_client", "1")
    self.redis.setnx("max_containers", self.MAX_CONT)

    throttle_samples = config["throttle_samples"]
    self.redis.setnx("throttle_samples", throttle_samples)

    throttle_max_avg = config["throttle_max_avg"]
    self.redis.setnx("throttle_max_avg", throttle_max_avg)

    self.redis.setnx("container_expire_secs",
                     config["full_container_expire_secs"])

    self.T_EXPIRE_TIME = config["throttle_expire_secs"]

    # Prefer the local unix socket; otherwise use DOCKER_* env settings
    # (assert_hostname disabled for boot2docker-style certs).
    if os.path.exists("/var/run/docker.sock"):
        self.cli = Client(base_url="unix://var/run/docker.sock",
                          version=self.VERSION)
    else:
        kwargs = kwargs_from_env(assert_hostname=False)
        kwargs["version"] = self.VERSION
        self.cli = Client(**kwargs)
def _init_cli(self):
    """Create the docker client, preferring the local unix socket."""
    if os.path.exists('/var/run/docker.sock'):
        self.cli = Client(base_url='unix://var/run/docker.sock',
                          version=self.api_version)
        return
    # No local socket: fall back to DOCKER_* env configuration with
    # hostname assertion disabled.
    env_kwargs = kwargs_from_env(assert_hostname=False)
    env_kwargs['version'] = self.api_version
    self.cli = Client(**env_kwargs)
def download_docker_image(docker_image, target_file):
    """Export *docker_image* from the docker daemon into *target_file*."""
    from docker.client import Client
    from docker.utils import kwargs_from_env
    kwargs = kwargs_from_env()
    # FIX: 'tls' is only present when DOCKER_TLS_VERIFY is set; indexing
    # it unconditionally raised KeyError for non-TLS environments.
    if 'tls' in kwargs:
        kwargs['tls'].assert_hostname = False
    docker_cli = Client(**kwargs)
    image = docker_cli.get_image(docker_image)
    # FIX: the exported image is a binary tarball -- write in binary mode
    # ('wb'), and close the handle even on error via the context manager.
    with open(target_file, 'wb') as image_tar:
        image_tar.write(image.data)
def __init__(self):
    # Load config, index browsers by path, seed redis state, and create
    # the docker client.
    config = self._load_config()

    # Redis/pywb hostnames: env vars override the compose defaults.
    self.LOCAL_REDIS_HOST = 'netcapsule_redis_1'
    self.REDIS_HOST = os.environ.get('REDIS_HOST', self.LOCAL_REDIS_HOST)
    self.PYWB_HOST = os.environ.get('PYWB_HOST', 'netcapsule_pywb_1')

    # Expiry, versioning, port, and sizing settings from config.
    self.C_EXPIRE_TIME = config['init_container_expire_secs']
    self.Q_EXPIRE_TIME = config['queue_expire_secs']
    self.REMOVE_EXP_TIME = config['remove_expired_secs']
    self.VERSION = config['api_version']
    self.VNC_PORT = config['vnc_port']
    self.CMD_PORT = config['cmd_port']
    self.MAX_CONT = config['max_containers']
    self.image_prefix = config['image_prefix']

    self.browser_list = config['browsers']
    self.browser_paths = {}
    # Index browsers by URL path; a duplicate path is a config error.
    for browser in self.browser_list:
        path = browser['path']
        if path in self.browser_paths:
            raise Exception('Already a browser for path {0}'.format(path))
        self.browser_paths[path] = browser

    self.default_browser = config['default_browser']
    self.redirect_paths = config['redirect_paths']

    self.redis = redis.StrictRedis(host=self.REDIS_HOST)

    # setnx: seed counters/limits only if they do not already exist.
    self.redis.setnx('next_client', '1')
    self.redis.setnx('max_containers', self.MAX_CONT)

    throttle_samples = config['throttle_samples']
    self.redis.setnx('throttle_samples', throttle_samples)

    throttle_max_avg = config['throttle_max_avg']
    self.redis.setnx('throttle_max_avg', throttle_max_avg)

    self.redis.setnx('container_expire_secs',
                     config['full_container_expire_secs'])

    self.T_EXPIRE_TIME = config['throttle_expire_secs']

    # Prefer the local unix socket; otherwise use DOCKER_* env settings
    # (assert_hostname disabled for boot2docker-style certs).
    if os.path.exists('/var/run/docker.sock'):
        self.cli = Client(base_url='unix://var/run/docker.sock',
                          version=self.VERSION)
    else:
        kwargs = kwargs_from_env(assert_hostname=False)
        kwargs['version'] = self.VERSION
        self.cli = Client(**kwargs)
def docker_context():
    """Make a docker context"""
    # base_url stays None (docker-py default) unless DOCKER_HOST is set.
    base_url = None
    if "DOCKER_HOST" in os.environ:
        base_url = os.environ["DOCKER_HOST"]

    client = DockerClient(base_url=base_url, timeout=5)
    try:
        info = client.info()
    except requests.exceptions.ConnectionError as error:
        raise BadDockerConnection(base_url=base_url, error=error)
    log.info("Connected to docker daemon\tdriver=%s\tkernel=%s",
             info["Driver"], info["KernelVersion"])
    return client
def docker_context():
    """Make a docker context"""
    # Build client options from the DOCKER_* env vars, with an
    # auto-negotiated API version and a configurable timeout.
    timeout = int(os.environ.get("DOCKER_CLIENT_TIMEOUT", 180))
    options = dict(kwargs_from_env(assert_hostname=False),
                   version="auto", timeout=timeout)
    client = DockerClient(**options)
    try:
        info = client.info()
        log.info("Connected to docker daemon\tdriver=%s\tkernel=%s",
                 info["Driver"], info["KernelVersion"])
    except (requests.exceptions.ConnectionError,
            requests.exceptions.Timeout) as error:
        raise BadDockerConnection(base_url=options.get('base_url'), error=error)
    return client
def download_docker_image(docker_image, target_file):
    """Export *docker_image* from the docker daemon into *target_file*.

    Tries boot2docker-style env configuration first; a KeyError (no TLS
    settings in the env kwargs) means we fall back to the unix socket.
    """
    from docker.client import Client
    try:
        # First attempt boot2docker, because it is fail-fast
        from docker.utils import kwargs_from_env
        kwargs = kwargs_from_env()
        # KeyError here ('tls' absent) is the non-boot2docker signal.
        kwargs['tls'].assert_hostname = False
        docker_cli = Client(**kwargs)
    except KeyError:
        # Assume this means we are not using boot2docker
        docker_cli = Client(base_url='unix://var/run/docker.sock', tls=False)
    image = docker_cli.get_image(docker_image)
    # FIX: the exported image is a binary tarball -- write in binary mode
    # ('wb'); text mode ('w') corrupts it on Windows.  The context manager
    # also guarantees the handle is closed on error.
    with open(target_file, 'wb') as image_tar:
        image_tar.write(image.data)
def download_docker_image(docker_image, target_file):
    """Export *docker_image* from the docker daemon into *target_file*.

    Tries boot2docker-style env configuration first; a KeyError (no TLS
    settings in the env kwargs) means we fall back to the unix socket.
    """
    from docker.client import Client
    try:
        # First attempt boot2docker, because it is fail-fast
        from docker.utils import kwargs_from_env
        kwargs = kwargs_from_env()
        # KeyError here ('tls' absent) is the non-boot2docker signal.
        kwargs['tls'].assert_hostname = False
        docker_cli = Client(**kwargs)
    except KeyError:
        # Assume this means we are not using boot2docker
        docker_cli = Client(base_url='unix://var/run/docker.sock', tls=False)
    image = docker_cli.get_image(docker_image)
    # FIX: the exported image is a binary tarball -- write in binary mode
    # ('wb'); text mode ('w') corrupts it on Windows.  The context manager
    # also guarantees the handle is closed on error.
    with open(target_file, 'wb') as image_tar:
        image_tar.write(image.data)
def deploy_instance(docker_api_endpoint, service_name, env_vars, revision,
                    memory, cpu, ports=None):
    """Create and start a provisioner container; return (id, port map).

    :param docker_api_endpoint: base URL of the docker daemon API
    :param service_name: service whose image should be instantiated
    :param env_vars: environment dict passed to the container
    :param revision: image version (tag suffix v<revision>)
    :param memory, cpu: resource limits (currently unimplemented)
    :param ports: iterable of container ports to publish (optional)
    :returns: (container id, {container_port: host_port})
    """
    # FIX: ports defaulted to None but was iterated unconditionally,
    # raising TypeError when omitted; treat a missing value as "no ports".
    ports = ports or []
    c = DockerClient(docker_api_endpoint)
    res = c.create_container(
        image="docker-service-provisioner/%s:v%s" % (service_name, revision),
        environment=env_vars,
        # TODO: Implement memory and CPU
        #mem_limit=memory,
    )
    container_id = res['Id']
    # Empty HostIp/HostPort asks docker to pick an ephemeral host port.
    c.start(container_id,
            port_bindings={"%s/tcp" % p: [{'HostIp': '', 'HostPort': ''}]
                           for p in ports})
    # Use inspect_container, as c.ports() doesn't seem to work for some reason
    container = c.inspect_container(container_id)
    # NOTE(review): the 'ID' key (vs 'Id') matches an older inspect payload
    # format -- confirm against the daemon version in use.
    return container['ID'], {
        p: container['NetworkSettings']['Ports']["%s/tcp" % p][0]['HostPort']
        for p in ports}
def handle(self, *args, **kwargs):
    # Django management command: build provisioner images for every known
    # plugin on a randomly chosen build-eligible host.
    hosts = Host.objects.filter(use_for_building_images=True).order_by('?')
    if hosts:
        c = DockerClient(base_url=hosts[0].docker_api_endpoint)
        for plugin_dict in pool.get_all_plugin_dicts():
            # Tag and remote Dockerfile URL are derived from service+version.
            result, log = c.build(
                tag="docker-service-provisioner/%s:v%s" % (plugin_dict['service'], plugin_dict['version']),
                path=urljoin(settings.DOCKER_PROVISION_URL, "dockerfile/%s/%s/" % (plugin_dict['service'], plugin_dict['version']))
            )
            if result:
                print "Converted", plugin_dict['service'], plugin_dict['version'], 'to', result
            else:
                print "Failed converting", plugin_dict['service'], plugin_dict['version'], 'to', result
    else:
        raise Exception("No hosts available for building images!")
def main(self, args):
    # !! TODO needs to implement login if using that
    # Cross-reference containers recorded in the metadata file with
    # containers actually running on the discovered hosts; returns the
    # running subset as "container_id,host" strings.
    containers = []
    try:
        directory = args.metadata_path
        directory = os.path.expanduser(directory)
        with open(os.path.join(directory, "containers"), 'r') as f:
            for line in f:
                # Each line is a python-literal dict describing a container.
                container = ast.literal_eval(line.rstrip("\n"))
                containers.append(container['container_id'] + "," + container['host'])
    except:
        # NOTE(review): bare except treats a missing/corrupt metadata file
        # as "no containers" -- deliberate best-effort, but it also hides
        # real errors.
        pass
    host_args = Object()
    host_args.metadata_path = args.metadata_path
    host_args.z = True
    host_a = hosts.hosts.main(host_args)
    host_c = []
    for host in host_a:
        # !! TODO is using TLS, put in env for each host
        c = Client(**kwargs_from_env())
        #tls_config = docker.tls.TLSConfig(verify=False)
        #c = docker.Client(base_url='tcp://'+host, version='1.12',
        #                  #tls=tls_config,
        #                  timeout=2)
        host_c.append(c.containers())
    compare_containers = []
    try:
        # NOTE(review): only host_c[0] is inspected -- presumably intended
        # to cover every host; verify against caller expectations.
        for container in host_c[0]:
            compare_containers.append(container['Id'])
    except:
        if not args.z:
            print "no hosts found"
        return ""
    running_containers = []
    for item in containers:
        # item format is "container_id,host"; compare on the id part.
        container_id = item.split(',')[0]
        if container_id in compare_containers:
            running_containers.append(item)
    # args.z acts as a quiet flag: suppress printing when set.
    if not args.z:
        for container in running_containers:
            print container
    return running_containers
def connect(self):
    """Return a docker client built from self.api_cfg.

    Uses DOCKER_* env settings when 'boot2docker' is exactly True,
    otherwise connects to the configured url with explicit TLS settings.
    """
    url = self.api_cfg.get('url', 'unix://var/run/docker.sock')
    version = self.api_cfg.get('version', '1.12')
    boot2docker = self.api_cfg.get('boot2docker')
    tls_config = self._construct_tlsconfig()
    if boot2docker is True:
        kwargs = kwargs_from_env()
        # FIX: 'tls' is only present when DOCKER_TLS_VERIFY is set; the
        # original indexed it unconditionally and raised KeyError for
        # non-TLS environments.
        if 'tls' in kwargs:
            kwargs['tls'].assert_hostname = False
        client = Client(**kwargs)
    else:
        client = Client(base_url=url, version=str(version), tls=tls_config,
                        timeout=50)
    return client
def _init_docker():
    """Build a docker client from DOCKER_* env vars and verify it responds.

    :raises UserMessageException: when the daemon cannot be reached.
    :returns: a configured docker-py client
    """
    kwargs = kwargs_from_env()
    if 'tls' in kwargs:
        # see http://docker-py.readthedocs.org/en/latest/boot2docker/
        import requests.packages.urllib3 as urllib3
        urllib3.disable_warnings()
        kwargs['tls'].assert_hostname = False
    docker = Client(**kwargs)
    try:
        # A successful version() call proves connectivity.
        docker.version()
    except Exception:
        # FIX: narrowed from a bare `except:` so KeyboardInterrupt and
        # SystemExit still propagate instead of being masked.
        raise UserMessageException("Please set up 'docker' correctly")
    return docker
def __init__(self, remote, username=None, password=None, email=None):
    """Create a docker-py client for *remote* and optionally log in.

    :param remote: docker daemon base URL
    :param username/password/email: registry credentials (login happens
        only when username is provided)
    """
    super(DockerPyClient, self).__init__()
    self.client = Client(base_url=remote, version='1.15')
    self.log = logging.getLogger(__name__)
    # SECURITY FIX: the original logged the raw password at debug level;
    # log only whether a password was supplied.
    self.log.debug('password %s, remote = %s, username=%s',
                   '***' if password else None, remote, username)
    if username:
        self.client.login(username=username, password=password, email=email)
def test_from_env_with_version(self):
    # Client.from_env must honour an explicit API version while still
    # reading connection settings from the DOCKER_* environment.
    os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
                      DOCKER_CERT_PATH=TEST_CERT_DIR,
                      DOCKER_TLS_VERIFY='1')
    client = Client.from_env(version='2.32')
    # TLS verification rewrites the tcp:// scheme to https://.
    self.assertEqual(client.base_url, "https://192.168.59.103:2376")
    self.assertEqual(client._version, '2.32')
def initialize():
    """
    Initialize the discovery service. This is done automatically when
    discovery is imported, but it can be used to reinitialize the service.
    """
    # Rebinds the module-level registry singleton.
    global services

    discovery_mode = os.getenv('DISCOVERY_MODE')
    docker_host = os.getenv('DOCKER_HOST')
    # Default mode: 'docker' when a DOCKER_HOST is available, else 'dns'.
    if not discovery_mode and docker_host:
        discovery_mode = 'docker'
    if not discovery_mode:
        discovery_mode = 'dns'

    if discovery_mode == 'docker':
        if not docker_host:
            raise ValueError("DOCKER_HOST not set")
        from docker.client import Client
        from docker.utils import kwargs_from_env
        # Extract the bare host/IP from e.g. tcp://1.2.3.4:2376.
        ip = re.match(r'.*?://(.*?):\d+', docker_host).group(1)
        kwargs = kwargs_from_env(assert_hostname=False)
        client = Client(**kwargs)
        services = DockerRegistry(client, ip)
    elif discovery_mode == 'env':
        services = EnvironmentRegistry()
    elif discovery_mode == 'dns':
        services = DnsRegistry()
    else:
        raise ValueError("Unknown DISCOVERY_MODE: {}".format(discovery_mode))
def docker_client():
    """Return a docker-py client with an auto-negotiated API version."""
    # MAC OSX SUPPORT (currently disabled):
    # if platform.system() == 'Darwin':
    #     kwargs = kwargs_from_env(assert_hostname=False)
    #     return Client(**kwargs)
    return Client(version='auto')
def __init__(self, cfg):
    """Store *cfg* and build a docker client from DOCKER_* env vars.

    :param cfg: runner configuration, kept as-is on self.config
    """
    from docker.client import Client
    from docker.utils import kwargs_from_env

    self.config = cfg
    docker_kwargs = kwargs_from_env()
    # FIX: 'tls' is only present when DOCKER_TLS_VERIFY is set; the
    # original indexed it unconditionally and raised KeyError for
    # plain (non-TLS) setups.
    if 'tls' in docker_kwargs:
        docker_kwargs['tls'].assert_hostname = False
    self.docker = Client(**docker_kwargs)
def world_of_docker():
    """Pulls and starts a container from a random Docker image in the top 100."""
    with open('repos.json') as data_file:
        repos = json.load(data_file)

    client = Client(**kwargs_from_env())
    random_repo = random.choice(repos)['name']
    click.echo('Hmmmmmm.... how about %s? Everybody likes %s!' % (random_repo, random_repo))

    # Stream the pull, echoing each status line as it arrives.
    for line in client.pull(random_repo, stream=True):
        click.echo(json.loads(line)['status'])

    click.echo('Now let\'s just start up a container here...')
    container = client.create_container(image=random_repo)
    client.start(container=container.get('Id'))
    name = client.inspect_container(container['Id'])['Name'].strip('/')
    click.echo('Up and running! Enjoy your new %s container, %s' % (random_repo, name))
def cli(ctx, url, network):
    """Connect to docker, report the version, and locate *network*."""
    cl = Client(base_url=url, version='auto')
    # A successful version() call proves the connection works.
    v = cl.version()
    info('Connected to Docker {v[Version]}, api version '
         '{v[ApiVersion]}.'.format(v=v))
    # networks(names=...) can match loosely, so filter for the exact name.
    matching = [n for n in cl.networks(names=[network]) if n['Name'] == network]
    assert len(matching) < 2  # WTF?
    if not matching:
        exit_err("Could not find a network name {!r}".format(network))
    ctx.obj = {'cl': cl,
               'network_name': network,
               'network': matching[0]['Name']}
def get_cli(assert_hostname=True):
    """Return a docker client built from DOCKER_* env vars.

    :param assert_hostname: when False, disable TLS hostname checking
        (needed for boot2docker-style certs)
    """
    kwargs = kwargs_from_env()
    # FIX: 'tls' is only present when DOCKER_TLS_VERIFY is set; the
    # original indexed it unconditionally and raised KeyError for
    # plain-socket setups when assert_hostname=False was requested.
    if not assert_hostname and 'tls' in kwargs:
        kwargs['tls'].assert_hostname = False
    cli = Client(**kwargs)
    return cli
def client():
    """
    Returns a docker-py client configured using environment variables
    according to the same logic as the official Docker client.
    """
    env_kwargs = kwargs_from_env()
    # Disable TLS hostname checking (boot2docker certs) when TLS is in use.
    tls = env_kwargs.get('tls')
    if tls is not None:
        tls.assert_hostname = False
    return Client(version='auto', **env_kwargs)
def test_from_env(self):
    """Test that environment variables are passed through to
    utils.kwargs_from_env(). KwargsFromEnvTest tests that environment
    variables are parsed correctly."""
    os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
                      DOCKER_CERT_PATH=TEST_CERT_DIR,
                      DOCKER_TLS_VERIFY='1')
    client = Client.from_env()
    # TLS verification rewrites the tcp:// scheme to https://.
    self.assertEqual(client.base_url, "https://192.168.59.103:2376")
def handle(self, *args, **kwargs):
    # Django management command: build provisioner images for every known
    # plugin on a randomly chosen build-eligible host.
    hosts = Host.objects.filter(use_for_building_images=True).order_by('?')
    if hosts:
        c = DockerClient(base_url=hosts[0].docker_api_endpoint)
        for plugin_dict in pool.get_all_plugin_dicts():
            # Tag and remote Dockerfile URL are derived from service+version.
            result, log = c.build(
                tag="docker-service-provisioner/%s:v%s" % (plugin_dict['service'], plugin_dict['version']),
                path=urljoin(
                    settings.DOCKER_PROVISION_URL, "dockerfile/%s/%s/" % (plugin_dict['service'], plugin_dict['version'])))
            if result:
                print "Converted", plugin_dict['service'], plugin_dict['version'], 'to', result
            else:
                print "Failed converting", plugin_dict['service'], plugin_dict['version'], 'to', result
    else:
        raise Exception("No hosts available for building images!")
def docker_client():
    """Create a Docker client instance or return an existing one."""
    global _DOCKER_CLIENT  # pylint: disable = global-statement
    if not _DOCKER_CLIENT:
        # assert_hostname=False is required when using boot2docker, it's taken as a hint from Fig: https://github.com/docker/fig/blob/master/compose/cli/docker_client.py#L29
        _DOCKER_CLIENT = Client(**kwargs_from_env(assert_hostname=False))
    return _DOCKER_CLIENT
class DockerDaemon():
    """Watches the docker event stream and records start/die events in db."""

    def __init__(self, db):
        self.client = Client()
        self.db = db

    def get_events(self):
        # Block on the decoded event stream, forwarding start/die events.
        for event in self.client.events(decode=True):
            if event and event.get('status') in ['start', 'die']:
                self.send_container_status(event)

    def send_container_status(self, event):
        # Attach the full container details, then store under "<id>_<status>".
        event['container'] = self.get_container(event.get('id'))
        key = '%s_%s' % (event.get('id'), event.get('status'))
        self.db.set(key, event)

    def get_container(self, id):
        return self.client.inspect_container(id)
def download(url, filename, cache=None):
    """Fetch *url* into *filename*, with an optional local cache.

    Supports http(s) URLs, docker:<image> references (exported from the
    local daemon), and plain file paths (copied).
    """
    if cache is not None:
        basename = os.path.basename(filename)
        cachename = os.path.join(cache, basename)
        if os.path.isfile(cachename):
            print('- using cached version of', basename)
            shutil.copy(cachename, filename)
            return
    if url.startswith("http:") or url.startswith("https"):
        # [mboldt:20160908] Using urllib.urlretrieve gave an "Access
        # Denied" page when trying to download docker boshrelease.
        # I don't know why. requests.get works. Do what works.
        response = requests.get(url, stream=True)
        response.raise_for_status()
        with open(filename, 'wb') as file:
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:
                    file.write(chunk)
    elif url.startswith("docker:"):
        # FIX: str.lstrip("docker:") strips a *character set*, not the
        # literal prefix, mangling image names made of those characters
        # (e.g. 'docker:redis' -> 'is').  Strip the prefix and any
        # leading slashes explicitly instead.
        docker_image = url[len("docker:"):].lstrip("/")
        try:
            from docker.client import Client
            from docker.utils import kwargs_from_env
            kwargs = kwargs_from_env()
            kwargs['tls'] = False
            docker_cli = Client(**kwargs)
            image = docker_cli.get_image(docker_image)
            # FIX: the image is a binary tarball -- write in binary mode.
            with open(filename, 'wb') as image_tar:
                image_tar.write(image.data)
        except KeyError as e:
            print('docker not configured on this machine (or environment variables are not properly set)', file=sys.stderr)
            sys.exit(1)
        except Exception:
            # Narrowed from a bare `except:` so Ctrl-C still propagates.
            print(docker_image, 'not found on local machine', file=sys.stderr)
            print('you must either pull the image, or download it and use the --cache option', file=sys.stderr)
            sys.exit(1)
    else:
        shutil.copy(url, filename)
def __init__(self, project):
    # Resolve the project's working directories and docker naming up front.
    self.project = project
    self.checkout_directory = project.get_checkout_directory()
    self.artifact_directory = project.get_artifact_directory()
    self.serve_directory = project.get_serve_directory()
    self.image_name = self.get_image_name()
    self.container_name = self.get_container_name()

    kwargs = kwargs_from_env()
    if settings.DEBUG and osx:
        # development helper for boot2docker users
        # NOTE(review): assumes the DOCKER_TLS_* env vars are set; this
        # raises KeyError on 'tls' otherwise -- TODO confirm.
        kwargs['tls'].assert_hostname = False
    self.docker = Client(**kwargs)

    # A missing checkout is fatal; artifact/serve dirs are created lazily.
    if not os.path.exists(self.checkout_directory):
        raise NonexistantCheckout(
            'No such checkout: %s' % self.checkout_directory
        )
    if not os.path.exists(self.artifact_directory):
        os.makedirs(self.artifact_directory)
    if not os.path.exists(self.serve_directory):
        os.makedirs(self.serve_directory)
def get_client(daemon_client):
    """Get client.

    Returns docker client using daemon_client as configuration.

    :param daemon_client: optional configuration for client creation
    :raises NonRecoverableError: when docker.errors.APIError during client.
    :return: docker client
    """
    try:
        client = Client(**daemon_client)
    except DockerException as e:
        message = 'Error while getting client: {0}.'.format(str(e))
        raise NonRecoverableError(message)
    return client
def test_kwargs_from_env_tls(self):
    # With DOCKER_TLS_VERIFY set, kwargs_from_env must produce an https
    # base_url plus a TLSConfig wired to the certs in DOCKER_CERT_PATH.
    os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
                      DOCKER_CERT_PATH=TEST_CERT_DIR,
                      DOCKER_TLS_VERIFY='1')
    kwargs = kwargs_from_env(assert_hostname=False)
    self.assertEqual('https://192.168.59.103:2376', kwargs['base_url'])
    self.assertTrue('ca.pem' in kwargs['tls'].verify)
    self.assertTrue('cert.pem' in kwargs['tls'].cert[0])
    self.assertTrue('key.pem' in kwargs['tls'].cert[1])
    self.assertEqual(False, kwargs['tls'].assert_hostname)
    try:
        # The kwargs must be consumable by the Client constructor as-is.
        client = Client(**kwargs)
        self.assertEqual(kwargs['base_url'], client.base_url)
        self.assertEqual(kwargs['tls'].verify, client.verify)
        self.assertEqual(kwargs['tls'].cert, client.cert)
    except TypeError as e:
        self.fail(e)
def client_from_opts(client_opts=()):
    """
    Construct a docker-py Client from a string specifying options.
    """
    kwargs = dict(CLIENT_DEFAULTS)
    for name, value in client_opts:
        # Coerce the option types docker-py expects; everything else is
        # passed through verbatim.
        if name == "timeout":
            value = int(value)
        elif name == "tls":
            value = strtobool(value.lower())
        kwargs[name] = value
    return Client(**kwargs)
class Runner:
    """
    This class is in charge of loading test suites and runs them on
    different environments
    """

    # Seconds to wait for a container to stop before docker kills it.
    STOP_TIMEOUT = 3

    def __init__(self, cfg):
        from docker.client import Client
        from docker.utils import kwargs_from_env
        self.config = cfg
        docker_kwargs = kwargs_from_env()
        # NOTE(review): 'tls' is absent when DOCKER_TLS_VERIFY is unset,
        # so this raises KeyError for plain setups -- presumably only a
        # boot2docker-style environment is supported; verify.
        docker_kwargs['tls'].assert_hostname = False
        self.docker = Client(**docker_kwargs)

    def run(self, build, *tests):
        """
        Run all the test suites passed in as parameters on the given build

        This method will start a container of the build, run the tests
        and stop it
        """
        from docker.utils import create_host_config
        print("Running tests on {}".format(build.name))
        ports = self.config['environment']['ports']
        # Target host defaults to the hostname part of DOCKER_HOST.
        # NOTE(review): the .get default is evaluated eagerly, so
        # os.getenv('DOCKER_HOST') being None raises AttributeError even
        # when 'docker_host' is present in config -- TODO confirm intent.
        host = self.config['global'].get('docker_host',
                                         os.getenv('DOCKER_HOST').split('/')[-1].split(':')[0])
        # Bind every exposed port to an ephemeral host port (None binding).
        container = self.docker.create_container(
            image=build.docker_tag,
            command='/bin/bash -c "nc -l 8080"',
            ports=ports,
            host_config=create_host_config(port_bindings=dict(zip(ports, [None] * len(ports))))
        ).get('Id')
        self.docker.start(container)
        info = self.docker.inspect_container(container)
        # Map each container port to the host port docker picked.
        port_bindings = {port: bind[0]['HostPort']
                         for port, bind in info['NetworkSettings']['Ports'].items()}
        for test in tests:
            test.run(host, port_bindings, build.context)
        self.docker.stop(container, timeout=self.STOP_TIMEOUT)
        # Persist the container logs for post-mortem debugging.
        log_file_path = os.path.join(self.config['global'].get('logs_dir', '/tmp'),
                                     '{}.log'.format(build.name))
        with open(log_file_path, 'wb') as logs:
            logs.write(self.docker.logs(container, stdout=True, stderr=True, stream=False))
        print("Container logs wrote to {}".format(log_file_path))
def test_kwargs_from_env_tls_verify_false_no_cert(self):
    # When DOCKER_TLS_VERIFY is empty and DOCKER_CERT_PATH is unset but
    # certs exist under ~/.docker, kwargs_from_env should still build a
    # TLSConfig with verification disabled.
    temp_dir = tempfile.mkdtemp()
    cert_dir = os.path.join(temp_dir, '.docker')
    shutil.copytree(TEST_CERT_DIR, cert_dir)

    os.environ.update(DOCKER_HOST='tcp://192.168.59.103:2376',
                      HOME=temp_dir,
                      DOCKER_TLS_VERIFY='')
    os.environ.pop('DOCKER_CERT_PATH', None)
    kwargs = kwargs_from_env(assert_hostname=True)
    self.assertEqual('https://192.168.59.103:2376', kwargs['base_url'])
    self.assertTrue('ca.pem' in kwargs['tls'].ca_cert)
    self.assertTrue('cert.pem' in kwargs['tls'].cert[0])
    self.assertTrue('key.pem' in kwargs['tls'].cert[1])
    self.assertEqual(True, kwargs['tls'].assert_hostname)
    self.assertEqual(False, kwargs['tls'].verify)
    try:
        # The kwargs must be consumable by the Client constructor as-is.
        client = Client(**kwargs)
        self.assertEqual(kwargs['base_url'], client.base_url)
        self.assertEqual(kwargs['tls'].cert, client.cert)
        self.assertFalse(kwargs['tls'].verify)
    except TypeError as e:
        self.fail(e)
class DisableSocketTest(base.BaseTestCase):
    # Tests for Client._disable_socket_timeout using a socket stand-in.

    class DummySocket(object):
        # Minimal socket double exposing only timeout get/set.
        def __init__(self, timeout=60):
            self.timeout = timeout

        def settimeout(self, timeout):
            self.timeout = timeout

        def gettimeout(self):
            return self.timeout

    def setUp(self):
        self.client = Client()

    def test_disable_socket_timeout(self):
        """Test that the timeout is disabled on a generic socket object."""
        socket = self.DummySocket()
        self.client._disable_socket_timeout(socket)
        self.assertEqual(socket.timeout, None)

    def test_disable_socket_timeout2(self):
        """Test that the timeouts are disabled on a generic socket object
        and it's _sock object if present."""
        socket = self.DummySocket()
        socket._sock = self.DummySocket()
        self.client._disable_socket_timeout(socket)
        self.assertEqual(socket.timeout, None)
        self.assertEqual(socket._sock.timeout, None)

    def test_disable_socket_timout_non_blocking(self):
        """Test that a non-blocking socket does not get set to blocking."""
        # A 0.0 timeout means non-blocking and must be preserved.
        socket = self.DummySocket()
        socket._sock = self.DummySocket(0.0)
        self.client._disable_socket_timeout(socket)
        self.assertEqual(socket.timeout, None)
        self.assertEqual(socket._sock.timeout, 0.0)
class DockerController(object):
    """Manages per-client browser containers: creation, expiry, queueing
    and throttling, with all shared state kept in redis."""

    def _load_config(self):
        """Load controller settings from ./config.yaml."""
        with open('./config.yaml') as fh:
            # safe_load: the config file must not construct arbitrary objects
            config = yaml.safe_load(fh)
        return config

    def __init__(self):
        config = self._load_config()

        self.LOCAL_REDIS_HOST = 'netcapsule_redis_1'

        self.REDIS_HOST = os.environ.get('REDIS_HOST', self.LOCAL_REDIS_HOST)
        self.PYWB_HOST = os.environ.get('PYWB_HOST', 'netcapsule_pywb_1')

        self.C_EXPIRE_TIME = config['init_container_expire_secs']
        self.Q_EXPIRE_TIME = config['queue_expire_secs']
        self.REMOVE_EXP_TIME = config['remove_expired_secs']

        self.VERSION = config['api_version']

        self.VNC_PORT = config['vnc_port']
        self.CMD_PORT = config['cmd_port']

        self.MAX_CONT = config['max_containers']

        self.image_prefix = config['image_prefix']

        self.browser_list = config['browsers']
        self.browser_paths = {}

        # index browsers by URL path, rejecting duplicate paths early
        for browser in self.browser_list:
            path = browser['path']
            if path in self.browser_paths:
                raise Exception('Already a browser for path {0}'.format(path))
            self.browser_paths[path] = browser

        self.default_browser = config['default_browser']
        self.redirect_paths = config['redirect_paths']

        # optional list of "url ts" lines used by get_randompage()
        self.randompages = []
        try:
            with open(config['random_page_file']) as fh:
                self.randompages = [line.rstrip() for line in fh]
        except Exception as e:
            print(e)

        self.redis = redis.StrictRedis(host=self.REDIS_HOST)

        # setnx: only seed these on first start, never clobber live state
        self.redis.setnx('next_client', '1')
        self.redis.setnx('max_containers', self.MAX_CONT)
        self.redis.setnx('num_containers', '0')
        self.redis.setnx('cpu_auto_adjust', 5.5)

        self.redis.setnx('throttle_samples', config['throttle_samples'])
        self.redis.setnx('throttle_max_avg', config['throttle_max_avg'])

        self.redis.setnx('container_expire_secs',
                         config['full_container_expire_secs'])

        self.T_EXPIRE_TIME = config['throttle_expire_secs']

        if os.path.exists('/var/run/docker.sock'):
            self.cli = Client(base_url='unix://var/run/docker.sock',
                              version=self.VERSION)
        else:
            kwargs = kwargs_from_env(assert_hostname=False)
            kwargs['version'] = self.VERSION
            self.cli = Client(**kwargs)

    def _get_host_port(self, info, port, default_host):
        """Return 'host:hostport' for a container port from inspect data."""
        info = info['NetworkSettings']['Ports'][str(port) + '/tcp']
        info = info[0]
        host = info['HostIp']
        if host == '0.0.0.0' and default_host:
            host = default_host
        return host + ':' + info['HostPort']

    def timed_new_container(self, browser, env, host, client_id):
        """Create a container and record its startup duration for throttling."""
        start = time.time()
        info = self.new_container(browser, env, host)
        end = time.time()

        dur = end - start
        time_key = 't:' + client_id
        self.redis.setex(time_key, self.T_EXPIRE_TIME, dur)

        throttle_samples = int(self.redis.get('throttle_samples'))
        print('INIT DUR: ' + str(dur))
        # keep only the most recent N startup timings
        self.redis.lpush('init_timings', time_key)
        self.redis.ltrim('init_timings', 0, throttle_samples - 1)

        return info

    def new_container(self, browser_id, env=None, default_host=None):
        """Start a browser container, returning its vnc/cmd host:port mapping.

        Returns {} on failure (best-effort cleanup of the partial container).
        """
        browser = self.browser_paths.get(browser_id)

        # fall back to the configured default browser
        if not browser:
            browser = self.browser_paths.get(self.default_browser)

        container = self.cli.create_container(image=self.image_prefix + '/' + browser['id'],
                                              ports=[self.VNC_PORT, self.CMD_PORT],
                                              environment=env,
                                              )
        short_id = None

        try:
            id_ = container.get('Id')
            short_id = id_[:12]
            self.cli.start(container=id_,
                           port_bindings={self.VNC_PORT: None, self.CMD_PORT: None},
                           volumes_from=['netcapsule_shared_data_1'],
                           network_mode='netcapsule',
                           )
            info = self.cli.inspect_container(id_)
            ip = info['NetworkSettings']['IPAddress']
            if not ip:
                # newer docker puts the address under the named network
                ip = info['NetworkSettings']['Networks']['netcapsule']['IPAddress']

            self.redis.incr('num_containers')
            self.redis.setex('c:' + short_id, self.C_EXPIRE_TIME, 1)

            return {'vnc_host': self._get_host_port(info, self.VNC_PORT, default_host),
                    'cmd_host': self._get_host_port(info, self.CMD_PORT, default_host),
                    }
        except Exception:
            if short_id:
                self.remove_container(short_id)
            # BUGFIX: print_exc(e) passed the exception as the 'limit' arg
            traceback.print_exc()
            return {}

    def remove_container(self, short_id, ip=None):
        """Force-remove a container and clear its redis bookkeeping."""
        print('REMOVING ' + short_id)
        try:
            self.cli.remove_container(short_id, force=True)
        except Exception as e:
            print(e)

        self.redis.delete('c:' + short_id)

        if ip:
            for key in self.redis.keys(ip + ':*'):
                self.redis.delete(key)

    def remove_expired(self):
        """Blocking worker loop: pop 'short_id ip' entries off remove_q."""
        print('Start Expired Check')
        while True:
            try:
                value = self.redis.blpop('remove_q', 1000)
                if not value:
                    continue
                short_id, ip = value[1].split(' ')
                self.remove_container(short_id, ip)
                self.redis.decr('num_containers')
            except Exception:
                traceback.print_exc()

    def check_nodes(self):
        """Auto-scale max_containers from the daemon's CPU count."""
        print('Check Nodes')
        try:
            scale = self.redis.get('cpu_auto_adjust')
            if not scale:
                return

            info = self.cli.info()
            cpus = int(info.get('NCPU', 0))
            if cpus <= 1:
                return

            total = int(float(scale) * cpus)
            self.redis.set('max_containers', total)
        except Exception as e:
            print(e)

    def add_new_client(self):
        """Assign a new queue number and an opaque client token."""
        client_id = self.redis.incr('clients')
        enc_id = base64.b64encode(os.urandom(27))
        self.redis.setex('cm:' + enc_id, self.Q_EXPIRE_TIME, client_id)
        self.redis.setex('q:' + str(client_id), self.Q_EXPIRE_TIME, 1)
        return enc_id, client_id

    def am_i_next(self, enc_id):
        """Return (enc_id, wait); wait == -1 means the client may proceed."""
        client_id = None

        if enc_id:
            self.redis.expire('cm:' + enc_id, self.Q_EXPIRE_TIME)
            client_id = self.redis.get('cm:' + enc_id)

        if not client_id:
            enc_id, client_id = self.add_new_client()

        client_id = int(client_id)
        next_client = int(self.redis.get('next_client'))

        # not next client
        if client_id != next_client:
            # if this client expired, delete it from queue
            if not self.redis.get('q:' + str(next_client)):
                print('skipping expired', next_client)
                self.redis.incr('next_client')

            # missed your number somehow, get a new one!
            if client_id < next_client:
                enc_id, client_id = self.add_new_client()

        diff = client_id - next_client

        if self.throttle():
            self.redis.expire('q:' + str(client_id), self.Q_EXPIRE_TIME)
            return enc_id, client_id - next_client

        num_containers = int(self.redis.get('num_containers'))

        max_containers = self.redis.get('max_containers')
        max_containers = int(max_containers) if max_containers else self.MAX_CONT

        if diff <= (max_containers - num_containers):
            self.redis.incr('next_client')
            return enc_id, -1
        else:
            self.redis.expire('q:' + str(client_id), self.Q_EXPIRE_TIME)
            return enc_id, client_id - next_client

    def throttle(self):
        """True when the rolling average container-start time is too high."""
        timings = self.redis.lrange('init_timings', 0, -1)
        if not timings:
            return False

        timings = self.redis.mget(*timings)

        avg = 0
        count = 0
        for val in timings:
            if val is not None:
                avg += float(val)
                count += 1

        if count == 0:
            return False

        avg = avg / count
        print('AVG: ', avg)

        throttle_max_avg = float(self.redis.get('throttle_max_avg'))
        if avg >= throttle_max_avg:
            print('Throttling, too slow...')
            return True

        return False

    def do_init(self, browser, url, ts, host, client_id):
        """Build the container environment and start a timed container."""
        env = {}
        env['URL'] = url
        env['TS'] = ts
        env['SCREEN_WIDTH'] = os.environ.get('SCREEN_WIDTH')
        env['SCREEN_HEIGHT'] = os.environ.get('SCREEN_HEIGHT')
        # BUGFIX: read from self, not the module-level 'dc' singleton
        env['REDIS_HOST'] = self.REDIS_HOST
        env['PYWB_HOST_PORT'] = self.PYWB_HOST + ':8080'
        env['BROWSER'] = browser

        info = self.timed_new_container(browser, env, host, client_id)
        info['queue'] = 0
        return info

    def get_randompage(self):
        """Pick a random '/path/ts/url' from the configured random page list."""
        if not self.randompages:
            return '/'

        url, ts = random.choice(self.randompages).split(' ', 1)
        print(url, ts)
        # list() needed on Python 3, where dict.keys() is not indexable
        path = random.choice(list(self.browser_paths.keys()))
        return '/' + path + '/' + ts + '/' + url
def getImages(self, daemon_address=None):
    """Return a Bag of all images reported by the daemon at daemon_address."""
    cli = Client(daemon_address)
    result = Bag()
    for idx, image in enumerate(cli.images()):
        result['r_%i' % idx] = Bag(image)
    return result
def __init__(self, db):
    """Store the db handle and attach a fresh docker Client."""
    self.db = db
    self.client = Client()
#!/usr/bin/env python

import os

import dockerpty
from docker.client import Client
from docker.utils import kwargs_from_env

# Connect using the host and API version taken explicitly from the
# environment rather than docker-py's automatic detection.
client = Client(base_url=os.environ['DOCKER_HOST'],
                version=os.environ['DOCKER_SERVER_API_VERSION'])

container = client.create_container(
    image="debian:wheezy",
    stdin_open=True,
    tty=True,
    command='echo "passed"',
)

dockerpty.start(client, container)
def __init__(self):
    """Create the docker Client from the configured base_url."""
    self.client = Client(base_url=self.dockerconf.get('base_url', ''))
    logger.debug("Client init")
    self.check = None
class Docker:
    """Wrapper around a docker-py ``Client`` that manages kooplex containers,
    images, volumes and in-container helper files."""

    dockerconf = KOOPLEX.get('docker', {})

    def __init__(self):
        self.client = Client(base_url=self.dockerconf.get('base_url', ''))
        logger.debug("Client init")
        self.check = None

    def list_imagenames(self):
        """Yield the names of images whose repo tag matches the configured filter."""
        logger.debug("Listing image names")
        pattern = KOOPLEX.get('docker', {}).get('pattern_imagename_filter',
                                                r'^image-%(\w+):\w$')
        for image in self.client.images(all=True):
            if image['RepoTags'] is None:
                continue
            for tag in image['RepoTags']:
                if re.match(pattern, tag):
                    _, imagename, _ = re.split(pattern, tag)
                    logger.debug("Found image: %s" % imagename)
                    yield imagename

    def list_volumenames(self):
        """Yield the names of all docker volumes."""
        logger.debug("Listing volume names")
        for volume in self.client.volumes()['Volumes']:
            yield volume['Name']

    def get_container(self, container):
        """Return the docker API record for *container*, or None if not found."""
        for item in self.client.containers(all=True):
            # docker API prepends '/' in front of container names
            if '/' + container.name in item['Names']:
                logger.debug("Get container %s" % container.name)
                return item
        return None

    def create_container(self, container):
        """Create (but do not start) the docker container for *container*."""
        mountpoints = []   # mount points inside the container
        binds = {}         # volume name -> bind specification
        for volume in container.volumes:
            logger.debug("container %s, volume %s" % (container, volume))
            mp = volume.mountpoint
            mountpoints.append(mp)
            binds[volume.name] = {'bind': mp, 'mode': volume.mode(container.user)}
        logger.debug("container %s binds %s" % (container, binds))

        host_config = self.client.create_host_config(
            binds=binds,
            privileged=True,
            mem_limit='2g',
            memswap_limit='170m',
            mem_swappiness=0,
            # oom_kill_disable = True,
            cpu_shares=2,
        )
        network = self.dockerconf.get('network', 'host')
        networking_config = {'EndpointsConfig': {network: {}}}
        ports = self.dockerconf.get('container_ports', [8000, 9000])
        imagename = container.image.imagename if container.image \
            else self.dockerconf.get('default_image', 'basic')

        self.client.create_container(
            name=container.name,
            image=imagename,
            detach=True,
            hostname=container.name,
            host_config=host_config,
            networking_config=networking_config,
            environment=container.environment,
            volumes=mountpoints,
            ports=ports,
        )
        logger.debug("Container created")
        self.managemount(container)  # FIXME: check if not called twice
        return self.get_container(container)

    def _writefile(self, container_name, path, filename, content):
        """Place *content* at path/filename inside the container via put_archive."""
        import tarfile
        import time
        from io import BytesIO

        tarstream = BytesIO()
        tar = tarfile.TarFile(fileobj=tarstream, mode='w')
        tarinfo = tarfile.TarInfo(name=filename)
        tarinfo.size = len(content)
        tarinfo.mtime = time.time()
        tar.addfile(tarinfo, BytesIO(content))
        tar.close()
        tarstream.seek(0)
        try:
            status = self.client.put_archive(container=container_name, path=path, data=tarstream)
            logger.info("container %s put_archive %s/%s returns %s" % (container_name, path, filename, status))
        except Exception as e:
            logger.error("container %s put_archive %s/%s fails -- %s" % (container_name, path, filename, e))

    def managemount(self, container):
        """Write the volume-mapper configuration file into the container."""
        from kooplex.lib.fs_dirname import Dirname
        path, filename = os.path.split(self.dockerconf.get('mountconf', '/tmp/mount.conf'))
        mapper = []
        for v in container.volumes:
            mapper.extend(["%s:%s" % (v.volumetype, d)
                           for d in Dirname.containervolume_listfolders(container, v)])
        # NOTE: mounter uses read to process the mapper configuration, thus we
        # need to make sure '\n' terminates the config mapper file
        mapper.append('')
        logger.debug("container %s map %s" % (container, mapper))
        self._writefile(container.name, path, filename, "\n".join(mapper).encode('utf8'))

    def trigger_impersonator(self, vcproject):  # FIXME: dont call it 1-by-1
        """Write the git-command configuration into the impersonator container."""
        from kooplex.lib.fs_dirname import Dirname
        container_name = self.dockerconf.get('impersonator', 'impersonator')
        path, filename = os.path.split(self.dockerconf.get('gitcommandconf', '/tmp/gitcommand.conf'))
        token = vcproject.token
        fn_clonesh = os.path.join(Dirname.vcpcache(vcproject), "clone.sh")
        fn_key = os.path.join(Dirname.userhome(vcproject.token.user), '.ssh', token.fn_rsa)
        cmdmaps = ["%s:%s:%s:%s" % (token.user.username, fn_key,
                                    token.repository.domain, fn_clonesh),
                   '']  # trailing '' -> newline-terminated file
        self._writefile(container_name, path, filename, "\n".join(cmdmaps).encode('utf8'))

    def run_container(self, container):
        """Ensure the container exists, then start it if it is not running."""
        docker_container_info = self.get_container(container)
        if docker_container_info is None:
            logger.debug("Container did not exist, Creating new one")
            docker_container_info = self.create_container(container)
        container_state = docker_container_info['Status']
        if container_state == 'Created' or container_state.startswith('Exited'):
            logger.debug("Starting container")
            self.start_container(container)

    def refresh_container_state(self, container):
        """Copy the docker-reported state into the model and save it."""
        docker_container_info = self.get_container(container)
        container_state = docker_container_info['State']
        logger.debug("Container state %s" % container_state)
        container.last_message = str(container_state)
        container.last_message_at = now()
        container.save()

    def start_container(self, container):
        self.client.start(container.name)
        # we need to retrieve the container state after starting it
        docker_container_info = self.get_container(container)
        container_state = docker_container_info['State']
        logger.debug("Container state %s" % container_state)
        container.last_message = str(container_state)
        container.last_message_at = now()
        assert container_state == 'running', \
            "Container failed to start: %s" % docker_container_info

    def stop_container(self, container):
        try:
            self.client.stop(container.name)
            container.last_message = 'Container stopped'
        except Exception as e:
            logger.warn("docker container not found by API -- %s" % e)
            container.last_message = str(e)

    def remove_container(self, container):
        try:
            self.client.remove_container(container.name)
            container.last_message = 'Container removed'
            container.last_message_at = now()
        except Exception as e:
            logger.warn("docker container not found by API -- %s" % e)
            container.last_message = str(e)
            container.last_message_at = now()
        logger.debug("Container removed %s" % container.name)

    # FIXME: execute2 is going to be the real one...
    def execute(self, container, command):
        logger.info("execution: %s in %s" % (command, container))
        execution = self.client.exec_create(container=container.name,
                                            cmd=shlex.split(command))
        return self.client.exec_start(execution, detach=False)

    def execute2(self, container, command):
        """Run *command* in the container, record the inspect result, return stdout."""
        logger.info("execution: %s in %s" % (command, container))
        execution = self.client.exec_create(container=container.name,
                                            cmd=shlex.split(command))
        response = self.client.exec_start(exec_id=execution['Id'], stream=False)
        check = self.client.exec_inspect(exec_id=execution['Id'])
        self.check = check
        if check['ExitCode'] != 0:
            logger.error('Execution %s in %s failed -- %s' % (command, container, check))
        return response.decode()
def download(url, filename, cache=None):
    """Fetch *url* into *filename*.

    Supported url forms:
      - plain http(s) URLs
      - 'github://<owner>/<repo>/<file>': the matching asset of the latest release
      - 'docker://<image>': the image tarball from the local docker daemon
      - a local file or directory path: copied as-is

    If *cache* is a directory that already holds a file with *filename*'s
    basename, that cached copy is used and nothing is downloaded.
    """
    if cache is not None:
        basename = os.path.basename(filename)
        cachename = os.path.join(cache, basename)
        if os.path.isfile(cachename):
            print('- using cached version of', basename)
            shutil.copy(cachename, filename)
            return

    # Special url to find a file associated with a github release.
    # github://cf-platform-eng/meta-buildpack/meta-buildpack.tgz
    # will find the file named meta-buildpack-0.0.3.tgz in the latest
    # release for https://github.com/cf-platform-eng/meta-buildpack
    if url.startswith("github:"):
        # BUGFIX: lstrip("github:") strips a *character set* (g,i,t,h,u,b,:)
        # and could eat leading characters of the owner name; drop the
        # literal prefix instead.
        repo_name = url[len("github:"):].lstrip("/")
        file_name = os.path.basename(repo_name)
        repo_name = os.path.dirname(repo_name)
        url = "https://api.github.com/repos/" + repo_name + "/releases/latest"
        response = requests.get(url, stream=True)
        response.raise_for_status()
        release = response.json()
        assets = release.get('assets', [])
        url = None
        # match the requested name with anything (e.g. a version) inserted
        # before the extension
        pattern = re.compile('.*\\.'.join(file_name.rsplit('.', 1)) + '\\Z')
        for asset in assets:
            if pattern.match(asset['name']) is not None:
                url = asset['browser_download_url']
                break
        if url is None:
            print('no matching asset found for repo', repo_name,
                  'file', file_name, file=sys.stderr)
            sys.exit(1)
        # Fallthrough intentional, we now proceed to download the URL we found

    if url.startswith("http:") or url.startswith("https"):
        # [mboldt:20160908] Using urllib.urlretrieve gave an "Access
        # Denied" page when trying to download docker boshrelease.
        # I don't know why. requests.get works. Do what works.
        response = requests.get(url, stream=True)
        response.raise_for_status()
        with open(filename, 'wb') as file:
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:
                    file.write(chunk)
    elif url.startswith("docker:"):
        # BUGFIX: same lstrip character-set problem as the github branch
        docker_image = url[len("docker:"):].lstrip("/")
        try:
            from docker.client import Client
            from docker.utils import kwargs_from_env
            kwargs = kwargs_from_env()
            kwargs['tls'] = False
            docker_cli = Client(**kwargs)
            image = docker_cli.get_image(docker_image)
            # BUGFIX: the image tarball is binary data -> 'wb', and a context
            # manager guarantees the handle is closed
            with open(filename, 'wb') as image_tar:
                image_tar.write(image.data)
        except KeyError:
            print('docker not configured on this machine (or environment variables are not properly set)', file=sys.stderr)
            sys.exit(1)
        except Exception:
            # narrowed from bare 'except:' so SystemExit/KeyboardInterrupt escape
            print(docker_image, 'not found on local machine', file=sys.stderr)
            print('you must either pull the image, or download it and use the --cache option', file=sys.stderr)
            sys.exit(1)
    elif os.path.isdir(url):
        shutil.copytree(url, filename)
    else:
        shutil.copy(url, filename)
class DockerController(object):
    """Manages per-browser containers and a redis-backed client queue
    (earlier, link-based variant of the controller)."""

    def _load_config(self):
        """Load controller settings from ./config.yaml."""
        with open('./config.yaml') as fh:
            # safe_load: the config file must not construct arbitrary objects
            config = yaml.safe_load(fh)
        return config

    def __init__(self):
        config = self._load_config()

        self.REDIS_HOST = config['redis_host']
        self.PYWB_HOST = config['pywb_host']

        self.C_EXPIRE_TIME = config['container_expire_secs']
        self.Q_EXPIRE_TIME = config['queue_expire_secs']
        self.REMOVE_EXP_TIME = config['remove_expired_secs']

        self.VERSION = config['api_version']

        self.VNC_PORT = config['vnc_port']
        self.CMD_PORT = config['cmd_port']

        self.image_prefix = config['image_prefix']
        self.browsers = config['browsers']

        self.redis = redis.StrictRedis(host=self.REDIS_HOST)
        self.redis.setnx('next_client', '1')

        if os.path.exists('/var/run/docker.sock'):
            self.cli = Client(base_url='unix://var/run/docker.sock',
                              version=self.VERSION)
        else:
            # BUGFIX: kwargs_from_env() returns no 'tls' entry when
            # DOCKER_TLS_VERIFY is unset, so kwargs['tls'].assert_hostname
            # raised KeyError; pass the flag to kwargs_from_env instead.
            kwargs = kwargs_from_env(assert_hostname=False)
            kwargs['version'] = self.VERSION
            self.cli = Client(**kwargs)

    def new_container(self, browser, env=None):
        """Start a browser container; return its published (vnc, cmd) host ports."""
        tag = self.browsers.get(browser)

        # fall back to the default ('') browser image
        if not tag:
            tag = self.browsers['']

        container = self.cli.create_container(image=self.image_prefix + '/' + tag,
                                              ports=[self.VNC_PORT, self.CMD_PORT],
                                              environment=env,
                                              )
        id_ = container.get('Id')
        self.cli.start(container=id_,
                       port_bindings={self.VNC_PORT: None, self.CMD_PORT: None},
                       links={self.PYWB_HOST: self.PYWB_HOST,
                              self.REDIS_HOST: self.REDIS_HOST},
                       volumes_from=['netcapsule_shared_data_1'],
                       )

        vnc_port = self.cli.port(id_, self.VNC_PORT)[0]['HostPort']
        cmd_port = self.cli.port(id_, self.CMD_PORT)[0]['HostPort']

        info = self.cli.inspect_container(id_)
        ip = info['NetworkSettings']['IPAddress']

        short_id = id_[:12]
        self.redis.hset('all_containers', short_id, ip)
        self.redis.setex('c:' + short_id, self.C_EXPIRE_TIME, 1)

        return vnc_port, cmd_port

    def remove_container(self, short_id, ip):
        """Force-remove a container and clear its redis bookkeeping."""
        print('REMOVING ' + short_id)
        try:
            self.cli.remove_container(short_id, force=True)
        except Exception as e:
            print(e)

        self.redis.hdel('all_containers', short_id)
        self.redis.delete('c:' + short_id)

        for key in self.redis.keys(ip + ':*'):
            self.redis.delete(key)

    def remove_all(self, check_expired=False):
        """Remove all tracked containers, or only expired ones."""
        all_containers = self.redis.hgetall('all_containers')

        # items() works on Python 2 and 3 (was py2-only iteritems())
        for short_id, ip in all_containers.items():
            if check_expired:
                remove = not self.redis.get('c:' + short_id)
            else:
                remove = True

            if remove:
                self.remove_container(short_id, ip)

    def add_new_client(self):
        """Assign the next queue number to a new client."""
        client_id = self.redis.incr('clients')
        self.redis.setex('q:' + str(client_id), self.Q_EXPIRE_TIME, 1)
        return client_id

    def am_i_next(self, client_id):
        """Return (client_id, wait); wait == -1 means the client may proceed."""
        next_client = int(self.redis.get('next_client'))

        # not next client
        if next_client != client_id:
            # if this client expired, delete it from queue
            if not self.redis.get('q:' + str(next_client)):
                print('skipping expired', next_client)
                self.redis.incr('next_client')

            # missed your number somehow, get a new one!
            if client_id < next_client:
                client_id = self.add_new_client()
            else:
                self.redis.expire('q:' + str(client_id), self.Q_EXPIRE_TIME)

            return client_id, client_id - next_client

        # not avail yet
        num_containers = self.redis.hlen('all_containers')
        if num_containers >= MAX_CONT:
            self.redis.expire('q:' + str(client_id), self.Q_EXPIRE_TIME)
            return client_id, client_id - next_client

        self.redis.incr('next_client')
        return client_id, -1
def download(url, filename, cache=None):
    """Fetch *url* into *filename*, optionally reading from / writing to *cache*.

    Supported url forms:
      - plain http(s) URLs
      - 'github://<owner>/<repo>/<file>': the matching asset of the latest release
      - 'docker:<image>': the image tarball pulled via the local docker daemon
      - a local file or directory path: copied as-is

    A cache hit (a file in *cache* with *filename*'s basename) short-circuits
    the download; otherwise the result is copied back into *cache*.
    """
    if cache is not None:
        basename = os.path.basename(filename)
        cachename = os.path.join(cache, basename)
        if os.path.isfile(cachename):
            print('- using cached version of', basename)
            shutil.copy(cachename, filename)
            return

    # Special url to find a file associated with a github release.
    # github://cf-platform-eng/meta-buildpack/meta-buildpack.tgz
    # will find the file named meta-buildpack-0.0.3.tgz in the latest
    # release for https://github.com/cf-platform-eng/meta-buildpack
    if url.startswith("github:"):
        repo_name = url.replace('github:', '', 1).lstrip("/")
        file_name = os.path.basename(repo_name)
        repo_name = os.path.dirname(repo_name)
        url = "https://api.github.com/repos/" + repo_name + "/releases/latest"
        response = requests.get(url, stream=True)
        response.raise_for_status()
        release = response.json()
        assets = release.get('assets', [])
        url = None
        # match the requested name with anything (e.g. a version) inserted
        # before the extension
        pattern = re.compile('.*\\.'.join(file_name.rsplit('.', 1)) + '\\Z')
        for asset in assets:
            if pattern.match(asset['name']) is not None:
                url = asset['browser_download_url']
                break
        if url is None:
            print('no matching asset found for repo', repo_name,
                  'file', file_name, file=sys.stderr)
            sys.exit(1)
        # Fallthrough intentional, we now proceed to download the URL we found

    if url.startswith("http:") or url.startswith("https"):
        # [mboldt:20160908] Using urllib.urlretrieve gave an "Access
        # Denied" page when trying to download docker boshrelease.
        # I don't know why. requests.get works. Do what works.
        response = requests.get(url, stream=True)
        response.raise_for_status()
        with open(filename, 'wb') as file:
            for chunk in response.iter_content(chunk_size=1024):
                if chunk:
                    file.write(chunk)
    elif url.startswith("docker:"):
        docker_image = url.replace('docker:', '', 1)
        try:
            from docker.client import Client
            # NOTE(review): Client.from_env requires docker-py >= 1.9 —
            # confirm the pinned dependency provides it
            docker_cli = Client.from_env()
            docker_cli.pull(docker_image)
            image = docker_cli.get_image(docker_image)
            # BUGFIX: the image tarball is binary data; text mode ('w')
            # raises TypeError on Python 3. Use 'wb' and a context manager.
            with open(filename, 'wb') as image_tar:
                image_tar.write(image.data)
        except KeyError:
            print(
                'docker not configured on this machine (or environment variables are not properly set)',
                file=sys.stderr)
            sys.exit(1)
        except Exception as e:
            print(e)
            print(docker_image, 'not found on local machine', file=sys.stderr)
            print(
                'you must either pull the image, or download it and use the --cache option',
                file=sys.stderr)
            sys.exit(1)
    elif os.path.isdir(url):
        shutil.copytree(url, filename)
    else:
        shutil.copy(url, filename)

    # write the freshly-downloaded artifact back into the cache
    if cache:
        if os.path.isdir(filename):
            basename = os.path.basename(filename)
            cachedir = os.path.join(cache, basename)
            if os.path.exists(cachedir):
                shutil.rmtree(cachedir)
            shutil.copytree(filename, os.path.join(cache, basename))
        elif os.path.isfile(filename):
            shutil.copy(filename, cache)
        else:
            print(filename, 'is not a file or directory. Cannot cache.',
                  file=sys.stderr)
def get_docker_client():
    """
    Try to fire up boot2docker and set any environmental variables
    (Mac path); fall back to the local unix socket (Linux path).

    Returns a DockerClient with a ``host`` attribute set to the daemon's
    host address.
    """
    # For Mac
    try:
        # Get boot2docker info (raises OSError on Linux, where the
        # boot2docker binary does not exist -- handled below)
        p = subprocess.Popen(['boot2docker', 'info'], stdout=PIPE)
        boot2docker_info = json.loads(p.communicate()[0])

        # Defaults
        docker_host = ''
        docker_cert_path = ''
        docker_tls_verify = ''

        # Start the boot2docker VM if it is not already running
        if boot2docker_info['State'] != "running":
            print('Starting Boot2Docker VM:')
            subprocess.call(['boot2docker', 'start'])

        if ('DOCKER_HOST' not in os.environ) or \
           ('DOCKER_CERT_PATH' not in os.environ) or \
           ('DOCKER_TLS_VERIFY' not in os.environ):
            # Parse 'export VAR=value' lines emitted by shellinit
            p = subprocess.Popen(['boot2docker', 'shellinit'], stdout=PIPE)
            boot2docker_envs = p.communicate()[0].split()

            for env in boot2docker_envs:
                if 'DOCKER_HOST' in env:
                    docker_host = env.split('=')[1]
                elif 'DOCKER_CERT_PATH' in env:
                    docker_cert_path = env.split('=')[1]
                elif 'DOCKER_TLS_VERIFY' in env:
                    docker_tls_verify = env.split('=')[1]

            # Set environmental variables
            os.environ['DOCKER_TLS_VERIFY'] = docker_tls_verify
            os.environ['DOCKER_HOST'] = docker_host
            os.environ['DOCKER_CERT_PATH'] = docker_cert_path
        else:
            # Handle case when boot2docker is already running.
            # BUGFIX: the env var already holds the bare URL
            # ('tcp://<host>:<port>'); the old .split('=')[1] was copied from
            # the shellinit parsing above and raised IndexError here.
            docker_host = os.environ['DOCKER_HOST']

        # Get the arguments form the environment
        client_kwargs = kwargs_from_env(assert_hostname=False)
        client_kwargs['version'] = MINIMUM_API_VERSION

        # Find the right version of the API by creating a DockerClient with
        # the minimum working version, then test to see if the Docker is
        # running a later version than the minimum.
        # See: https://github.com/docker/docker-py/issues/439
        version_client = DockerClient(**client_kwargs)
        client_kwargs['version'] = get_api_version(
            MAX_CLIENT_DOCKER_API_VERSION,
            version_client.version()['ApiVersion'])

        # Create Real Docker client
        docker_client = DockerClient(**client_kwargs)

        # Derive the host address only from string formatted: "tcp://<host>:<port>"
        docker_client.host = docker_host.split(':')[1].strip('//')

        return docker_client

    # For Linux
    except OSError:
        # Find the right version of the API by creating a DockerClient with
        # the minimum working version, then test to see if the Docker is
        # running a later version than the minimum.
        # See: https://github.com/docker/docker-py/issues/439
        version_client = DockerClient(base_url='unix://var/run/docker.sock',
                                      version=MINIMUM_API_VERSION)
        version = get_api_version(MAX_CLIENT_DOCKER_API_VERSION,
                                  version_client.version()['ApiVersion'])

        docker_client = DockerClient(base_url='unix://var/run/docker.sock',
                                     version=version)
        docker_client.host = DEFAULT_DOCKER_HOST

        return docker_client