def download_image(self, image_name: str) -> bool:
    """Downloads (pulls) the named image using the configured container runtime."""
    absolute_image_name = self.get_absolute_image_name(image_name)
    start = time.time()
    succeeded = True
    if self.container_runtime == KernelImagePuller.DOCKER_CLIENT:
        try:
            DockerClient.from_env().images.pull(absolute_image_name)
        except NotFound:
            succeeded = False
    elif self.container_runtime == KernelImagePuller.CONTAINERD_CLIENT:
        # Delegate to crictl against the configured CRI endpoint.
        succeeded = self.execute_cmd(
            ["crictl", "-r", self.runtime_endpoint, "pull", absolute_image_name]
        )
    else:  # invalid container runtime
        logger.error(
            f"Invalid container runtime detected: '{self.container_runtime}'!"
        )
        succeeded = False
    elapsed = time.time() - start
    if succeeded is True:
        logger.info(
            f"Pulled image '{image_name}' in {elapsed:.3f} secs.")
    return succeeded
def image_exists(self, image_name: str) -> bool:
    """Checks for the existence of the named image using the configured container runtime."""
    absolute_image_name = self.get_absolute_image_name(image_name)
    start = time.time()
    exists = True
    if self.container_runtime == KernelImagePuller.DOCKER_CLIENT:
        try:
            DockerClient.from_env().images.get(absolute_image_name)
        except NotFound:
            exists = False
    elif self.container_runtime == KernelImagePuller.CONTAINERD_CLIENT:
        # 'crictl inspecti -q' exits non-zero when the image is absent.
        exists = self.execute_cmd(
            ["crictl", "-r", self.runtime_endpoint, "inspecti", "-q", absolute_image_name]
        )
    else:  # invalid container runtime
        logger.error(
            f"Invalid container runtime detected: '{self.container_runtime}'!"
        )
        exists = False
    elapsed = time.time() - start
    logger.debug(
        f"Checked existence of image '{image_name}' in {elapsed:.3f} secs. exists = {exists}"
    )
    return exists
def image_exists(image_name: str) -> bool:
    """Checks for the existence of the named image using the configured container runtime."""
    absolute_image_name = get_absolute_image_name(image_name)
    start = time.time()
    if container_runtime == DOCKER_CLIENT:
        try:
            DockerClient.from_env().images.get(absolute_image_name)
            found = True
        except NotFound:
            found = False
    elif container_runtime == CONTAINERD_CLIENT:
        # 'crictl inspecti -q' exits non-zero when the image is absent.
        found = execute_cmd(
            ['crictl', '-r', runtime_endpoint, 'inspecti', '-q', absolute_image_name]
        )
    else:  # invalid container runtime
        logger.error(
            f"Invalid container runtime detected: '{container_runtime}'!")
        found = False
    logger.debug(
        f"Checked existence of image '{image_name}' in {(time.time() - start):.3f} secs. exists = {found}"
    )
    return found
def __init__(self, docker='unix:///var/run/docker.sock', domain='docker'):
    """Connect to the Docker daemon and start watching container events.

    docker -- daemon endpoint URL (local unix socket by default)
    domain -- presumably a DNS domain suffix used when naming containers;
              TODO confirm against the class that defines self.listen
    """
    # Imported lazily so the docker package is only needed when this
    # class is actually instantiated.
    from docker.client import DockerClient
    self._docker = DockerClient(docker, version='auto')
    self._domain = domain
    self._storage = Storage()
    self._lock = threading.Lock()
    # Background thread runs self.listen() for the lifetime of the object.
    threading.Thread(group=None, target=self.listen).start()
def __login_docker_registry(client: DockerClient):
    """Log the given docker client into the registry configured in `config`.

    Raises exceptions.RegistryConnectionError (chained) on any docker APIError.
    """
    try:
        registry_url = config.REGISTRY_URL
        username = config.REGISTRY_USER
        password = config.REGISTRY_PW
        log.debug("logging into docker registry %s", registry_url)
        # The registry's /v2 endpoint is the login target.
        client.login(
            registry=registry_url + '/v2',
            username=username,
            password=password,
        )
        log.debug("logged into docker registry %s", registry_url)
    except docker.errors.APIError as err:
        log.exception("Error while loggin into the registry")
        raise exceptions.RegistryConnectionError(
            "Error while logging to docker registry", err) from err
def installer(self, config):
    """Returns an initialized partially mocked SwarmInstaller."""
    docker_client = DockerClient()
    return SwarmInstaller(config, "docker-swarm", docker_client=docker_client)
def test_set_properties_with_self_update(hello_world_image):
    """Exercise set_properties() port handling when self_update=True.

    First pass: the old container's ports are folded into the
    'dockupdater.updater_port' label and dropped from the new config.
    Second pass (label already present on the old container): the label is
    consumed and the port pairs are restored into the new config.
    """
    client = DockerClient.from_env()
    container_dict = dict(
        labels={"test": "12345"},
        hostname="hello-world-test",
        environment={
            "env1": "test1",
            "env2": "test2",
            "env3": "test3",
        },
        ports=[(4567, "tcp"), (9876, "tcp")],
    )
    container = client.api.create_container("hello-world:latest", **container_dict)
    new = set_properties(client.containers.get(container.get("Id")),
                         hello_world_image, self_update=True)
    # Existing labels are preserved...
    assert new.get("labels", dict()).get("test") == "12345"
    # ...and the port list is serialized into the updater_port label.
    assert new.get(
        "labels", dict()).get("dockupdater.updater_port") == "4567,tcp:9876,tcp"
    assert not new.get("ports")
    # Second container carries the updater_port label instead of explicit ports.
    container_dict["labels"] = new.get("labels")
    del container_dict["ports"]
    container2 = client.api.create_container("hello-world:latest", **container_dict)
    new2 = set_properties(client.containers.get(container2.get("Id")),
                          hello_world_image, self_update=True)
    assert new2.get("labels").get("test") == "12345"
    # The marker label is removed once consumed...
    assert new2.get("labels").get("dockupdater.updater_port") is None
    # ...and the original (port, proto) pairs reappear in the new config.
    assert all([(a, b) for a, b in new2.get("ports") if a in [4567, 9876]])
async def postgres(loop):
    """Async fixture: run a throwaway postgres container, yield (host, port).

    Pulls the image, starts the container with a free host port, then polls
    (up to `timeout` seconds) until the server accepts connections before
    yielding the address. The container is killed and removed on teardown.
    """
    tag = 'latest'
    image = 'postgres'
    host = '127.0.0.1'
    timeout = 60
    unused_tcp_port = get_free_port()
    client = DockerClient(version='auto', **kwargs_from_env())
    client.images.pull(image, tag=tag)
    # FIX: message typo "Stating" -> "Starting".
    print('Starting %s:%s on %s:%s' % (image, tag, host, unused_tcp_port))
    cont = client.containers.run('%s:%s' % (image, tag),
                                 detach=True,
                                 ports={'5432/tcp': ('0.0.0.0', unused_tcp_port)})
    try:
        start_time = time.time()
        conn = None
        while conn is None:
            if start_time + timeout < time.time():
                raise Exception("Initialization timeout, failed to "
                                "initialize postgresql container")
            try:
                conn = await asyncpg.connect(
                    'postgresql://postgres@%s:%s/postgres'
                    '' % (host, unused_tcp_port), loop=loop)
            except Exception:  # FIX: dropped unused `as e` binding
                # Server not ready yet; retry shortly.
                # NOTE(review): time.sleep blocks the event loop — consider
                # `await asyncio.sleep(.1)` if other tasks must run meanwhile.
                time.sleep(.1)
        await conn.close()
        yield (host, unused_tcp_port)
    finally:
        cont.kill()
        cont.remove()
def docker_context():
    """Make a docker context"""
    host = os.environ.get('DOCKER_HOST')
    cert_path = os.environ.get('DOCKER_CERT_PATH')
    tls_verify = os.environ.get('DOCKER_TLS_VERIFY')
    options = {"timeout": 60}
    if host:
        # With TLS verification enabled, the tcp:// scheme must become https://.
        options['base_url'] = (host.replace('tcp://', 'https://')
                               if tls_verify else host)
    if tls_verify and cert_path:
        # NOTE(review): ssl.PROTOCOL_TLSv1 is deprecated/removed in modern
        # ssl modules — confirm the installed docker SDK still accepts it.
        options['tls'] = docker.tls.TLSConfig(
            verify=True,
            ca_cert=os.path.join(cert_path, 'ca.pem'),
            client_cert=(os.path.join(cert_path, 'cert.pem'),
                         os.path.join(cert_path, 'key.pem')),
            ssl_version=ssl.PROTOCOL_TLSv1,
            assert_hostname=False)
    client = DockerClient(**options)
    try:
        # Probe the daemon so misconfiguration surfaces here, not at first use.
        info = client.info()
        log.info("Connected to docker daemon\tdriver=%s\tkernel=%s",
                 info["Driver"], info["KernelVersion"])
    except (requests.exceptions.ConnectionError,
            requests.exceptions.Timeout) as error:
        # NOTE(review): if DOCKER_HOST was unset, 'base_url' is missing from
        # options and this line raises KeyError instead — verify intent.
        raise BadDockerConnection(base_url=options['base_url'], error=error)
    return client
def __init__(self):
    """Initialize Docker base wrapper."""
    # Talk to the local daemon over its unix socket; the long timeout (900s)
    # accommodates slow operations such as large image pulls.
    self.docker: DockerClient = DockerClient(
        base_url=f"unix:/{str(SOCKET_DOCKER)}", version="auto", timeout=900
    )
    # Network helper bound to this client.
    self.network: DockerNetwork = DockerNetwork(self.docker)
    # Daemon info snapshot taken once at construction time.
    self._info: DockerInfo = DockerInfo.new(self.docker.info())
    self.config: DockerConfig = DockerConfig()
def docker_client(version='auto', base_url=None, tls=False, **kwargs):
    """Build a DockerClient from the environment, optionally with client TLS.

    Extra keyword arguments are forwarded to kwargs_from_env().
    """
    opts = kwargs_from_env(**kwargs)
    opts['version'] = version
    opts['base_url'] = base_url
    if tls:
        # Client certificate only; server verification is deliberately off.
        client_cert, _ = AUTH_CONFIG
        opts['tls'] = TLSConfig(client_cert=client_cert, verify=False)
    return DockerClient(**opts)
def launch_docker_kernel(kernel_id, response_addr, spark_context_init_mode):
    """Launch a containerized kernel as either a swarm service or a container.

    kernel_id -- unique kernel identifier, also used in labels and names
    response_addr -- address the kernel reports back to (EG_RESPONSE_ADDRESS)
    spark_context_init_mode -- forwarded as KERNEL_SPARK_CONTEXT_INIT_MODE
    """
    # Launches a containerized kernel.
    # Can't proceed if no image was specified.
    image_name = os.environ.get('KERNEL_IMAGE', None)
    if image_name is None:
        sys.exit("ERROR - KERNEL_IMAGE not found in environment - kernel launch terminating!")
    # Container name is composed of KERNEL_USERNAME and KERNEL_ID
    container_name = os.environ.get('KERNEL_USERNAME', '') + '-' + kernel_id
    # Determine network. If EG_DOCKER_NETWORK has not been propagated, fall back to 'bridge'...
    docker_network = os.environ.get('EG_DOCKER_NETWORK', 'bridge')
    # Build labels - these will be modelled similar to kubernetes: kernel_id, component, app, ...
    labels = dict()
    labels['kernel_id'] = kernel_id
    labels['component'] = 'kernel'
    labels['app'] = 'enterprise-gateway'
    # Capture env parameters...
    param_env = dict()
    param_env['EG_RESPONSE_ADDRESS'] = response_addr
    param_env['KERNEL_SPARK_CONTEXT_INIT_MODE'] = spark_context_init_mode
    # Since the environment is specific to the kernel (per env stanza of kernelspec, KERNEL_ and ENV_WHITELIST)
    # just add the env here.
    param_env.update(os.environ)
    param_env.pop('PATH')  # Let the image PATH be used. Since this is relative to images, we're probably safe.

    client = DockerClient.from_env()
    # NOTE(review): swarm_mode and remove_container are defined outside this
    # view (presumably module-level flags) — confirm they are set before this runs.
    if swarm_mode:
        networks = list()
        networks.append(docker_network)
        mounts = list()
        mounts.append("/usr/local/share/jupyter/kernels:/usr/local/share/jupyter/kernels:ro")
        # DNS round-robin endpoint and no restarts: a dead kernel should not
        # be silently resurrected by swarm.
        endpoint_spec = EndpointSpec(mode='dnsrr')
        restart_policy = RestartPolicy(condition='none')
        kernel_service = client.services.create(image_name,
                                                name=container_name,
                                                endpoint_spec=endpoint_spec,
                                                restart_policy=restart_policy,
                                                env=param_env,
                                                container_labels=labels,
                                                labels=labels,
                                                # mounts=mounts,  # Enable if necessary
                                                networks=networks)
    else:
        volumes = {'/usr/local/share/jupyter/kernels': {'bind': '/usr/local/share/jupyter/kernels', 'mode': 'ro'}}
        kernel_container = client.containers.run(image_name,
                                                 name=container_name,
                                                 hostname=container_name,
                                                 environment=param_env,
                                                 labels=labels,
                                                 remove=remove_container,
                                                 network=docker_network,
                                                 # volumes=volumes,  # Enable if necessary
                                                 detach=True)
def client(self) -> DockerClient:
    """Get DockerClient"""
    try:
        if self.local:
            client = DockerClient.from_env()
        else:
            tls_config = DockerInlineTLS(
                verification_kp=self.tls_verification,
                authentication_kp=self.tls_authentication,
            ).write()
            client = DockerClient(base_url=self.url, tls=tls_config)
        # Probe the connection before handing the client out.
        client.containers.list()
    except DockerException as exc:
        LOGGER.warning(exc)
        raise ServiceConnectionInvalid from exc
    return client
def pull_image(img_name: str, registry_details: dict) -> None:
    """Pull the docker images locally and remove the registry info from the tag"""
    print(f"Pulling image from '{registry_details['registry']}'")
    client = DockerClient.from_env()
    client.login(**registry_details)
    pulled = client.images.pull(f"{registry_details['registry']}/{img_name}")
    # images.pull returns a single Image, or a list when multiple tags match;
    # either way, re-tag without the registry prefix.
    target = pulled if isinstance(pulled, Image) else pulled[0]
    target.tag(img_name)
def setup_container():
    """Create and prepare a docker container and let Fabric point at it"""
    from docker.client import DockerClient

    image = 'centos:centos7'
    container_name = 'APP_installation_target'
    cli = DockerClient.from_env(version='auto', timeout=10)

    # Create and start a container using the newly created stage1 image
    cont = cli.containers.run(image=image, name=container_name,
                              remove=False, detach=True, tty=True)
    success("Created container %s from %s" % (container_name, image))

    # Find out container IP, prepare container for APP installation
    try:
        host_ip = cli.api.inspect_container(cont.id)['NetworkSettings']['IPAddress']
        info("Updating and installing OpenSSH server in container")
        cont.exec_run('yum -y update')
        cont.exec_run('yum -y install openssh-server sudo')
        cont.exec_run('yum clean all')
        info('Configuring OpenSSH to allow connections to container')
        add_public_ssh_key(cont)
        # Enable root login and disable DNS lookups for faster SSH handshakes.
        cont.exec_run('sed -i "s/#PermitRootLogin yes/PermitRootLogin yes/" /etc/ssh/sshd_config')
        cont.exec_run('sed -i "s/#UseDNS yes/UseDNS no/" /etc/ssh/sshd_config')
        cont.exec_run('ssh-keygen -A')
        cont.exec_run('chown root.root /root/.ssh/authorized_keys')
        cont.exec_run('chmod 600 /root/.ssh/authorized_keys')
        cont.exec_run('chmod 700 /root/.ssh')
        info('Starting OpenSSH deamon in container')
        cont.exec_run('/usr/sbin/sshd -D', detach=True)
    except:
        # NOTE(review): bare except also traps KeyboardInterrupt/SystemExit;
        # it does re-raise, but `except Exception:` would be safer.
        failure("Error while preparing container for APP installation, cleaning up...")
        cont.stop()
        cont.remove()
        raise

    # From now on we connect to root@host_ip using our SSH key
    env.hosts = host_ip
    env.user = '******'
    if 'key_filename' not in env and 'key' not in env:
        env.key_filename = os.path.expanduser("~/.ssh/id_rsa")

    # Make sure we can connect via SSH to the newly started container
    # We disable the known hosts check since docker containers created at
    # different times might end up having the same IP assigned to them, and the
    # ssh known hosts check will fail
    with settings(disable_known_hosts=True):
        execute(check_ssh)

    success('Container successfully setup! APP installation will start now')
    return DockerContainerState(cli, cont)
def hello_world_container(hello_world_image):
    """Create (once) and return the shared hello-world test container."""
    global HELLO_WORLD_CONTAINER
    if not HELLO_WORLD_CONTAINER:
        docker = DockerClient.from_env()
        HELLO_WORLD_CONTAINER = docker.containers.create(
            hello_world_image,
            labels={"test": "12345"},
            hostname="hello-world-test",
            environment={"env1": "test1", "env2": "test2", "env3": "test3"},
        )
    return HELLO_WORLD_CONTAINER
def deploy_instance(docker_api_endpoint, service_name, env_vars, revision,
                    memory, cpu, ports=None):
    """Create and start a provisioner container; return (id, port mapping).

    Returns a tuple of the container id and a dict mapping each requested
    container port to the host port docker bound it to.
    NOTE: memory and cpu are accepted but not yet applied (see TODO below).
    """
    # FIX: the original iterated the default `ports=None`, raising TypeError;
    # treat a missing/empty ports argument as "no ports to bind".
    ports = ports or []
    c = DockerClient(docker_api_endpoint)
    res = c.create_container(
        image="docker-service-provisioner/%s:v%s" % (service_name, revision),
        environment=env_vars,
        # TODO: Implement memory and CPU
        # mem_limit=memory,
    )
    container_id = res['Id']
    # Empty HostIp/HostPort lets docker choose an ephemeral host port.
    c.start(container_id,
            port_bindings={"%s/tcp" % p: [{'HostIp': '', 'HostPort': ''}]
                           for p in ports})
    # Use inspect_container, as c.ports() doesn't seem to work for some reason
    container = c.inspect_container(container_id)
    # NOTE(review): modern docker inspect payloads key the id as 'Id', not
    # 'ID' — confirm against the API version this endpoint speaks.
    return container['ID'], {
        p: container['NetworkSettings']['Ports']["%s/tcp" % p][0]['HostPort']
        for p in ports}
def docker_context():
    """Make a docker context"""
    options = kwargs_from_env(assert_hostname=False)
    options["version"] = "auto"
    # Timeout is overridable via DOCKER_CLIENT_TIMEOUT (seconds).
    options["timeout"] = int(os.environ.get("DOCKER_CLIENT_TIMEOUT", 180))
    client = DockerClient(**options)
    try:
        # Probe the daemon so misconfiguration surfaces here, not at first use.
        info = client.info()
    except (requests.exceptions.ConnectionError,
            requests.exceptions.Timeout) as error:
        raise BadDockerConnection(base_url=options.get('base_url'), error=error)
    log.info("Connected to docker daemon\tdriver=%s\tkernel=%s",
             info["Driver"], info["KernelVersion"])
    return client
def hello_world_container_with_port():
    """Create (once) and return a hello-world container exposing two TCP ports."""
    global HELLO_WORLD_CONTAINER_WITH_PORT
    if not HELLO_WORLD_CONTAINER_WITH_PORT:
        client = DockerClient.from_env()
        # create_container (low-level API) returns a dict; resolve it to a
        # high-level Container object before caching.
        created = client.api.create_container(
            "hello-world:latest",
            labels={"test": "12345"},
            hostname="hello-world-test",
            environment={"env1": "test1", "env2": "test2", "env3": "test3"},
            ports=[(4567, "tcp"), (9876, "tcp")],
        )
        HELLO_WORLD_CONTAINER_WITH_PORT = client.containers.get(created.get("Id"))
    return HELLO_WORLD_CONTAINER_WITH_PORT
def handle(self, *args, **kwargs):
    """Build provisioner images for every plugin on one build-eligible host.

    NOTE: Python 2 code (print statements) — do not modernize in isolation.
    Raises if no host is flagged use_for_building_images.
    """
    # Random ordering spreads builds across eligible hosts over time.
    hosts = Host.objects.filter(use_for_building_images=True).order_by('?')
    if hosts:
        c = DockerClient(base_url=hosts[0].docker_api_endpoint)
        for plugin_dict in pool.get_all_plugin_dicts():
            # The Dockerfile for each service/version is fetched from the
            # provisioning service URL.
            result, log = c.build(
                tag="docker-service-provisioner/%s:v%s" % (plugin_dict['service'], plugin_dict['version']),
                path=urljoin(
                    settings.DOCKER_PROVISION_URL,
                    "dockerfile/%s/%s/" % (plugin_dict['service'], plugin_dict['version'])))
            if result:
                print "Converted", plugin_dict['service'], plugin_dict[
                    'version'], 'to', result
            else:
                print "Failed converting", plugin_dict[
                    'service'], plugin_dict['version'], 'to', result
    else:
        raise Exception("No hosts available for building images!")
def _create_docker_pg_db(
    image,
    version="latest",
    user="******",
    dbname="postgres",
    host="127.0.0.1",
    init_timeout=10,
):
    """Run a postgres container on a free local port; yield ((host, port), container).

    Polls with psycopg2 until the server accepts connections or init_timeout
    elapses; the container is killed and removed on generator teardown.
    """
    # Grab an ephemeral port by binding and immediately releasing it.
    sock = socket.socket()
    sock.bind(("", 0))
    port = sock.getsockname()[1]
    sock.close()
    client = DockerClient(version="auto", **kwargs_from_env())
    cont = client.containers.run(
        f"{image}:{version}",
        detach=True,
        ports={"5432/tcp": (host, port)},
        environment={"POSTGRES_HOST_AUTH_METHOD": "trust"},
    )
    try:
        deadline = time.time() + init_timeout
        conn = None
        while conn is None:
            if time.time() > deadline:
                raise Exception(
                    "Initialization timeout, failed to "
                    "initialize postgresql container"
                )
            try:
                conn = psycopg2.connect(
                    f"dbname={dbname} user={user} "
                    f"host={host} port={port}"
                )
            except psycopg2.OperationalError:
                time.sleep(0.10)
        conn.close()
        yield (host, port), cont
    finally:
        cont.kill()
        cont.remove()
def testInstallInDockerWithoutGUI(base_fixture):
    """ Start an installation with the installer running without the updater ui. """
    client = DockerClient()
    # Build the install image from the auto-updater test dockerfile.
    docker_base_cmd = f"docker build {str(base_fixture.base_path)} -t {WAQD_IMAGE} -f ./test/testdata/auto_updater/dockerfile_install"
    if platform.system() == "Linux":
        docker_base_cmd = docker_base_cmd + " | tee install.log"
    ret = os.system(docker_base_cmd)
    assert ret == 0
    cont = client.containers.create(WAQD_IMAGE, name="waqd-install-test",
                                    stdin_open=True, auto_remove=True)
    cont.start()
    # check if pipx installed
    res = cont.exec_run("python3 -m pipx --version", user="******")
    assert res.exit_code == 0
    # check if pyqt-5 is installed
    res = cont.exec_run("qtchooser -l", user="******")
    assert b"qt5" in res.output
    # check if waqd is installed
    # FIX: the original literal was missing its f-prefix, so the text
    # "{VERSION}" was executed verbatim instead of the actual version.
    res = cont.exec_run(f"/home/pi/.local/bin/waqd.{VERSION} --version")
    assert VERSION in res.output.decode("utf-8")
    # get waqd-start executable
    # check if it was set for autostart
    # check system setup
    # autostart
    # arch = cont.get_archive("/home/pi/.config/lxsession/LXDE-pi/autostart")  # ... TODO
    # cont.attach()
    cont.stop()
    client.images.prune()
def _create_postgres_like_container(request):
    """Run a postgres-compatible container per request.param and yield its address.

    request.param supplies (image, tag, user, dbname). Yields
    (host, port, user, dbname, image, tag) once the server accepts
    connections; tears the container down afterwards.
    """
    host = "127.0.0.1"
    timeout = 600
    # Reserve a free ephemeral port by binding and releasing it.
    sock = socket.socket()
    sock.bind(("", 0))
    port = sock.getsockname()[1]
    sock.close()
    image, tag, user, dbname = request.param
    client = DockerClient(version="auto", **kwargs_from_env())
    client.images.pull(image, tag=tag)
    cont = client.containers.run(
        f"{image}:{tag}",
        detach=True,
        ports={"5432/tcp": (host, port)},
        environment={"POSTGRES_HOST_AUTH_METHOD": "trust"},
    )
    try:
        deadline = time.time() + timeout
        conn = None
        while conn is None:
            if time.time() > deadline:
                raise Exception(f"Initialization timeout, failed to initialize"
                                f" {image} container")
            try:
                conn = psycopg2.connect(f"dbname={dbname} user={user} "
                                        f"host={host} port={port}")
            except psycopg2.OperationalError:
                time.sleep(0.10)
        conn.close()
        yield host, port, user, dbname, image, tag
    finally:
        cont.kill()
        cont.remove()
async def rabbit(loop, rabbit_override_addr):
    """Async fixture: yield (host, port) of a rabbitmq broker.

    If rabbit_override_addr ("host:port") is given, yield that address and
    start nothing. Otherwise run a rabbitmq 3.7.1 container on a free port,
    wait (up to `timeout` seconds) until it accepts AMQP connections, yield
    the address, and tear the container down afterwards.
    """
    if rabbit_override_addr:
        yield rabbit_override_addr.split(':')
        return
    tag = '3.7.1'
    image = 'rabbitmq:{}'.format(tag)
    host = '0.0.0.0'
    timeout = 60
    unused_tcp_port = get_free_port()
    client = DockerClient(version='auto', **kwargs_from_env())
    # FIX: message typo "Stating" -> "Starting".
    print('Starting rabbitmq %s on %s:%s' % (image, host, unused_tcp_port))
    cont = client.containers.run(image,
                                 detach=True,
                                 ports={'5672/tcp': ('0.0.0.0', unused_tcp_port)})
    try:
        start_time = time.time()
        conn = transport = None
        while conn is None:
            if start_time + timeout < time.time():
                # FIX: message typo "failed t o" -> "failed to".
                raise Exception("Initialization timeout, failed to "
                                "initialize rabbitmq container")
            try:
                transport, conn = await aioamqp.connect(host, unused_tcp_port,
                                                        loop=loop)
            except Exception:
                # Broker not up yet; retry shortly.
                # NOTE(review): time.sleep blocks the event loop — consider
                # `await asyncio.sleep(.1)` if other tasks must run meanwhile.
                time.sleep(.1)
        await conn.close()
        transport.close()
        yield (host, unused_tcp_port)
    finally:
        cont.kill()
        cont.remove()
from docker.client import DockerClient
from docker.errors import NotFound, APIError

# Puller configuration — every knob is overridable via a KIP_* env variable.
gateway_host = os.getenv("KIP_GATEWAY_HOST", "http://localhost:8888")
num_pullers = int(os.getenv("KIP_NUM_PULLERS", "2"))
num_retries = int(os.getenv("KIP_NUM_RETRIES", "3"))
interval = int(os.getenv("KIP_INTERVAL", "300"))
log_level = os.getenv("KIP_LOG_LEVEL", "INFO")

POLICY_IF_NOT_PRESENT = "IfNotPresent"
# NOTE(review): name typo ("ALYWAYS") — kept as-is since other code may reference it.
POLICY_ALYWAYS = "Always"
policies = (POLICY_IF_NOT_PRESENT, POLICY_ALYWAYS)
policy = os.getenv("KIP_PULL_POLICY", POLICY_IF_NOT_PRESENT)

docker_client = DockerClient.from_env()

logging.basicConfig(format='[%(levelname)1.1s %(asctime)s %(name)s.%(threadName)s] %(message)s')


def get_kernelspecs():
    """Fetches the set of kernelspecs from the gateway, returning a dict of configured kernel specs"""
    end_point = '{}/api/kernelspecs'.format(gateway_host)
    # NOTE(review): `logger` is defined outside this view — confirm it exists
    # before this is first called.
    logger.info("Fetching kernelspecs from '{}' ...".format(end_point))
    resp = requests.get(end_point)
    if not resp.ok:
        raise requests.exceptions.HTTPError('Gateway server response: {}'.format(resp.status_code))
    return resp.json()


def fetch_image_names():
def __init__(self): self.sclient = DockerClient.from_env() # sync self._aclient: Optional[aiodocker.Docker] = None # async
# Puller configuration — every knob is overridable via a KIP_* env variable.
gateway_host = os.getenv("KIP_GATEWAY_HOST", "http://localhost:8888")
num_pullers = int(os.getenv("KIP_NUM_PULLERS", "2"))
num_retries = int(os.getenv("KIP_NUM_RETRIES", "3"))
interval = int(os.getenv("KIP_INTERVAL", "300"))
log_level = os.getenv("KIP_LOG_LEVEL", "INFO")

POLICY_IF_NOT_PRESENT = "IfNotPresent"
# NOTE(review): name typo ("ALYWAYS") — kept as-is since other code may reference it.
POLICY_ALYWAYS = "Always"
policies = (POLICY_IF_NOT_PRESENT, POLICY_ALYWAYS)
policy = os.getenv("KIP_PULL_POLICY", POLICY_IF_NOT_PRESENT)

# Explicit unix-socket client instead of from_env().
#docker_client = DockerClient.from_env()
docker_client = DockerClient(base_url='unix://var/run/docker.sock')

logging.basicConfig(format='[%(levelname)1.1s %(asctime)s %(name)s.%(threadName)s] %(message)s')


def get_kernelspecs():
    """Fetches the set of kernelspecs from the gateway, returning a dict of configured kernel specs"""
    end_point = '{}/api/kernelspecs'.format(gateway_host)
    # NOTE(review): `logger` is defined outside this view — confirm it exists
    # before this is first called.
    logger.info("Fetching kernelspecs from '{}' ...".format(end_point))
    resp = requests.get(end_point)
    if not resp.ok:
        raise requests.exceptions.HTTPError('Gateway server response: {}'.format(resp.status_code))
    return resp.json()


def fetch_image_names():
def launch_docker_kernel(connection_file, response_addr, spark_context_init_mode):
    """Launch a containerized kernel (connection-file flavor) as a swarm
    service or plain container, bootstrapped via /etc/bootstrap-kernel.sh.

    connection_file -- kernel connection file name (KERNEL_CONNECTION_FILENAME)
    response_addr -- address the kernel reports back to (EG_RESPONSE_ADDRESS)
    spark_context_init_mode -- forwarded as KERNEL_SPARK_CONTEXT_INIT_MODE
    """
    # Launches a containerized kernel.
    # Can't proceed if no image was specified.
    image_name = os.environ.get('KERNEL_IMAGE', None)
    if image_name is None:
        sys.exit("ERROR - KERNEL_IMAGE not found in environment - kernel launch terminating!")
    # Container name is composed of KERNEL_USERNAME and KERNEL_ID
    kernel_id = os.environ['KERNEL_ID']
    container_name = os.environ.get('KERNEL_USERNAME', '') + '-' + kernel_id
    # Determine network. If EG_DOCKER_NETWORK has not been propagated, fall back to 'bridge'...
    docker_network = os.environ.get('EG_DOCKER_NETWORK', 'bridge')
    # Build labels - these will be modelled similar to kubernetes: kernel_id, component, app, ...
    labels = dict()
    labels['kernel_id'] = kernel_id
    labels['component'] = 'kernel'
    labels['app'] = 'enterprise-gateway'
    # Capture env parameters - including the parameters to the actual kernel launcher in the image...
    param_env = dict()
    # Since jupyter lower cases the kernel directory as the kernel-name, we need to capture its case-sensitive
    # value since this is used to locate the kernel launch script within the image.
    param_env['KERNEL_NAME'] = os.path.basename(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    param_env['EG_RESPONSE_ADDRESS'] = response_addr
    param_env['KERNEL_CONNECTION_FILENAME'] = connection_file
    param_env['KERNEL_SPARK_CONTEXT_INIT_MODE'] = spark_context_init_mode
    # Since the environment is specific to the kernel (per env stanza of kernelspec, KERNEL_ and ENV_WHITELIST)
    # just add the env here.
    param_env.update(os.environ)
    param_env.pop('PATH')  # Let the image PATH be used. Since this is relative to images, we're probably safe.

    client = DockerClient.from_env()
    # NOTE(review): swarm_mode and remove_container are defined outside this
    # view (presumably module-level flags) — confirm they are set before this runs.
    if swarm_mode:
        networks = list()
        networks.append(docker_network)
        mounts = list()
        mounts.append("/usr/local/share/jupyter/kernels:/usr/local/share/jupyter/kernels:ro")
        # DNS round-robin endpoint and no restarts: a dead kernel should not
        # be silently resurrected by swarm.
        endpoint_spec = EndpointSpec(mode='dnsrr')
        restart_policy = RestartPolicy(condition='none')
        kernel_service = client.services.create(image_name,
                                                command='/etc/bootstrap-kernel.sh',
                                                name=container_name,
                                                endpoint_spec=endpoint_spec,
                                                restart_policy=restart_policy,
                                                env=param_env,
                                                container_labels=labels,
                                                labels=labels,
                                                # mounts=mounts,  # Enable if necessary
                                                networks=networks)
    else:
        volumes = {'/usr/local/share/jupyter/kernels': {'bind': '/usr/local/share/jupyter/kernels', 'mode': 'ro'}}
        kernel_container = client.containers.run(image_name,
                                                 command='/etc/bootstrap-kernel.sh',
                                                 name=container_name,
                                                 hostname=container_name,
                                                 environment=param_env,
                                                 labels=labels,
                                                 remove=remove_container,
                                                 network=docker_network,
                                                 # volumes=volumes,  # Enable if necessary
                                                 detach=True)
def get_logs(container: DockerClient) -> str:
    """Return the container's log output decoded as UTF-8 text.

    NOTE(review): the annotation says DockerClient, but the argument is used
    like a Container (it calls .logs()) — presumably the hint is wrong.
    """
    return container.logs().decode("utf-8")
def launch_docker_kernel(kernel_id, port_range, response_addr, public_key, spark_context_init_mode):
    """Launch a containerized kernel (port-range/public-key flavor) as a swarm
    service or plain container, running under the kernel user's uid/gid.

    kernel_id -- unique kernel identifier, used in labels and names
    port_range -- forwarded as PORT_RANGE
    response_addr -- address the kernel reports back to (RESPONSE_ADDRESS)
    public_key -- forwarded as PUBLIC_KEY
    spark_context_init_mode -- forwarded as KERNEL_SPARK_CONTEXT_INIT_MODE
    """
    # Launches a containerized kernel.
    # Can't proceed if no image was specified.
    image_name = os.environ.get('KERNEL_IMAGE', None)
    if image_name is None:
        sys.exit(
            "ERROR - KERNEL_IMAGE not found in environment - kernel launch terminating!"
        )
    # Container name is composed of KERNEL_USERNAME and KERNEL_ID
    container_name = os.environ.get('KERNEL_USERNAME', '') + '-' + kernel_id
    # Determine network. If EG_DOCKER_NETWORK has not been propagated, fall back to 'bridge'...
    # (DOCKER_NETWORK takes precedence over the legacy EG_DOCKER_NETWORK.)
    docker_network = os.environ.get(
        'DOCKER_NETWORK', os.environ.get('EG_DOCKER_NETWORK', 'bridge'))
    # Build labels - these will be modelled similar to kubernetes: kernel_id, component, app, ...
    labels = dict()
    labels['kernel_id'] = kernel_id
    labels['component'] = 'kernel'
    labels['app'] = 'enterprise-gateway'
    # Capture env parameters...
    param_env = dict()
    param_env['PORT_RANGE'] = port_range
    param_env['PUBLIC_KEY'] = public_key
    param_env['RESPONSE_ADDRESS'] = response_addr
    param_env['KERNEL_SPARK_CONTEXT_INIT_MODE'] = spark_context_init_mode
    # Since the environment is specific to the kernel (per env stanza of kernelspec, KERNEL_ and ENV_WHITELIST)
    # just add the env here.
    param_env.update(os.environ)
    param_env.pop(
        'PATH'
    )  # Let the image PATH be used. Since this is relative to images, we're probably safe.

    # Run the kernel under the kernel user's uid/gid (propagated via env).
    user = param_env.get('KERNEL_UID')
    group = param_env.get('KERNEL_GID')

    # setup common args
    kwargs = dict()
    kwargs['name'] = container_name
    kwargs['hostname'] = container_name
    kwargs['user'] = user
    kwargs['labels'] = labels

    client = DockerClient.from_env()
    # NOTE(review): swarm_mode and remove_container come from outside this view —
    # confirm they are defined before this is called.
    if swarm_mode:
        networks = list()
        networks.append(docker_network)
        mounts = list()
        mounts.append(
            "/usr/local/share/jupyter/kernels:/usr/local/share/jupyter/kernels:ro"
        )
        # DNS round-robin endpoint and no restarts: a dead kernel should not
        # be silently resurrected by swarm.
        endpoint_spec = EndpointSpec(mode='dnsrr')
        restart_policy = RestartPolicy(condition='none')
        # finish args setup
        kwargs['env'] = param_env
        kwargs['endpoint_spec'] = endpoint_spec
        kwargs['restart_policy'] = restart_policy
        kwargs['container_labels'] = labels
        kwargs['networks'] = networks
        kwargs['groups'] = [group, '100']  # gid 100 — presumably the 'users' group
        if param_env.get('KERNEL_WORKING_DIR'):
            kwargs['workdir'] = param_env.get('KERNEL_WORKING_DIR')
        # kwargs['mounts'] = mounts   # Enable if necessary
        # print("service args: {}".format(kwargs))  # useful for debug
        kernel_service = client.services.create(image_name, **kwargs)
    else:
        volumes = {
            '/usr/local/share/jupyter/kernels': {
                'bind': '/usr/local/share/jupyter/kernels',
                'mode': 'ro'
            }
        }
        # finish args setup
        kwargs['environment'] = param_env
        kwargs['remove'] = remove_container
        kwargs['network'] = docker_network
        kwargs['group_add'] = [group, '100']  # gid 100 — presumably the 'users' group
        kwargs['detach'] = True
        if param_env.get('KERNEL_WORKING_DIR'):
            kwargs['working_dir'] = param_env.get('KERNEL_WORKING_DIR')
        # kwargs['volumes'] = volumes  # Enable if necessary
        # print("container args: {}".format(kwargs))  # useful for debug
        kernel_container = client.containers.run(image_name, **kwargs)
# Copyright (c) Jupyter Development Team. # Distributed under the terms of the Modified BSD License. """Kernel managers that operate against a remote process.""" import os import logging from docker.client import DockerClient from docker.errors import NotFound from .container import ContainerProcessProxy # Debug logging level of docker produces too much noise - raise to info by default. logging.getLogger('urllib3.connectionpool').setLevel(os.environ.get('EG_DOCKER_LOG_LEVEL', logging.WARNING)) docker_network = os.environ.get('EG_DOCKER_NETWORK', 'bridge') client = DockerClient.from_env() class DockerSwarmProcessProxy(ContainerProcessProxy): def __init__(self, kernel_manager, proxy_config): super(DockerSwarmProcessProxy, self).__init__(kernel_manager, proxy_config) def launch_process(self, kernel_cmd, **kw): # Convey the network to the docker launch script kw['env']['EG_DOCKER_NETWORK'] = docker_network kw['env']['EG_DOCKER_MODE'] = 'swarm' return super(DockerSwarmProcessProxy, self).launch_process(kernel_cmd, **kw) def get_initial_states(self): return {'preparing', 'starting', 'running'}
class Resolver(Middleware):
    """DNS middleware that maps container names to IP addresses by seeding
    from running containers and then following the Docker event stream."""

    def __init__(self, docker='unix:///var/run/docker.sock', domain='docker'):
        # Imported lazily so the docker package is only needed when this
        # middleware is instantiated.
        from docker.client import DockerClient
        self._docker = DockerClient(docker, version='auto')
        self._domain = domain
        self._storage = Storage()
        self._lock = threading.Lock()
        # Background thread keeps the storage in sync with docker events.
        threading.Thread(group=None, target=self.listen).start()

    def listen(self):
        """Seed storage from running containers, then follow docker events."""
        self.running = True
        events = self._docker.events()
        # Initial snapshot of everything already running.
        for container in self._docker.containers.list():
            for rec in self._inspect(container):
                self._storage.append(rec.name, rec.addrs)
        # Event loop: add records on 'start', drop them on 'die'.
        for raw in events:
            if not self.running:
                break
            evt = json.loads(raw)
            if evt.get('Type', 'container') != 'container':
                continue
            cid = evt.get('id')
            if cid is None:
                continue
            status = evt.get('status')
            if status in {'start', 'die'}:
                container = self._docker.containers.get(cid)
                for rec in self._inspect(container):
                    if status == 'start':
                        self._storage.append(rec.name, rec.addrs)
                    else:
                        self._storage.remove(rec.name)

    def _inspect(self, container):
        """Build one Container record per DNS name derived for `container`.

        NOTE(review): returns None (not an iterable) for a nameless
        container — callers iterate the result, so that path would raise.
        """
        name = get(container.attrs, 'Name')
        if not container.name:
            return None
        id = get(container.attrs, 'Id')
        labels = get(container.attrs, 'Config', 'Labels')
        state = get(container.attrs, 'State', 'Running')
        networks = get(container.attrs, 'NetworkSettings', 'Networks')
        ip_addrs = self._get_addrs(networks)
        return [
            Container(id, name, state, ip_addrs)
            for name in self._get_names(name, labels)
        ]

    def _get_addrs(self, networks):
        # Drop networks reporting an empty IPAddress.
        return list(
            filter(None, [value['IPAddress'] for value in networks.values()]))

    def _get_names(self, name, labels):
        """Derive DNS names: sanitized container name plus docker-compose
        aliases, each suffixed with the configured domain, plus any
        com.dhns.domain label entries (';'-separated) taken verbatim."""
        names = [RE_VALIDNAME.sub('', name).rstrip('.')]
        labels = labels or {}
        instance = labels.get('com.docker.compose.container-number')
        service = labels.get('com.docker.compose.service')
        project = labels.get('com.docker.compose.project')
        if all((instance, service, project)):
            names.append('%s.%s.%s' % (instance, service, project))
            names.append('%s.%s' % (service, project))
        names = ['.'.join((name, self._domain)) for name in names]
        domain = labels.get('com.dhns.domain')
        if domain is not None:
            for name in domain.split(';'):
                names.append(name)
        return names

    def handle_dns_packet(self, query: DNSRecord, answer: DNSRecord):
        """Answer A/ANY queries from storage (TTL 60); returns self."""
        addrs = []
        if query.q.qtype in (QTYPE.A, QTYPE.ANY):
            addrs = self._storage.query(str(query.q.qname).rstrip('.'))
        if len(addrs):
            for addr in addrs:
                answer.add_answer(
                    RR(rname=query.q.qname,
                       rtype=QTYPE.A,
                       ttl=60,
                       rdata=RDMAP["A"](addr)))
        return self