def image_exists(self, image_name: str) -> bool:
    """Checks for the existence of the named image using the configured container runtime."""
    result = True
    absolute_image_name = self.get_absolute_image_name(image_name)
    t0 = time.time()
    if self.container_runtime == KernelImagePuller.DOCKER_CLIENT:
        try:
            DockerClient.from_env().images.get(absolute_image_name)
        except NotFound:
            result = False
    elif self.container_runtime == KernelImagePuller.CONTAINERD_CLIENT:
        argv = ["crictl", "-r", self.runtime_endpoint, "inspecti", "-q", absolute_image_name]
        result = self.execute_cmd(argv)
    else:  # invalid container runtime
        logger.error(f"Invalid container runtime detected: '{self.container_runtime}'!")
        result = False
    t1 = time.time()
    logger.debug(f"Checked existence of image '{image_name}' in {(t1 - t0):.3f} secs. exists = {result}")
    return result

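# The containerd branch above relies on an execute_cmd() helper that is not shown
# in this snippet. A minimal sketch of what such a helper might look like (shown
# here as a standalone function rather than a method; this is an assumption, not
# the project's actual implementation), returning True when the command exits 0:
import subprocess


def execute_cmd(argv: list) -> bool:
    """Run the given command and report whether it succeeded (exit code 0)."""
    try:
        # Capture output so crictl noise does not pollute the puller's own logs.
        completed = subprocess.run(argv, capture_output=True, check=False)
        return completed.returncode == 0
    except OSError:
        # e.g. the binary (crictl) is not installed on this node
        return False
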
def test_set_properties_with_self_update(hello_world_image):
    client = DockerClient.from_env()
    container_dict = dict(
        labels={"test": "12345"},
        hostname="hello-world-test",
        environment={
            "env1": "test1",
            "env2": "test2",
            "env3": "test3",
        },
        ports=[(4567, "tcp"), (9876, "tcp")],
    )
    container = client.api.create_container("hello-world:latest", **container_dict)
    new = set_properties(
        client.containers.get(container.get("Id")), hello_world_image, self_update=True
    )
    assert new.get("labels", dict()).get("test") == "12345"
    assert new.get("labels", dict()).get("dockupdater.updater_port") == "4567,tcp:9876,tcp"
    assert not new.get("ports")

    container_dict["labels"] = new.get("labels")
    del container_dict["ports"]
    container2 = client.api.create_container("hello-world:latest", **container_dict)
    new2 = set_properties(
        client.containers.get(container2.get("Id")), hello_world_image, self_update=True
    )
    assert new2.get("labels").get("test") == "12345"
    assert new2.get("labels").get("dockupdater.updater_port") is None
    assert all([(a, b) for a, b in new2.get("ports") if a in [4567, 9876]])

def launch_docker_kernel(kernel_id, response_addr, spark_context_init_mode):
    # Launches a containerized kernel.

    # Can't proceed if no image was specified.
    image_name = os.environ.get('KERNEL_IMAGE', None)
    if image_name is None:
        sys.exit("ERROR - KERNEL_IMAGE not found in environment - kernel launch terminating!")

    # Container name is composed of KERNEL_USERNAME and KERNEL_ID
    container_name = os.environ.get('KERNEL_USERNAME', '') + '-' + kernel_id

    # Determine network. If EG_DOCKER_NETWORK has not been propagated, fall back to 'bridge'...
    docker_network = os.environ.get('EG_DOCKER_NETWORK', 'bridge')

    # Build labels - these will be modelled similar to kubernetes: kernel_id, component, app, ...
    labels = dict()
    labels['kernel_id'] = kernel_id
    labels['component'] = 'kernel'
    labels['app'] = 'enterprise-gateway'

    # Capture env parameters...
    param_env = dict()
    param_env['EG_RESPONSE_ADDRESS'] = response_addr
    param_env['KERNEL_SPARK_CONTEXT_INIT_MODE'] = spark_context_init_mode

    # Since the environment is specific to the kernel (per env stanza of kernelspec, KERNEL_ and ENV_WHITELIST)
    # just add the env here.
    param_env.update(os.environ)
    param_env.pop('PATH')  # Let the image PATH be used. Since this is relative to images, we're probably safe.

    client = DockerClient.from_env()
    if swarm_mode:
        networks = list()
        networks.append(docker_network)
        mounts = list()
        mounts.append("/usr/local/share/jupyter/kernels:/usr/local/share/jupyter/kernels:ro")
        endpoint_spec = EndpointSpec(mode='dnsrr')
        restart_policy = RestartPolicy(condition='none')
        kernel_service = client.services.create(image_name,
                                                name=container_name,
                                                endpoint_spec=endpoint_spec,
                                                restart_policy=restart_policy,
                                                env=param_env,
                                                container_labels=labels,
                                                labels=labels,
                                                # mounts=mounts,  # Enable if necessary
                                                networks=networks)
    else:
        volumes = {'/usr/local/share/jupyter/kernels': {'bind': '/usr/local/share/jupyter/kernels', 'mode': 'ro'}}
        kernel_container = client.containers.run(image_name,
                                                 name=container_name,
                                                 hostname=container_name,
                                                 environment=param_env,
                                                 labels=labels,
                                                 remove=remove_container,
                                                 network=docker_network,
                                                 # volumes=volumes,  # Enable if necessary
                                                 detach=True)

def pull_image(img_name: str, registry_details: dict) -> None:
    """Pull the docker image locally and remove the registry info from the tag"""
    print(f"Pulling image from '{registry_details['registry']}'")
    client = DockerClient.from_env()
    client.login(**registry_details)
    img = client.images.pull(f"{registry_details['registry']}/{img_name}")
    if isinstance(img, Image):
        img.tag(img_name)
    else:
        img[0].tag(img_name)

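# A minimal usage sketch for pull_image(). The registry name and credentials below
# are placeholder values, not anything defined by the snippet above; docker-py's
# client.login() accepts registry, username, and password keyword arguments.
if __name__ == "__main__":
    registry_details = {
        "registry": "registry.example.com",  # hypothetical private registry
        "username": "ci-bot",                # hypothetical credentials
        "password": "s3cr3t",
    }
    # Pulls registry.example.com/myapp:latest, then re-tags it locally without the registry prefix.
    pull_image("myapp:latest", registry_details)
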
def setup_container():
    """Create and prepare a docker container and let Fabric point at it"""
    from docker.client import DockerClient

    image = 'centos:centos7'
    container_name = 'APP_installation_target'
    cli = DockerClient.from_env(version='auto', timeout=10)

    # Create and start a container using the newly created stage1 image
    cont = cli.containers.run(image=image, name=container_name, remove=False, detach=True, tty=True)
    success("Created container %s from %s" % (container_name, image))

    # Find out container IP, prepare container for APP installation
    try:
        host_ip = cli.api.inspect_container(cont.id)['NetworkSettings']['IPAddress']
        info("Updating and installing OpenSSH server in container")
        cont.exec_run('yum -y update')
        cont.exec_run('yum -y install openssh-server sudo')
        cont.exec_run('yum clean all')
        info('Configuring OpenSSH to allow connections to container')
        add_public_ssh_key(cont)
        cont.exec_run('sed -i "s/#PermitRootLogin yes/PermitRootLogin yes/" /etc/ssh/sshd_config')
        cont.exec_run('sed -i "s/#UseDNS yes/UseDNS no/" /etc/ssh/sshd_config')
        cont.exec_run('ssh-keygen -A')
        cont.exec_run('chown root.root /root/.ssh/authorized_keys')
        cont.exec_run('chmod 600 /root/.ssh/authorized_keys')
        cont.exec_run('chmod 700 /root/.ssh')
        info('Starting OpenSSH daemon in container')
        cont.exec_run('/usr/sbin/sshd -D', detach=True)
    except:
        failure("Error while preparing container for APP installation, cleaning up...")
        cont.stop()
        cont.remove()
        raise

    # From now on we connect to root@host_ip using our SSH key
    env.hosts = host_ip
    env.user = '******'
    if 'key_filename' not in env and 'key' not in env:
        env.key_filename = os.path.expanduser("~/.ssh/id_rsa")

    # Make sure we can connect via SSH to the newly started container
    # We disable the known hosts check since docker containers created at
    # different times might end up having the same IP assigned to them, and the
    # ssh known hosts check will fail
    with settings(disable_known_hosts=True):
        execute(check_ssh)

    success('Container successfully setup! APP installation will start now')
    return DockerContainerState(cli, cont)

def download_image(image_name: str) -> bool:
    """Downloads (pulls) the named image using the configured container runtime."""
    result = True
    absolute_image_name = get_absolute_image_name(image_name)
    t0 = time.time()
    if container_runtime == DOCKER_CLIENT:
        try:
            DockerClient.from_env().images.pull(absolute_image_name)
        except NotFound:
            result = False
    elif container_runtime == CONTAINERD_CLIENT:
        argv = ['crictl', '-r', runtime_endpoint, 'pull', absolute_image_name]
        result = execute_cmd(argv)
    else:  # invalid container runtime
        logger.error(f"Invalid container runtime detected: '{container_runtime}'!")
        result = False
    t1 = time.time()
    if result is True:
        logger.info(f"Pulled image '{image_name}' in {(t1 - t0):.3f} secs.")
    return result

def hello_world_container(hello_world_image):
    global HELLO_WORLD_CONTAINER
    if not HELLO_WORLD_CONTAINER:
        client = DockerClient.from_env()
        HELLO_WORLD_CONTAINER = client.containers.create(
            hello_world_image,
            labels={"test": "12345"},
            hostname="hello-world-test",
            environment={
                "env1": "test1",
                "env2": "test2",
                "env3": "test3",
            },
        )
    return HELLO_WORLD_CONTAINER

def hello_world_container_with_port():
    global HELLO_WORLD_CONTAINER_WITH_PORT
    if not HELLO_WORLD_CONTAINER_WITH_PORT:
        client = DockerClient.from_env()
        container = client.api.create_container(
            "hello-world:latest",
            labels={"test": "12345"},
            hostname="hello-world-test",
            environment={
                "env1": "test1",
                "env2": "test2",
                "env3": "test3",
            },
            ports=[(4567, "tcp"), (9876, "tcp")],
        )
        HELLO_WORLD_CONTAINER_WITH_PORT = client.containers.get(container.get("Id"))
    return HELLO_WORLD_CONTAINER_WITH_PORT

def client(self) -> DockerClient:
    """Get DockerClient"""
    try:
        client = None
        if self.local:
            client = DockerClient.from_env()
        else:
            client = DockerClient(
                base_url=self.url,
                tls=DockerInlineTLS(
                    verification_kp=self.tls_verification,
                    authentication_kp=self.tls_authentication,
                ).write(),
            )
        client.containers.list()
    except DockerException as exc:
        LOGGER.warning(exc)
        raise ServiceConnectionInvalid from exc
    return client

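# A brief usage sketch for the accessor above. The surrounding connection class is
# not shown, so only the attribute names used in the snippet are relied upon; the
# helper name and behavior here are assumptions for illustration.
def list_running_containers(connection) -> list:
    """Return the names of running containers, or an empty list if the host is unreachable."""
    try:
        docker_client = connection.client  # connectivity is validated via containers.list()
        return [c.name for c in docker_client.containers.list()]
    except ServiceConnectionInvalid:
        # The property already logged the underlying DockerException.
        return []
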
import logging
import os

import requests

from docker.client import DockerClient
from docker.errors import NotFound, APIError

gateway_host = os.getenv("KIP_GATEWAY_HOST", "http://localhost:8888")
num_pullers = int(os.getenv("KIP_NUM_PULLERS", "2"))
num_retries = int(os.getenv("KIP_NUM_RETRIES", "3"))
interval = int(os.getenv("KIP_INTERVAL", "300"))
log_level = os.getenv("KIP_LOG_LEVEL", "INFO")

POLICY_IF_NOT_PRESENT = "IfNotPresent"
POLICY_ALWAYS = "Always"
policies = (POLICY_IF_NOT_PRESENT, POLICY_ALWAYS)

policy = os.getenv("KIP_PULL_POLICY", POLICY_IF_NOT_PRESENT)

docker_client = DockerClient.from_env()

logging.basicConfig(format='[%(levelname)1.1s %(asctime)s %(name)s.%(threadName)s] %(message)s')
logger = logging.getLogger(__name__)  # module logger used below (name assumed)


def get_kernelspecs():
    """Fetches the set of kernelspecs from the gateway, returning a dict of configured kernel specs"""
    end_point = '{}/api/kernelspecs'.format(gateway_host)
    logger.info("Fetching kernelspecs from '{}' ...".format(end_point))
    resp = requests.get(end_point)
    if not resp.ok:
        raise requests.exceptions.HTTPError('Gateway server response: {}'.format(resp.status_code))
    return resp.json()


def fetch_image_names():
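    # NOTE: the original snippet is truncated at this point; the body below is a
    # hedged reconstruction. It assumes each kernelspec's metadata carries the
    # kernel image name under a process_proxy.config entry, which is not confirmed
    # by the snippet above.
    """Returns the set of image names configured across the gateway's kernelspecs (assumed structure)."""
    images = set()
    kernelspecs = get_kernelspecs().get("kernelspecs", {})
    for spec in kernelspecs.values():
        metadata = spec.get("spec", {}).get("metadata", {})
        image_name = metadata.get("process_proxy", {}).get("config", {}).get("image_name")
        if image_name:
            images.add(image_name)
    return images
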
def launch_docker_kernel(kernel_id, response_addr, spark_context_init_mode):
    # Launches a containerized kernel.

    # Can't proceed if no image was specified.
    image_name = os.environ.get("KERNEL_IMAGE", None)
    if image_name is None:
        sys.exit("ERROR - KERNEL_IMAGE not found in environment - kernel launch terminating!")

    # Container name is composed of KERNEL_USERNAME and KERNEL_ID
    container_name = os.environ.get("KERNEL_USERNAME", "") + "-" + kernel_id

    # Determine network. If EG_DOCKER_NETWORK has not been propagated,
    # fall back to 'bridge'...
    docker_network = os.environ.get("EG_DOCKER_NETWORK", "bridge")

    # Build labels - these will be modelled similar to kubernetes:
    # kernel_id, component, app, ...
    labels = dict()
    labels["kernel_id"] = kernel_id
    labels["component"] = "kernel"
    labels["app"] = "enterprise-gateway"

    # Capture env parameters...
    param_env = dict()
    param_env["EG_RESPONSE_ADDRESS"] = response_addr
    param_env["KERNEL_SPARK_CONTEXT_INIT_MODE"] = spark_context_init_mode

    # Since the environment is specific to the kernel (per env stanza of
    # kernelspec, KERNEL_ and ENV_WHITELIST) just add the env here.
    param_env.update(os.environ)
    # Let the image PATH be used. Since this is relative to images,
    # we're probably safe.
    param_env.pop("PATH")

    # setup common args
    kwargs = dict()
    kwargs["name"] = container_name
    kwargs["labels"] = labels

    client = DockerClient.from_env()

    print("Started Jupyter kernel in normal docker mode")

    # Note: seems to me that the kernels don't need to be mounted on a
    # container that runs a single kernel; mount the kernel working
    # directory from EG to kernel container

    # finish args setup
    kwargs["hostname"] = container_name
    kwargs["environment"] = param_env
    kwargs["remove"] = remove_container
    kwargs["network"] = docker_network
    kwargs["group_add"] = [param_env.get("ORCHEST_HOST_GID")]
    kwargs["detach"] = True
    if param_env.get("KERNEL_WORKING_DIR"):
        kwargs["working_dir"] = param_env.get("KERNEL_WORKING_DIR")
    # print("container args: {}".format(kwargs))  # useful for debug

    orchest_mounts = get_orchest_mounts(
        project_dir=_config.PROJECT_DIR,
        host_user_dir=os.path.join(
            param_env.get("ORCHEST_HOST_PROJECT_DIR"), os.pardir, os.pardir, "data"
        ),
        host_project_dir=param_env.get("ORCHEST_HOST_PROJECT_DIR"),
    )
    volume_source, volume_spec = get_volume_mount(
        param_env.get("ORCHEST_PIPELINE_UUID"),
        param_env.get("ORCHEST_PROJECT_UUID"),
    )
    orchest_mounts[volume_source] = volume_spec

    # Extract environment_uuid from the image name (last 36 characters)
    extracted_environment_uuid = image_name[-36:]

    device_requests = get_device_requests(
        extracted_environment_uuid, param_env.get("ORCHEST_PROJECT_UUID")
    )

    client.containers.run(
        image_name, volumes=orchest_mounts, device_requests=device_requests, **kwargs
    )

def __init__(self):
    self.sclient = DockerClient.from_env()  # sync
    self._aclient: Optional[aiodocker.Docker] = None  # async

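# The async client above is only declared, not created. A possible companion
# accessor (an assumption for illustration, not part of the original snippet)
# that constructs the aiodocker client lazily on first use:
@property
def aclient(self) -> aiodocker.Docker:
    """Lazily construct the asynchronous Docker client the first time it is needed."""
    if self._aclient is None:
        # aiodocker.Docker() connects to the default Docker host/socket
        self._aclient = aiodocker.Docker()
    return self._aclient
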
def launch_docker_kernel(kernel_id, response_addr, spark_context_init_mode):
    # Launches a containerized kernel.

    # Can't proceed if no image was specified.
    image_name = os.environ.get("KERNEL_IMAGE", None)
    if image_name is None:
        sys.exit("ERROR - KERNEL_IMAGE not found in environment - kernel launch terminating!")

    # Container name is composed of KERNEL_USERNAME and KERNEL_ID
    container_name = os.environ.get("KERNEL_USERNAME", "") + "-" + kernel_id

    # Determine network. If EG_DOCKER_NETWORK has not been propagated, fall back to 'bridge'...
    docker_network = os.environ.get("EG_DOCKER_NETWORK", "bridge")

    # Build labels - these will be modelled similar to kubernetes: kernel_id, component, app, ...
    labels = dict()
    labels["kernel_id"] = kernel_id
    labels["component"] = "kernel"
    labels["app"] = "enterprise-gateway"

    # Capture env parameters...
    param_env = dict()
    param_env["EG_RESPONSE_ADDRESS"] = response_addr
    param_env["KERNEL_SPARK_CONTEXT_INIT_MODE"] = spark_context_init_mode

    # Since the environment is specific to the kernel (per env stanza of kernelspec, KERNEL_ and ENV_WHITELIST)
    # just add the env here.
    param_env.update(os.environ)
    # Let the image PATH be used. Since this is relative to images, we're probably safe.
    param_env.pop("PATH")

    user = param_env.get("KERNEL_UID")
    group = param_env.get("KERNEL_GID")

    # setup common args
    kwargs = dict()
    kwargs["name"] = container_name
    kwargs["user"] = user
    kwargs["labels"] = labels

    client = DockerClient.from_env()
    if swarm_mode:
        print("Started Jupyter kernel in swarm-mode")
        networks = list()
        networks.append(docker_network)
        mounts = list()
        mounts.append("/usr/local/share/jupyter/kernels:/usr/local/share/jupyter/kernels:ro")
        endpoint_spec = EndpointSpec(mode="dnsrr")
        restart_policy = RestartPolicy(condition="none")

        # finish args setup
        kwargs["env"] = param_env
        kwargs["endpoint_spec"] = endpoint_spec
        kwargs["restart_policy"] = restart_policy
        kwargs["container_labels"] = labels
        kwargs["networks"] = networks
        kwargs["groups"] = [group, "100"]
        if param_env.get("KERNEL_WORKING_DIR"):
            kwargs["workdir"] = param_env.get("KERNEL_WORKING_DIR")
        # kwargs['mounts'] = mounts  # Enable if necessary
        # print("service args: {}".format(kwargs))  # useful for debug
        kernel_service = client.services.create(image_name, **kwargs)
    else:
        print("Started Jupyter kernel in normal docker mode")

        # Note: seems to me that the kernels don't need to be mounted on a container that runs a single kernel;
        # mount the kernel working directory from EG to kernel container

        # finish args setup
        kwargs["hostname"] = container_name
        kwargs["environment"] = param_env
        kwargs["remove"] = remove_container
        kwargs["network"] = docker_network
        kwargs["group_add"] = [group, "100"]  # NOTE: "group_add" for newer versions of docker
        kwargs["detach"] = True
        if param_env.get("KERNEL_WORKING_DIR"):
            kwargs["working_dir"] = param_env.get("KERNEL_WORKING_DIR")
        # print("container args: {}".format(kwargs))  # useful for debug

        orchest_mounts = get_orchest_mounts(
            project_dir=param_env.get("KERNEL_WORKING_DIR"),
            host_project_dir=param_env.get("ORCHEST_HOST_PROJECT_DIR"),
        )
        orchest_mounts += [
            get_volume_mount(
                param_env.get("ORCHEST_PIPELINE_UUID"),
                param_env.get("ORCHEST_PROJECT_UUID"),
            )
        ]

        # Extract environment_uuid from the image name (last 36 characters)
        extracted_environment_uuid = image_name[-36:]

        device_requests = get_device_requests(
            extracted_environment_uuid, param_env.get("ORCHEST_PROJECT_UUID")
        )

        kernel_container = client.containers.run(
            image_name, mounts=orchest_mounts, device_requests=device_requests, **kwargs
        )

def launch_docker_kernel(kernel_id, port_range, response_addr, public_key, spark_context_init_mode):
    # Launches a containerized kernel.

    # Can't proceed if no image was specified.
    image_name = os.environ.get('KERNEL_IMAGE', None)
    if image_name is None:
        sys.exit("ERROR - KERNEL_IMAGE not found in environment - kernel launch terminating!")

    # Container name is composed of KERNEL_USERNAME and KERNEL_ID
    container_name = os.environ.get('KERNEL_USERNAME', '') + '-' + kernel_id

    # Determine network. If EG_DOCKER_NETWORK has not been propagated, fall back to 'bridge'...
    docker_network = os.environ.get('DOCKER_NETWORK', os.environ.get('EG_DOCKER_NETWORK', 'bridge'))

    # Build labels - these will be modelled similar to kubernetes: kernel_id, component, app, ...
    labels = dict()
    labels['kernel_id'] = kernel_id
    labels['component'] = 'kernel'
    labels['app'] = 'enterprise-gateway'

    # Capture env parameters...
    param_env = dict()
    param_env['PORT_RANGE'] = port_range
    param_env['PUBLIC_KEY'] = public_key
    param_env['RESPONSE_ADDRESS'] = response_addr
    param_env['KERNEL_SPARK_CONTEXT_INIT_MODE'] = spark_context_init_mode

    # Since the environment is specific to the kernel (per env stanza of kernelspec, KERNEL_ and ENV_WHITELIST)
    # just add the env here.
    param_env.update(os.environ)
    # Let the image PATH be used. Since this is relative to images, we're probably safe.
    param_env.pop('PATH')

    user = param_env.get('KERNEL_UID')
    group = param_env.get('KERNEL_GID')

    # setup common args
    kwargs = dict()
    kwargs['name'] = container_name
    kwargs['hostname'] = container_name
    kwargs['user'] = user
    kwargs['labels'] = labels

    client = DockerClient.from_env()
    if swarm_mode:
        networks = list()
        networks.append(docker_network)
        mounts = list()
        mounts.append("/usr/local/share/jupyter/kernels:/usr/local/share/jupyter/kernels:ro")
        endpoint_spec = EndpointSpec(mode='dnsrr')
        restart_policy = RestartPolicy(condition='none')

        # finish args setup
        kwargs['env'] = param_env
        kwargs['endpoint_spec'] = endpoint_spec
        kwargs['restart_policy'] = restart_policy
        kwargs['container_labels'] = labels
        kwargs['networks'] = networks
        kwargs['groups'] = [group, '100']
        if param_env.get('KERNEL_WORKING_DIR'):
            kwargs['workdir'] = param_env.get('KERNEL_WORKING_DIR')
        # kwargs['mounts'] = mounts  # Enable if necessary
        # print("service args: {}".format(kwargs))  # useful for debug
        kernel_service = client.services.create(image_name, **kwargs)
    else:
        volumes = {
            '/usr/local/share/jupyter/kernels': {
                'bind': '/usr/local/share/jupyter/kernels',
                'mode': 'ro'
            }
        }

        # finish args setup
        kwargs['environment'] = param_env
        kwargs['remove'] = remove_container
        kwargs['network'] = docker_network
        kwargs['group_add'] = [group, '100']
        kwargs['detach'] = True
        if param_env.get('KERNEL_WORKING_DIR'):
            kwargs['working_dir'] = param_env.get('KERNEL_WORKING_DIR')
        # kwargs['volumes'] = volumes  # Enable if necessary
        # print("container args: {}".format(kwargs))  # useful for debug
        kernel_container = client.containers.run(image_name, **kwargs)

def launch_docker_kernel(kernel_id, port_range, response_addr, public_key, spark_context_init_mode):
    # Launches a containerized kernel.

    # Can't proceed if no image was specified.
    image_name = os.environ.get("KERNEL_IMAGE", None)
    if image_name is None:
        sys.exit("ERROR - KERNEL_IMAGE not found in environment - kernel launch terminating!")

    # Container name is composed of KERNEL_USERNAME and KERNEL_ID
    container_name = os.environ.get("KERNEL_USERNAME", "") + "-" + kernel_id

    # Determine network. If EG_DOCKER_NETWORK has not been propagated, fall back to 'bridge'...
    docker_network = os.environ.get("DOCKER_NETWORK", os.environ.get("EG_DOCKER_NETWORK", "bridge"))

    # Build labels - these will be modelled similar to kubernetes: kernel_id, component, app, ...
    labels = dict()
    labels["kernel_id"] = kernel_id
    labels["component"] = "kernel"
    labels["app"] = "enterprise-gateway"

    # Capture env parameters...
    param_env = dict()
    param_env["PORT_RANGE"] = port_range
    param_env["PUBLIC_KEY"] = public_key
    param_env["RESPONSE_ADDRESS"] = response_addr
    param_env["KERNEL_SPARK_CONTEXT_INIT_MODE"] = spark_context_init_mode

    # Since the environment is specific to the kernel (per env stanza of kernelspec, KERNEL_ and EG_CLIENT_ENVS)
    # just add the env here.
    param_env.update(os.environ)
    # Let the image PATH be used. Since this is relative to images, we're probably safe.
    param_env.pop("PATH")

    user = param_env.get("KERNEL_UID")
    group = param_env.get("KERNEL_GID")

    # setup common args
    kwargs = dict()
    kwargs["name"] = container_name
    kwargs["hostname"] = container_name
    kwargs["user"] = user
    kwargs["labels"] = labels

    client = DockerClient.from_env()
    if swarm_mode:
        networks = list()
        networks.append(docker_network)
        # mounts = list()  # Enable if necessary
        # mounts.append("/usr/local/share/jupyter/kernels:/usr/local/share/jupyter/kernels:ro")
        endpoint_spec = EndpointSpec(mode="dnsrr")
        restart_policy = RestartPolicy(condition="none")

        # finish args setup
        kwargs["env"] = param_env
        kwargs["endpoint_spec"] = endpoint_spec
        kwargs["restart_policy"] = restart_policy
        kwargs["container_labels"] = labels
        kwargs["networks"] = networks
        kwargs["groups"] = [group, "100"]
        if param_env.get("KERNEL_WORKING_DIR"):
            kwargs["workdir"] = param_env.get("KERNEL_WORKING_DIR")
        # kwargs['mounts'] = mounts  # Enable if necessary
        # print("service args: {}".format(kwargs))  # useful for debug
        client.services.create(image_name, **kwargs)  # noqa
    else:
        # volumes = {  # Enable if necessary
        #     "/usr/local/share/jupyter/kernels": {
        #         "bind": "/usr/local/share/jupyter/kernels",
        #         "mode": "ro",
        #     }
        # }

        # finish args setup
        kwargs["environment"] = param_env
        kwargs["remove"] = remove_container
        kwargs["network"] = docker_network
        kwargs["group_add"] = [group, "100"]
        kwargs["detach"] = True
        if param_env.get("KERNEL_WORKING_DIR"):
            kwargs["working_dir"] = param_env.get("KERNEL_WORKING_DIR")
        # kwargs['volumes'] = volumes  # Enable if necessary
        # print("container args: {}".format(kwargs))  # useful for debug
        client.containers.run(image_name, **kwargs)  # noqa

def setup_container():
    """Create and prepare a docker container and let Fabric point at it"""
    from docker.client import DockerClient

    image = 'library/centos:7'
    container_name = 'APP_installation_target'
    info("Creating docker container based on {0}".format(image))
    info("Please stand-by....")
    cli = DockerClient.from_env(version='auto', timeout=60)

    # Create and start a container using the newly created stage1 image
    cont = cli.containers.run(image=image, name=container_name, remove=False, detach=True, tty=True,
                              ports={22: 2222})
    success("Created container %s from %s" % (container_name, image))

    # Find out container IP, prepare container for APP installation
    try:
        host_ip = cli.api.inspect_container(cont.id)['NetworkSettings']['IPAddress']
        # info("Updating and installing OpenSSH server in container")
        # execOutput(cont, 'yum -y update')
        info("Installing OpenSSH server...")
        execOutput(cont, 'yum -y install openssh-server sudo')
        info("Installing OpenSSH client...")
        execOutput(cont, 'yum -y install openssh-clients sudo')
        info("Installing initscripts...")
        execOutput(cont, 'yum -y install initscripts sudo')
        info("Cleaning up...")
        execOutput(cont, 'yum clean all')
        info('Configuring OpenSSH to allow connections to container')
        add_public_ssh_key(cont)
        execOutput(cont, 'sed -i "s/#PermitRootLogin yes/PermitRootLogin yes/" /etc/ssh/sshd_config')
        execOutput(cont, 'sed -i "s/#UseDNS yes/UseDNS no/" /etc/ssh/sshd_config')
        execOutput(cont, 'ssh-keygen -A')
        execOutput(cont, 'mkdir -p /root/.ssh')
        execOutput(cont, 'touch /root/.ssh/authorized_keys')
        execOutput(cont, 'chown root.root /root/.ssh/authorized_keys')
        execOutput(cont, 'chmod 600 /root/.ssh/authorized_keys')
        execOutput(cont, 'chmod 700 /root/.ssh')
        execOutput(cont, 'rm /run/nologin')
        info('Starting OpenSSH daemon in container')
        execOutput(cont, '/usr/sbin/sshd -D', detach=True)
    except:
        failure("Error while preparing container for APP installation, cleaning up...")
        cont.stop()
        cont.remove()
        raise

    # From now on we connect to root@host_ip using our SSH key
    env.hosts = ['localhost']
    env.docker = True
    env.port = 2222
    env.user = '******'
    if 'key_filename' not in env and 'key' not in env:
        env.key_filename = os.path.expanduser("~/.ssh/id_rsa")

    # Make sure we can connect via SSH to the newly started container
    # We disable the known hosts check since docker containers created at
    # different times might end up having the same IP assigned to them, and the
    # ssh known hosts check will fail
    #
    # NOTE: This does NOT work on a Mac, because the docker0 network is not
    # available!
    with settings(disable_known_hosts=True):
        execute(check_ssh)

    success('Container successfully setup! {0} installation will start now'.format(APP_name()))
    return DockerContainerState(cli, cont)

def launch_docker_kernel(connection_file, response_addr, spark_context_init_mode):
    # Launches a containerized kernel.

    # Can't proceed if no image was specified.
    image_name = os.environ.get('KERNEL_IMAGE', None)
    if image_name is None:
        sys.exit("ERROR - KERNEL_IMAGE not found in environment - kernel launch terminating!")

    # Container name is composed of KERNEL_USERNAME and KERNEL_ID
    kernel_id = os.environ['KERNEL_ID']
    container_name = os.environ.get('KERNEL_USERNAME', '') + '-' + kernel_id

    # Determine network. If EG_DOCKER_NETWORK has not been propagated, fall back to 'bridge'...
    docker_network = os.environ.get('EG_DOCKER_NETWORK', 'bridge')

    # Build labels - these will be modelled similar to kubernetes: kernel_id, component, app, ...
    labels = dict()
    labels['kernel_id'] = kernel_id
    labels['component'] = 'kernel'
    labels['app'] = 'enterprise-gateway'

    # Capture env parameters - including the parameters to the actual kernel launcher in the image...
    param_env = dict()
    # Since jupyter lower cases the kernel directory as the kernel-name, we need to capture its case-sensitive
    # value since this is used to locate the kernel launch script within the image.
    param_env['KERNEL_NAME'] = os.path.basename(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    param_env['EG_RESPONSE_ADDRESS'] = response_addr
    param_env['KERNEL_CONNECTION_FILENAME'] = connection_file
    param_env['KERNEL_SPARK_CONTEXT_INIT_MODE'] = spark_context_init_mode

    # Since the environment is specific to the kernel (per env stanza of kernelspec, KERNEL_ and ENV_WHITELIST)
    # just add the env here.
    param_env.update(os.environ)
    param_env.pop('PATH')  # Let the image PATH be used. Since this is relative to images, we're probably safe.

    client = DockerClient.from_env()
    if swarm_mode:
        networks = list()
        networks.append(docker_network)
        mounts = list()
        mounts.append("/usr/local/share/jupyter/kernels:/usr/local/share/jupyter/kernels:ro")
        endpoint_spec = EndpointSpec(mode='dnsrr')
        restart_policy = RestartPolicy(condition='none')
        kernel_service = client.services.create(image_name,
                                                command='/etc/bootstrap-kernel.sh',
                                                name=container_name,
                                                endpoint_spec=endpoint_spec,
                                                restart_policy=restart_policy,
                                                env=param_env,
                                                container_labels=labels,
                                                labels=labels,
                                                # mounts=mounts,  # Enable if necessary
                                                networks=networks)
    else:
        volumes = {'/usr/local/share/jupyter/kernels': {'bind': '/usr/local/share/jupyter/kernels', 'mode': 'ro'}}
        kernel_container = client.containers.run(image_name,
                                                 command='/etc/bootstrap-kernel.sh',
                                                 name=container_name,
                                                 hostname=container_name,
                                                 environment=param_env,
                                                 labels=labels,
                                                 remove=remove_container,
                                                 network=docker_network,
                                                 # volumes=volumes,  # Enable if necessary
                                                 detach=True)

def hello_world_image():
    global HELLO_WORLD_IMAGE
    if not HELLO_WORLD_IMAGE:
        client = DockerClient.from_env()
        HELLO_WORLD_IMAGE = client.images.pull("hello-world:latest")
    return HELLO_WORLD_IMAGE

# Copyright (c) Jupyter Development Team.
# Distributed under the terms of the Modified BSD License.
"""Kernel managers that operate against a remote process."""

import os
import logging

from docker.client import DockerClient
from docker.errors import NotFound

from .container import ContainerProcessProxy

# Debug logging level of docker produces too much noise - raise to warning by default.
logging.getLogger('urllib3.connectionpool').setLevel(os.environ.get('EG_DOCKER_LOG_LEVEL', logging.WARNING))

docker_network = os.environ.get('EG_DOCKER_NETWORK', 'bridge')

client = DockerClient.from_env()


class DockerSwarmProcessProxy(ContainerProcessProxy):
    def __init__(self, kernel_manager, proxy_config):
        super(DockerSwarmProcessProxy, self).__init__(kernel_manager, proxy_config)

    def launch_process(self, kernel_cmd, **kw):
        # Convey the network to the docker launch script
        kw['env']['EG_DOCKER_NETWORK'] = docker_network
        kw['env']['EG_DOCKER_MODE'] = 'swarm'
        return super(DockerSwarmProcessProxy, self).launch_process(kernel_cmd, **kw)

    def get_initial_states(self):
        return {'preparing', 'starting', 'running'}

def launch_docker_kernel(kernel_id, response_addr, spark_context_init_mode):
    # Launches a containerized kernel.

    # Can't proceed if no image was specified.
    image_name = os.environ.get('KERNEL_IMAGE', None)
    if image_name is None:
        sys.exit("ERROR - KERNEL_IMAGE not found in environment - kernel launch terminating!")

    # Container name is composed of KERNEL_USERNAME and KERNEL_ID
    container_name = os.environ.get('KERNEL_USERNAME', '') + '-' + kernel_id

    # Determine network. If EG_DOCKER_NETWORK has not been propagated, fall back to 'bridge'...
    docker_network = os.environ.get('EG_DOCKER_NETWORK', 'bridge')

    # Build labels - these will be modelled similar to kubernetes: kernel_id, component, app, ...
    labels = dict()
    labels['kernel_id'] = kernel_id
    labels['component'] = 'kernel'
    labels['app'] = 'enterprise-gateway'

    # Capture env parameters...
    param_env = dict()
    param_env['EG_RESPONSE_ADDRESS'] = response_addr
    param_env['KERNEL_SPARK_CONTEXT_INIT_MODE'] = spark_context_init_mode

    # Since the environment is specific to the kernel (per env stanza of kernelspec, KERNEL_ and ENV_WHITELIST)
    # just add the env here.
    param_env.update(os.environ)
    # Let the image PATH be used. Since this is relative to images, we're probably safe.
    param_env.pop('PATH')

    user = param_env.get('KERNEL_UID')
    group = param_env.get('KERNEL_GID')

    # setup common args
    kwargs = dict()
    kwargs['name'] = container_name
    kwargs['user'] = user
    kwargs['labels'] = labels

    client = DockerClient.from_env()
    if swarm_mode:
        print("Started Jupyter kernel in swarm-mode")
        networks = list()
        networks.append(docker_network)
        mounts = list()
        mounts.append("/usr/local/share/jupyter/kernels:/usr/local/share/jupyter/kernels:ro")
        endpoint_spec = EndpointSpec(mode='dnsrr')
        restart_policy = RestartPolicy(condition='none')

        # finish args setup
        kwargs['env'] = param_env
        kwargs['endpoint_spec'] = endpoint_spec
        kwargs['restart_policy'] = restart_policy
        kwargs['container_labels'] = labels
        kwargs['networks'] = networks
        kwargs['groups'] = [group, '100']
        if param_env.get('KERNEL_WORKING_DIR'):
            kwargs['workdir'] = param_env.get('KERNEL_WORKING_DIR')
        # kwargs['mounts'] = mounts  # Enable if necessary
        # print("service args: {}".format(kwargs))  # useful for debug
        kernel_service = client.services.create(image_name, **kwargs)
    else:
        print("Started Jupyter kernel in normal docker mode")

        # Note: seems to me that the kernels don't need to be mounted on a container that runs a single kernel;
        # mount the kernel working directory from EG to kernel container
        # TODO: mount pipeline directory

        # finish args setup
        kwargs['hostname'] = container_name
        kwargs['environment'] = param_env
        kwargs['remove'] = remove_container
        kwargs['network'] = docker_network
        kwargs['group_add'] = [group, '100']  # NOTE: "group_add" for newer versions of docker
        kwargs['detach'] = True
        if param_env.get('KERNEL_WORKING_DIR'):
            kwargs['working_dir'] = param_env.get('KERNEL_WORKING_DIR')

        pipeline_dir_mount = Mount(target=param_env.get('KERNEL_WORKING_DIR'),
                                   source=param_env.get('HOST_PIPELINE_DIR'),
                                   type='bind')
        mounts = [pipeline_dir_mount]

        # dynamically mount host-dir sources
        dynamic_mounts = get_dynamic_mounts(param_env)
        mounts = mounts + dynamic_mounts

        # print("container args: {}".format(kwargs))  # useful for debug
        kernel_container = client.containers.run(image_name, mounts=mounts, **kwargs)