def create_object(self):
    """Start the single-user server as a Docker Swarm service.

    Builds the container, resources, placement, task and endpoint specs
    from the spawner configuration — each one overridable through the
    corresponding ``extra_*`` dict — and submits them via the docker
    ``create_service`` API.

    Returns:
        The ``create_service`` API response (yielded through the
        coroutine machinery; this method is a generator-based coroutine).
    """
    container_kwargs = dict(
        image=self.image,
        env=self.get_env(),
        args=(yield self.get_command()),
        mounts=self.mounts,
    )
    container_kwargs.update(self.extra_container_spec)
    container_spec = ContainerSpec(**container_kwargs)

    # Ensure the host-side source of the work-directory bind mount exists
    # before the service starts.  exist_ok=True avoids the race between a
    # separate exists() check and makedirs() when several spawns run
    # concurrently (the original LBYL check could raise FileExistsError).
    for mount in self.mounts:
        if mount['Target'] == '/home/jovyan/work':
            os.makedirs(mount['Source'], exist_ok=True)

    resources_kwargs = dict(
        mem_limit=self.mem_limit,
        mem_reservation=self.mem_guarantee,
        # Docker expresses CPU limits in NanoCPUs, hence the 1e9 scaling.
        cpu_limit=int(self.cpu_limit * 1e9) if self.cpu_limit else None,
        cpu_reservation=int(self.cpu_guarantee * 1e9)
        if self.cpu_guarantee else None,
    )
    resources_kwargs.update(self.extra_resources_spec)
    resources_spec = Resources(**resources_kwargs)

    placement_kwargs = dict(
        constraints=None,
        preferences=None,
        platforms=None,
    )
    placement_kwargs.update(self.extra_placement_spec)
    placement_spec = Placement(**placement_kwargs)

    task_kwargs = dict(
        container_spec=container_spec,
        resources=resources_spec,
        networks=[self.network_name] if self.network_name else [],
        placement=placement_spec,
    )
    task_kwargs.update(self.extra_task_spec)
    task_spec = TaskTemplate(**task_kwargs)

    endpoint_kwargs = {}
    if not self.use_internal_ip:
        # Publish the notebook port; a key of None lets Docker choose
        # the published host port.
        endpoint_kwargs["ports"] = {None: (self.port, "tcp")}
    endpoint_kwargs.update(self.extra_endpoint_spec)
    endpoint_spec = EndpointSpec(**endpoint_kwargs)

    create_kwargs = dict(
        task_template=task_spec,
        endpoint_spec=endpoint_spec,
        name=self.service_name,
    )
    create_kwargs.update(self.extra_create_kwargs)

    return (yield self.docker("create_service", **create_kwargs))
def create_object(self):
    """Start the single-user server in a docker service."""
    # Assemble the container spec; extra_container_spec wins on conflicts.
    env = self.get_env()
    command = yield self.get_command()
    container_options = {
        "image": self.image,
        "env": env,
        "args": command,
        "mounts": self.mounts,
    }
    container_options.update(self.extra_container_spec)
    spec_container = ContainerSpec(**container_options)

    # Resource limits/reservations; CPU values are given in NanoCPUs.
    resource_options = {
        "mem_limit": self.mem_limit,
        "mem_reservation": self.mem_guarantee,
        "cpu_limit": int(self.cpu_limit * 1e9) if self.cpu_limit else None,
        "cpu_reservation": int(self.cpu_guarantee * 1e9)
        if self.cpu_guarantee else None,
    }
    resource_options.update(self.extra_resources_spec)
    spec_resources = Resources(**resource_options)

    placement_options = {
        "constraints": None,
        "preferences": None,
        "platforms": None,
    }
    placement_options.update(self.extra_placement_spec)
    spec_placement = Placement(**placement_options)

    task_options = {
        "container_spec": spec_container,
        "resources": spec_resources,
        "networks": [self.network_name] if self.network_name else [],
        "placement": spec_placement,
    }
    task_options.update(self.extra_task_spec)
    template = TaskTemplate(**task_options)

    endpoint_options = {}
    if not self.use_internal_ip:
        endpoint_options["ports"] = {None: (self.port, "tcp")}
    endpoint_options.update(self.extra_endpoint_spec)
    spec_endpoint = EndpointSpec(**endpoint_options)

    service_options = dict(
        task_template=template,
        endpoint_spec=spec_endpoint,
        name=self.service_name,
    )
    service_options.update(self.extra_create_kwargs)
    created = yield self.docker("create_service", **service_options)

    # Chenglu added: calling inspect_service immediately after
    # create_service may raise a "Service not found" error, so give the
    # swarm a moment to register the new service before returning.
    yield gen.sleep(1)
    self.log.debug("Docker >>> create_service with %s",
                   json.dumps(service_options))
    return created
def _get_create_service_kwargs(func_name, kwargs):
    """Partition a flat kwargs dict into create/update-service arguments.

    Destructively pops every recognised key out of ``kwargs`` and routes
    it to the create-service kwargs, the container spec, or the task
    template.  Any key left over afterwards is unexpected and raises.
    """

    def take(allowed):
        # Pop every key of ``kwargs`` that belongs to ``allowed``,
        # iterating over a snapshot so popping is safe.
        return {k: kwargs.pop(k) for k in list(kwargs) if k in allowed}

    # Things which can be copied over directly, in priority order.
    create_kwargs = take(CREATE_SERVICE_KWARGS)
    container_spec_kwargs = take(CONTAINER_SPEC_KWARGS)
    task_template_kwargs = take(TASK_TEMPLATE_KWARGS)

    if 'container_labels' in kwargs:
        container_spec_kwargs['labels'] = kwargs.pop('container_labels')

    task_template_kwargs['placement'] = Placement(**take(PLACEMENT_KWARGS))

    if 'log_driver' in kwargs:
        task_template_kwargs['log_driver'] = {
            'Name': kwargs.pop('log_driver'),
            'Options': kwargs.pop('log_driver_options', {})
        }

    if func_name == 'update':
        if 'force_update' in kwargs:
            task_template_kwargs['force_update'] = kwargs.pop('force_update')

        # fetch the current spec by default if updating the service
        # through the model
        create_kwargs['fetch_current_spec'] = kwargs.pop(
            'fetch_current_spec', True
        )

    # All kwargs should have been consumed by this point, so raise
    # error if any are left
    if kwargs:
        raise create_unexpected_kwargs_error(func_name, kwargs)

    task_template_kwargs['container_spec'] = ContainerSpec(
        **container_spec_kwargs
    )
    create_kwargs['task_template'] = TaskTemplate(**task_template_kwargs)
    return create_kwargs
r = resources(default_config) c.CoursewareUserSpawner.default_resources = r restart_max_attempts = int(os.environ.get('SPAWNER_RESTART_MAX_ATTEMPTS', '10')) extra_task_spec = { 'restart_policy': RestartPolicy( condition='any', delay=5000000000, max_attempts=restart_max_attempts ) } if 'SPAWNER_CONSTRAINTS' in os.environ: placement_constraints = os.environ['SPAWNER_CONSTRAINTS'] extra_task_spec.update({ 'placement': Placement( constraints=[x.strip() for x in placement_constraints.split(';')] ) }) c.SwarmSpawner.extra_task_spec = extra_task_spec if 'JUPYTERHUB_SINGLEUSER_APP' in os.environ: c.Spawner.environment = { 'JUPYTERHUB_SINGLEUSER_APP': os.environ['JUPYTERHUB_SINGLEUSER_APP'] } # DB pg_user = os.environ['POSTGRES_ENV_JPY_PSQL_USER'] pg_pass = os.environ['POSTGRES_ENV_JPY_PSQL_PASSWORD'] pg_host = os.environ['POSTGRES_PORT_5432_TCP_ADDR'] c.JupyterHub.db_url = 'postgresql://{}:{}@{}:5432/jupyterhub'.format( pg_user,
def create_object(self):
    """Start the single-user server in a docker service."""
    # Container configuration, overridable via extra_container_spec.
    env = self.get_env()
    command = yield self.get_command()
    container_options = {
        "image": self.image,
        "env": env,
        "args": command,
        "mounts": self.mounts,
    }
    container_options.update(self.extra_container_spec)
    spec_container = ContainerSpec(**container_options)

    # CPU limits/reservations are expressed in NanoCPUs (1e9 per CPU).
    limit_cpu = int(self.cpu_limit * 1e9) if self.cpu_limit else None
    reserve_cpu = int(self.cpu_guarantee * 1e9) if self.cpu_guarantee else None
    resource_options = {
        "mem_limit": self.mem_limit,
        "mem_reservation": self.mem_guarantee,
        "cpu_limit": limit_cpu,
        "cpu_reservation": reserve_cpu,
    }
    resource_options.update(self.extra_resources_spec)
    spec_resources = Resources(**resource_options)

    placement_options = {
        "constraints": None,
        "preferences": None,
        "platforms": None,
    }
    placement_options.update(self.extra_placement_spec)
    spec_placement = Placement(**placement_options)

    task_options = {
        "container_spec": spec_container,
        "resources": spec_resources,
        "networks": [self.network_name] if self.network_name else [],
        "placement": spec_placement,
    }
    task_options.update(self.extra_task_spec)
    template = TaskTemplate(**task_options)

    endpoint_options = {}
    if not self.use_internal_ip:
        endpoint_options["ports"] = {None: (self.port, "tcp")}
    endpoint_options.update(self.extra_endpoint_spec)
    spec_endpoint = EndpointSpec(**endpoint_options)

    service_options = dict(
        task_template=template,
        endpoint_spec=spec_endpoint,
        name=self.service_name,
    )
    service_options.update(self.extra_create_kwargs)
    service = yield self.docker("create_service", **service_options)

    # Poll until Swarm has scheduled at least one task for the new
    # service.  NOTE: there is deliberately no timeout here, matching the
    # original behavior — if the service never schedules, this loops.
    while True:
        scheduled = yield self.docker(
            "tasks",
            filters={"service": self.service_name},
        )
        if scheduled:
            break
        yield gen.sleep(1.0)

    return service
def start(self):
    """Start the single-user server in a docker service.

    You can specify the params for the service through
    jupyterhub_config.py or using the user_options.

    Resolution order for most settings is: spawner attribute ->
    user_options -> selected image entry (image-level values override).
    Returns an ``(ip, port)`` tuple where ``ip`` is the swarm service
    name (service-name based discovery) and ``port`` is
    ``self.service_port``.
    """
    self.log.debug("User: {}, start spawn".format(self.user.__dict__))
    # https://github.com/jupyterhub/jupyterhub
    # /blob/master/jupyterhub/user.py#L202
    # By default jupyterhub calls the spawner passing user_options
    if self.use_user_options:
        user_options = self.user_options
    else:
        user_options = {}
    service = yield self.get_service()
    if service is None:
        # Validate state
        # NOTE(review): if this spawner has no container_spec but
        # user_options is non-empty, neither branch binds
        # container_spec and the .update() below raises NameError —
        # confirm whether that path is reachable.
        if hasattr(self, "container_spec") and self.container_spec is not None:
            container_spec = dict(**self.container_spec)
        elif user_options == {}:
            self.log.error("User: {} is trying to create a service"
                           " without a container_spec".format(self.user))
            # NOTE(review): message is missing a space between
            # "specification" and "to launch" (adjacent literals).
            raise Exception("That notebook is missing a specification"
                            "to launch it, contact the admin to resolve "
                            "this issue")
        # Setup service
        container_spec.update(user_options.get("container_spec", {}))
        # Which image to spawn
        if self.use_user_options and "user_selected_image" in user_options:
            self.log.debug(
                "User options received: {}".format(user_options))
            # NOTE(review): only "user_selected_image" was checked above;
            # this raises KeyError if "user_selected_name" is absent.
            image_name = user_options["user_selected_name"]
            image_value = user_options["user_selected_image"]
            selected_image = None
            # Last matching entry wins; a deep copy so later mutation
            # does not touch self.images.
            for di in self.images:
                if image_name == di["name"] and image_value == di["image"]:
                    selected_image = copy.deepcopy(di)
            if selected_image is None:
                err_msg = "User selected image: {} couldn't be found".format(
                    image_value)
                self.log.error(err_msg)
                raise Exception(err_msg)
            self.log.info(
                "Using the user selected image: {}".format(selected_image))
        else:
            # Default image
            selected_image = self.images[0]
            self.log.info(
                "Using the default image: {}".format(selected_image))
        self.log.debug("Image info: {}".format(selected_image))
        # Does that image have restricted access
        if "access" in selected_image:
            # Check for static or db users: "access" is either a
            # container of owner names or a path to a newline-separated
            # user file.
            allowed = False
            if self.service_owner in selected_image["access"]:
                allowed = True
            else:
                if os.path.exists(selected_image["access"]):
                    db_path = selected_image["access"]
                    try:
                        self.log.info("Checking db: {} for "
                                      "User: {}".format(
                                          db_path, self.service_owner))
                        with open(db_path, "r") as db:
                            users = [
                                user.rstrip("\n").rstrip("\r\n")
                                for user in db
                            ]
                            if self.service_owner in users:
                                allowed = True
                    except IOError as err:
                        self.log.error("User: {} tried to open db file {},"
                                       "Failed {}".format(
                                           self.service_owner, db_path, err))
            if not allowed:
                self.log.error(
                    "User: {} tried to launch {} without access".format(
                        self.service_owner, selected_image["image"]))
                raise Exception(
                    "You don't have permission to launch that image")
        self.log.debug("Container spec: {}".format(container_spec))
        # Assign the image name as a label
        container_spec["labels"] = {"image_name": selected_image["name"]}
        # Setup mounts
        mounts = []
        # Global mounts
        if "mounts" in container_spec:
            mounts.extend(container_spec["mounts"])
        container_spec["mounts"] = []
        # Image mounts
        if "mounts" in selected_image:
            mounts.extend(selected_image["mounts"])
        for mount in mounts:
            if isinstance(mount, dict):
                # Plain dict mounts go through the VolumeMounter helper.
                m = VolumeMounter(mount)
                m = yield m.create(owner=self.service_owner)
            else:
                # Expects a mount_class that supports 'create'
                if hasattr(self.user, "data"):
                    m = yield mount.create(self.user.data,
                                           owner=self.service_owner)
                else:
                    m = yield mount.create(owner=self.service_owner)
            container_spec["mounts"].append(m)
        # Some envs are required by the single-user-image
        if "env" in container_spec:
            container_spec["env"].update(self.get_env())
        else:
            container_spec["env"] = self.get_env()
        # Env of image
        if "env" in selected_image and isinstance(selected_image["env"],
                                                  dict):
            container_spec["env"].update(selected_image["env"])
        # Dynamic update of env values: a value like "{attr}" is
        # replaced by the matching str attribute found on self, the
        # user, or user.data (checked in that order; later wins).
        for env_key, env_value in container_spec["env"].items():
            stripped_value = env_value.lstrip("{").rstrip("}")
            if hasattr(self, stripped_value) and isinstance(
                    getattr(self, stripped_value), str):
                container_spec["env"][env_key] = getattr(
                    self, stripped_value)
            if hasattr(self.user, stripped_value) and isinstance(
                    getattr(self.user, stripped_value), str):
                container_spec["env"][env_key] = getattr(
                    self.user, stripped_value)
            if (hasattr(self.user, "data")
                    and hasattr(self.user.data, stripped_value)
                    and isinstance(getattr(self.user.data, stripped_value),
                                   str)):
                container_spec["env"][env_key] = getattr(
                    self.user.data, stripped_value)
        # Args of image
        if "args" in selected_image and isinstance(selected_image["args"],
                                                   list):
            container_spec.update({"args": selected_image["args"]})
        if ("command" in selected_image
                and isinstance(selected_image["command"], list)
                or "command" in selected_image
                and isinstance(selected_image["command"], str)):
            container_spec.update({"command": selected_image["command"]})
        # Log mounts config
        self.log.debug("User: {} container_spec mounts: {}".format(
            self.user, container_spec["mounts"]))
        # Global resource_spec
        resource_spec = {}
        if hasattr(self, "resource_spec"):
            resource_spec = self.resource_spec
        resource_spec.update(user_options.get("resource_spec", {}))
        networks = None
        if hasattr(self, "networks"):
            networks = self.networks
        if user_options.get("networks") is not None:
            networks = user_options.get("networks")
        # Global Log driver
        log_driver = None
        if hasattr(self, "log_driver"):
            log_driver = self.log_driver
        if user_options.get("log_driver") is not None:
            log_driver = user_options.get("log_driver")
        accelerators = []
        if hasattr(self, "accelerators"):
            accelerators = self.accelerators
        if user_options.get("accelerators") is not None:
            accelerators = user_options.get("accelerators")
        # Global placement
        placement = None
        if hasattr(self, "placement"):
            placement = self.placement
        if user_options.get("placement") is not None:
            placement = user_options.get("placement")
        # Image to spawn
        image = selected_image["image"]
        # Image resources
        if "resource_spec" in selected_image:
            resource_spec = selected_image["resource_spec"]
        # Accelerators attached to the image
        if "accelerators" in selected_image:
            accelerators = selected_image["accelerators"]
        # Placement of image
        if "placement" in selected_image:
            placement = selected_image["placement"]
        # Logdriver of image
        if "log_driver" in selected_image:
            log_driver = selected_image["log_driver"]
        # Configs attached to image
        if "configs" in selected_image and isinstance(
                selected_image["configs"], list):
            for c in selected_image["configs"]:
                if isinstance(c, dict):
                    self.configs.append(c)
        endpoint_spec = {}
        if "endpoint_spec" in selected_image:
            endpoint_spec = selected_image["endpoint_spec"]
        if self.configs:
            # Check that the supplied configs already exists
            current_configs = yield self.docker("configs")
            config_error_msg = (
                "The server has a misconfigured config, "
                "please contact an administrator to resolve this")
            for c in self.configs:
                if "config_name" not in c:
                    self.log.error("Config: {} does not have a "
                                   "required config_name key".format(c))
                    raise Exception(config_error_msg)
                if "config_id" not in c:
                    # Find the id from the supplied name
                    config_ids = [
                        cc["ID"] for cc in current_configs
                        if cc["Spec"]["Name"] == c["config_name"]
                    ]
                    if not config_ids:
                        # NOTE(review): the "{}" placeholder is never
                        # filled — missing .format(c["config_name"]).
                        self.log.error(
                            "A config with name {} could not be found")
                        raise Exception(config_error_msg)
                    c["config_id"] = config_ids[0]
            container_spec.update(
                {"configs": [ConfigReference(**c) for c in self.configs]})
        # Prepare the accelerators and attach it to the environment
        if accelerators:
            for accelerator in accelerators:
                # NOTE(review): "aquire" is the project API's spelling.
                accelerator_id = accelerator.aquire(self.user.name)
                # NVIDIA_VISIBLE_DEVICES=0:0
                container_spec["env"][
                    "NVIDIA_VISIBLE_DEVICES"] = "{}".format(accelerator_id)
        # Global container user
        uid_gid = None
        if "uid_gid" in container_spec:
            uid_gid = copy.deepcopy(container_spec["uid_gid"])
            del container_spec["uid_gid"]
        # Image user
        if "uid_gid" in selected_image:
            uid_gid = selected_image["uid_gid"]
        self.log.info("gid info {}".format(uid_gid))
        # "uid:gid" or "uid"; the literal placeholders "{uid}"/"{gid}"
        # are substituted from the user object when available.
        if isinstance(uid_gid, str):
            if ":" in uid_gid:
                uid, gid = uid_gid.split(":")
            else:
                uid, gid = uid_gid, None
            if (uid == "{uid}" and hasattr(self.user, "uid")
                    and self.user.uid is not None):
                uid = self.user.uid
            if (gid is not None and gid == "{gid}"
                    and hasattr(self.user, "gid")
                    and self.user.gid is not None):
                gid = self.user.gid
            if uid:
                container_spec.update({"user": str(uid)})
            if uid and gid:
                container_spec.update({"user": str(uid) + ":" + str(gid)})
        # Global container user
        if "user" in container_spec:
            container_spec["user"] = str(container_spec["user"])
        # Image user
        if "user" in selected_image:
            container_spec.update({"user": str(selected_image["user"])})
        dynamic_holders = [Spawner, self, self.user]
        if hasattr(self.user, "data"):
            dynamic_holders.append(self.user.data)
        # Expand container_spec before start: substitute "{name}" style
        # placeholders from each holder's __dict__.
        for construct in dynamic_holders:
            try:
                if not hasattr(construct, "__dict__"):
                    continue
                recursive_format(container_spec, construct.__dict__)
            except TypeError:
                pass
        # Log driver
        log_driver_name, log_driver_options = None, None
        if log_driver and isinstance(log_driver, dict):
            if "name" in log_driver:
                log_driver_name = log_driver["name"]
            if "options" in log_driver:
                log_driver_options = log_driver["options"]
        # Create the service
        container_spec = ContainerSpec(image, **container_spec)
        resources = Resources(**resource_spec)
        placement = Placement(**placement)
        task_log_driver = None
        if log_driver_name:
            task_log_driver = DriverConfig(log_driver_name,
                                           options=log_driver_options)
        task_spec = {
            "container_spec": container_spec,
            "resources": resources,
            "placement": placement,
        }
        if task_log_driver:
            task_spec.update({"log_driver": task_log_driver})
        task_tmpl = TaskTemplate(**task_spec)
        self.log.debug("task temp: {}".format(task_tmpl))
        # Set endpoint spec
        endpoint_spec = EndpointSpec(**endpoint_spec)
        resp = yield self.docker(
            "create_service",
            task_tmpl,
            name=self.service_name,
            networks=networks,
            endpoint_spec=endpoint_spec,
        )
        self.service_id = resp["ID"]
        self.log.info("Created Docker service {} (id: {}) from image {}"
                      " for user {}".format(self.service_name,
                                            self.service_id[:7], image,
                                            self.user))
        yield self.wait_for_running_tasks()
    else:
        self.log.info("Found existing Docker service '{}' (id: {})".format(
            self.service_name, self.service_id[:7]))
        # Handle re-using API token.
        # Get the API token from the environment variables
        # of the running service:
        envs = service["Spec"]["TaskTemplate"]["ContainerSpec"]["Env"]
        for line in envs:
            if line.startswith("JPY_API_TOKEN="):
                self.api_token = line.split("=", 1)[1]
                break
    ip = self.service_name
    port = self.service_port
    self.log.debug("Active service: '{}' with user '{}'".format(
        self.service_name, self.user))
    # we use service_name instead of ip
    # https://docs.docker.com/engine/swarm/networking/#use-swarm-mode-service-discovery
    # service_port is actually equal to 8888
    return ip, port
def start(self):
    """Start the single-user server in a docker service.

    You can specify the params for the service through
    jupyterhub_config.py or using the user_options.

    Settings resolve as: spawner attribute -> user_options -> selected
    image entry (image-level values override).  Returns ``(ip, port)``
    where ``ip`` is the swarm service name (service-name based
    discovery) and ``port`` is ``self.service_port``.
    """
    self.log.info("User: {}, start spawn".format(self.user.__dict__))
    # https://github.com/jupyterhub/jupyterhub
    # /blob/master/jupyterhub/user.py#L202
    # By default jupyterhub calls the spawner passing user_options
    if self.use_user_options:
        user_options = self.user_options
    else:
        user_options = {}
    service = yield self.get_service()
    if service is None:
        # Validate state
        # NOTE(review): if there is no container_spec attribute but
        # user_options is non-empty, container_spec is never bound and
        # the .update() below raises NameError — confirm reachability.
        if hasattr(self, 'container_spec') \
                and self.container_spec is not None:
            container_spec = dict(**self.container_spec)
        elif user_options == {}:
            self.log.error("User: {} is trying to create a service"
                           " without a container_spec".format(self.user))
            # NOTE(review): message lacks a space between
            # "specification" and "to launch" (adjacent literals).
            raise Exception("That notebook is missing a specification"
                            "to launch it, contact the admin to resolve "
                            "this issue")

        # Setup service
        container_spec.update(user_options.get('container_spec', {}))
        # Which image to spawn
        if self.use_user_options and 'user_selected_image' in user_options:
            uimage = user_options['user_selected_image']
            image_info = None
            # Last matching entry wins; deep copy so later mutation
            # does not touch self.dockerimages.
            for di in self.dockerimages:
                if di['image'] == uimage:
                    image_info = copy.deepcopy(di)
            if image_info is None:
                # NOTE(review): uimage is a string here, so
                # uimage['image'] raises TypeError instead of producing
                # the intended message — should be .format(uimage).
                err_msg = "User selected image: {} couldn't be found" \
                    .format(uimage['image'])
                self.log.error(err_msg)
                raise Exception(err_msg)
        else:
            # Default image
            image_info = self.dockerimages[0]
        self.log.debug("Image info: {}".format(image_info))
        # Does that image have restricted access
        if 'access' in image_info:
            # Check for static or db users: "access" is either a
            # container of owner names or a path to a user file.
            allowed = False
            if self.service_owner in image_info['access']:
                allowed = True
            else:
                if os.path.exists(image_info['access']):
                    db_path = image_info['access']
                    try:
                        self.log.info("Checking db: {} for "
                                      "User: {}".format(
                                          db_path, self.service_owner))
                        with open(db_path, 'r') as db:
                            users = [
                                user.rstrip('\n').rstrip('\r\n')
                                for user in db
                            ]
                            if self.service_owner in users:
                                allowed = True
                    except IOError as err:
                        self.log.error("User: {} tried to open db file {},"
                                       "Failed {}".format(
                                           self.service_owner, db_path,
                                           err))
            if not allowed:
                self.log.error(
                    "User: {} tried to launch {} without access".format(
                        self.service_owner, image_info['image']))
                raise Exception(
                    "You don't have permission to launch that image")
        self.log.debug("Container spec: {}".format(container_spec))

        # Setup mounts
        mounts = []
        # Global mounts
        if 'mounts' in container_spec:
            mounts.extend(container_spec['mounts'])
        container_spec['mounts'] = []
        # Image mounts
        if 'mounts' in image_info:
            mounts.extend(image_info['mounts'])
        for mount in mounts:
            if isinstance(mount, dict):
                # Plain dict mounts go through the VolumeMounter helper.
                m = VolumeMounter(mount)
                m = yield m.create(owner=self.service_owner)
            else:
                # Expects a mount_class that supports 'create'
                if hasattr(self.user, 'data'):
                    m = yield mount.create(self.user.data,
                                           owner=self.service_owner)
                else:
                    m = yield mount.create(owner=self.service_owner)
            container_spec['mounts'].append(m)

        # Some envs are required by the single-user-image
        if 'env' in container_spec:
            container_spec['env'].update(self.get_env())
        else:
            container_spec['env'] = self.get_env()

        # Env of image
        if 'env' in image_info and isinstance(image_info['env'], dict):
            container_spec['env'].update(image_info['env'])

        # Dynamic update of env values: a value like "{attr}" is
        # replaced by the matching str attribute found on self, the
        # user, or user.data (checked in that order; later wins).
        for env_key, env_value in container_spec['env'].items():
            stripped_value = env_value.lstrip('{').rstrip('}')
            if hasattr(self, stripped_value) \
                    and isinstance(getattr(self, stripped_value), str):
                container_spec['env'][env_key] = getattr(
                    self, stripped_value)
            if hasattr(self.user, stripped_value) \
                    and isinstance(getattr(self.user, stripped_value),
                                   str):
                container_spec['env'][env_key] = getattr(
                    self.user, stripped_value)
            if hasattr(self.user, 'data') \
                    and hasattr(self.user.data, stripped_value)\
                    and isinstance(getattr(self.user.data, stripped_value),
                                   str):
                container_spec['env'][env_key] = getattr(
                    self.user.data, stripped_value)

        # Args of image
        if 'args' in image_info and isinstance(image_info['args'], list):
            container_spec.update({'args': image_info['args']})

        if 'command' in image_info and isinstance(image_info['command'],
                                                  list)\
                or 'command' in image_info and \
                isinstance(image_info['command'], str):
            container_spec.update({'command': image_info['command']})

        # Log mounts config
        self.log.debug("User: {} container_spec mounts: {}".format(
            self.user, container_spec['mounts']))

        # Global resource_spec
        resource_spec = {}
        if hasattr(self, 'resource_spec'):
            resource_spec = self.resource_spec
        resource_spec.update(user_options.get('resource_spec', {}))

        networks = None
        if hasattr(self, 'networks'):
            networks = self.networks
        if user_options.get('networks') is not None:
            networks = user_options.get('networks')

        # Global placement
        placement = None
        if hasattr(self, 'placement'):
            placement = self.placement
        if user_options.get('placement') is not None:
            placement = user_options.get('placement')

        # Image to spawn
        image = image_info['image']

        # Image resources
        if 'resource_spec' in image_info:
            resource_spec = image_info['resource_spec']

        # Placement of image
        if 'placement' in image_info:
            placement = image_info['placement']

        # Configs attached to image
        if 'configs' in image_info and isinstance(image_info['configs'],
                                                  list):
            for c in image_info['configs']:
                if isinstance(c, dict):
                    self.configs.append(c)

        if self.configs:
            # Check that the supplied configs already exists
            current_configs = yield self.docker('configs')
            config_error_msg = "The server has a misconfigured config, " \
                               "please contact an administrator to " \
                               "resolve this"
            for c in self.configs:
                if 'config_name' not in c:
                    self.log.error("Config: {} does not have a "
                                   "required config_name key".format(c))
                    raise Exception(config_error_msg)
                if 'config_id' not in c:
                    # Find the id from the supplied name
                    config_ids = [
                        cc['ID'] for cc in current_configs
                        if cc['Spec']['Name'] == c['config_name']
                    ]
                    if not config_ids:
                        # NOTE(review): the "{}" placeholder is never
                        # filled — missing .format(c['config_name']).
                        self.log.error(
                            "A config with name {} could not be found")
                        raise Exception(config_error_msg)
                    c['config_id'] = config_ids[0]
            container_spec.update(
                {'configs': [ConfigReference(**c) for c in self.configs]})

        # Global container user
        uid_gid = None
        if 'uid_gid' in container_spec:
            uid_gid = copy.deepcopy(container_spec['uid_gid'])
            del container_spec['uid_gid']
        # Image user
        if 'uid_gid' in image_info:
            uid_gid = image_info['uid_gid']
        self.log.info("gid info {}".format(uid_gid))
        # "uid:gid" or "uid"; literal "{uid}"/"{gid}" placeholders are
        # substituted from the user object when available.
        if isinstance(uid_gid, str):
            if ":" in uid_gid:
                uid, gid = uid_gid.split(":")
            else:
                uid, gid = uid_gid, None
            if uid == '{uid}' and hasattr(self.user, 'uid') \
                    and self.user.uid is not None:
                uid = self.user.uid
            if gid is not None and gid == '{gid}' \
                    and hasattr(self.user, 'gid') \
                    and self.user.gid is not None:
                gid = self.user.gid
            if uid:
                container_spec.update({'user': str(uid)})
            if uid and gid:
                container_spec.update({'user': str(uid) + ":" + str(gid)})

        # Global container user
        if 'user' in container_spec:
            container_spec['user'] = str(container_spec['user'])
        # Image user
        if 'user' in image_info:
            container_spec.update({'user': str(image_info['user'])})

        dynamic_holders = [Spawner, self, self.user]
        if hasattr(self.user, 'data'):
            dynamic_holders.append(self.user.data)

        # Expand container_spec before start: substitute "{name}" style
        # placeholders from each holder's __dict__.
        for construct in dynamic_holders:
            try:
                if not hasattr(construct, '__dict__'):
                    continue
                recursive_format(container_spec, construct.__dict__)
            except TypeError:
                pass

        # Create the service
        container_spec = ContainerSpec(image, **container_spec)
        resources = Resources(**resource_spec)
        placement = Placement(**placement)
        task_spec = {
            'container_spec': container_spec,
            'resources': resources,
            'placement': placement
        }
        task_tmpl = TaskTemplate(**task_spec)
        self.log.info("task temp: {}".format(task_tmpl))
        resp = yield self.docker('create_service',
                                 task_tmpl,
                                 name=self.service_name,
                                 networks=networks)
        self.service_id = resp['ID']
        self.log.info("Created Docker service {} (id: {}) from image {}"
                      " for user {}".format(self.service_name,
                                            self.service_id[:7], image,
                                            self.user))
        yield self.wait_for_running_tasks()
    else:
        self.log.info("Found existing Docker service '{}' (id: {})".format(
            self.service_name, self.service_id[:7]))
        # Handle re-using API token.
        # Get the API token from the environment variables
        # of the running service:
        envs = service['Spec']['TaskTemplate']['ContainerSpec']['Env']
        for line in envs:
            if line.startswith('JPY_API_TOKEN='):
                self.api_token = line.split('=', 1)[1]
                break
    ip = self.service_name
    port = self.service_port
    self.log.debug("Active service: '{}' with user '{}'".format(
        self.service_name, self.user))
    # we use service_name instead of ip
    # https://docs.docker.com/engine/swarm/networking/#use-swarm-mode-service-discovery
    # service_port is actually equal to 8888
    return ip, port