Example #1
0
def _check_limits(container_config, instance_detail):
    """Compare configured host-config limits against a running instance.

    Returns a tuple ``(update_dict, needs_reset)`` where ``update_dict``
    maps config keys to values that can be applied with a container
    update, and ``needs_reset`` is True when a limit would have to be
    cleared back to its default (which the update API cannot express).
    """
    inspected = instance_detail['HostConfig']
    host_cfg = container_config.host_config
    create_opts = container_config.create_options
    updates = {}
    reset_required = False
    for inspect_key, config_key, check_co, input_func in CONTAINER_UPDATE_VARS:
        # Normalize falsy values (0, '', None) to None on both sides.
        current = inspected.get(inspect_key) or None
        desired = host_cfg.get(config_key) or None
        if desired is None and check_co:
            desired = create_opts.get(config_key) or None
        if config_key == 'memswap_limit' and not desired:
            # Has a dependent default value: twice the memory limit.
            mem = host_cfg.get('mem_limit') or create_opts.get('mem_limit')
            if mem:
                desired = docker_utils.parse_bytes(mem) * 2
        if desired and input_func:
            desired = input_func(desired)
        if not (current or desired):
            continue
        log.debug("Comparing host-config variable %s - Container: %s - Config: %s.", inspect_key, current, desired)
        if current == desired:
            continue
        if desired is not None:
            log.debug("Updating %s to %s.", inspect_key, desired)
            updates[config_key] = desired
        else:
            # The API implementation (maybe just docker-py) will discard empty default values.
            log.debug("Host-config variable %s cannot be reset to default, suggesting container reset.",
                      inspect_key)
            reset_required = True
    return updates, reset_required
Example #2
0
def _check_limits(container_config, instance_detail):
    """Compare configured resource limits with an instance's HostConfig.

    Iterates CONTAINER_UPDATE_VARS and returns ``(update_dict, needs_reset)``:
    ``update_dict`` maps config keys to new values to apply, and
    ``needs_reset`` is True when a value would have to be cleared back to
    its default, which cannot be expressed through an update.
    """
    i_host_config = instance_detail['HostConfig']
    c_host_config = container_config.host_config
    c_create_options = container_config.create_options
    update_dict = {}
    needs_reset = False
    for inspect_key, config_key, check_co, input_func in CONTAINER_UPDATE_VARS:
        # Normalize falsy values (0, '', None) to None on both sides.
        i_value = i_host_config.get(inspect_key) or None
        c_value = c_host_config.get(config_key) or None
        if not c_value and check_co:
            # Fall back to create_options when the table allows it.
            c_value = c_create_options.get(config_key) or None
        if config_key == 'memswap_limit' and not c_value:
            # Has a dependent default value.
            mem = c_host_config.get('mem_limit') or c_create_options.get(
                'mem_limit')
            if mem:
                # Default memswap is twice the configured memory limit.
                c_value = docker_utils.parse_bytes(mem) * 2
        if c_value and input_func:
            c_value = input_func(c_value)
        if i_value or c_value:
            log.debug(
                "Comparing host-config variable %s - Container: %s - Config: %s.",
                inspect_key, i_value, c_value)
            if i_value != c_value:
                if c_value is not None:
                    log.debug("Updating %s to %s.", inspect_key, c_value)
                    update_dict[config_key] = c_value
                else:
                    # The API implementation (maybe just docker-py) will discard empty default values.
                    log.debug(
                        "Host-config variable %s cannot be reset to default, suggesting container reset.",
                        inspect_key)
                    needs_reset = True
    return update_dict, needs_reset
Example #3
0
 def test_parse_bytes_invalid(self):
     """Malformed size strings must raise DockerException."""
     for bad_value in ("512MK", "512L", "127.0.0.1K"):
         with pytest.raises(DockerException):
             parse_bytes(bad_value)
Example #4
0
 def test_parse_bytes_invalid(self):
     """Each malformed size suffix is rejected with DockerException."""
     invalid_inputs = ["512MK", "512L", "127.0.0.1K"]
     for value in invalid_inputs:
         with pytest.raises(DockerException):
             parse_bytes(value)
Example #5
0
 def test_parse_bytes_maxint(self):
     """Sizes at sys.maxsize scale correctly by the unit suffix."""
     expected = sys.maxsize * 1024
     self.assertEqual(parse_bytes("{0}k".format(sys.maxsize)), expected)
Example #6
0
 def test_parse_bytes_valid(self):
     """512 megabytes parses identically across suffix spellings."""
     for spelling in ("512MB", "512M", "512m"):
         self.assertEqual(parse_bytes(spelling), 536870912)
Example #7
0
 def test_parse_bytes(self):
     """Valid sizes parse to bytes; malformed suffixes raise."""
     for text in ("512MB", "512M"):
         self.assertEqual(parse_bytes(text), 536870912)
     for text in ("512MK", "512L"):
         self.assertRaises(DockerException, parse_bytes, text)
Example #8
0
 def test_parse_bytes(self):
     """Check the happy path and two malformed suffixes."""
     expected = 536870912
     self.assertEqual(parse_bytes("512MB"), expected)
     self.assertEqual(parse_bytes("512M"), expected)
     with self.assertRaises(DockerException):
         parse_bytes("512MK")
     with self.assertRaises(DockerException):
         parse_bytes("512L")
Example #9
0
 def test_parse_bytes_maxint(self):
     """The parser must not truncate at sys.maxsize kilobytes."""
     result = parse_bytes("{0}k".format(sys.maxsize))
     self.assertEqual(result, sys.maxsize * 1024)
Example #10
0
 def test_parse_bytes_float(self):
     """Fractional size strings are rejected by this version."""
     value = "1.5k"
     with pytest.raises(DockerException):
         parse_bytes(value)
Example #11
0
    def _do_provision(self, token, instance_id, cur_ts):
        """Provision a Docker container for *instance_id*.

        Selects a docker host, creates and starts the container with the
        blueprint's memory limits, waits for the published port, stores
        the instance data through the PB API and registers a proxy route.
        """
        ap = self._get_ap()

        pbclient = ap.get_pb_client(token,
                                    self.config['INTERNAL_API_BASE_URL'],
                                    ssl_verify=False)

        log_uploader = self.create_prov_log_uploader(token,
                                                     instance_id,
                                                     log_type='provisioning')

        instance = pbclient.get_instance_description(instance_id)

        # fetch config
        blueprint = pbclient.get_blueprint_description(
            instance['blueprint_id'])
        blueprint_config = blueprint['full_config']

        log_uploader.info("selecting host...")

        # First host returned is the selected one.
        docker_hosts = self._select_hosts(blueprint_config['consumed_slots'],
                                          cur_ts)
        selected_host = docker_hosts[0]

        docker_client = ap.get_docker_client(selected_host['docker_url'])

        log_uploader.info("done\n")

        container_name = instance['name']

        # total_memory is set to 3 times the size of RAM limit
        host_config = docker_client.create_host_config(
            mem_limit=blueprint_config['memory_limit'],
            memswap_limit=parse_bytes(blueprint_config['memory_limit']) * 3,
            publish_all_ports=True,
        )

        # Random hex id used as the externally visible proxy path.
        proxy_route = uuid.uuid4().hex

        config = {
            'image': blueprint_config['docker_image'],
            'name': container_name,
            'labels': {
                'slots': '%d' % blueprint_config['consumed_slots']
            },
            'host_config': host_config,
            'environment': blueprint_config['environment_vars'].split(),
        }
        if len(blueprint_config.get('launch_command', '')):
            # Inject the per-instance proxy path into the launch command.
            launch_command = blueprint_config.get('launch_command').format(
                proxy_path='/%s' % proxy_route)
            config['command'] = launch_command

        log_uploader.info("creating container '%s'\n" % container_name)
        container = docker_client.create_container(**config)
        container_id = container['Id']

        log_uploader.info("starting container '%s'\n" % container_name)
        docker_client.start(container_id)

        # get the public port
        ports = docker_client.port(container_id,
                                   blueprint_config['internal_port'])
        if len(ports) == 1:
            public_port = ports[0]['HostPort']
            try:
                # NOTE(review): uses self._ap while the rest of the method
                # uses ap = self._get_ap() — confirm both refer to the
                # same object.
                self._ap.wait_for_port(selected_host['private_ip'],
                                       int(public_port))
            except RuntimeError:
                # Best-effort check; provisioning continues regardless.
                log_uploader.warn(
                    "Could not check if the port used in provisioning is listening"
                )

        else:
            raise RuntimeError('Expected exactly one mapped port')

        instance_data = {
            'endpoints': [
                {
                    'name':
                    'https',
                    'access':
                    'https://%s:%s/%s' % (self.config['PUBLIC_IPV4'],
                                          self.config['EXTERNAL_HTTPS_PORT'],
                                          'notebooks/' + proxy_route)
                },
            ],
            'docker_url':
            selected_host['docker_url'],
            'docker_host_id':
            selected_host['id'],
            'proxy_route':
            proxy_route,
        }

        pbclient.do_instance_patch(
            instance_id,
            {
                #                'public_ip': self.config['PUBLIC_IPV4'],
                'instance_data': json.dumps(instance_data),
            })

        log_uploader.info("adding route\n")

        # Optional proxy behaviour flags from the blueprint config.
        options = {}
        proxy_options = blueprint_config.get('proxy_options')
        if proxy_options:
            proxy_rewrite = proxy_options.get('proxy_rewrite')
            proxy_redirect = proxy_options.get('proxy_redirect')
            set_host_header = proxy_options.get('set_host_header')

            if proxy_rewrite:
                options['proxy_rewrite'] = proxy_rewrite
            if proxy_redirect:
                options['proxy_redirect'] = proxy_redirect
            if set_host_header:
                options['set_host_header'] = set_host_header

        ap.proxy_add_route(
            proxy_route,
            'http://%s:%s' % (selected_host['private_ip'], public_port),
            options)

        log_uploader.info("provisioning done for %s\n" % instance_id)
    def _do_provision(self, token, instance_id, cur_ts):
        """Provision a Docker container for *instance_id*.

        Selects a docker host, creates and starts the container with the
        blueprint's memory limits, waits for the published port, stores
        the instance data through the PB API and registers a proxy route.
        """
        ap = self._get_ap()

        pbclient = ap.get_pb_client(token, self.config['INTERNAL_API_BASE_URL'], ssl_verify=False)

        log_uploader = self.create_prov_log_uploader(token, instance_id, log_type='provisioning')

        instance = pbclient.get_instance_description(instance_id)

        # fetch config
        blueprint = pbclient.get_blueprint_description(instance['blueprint_id'])
        blueprint_config = blueprint['config']

        log_uploader.info("selecting host...")

        # First host returned is the selected one.
        docker_hosts = self._select_hosts(blueprint_config['consumed_slots'], cur_ts)
        selected_host = docker_hosts[0]

        docker_client = ap.get_docker_client(selected_host['docker_url'])

        log_uploader.info("done\n")

        container_name = instance['name']

        # total_memory is set to 3 times the size of RAM limit
        host_config = docker_client.create_host_config(
            mem_limit=blueprint_config['memory_limit'],
            memswap_limit=parse_bytes(blueprint_config['memory_limit']) * 3,
            publish_all_ports=True,
        )

        # Random hex id used as the externally visible proxy path.
        proxy_route = uuid.uuid4().hex

        config = {
            'image': blueprint_config['docker_image'],
            'name': container_name,
            'labels': {'slots': '%d' % blueprint_config['consumed_slots']},
            'host_config': host_config,
            'environment': blueprint_config['environment_vars'].split(),
        }
        if len(blueprint_config.get('launch_command', '')):
            # Inject the per-instance proxy path into the launch command.
            launch_command = blueprint_config.get('launch_command').format(
                proxy_path='/%s' % proxy_route
            )
            config['command'] = launch_command

        log_uploader.info("creating container '%s'\n" % container_name)
        container = docker_client.create_container(**config)
        container_id = container['Id']

        log_uploader.info("starting container '%s'\n" % container_name)
        docker_client.start(container_id)

        # get the public port
        ports = docker_client.port(container_id, blueprint_config['internal_port'])
        if len(ports) == 1:
            public_port = ports[0]['HostPort']
            try:
                # NOTE(review): uses self._ap while the rest of the method
                # uses ap = self._get_ap() — confirm both refer to the
                # same object.
                self._ap.wait_for_port(selected_host['private_ip'], int(public_port))
            except RuntimeError:
                # Best-effort check; provisioning continues regardless.
                log_uploader.warn("Could not check if the port used in provisioning is listening")

        else:
            raise RuntimeError('Expected exactly one mapped port')

        instance_data = {
            'endpoints': [
                {
                    'name': 'https',
                    'access': 'https://%s:%s/%s' % (
                        self.config['PUBLIC_IPV4'],
                        self.config['EXTERNAL_HTTPS_PORT'],
                        'notebooks/' + proxy_route
                    )
                },
            ],
            'docker_url': selected_host['docker_url'],
            'docker_host_id': selected_host['id'],
            'proxy_route': proxy_route,
        }

        pbclient.do_instance_patch(
            instance_id,
            {
                #                'public_ip': self.config['PUBLIC_IPV4'],
                'instance_data': json.dumps(instance_data),
            }
        )

        log_uploader.info("adding route\n")

        # Optional proxy behaviour flags from the blueprint config.
        options = {}
        proxy_options = blueprint_config.get('proxy_options')
        if proxy_options:
            proxy_rewrite = proxy_options.get('proxy_rewrite')
            proxy_redirect = proxy_options.get('proxy_redirect')
            set_host_header = proxy_options.get('set_host_header')

            if proxy_rewrite:
                options['proxy_rewrite'] = proxy_rewrite
            if proxy_redirect:
                options['proxy_redirect'] = proxy_redirect
            if set_host_header:
                options['set_host_header'] = set_host_header

        ap.proxy_add_route(
            proxy_route,
            'http://%s:%s' % (selected_host['private_ip'], public_port),
            options
        )

        log_uploader.info("provisioning done for %s\n" % instance_id)
Example #13
0
 def test_parse_bytes_maxint(self):
     """Huge kilobyte values scale without truncation."""
     expected = sys.maxsize * 1024
     assert parse_bytes("{0}k".format(sys.maxsize)) == expected
Example #14
0
 def test_parse_bytes_float(self):
     """A fractional size string must raise DockerException."""
     fractional = "1.5k"
     with pytest.raises(DockerException):
         parse_bytes(fractional)
Example #15
0
 def test_parse_bytes_valid(self):
     """All spellings of 512 megabytes yield the same byte count."""
     for text in ("512MB", "512M", "512m"):
         assert parse_bytes(text) == 536870912
Example #16
0
 def test_parse_bytes_valid(self):
     """512MB / 512M / 512m are equivalent size spellings."""
     expected = 536870912
     assert parse_bytes("512MB") == expected
     assert parse_bytes("512M") == expected
     assert parse_bytes("512m") == expected
Example #17
0
 def test_parse_bytes_float(self):
     """Fractional sizes are supported here: 1.5k is 1536 bytes."""
     result = parse_bytes("1.5k")
     assert result == 1536
Example #18
0
 def test_parse_bytes_valid(self):
     """The same 512-megabyte value parses from every suffix form."""
     expected_bytes = 536870912
     self.assertEqual(parse_bytes("512MB"), expected_bytes)
     self.assertEqual(parse_bytes("512M"), expected_bytes)
     self.assertEqual(parse_bytes("512m"), expected_bytes)
Example #19
0
 def test_parse_bytes_maxint(self):
     """sys.maxsize kilobytes parses without overflow."""
     assert parse_bytes("{0}k".format(sys.maxsize)) == 1024 * sys.maxsize
Example #20
0
    def start(self):
        """Start the single-user server in a docker swarm service.

        Service parameters can be set through jupyterhub_config.py or,
        when ``use_user_options`` is enabled, via the ``user_options``
        JupyterHub passes to the spawner.  Returns ``(ip, port)`` where
        ``ip`` is the service name (resolved through swarm-mode service
        discovery).
        """

        # https://github.com/jupyterhub/jupyterhub/blob/master/jupyterhub/user.py#L202
        # By default jupyterhub calls the spawner passing user_options
        if self.use_user_options:
            user_options = self.user_options
        else:
            user_options = {}

        self.log.warn("user_options: {}".format(user_options))

        service = yield self.get_service()

        if service is None:

            if 'name' in user_options:
                self.server_name = user_options['name']

            if hasattr(self,
                       'container_spec') and self.container_spec is not None:
                container_spec = dict(**self.container_spec)
            elif user_options == {}:
                # Fix: the original raised a bare string, which is a
                # TypeError under Python 3 (exceptions must derive from
                # BaseException); raise a real exception with the same
                # message instead.
                raise Exception(
                    "A container_spec is needed in to create a service")
            # NOTE(review): if user_options is non-empty and there is no
            # container_spec attribute, container_spec is unbound here and
            # the next line raises NameError — confirm this is intended.

            container_spec.update(user_options.get('container_spec', {}))

            # iterates over mounts to create
            # a new mounts list of docker.types.Mount
            container_spec['mounts'] = []
            # NOTE(review): this iterates self.container_spec['mounts'],
            # not the merged container_spec, so mounts supplied through
            # user_options are ignored — confirm this is intended.
            for mount in self.container_spec['mounts']:
                m = dict(**mount)

                if 'source' in m:
                    # Per-user substitution of the volume source path.
                    m['source'] = m['source'].format(
                        username=self.service_owner)

                if 'driver_config' in m:
                    # Per-user substitution of the driver device path.
                    device = m['driver_config']['options']['device'].format(
                        username=self.service_owner)
                    m['driver_config']['options']['device'] = device
                    m['driver_config'] = docker.types.DriverConfig(
                        **m['driver_config'])

                container_spec['mounts'].append(docker.types.Mount(**m))

            # some Envs are required by the single-user-image
            container_spec['env'] = self.get_env()

            # NOTE(review): resource_spec / networks / placement are also
            # unbound below when the corresponding attribute is missing
            # and user_options does not supply them — confirm intended.
            if hasattr(self, 'resource_spec'):
                resource_spec = dict(**self.resource_spec)
            resource_spec.update(user_options.get('resource_spec', {}))
            # enable to set a human readable memory unit
            if 'mem_limit' in resource_spec:
                resource_spec['mem_limit'] = parse_bytes(
                    resource_spec['mem_limit'])
            if 'mem_reservation' in resource_spec:
                resource_spec['mem_reservation'] = parse_bytes(
                    resource_spec['mem_reservation'])

            if hasattr(self, 'networks'):
                networks = self.networks
            if user_options.get('networks') is not None:
                networks = user_options.get('networks')

            if hasattr(self, 'placement'):
                placement = self.placement
            if user_options.get('placement') is not None:
                placement = user_options.get('placement')

            # ContainerSpec takes the image positionally, so pop it out.
            image = container_spec['Image']
            del container_spec['Image']

            # create the service
            container_spec = docker.types.ContainerSpec(
                image, **container_spec)
            resources = docker.types.Resources(**resource_spec)

            task_spec = {
                'container_spec': container_spec,
                'resources': resources,
                'placement': placement
            }
            task_tmpl = docker.types.TaskTemplate(**task_spec)

            resp = yield self.docker('create_service',
                                     task_tmpl,
                                     name=self.service_name,
                                     networks=networks)

            self.service_id = resp['ID']

            self.log.info("Created Docker service '%s' (id: %s) from image %s",
                          self.service_name, self.service_id[:7], image)

        else:
            self.log.info("Found existing Docker service '%s' (id: %s)",
                          self.service_name, self.service_id[:7])
            # Handle re-using API token.
            # Get the API token from the environment variables
            # of the running service:
            envs = service['Spec']['TaskTemplate']['ContainerSpec']['Env']
            for line in envs:
                if line.startswith('JPY_API_TOKEN='):
                    self.api_token = line.split('=', 1)[1]
                    break

        ip = self.service_name
        port = self.service_port

        # we use service_name instead of ip
        # https://docs.docker.com/engine/swarm/networking/#use-swarm-mode-service-discovery
        # service_port is actually equal to 8888
        return (ip, port)