Code Example #1
    def get(self):
        spawner = dockworker.DockerSpawner(
            docker_host='https://192.168.99.100:2376')
        command_default = ('jupyter notebook --no-browser'
                           ' --port {port} --ip=0.0.0.0'
                           ' --NotebookApp.base_url=/{base_path}'
                           ' --NotebookApp.port_retries=0')
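        # {port} and {base_path} are placeholders in the startup command; they
        # are substituted when the container is launched (see the 'command'
        # option help in the later examples).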
        username = self.get_argument("username")
        if username:
            container_config = dockworker.ContainerConfig(
                image='morpheuz/jupyther-notebook-minimal-nfs',
                command=command_default,
                mem_limit='512m',
                cpu_shares=None,
                container_ip='127.0.0.1',
                container_port='8888',
                container_user=username,
                host_network=False,
                host_directories=None)
            spawnpool = SpawnPool(proxy_endpoint='http://127.0.0.1:8001',
                                  proxy_token='',
                                  spawner=spawner,
                                  container_config=container_config,
                                  capacity=1,
                                  max_age=None,
                                  static_files=None,
                                  static_dump_path=STATIC_PATH,
                                  pool_name='JUPHUB',
                                  user_length=32)

            self.write("Start container with %s" % username)
            container = spawnpool._launch_container(user=username)
        else:
            self.write("Need username")
Code Example #2
def main():
    tornado.options.define('cull_period',
                           default=600,
                           help="Interval (s) for culling idle containers.")
    tornado.options.define('cull_timeout',
                           default=3600,
                           help="Timeout (s) for culling idle containers.")
    tornado.options.define('container_ip',
                           default='127.0.0.1',
                           help="IP address for containers to bind to")
    tornado.options.define('container_port',
                           default='8888',
                           help="Port for containers to bind to")

    command_default = ('ipython notebook --no-browser'
                       ' --port {port} --ip=0.0.0.0'
                       ' --NotebookApp.base_url=/{base_path}')

    tornado.options.define(
        'command',
        default=command_default,
        help=
        "command to run when booting the image. A placeholder for base_path should be provided."
    )
    tornado.options.define('port',
                           default=9999,
                           help="port for the main server to listen on")
    tornado.options.define(
        'ip',
        default=None,
        help="ip for the main server to listen on [default: all interfaces]")
    tornado.options.define('max_dock_workers',
                           default=2,
                           help="Maximum number of docker workers")
    tornado.options.define('mem_limit',
                           default="512m",
                           help="Limit on Memory, per container")
    tornado.options.define('cpu_shares',
                           default=None,
                           help="Limit CPU shares, per container")
    tornado.options.define(
        'image',
        default="jupyter/minimal",
        help=
        "Docker container to spawn for new users. Must be on the system already"
    )
    tornado.options.define('docker_version',
                           default="1.13",
                           help="Version of the Docker API to use")
    tornado.options.define(
        'redirect_uri',
        default="/tree",
        help="URI to redirect users to upon initial notebook launch")
    tornado.options.define(
        'pool_size',
        default=10,
        help=
        "Capacity for containers on this system. Will be prelaunched at startup."
    )
    tornado.options.define(
        'pool_name',
        default=None,
        help=
        "Container name fragment used to identity containers that belong to this instance."
    )
    tornado.options.define(
        'static_files',
        default=None,
        help="Static files to extract from the initial container launch")
    tornado.options.define(
        'allow_origin',
        default=None,
        help=
        "Set the Access-Control-Allow-Origin header. Use '*' to allow any origin to access."
    )
    tornado.options.define('assert_hostname',
                           default=False,
                           help="Verify hostname of Docker daemon.")

    tornado.options.parse_command_line()
    opts = tornado.options.options

    handlers = [
        (r"/", LoadingHandler),
        (r"/spawn/?(/user/\w+(?:/.*)?)?", SpawnHandler),
        (r"/api/spawn/", APISpawnHandler),
        (r"/(user/\w+)(?:/.*)?", LoadingHandler),
        (r"/stats", StatsHandler),
    ]

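    # Connection details for the configurable-http-proxy and the Docker daemon
    # come from the environment; CONFIGPROXY_AUTH_TOKEN must be set.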
    proxy_token = os.environ['CONFIGPROXY_AUTH_TOKEN']
    proxy_endpoint = os.environ.get('CONFIGPROXY_ENDPOINT',
                                    "http://127.0.0.1:8001")
    docker_host = os.environ.get('DOCKER_HOST', 'unix://var/run/docker.sock')

    max_age = datetime.timedelta(seconds=opts.cull_timeout)
    pool_name = opts.pool_name
    if pool_name is None:
        # Derive a valid container name from the image name by default.
        pool_name = re.sub('[^a-zA-Z0-9_.-]+', '', opts.image.split(':')[0])

    container_config = dockworker.ContainerConfig(
        image=opts.image,
        command=opts.command,
        mem_limit=opts.mem_limit,
        cpu_shares=opts.cpu_shares,
        container_ip=opts.container_ip,
        container_port=opts.container_port,
    )

    spawner = dockworker.DockerSpawner(
        docker_host,
        version=opts.docker_version,
        timeout=30,
        max_workers=opts.max_dock_workers,
        assert_hostname=opts.assert_hostname,
    )

    static_path = os.path.join(os.path.dirname(__file__), "static")

    pool = spawnpool.SpawnPool(
        proxy_endpoint=proxy_endpoint,
        proxy_token=proxy_token,
        spawner=spawner,
        container_config=container_config,
        capacity=opts.pool_size,
        max_age=max_age,
        static_files=opts.static_files,
        static_dump_path=static_path,
        pool_name=pool_name,
    )

    ioloop = tornado.ioloop.IOLoop.instance()

    settings = dict(
        default_handler_class=BaseHandler,
        static_path=static_path,
        cookie_secret=uuid.uuid4(),
        xsrf_cookies=False,
        debug=True,
        cull_period=opts.cull_period,
        allow_origin=opts.allow_origin,
        spawner=spawner,
        pool=pool,
        autoescape=None,
        proxy_token=proxy_token,
        template_path=os.path.join(os.path.dirname(__file__), 'templates'),
        proxy_endpoint=proxy_endpoint,
        redirect_uri=opts.redirect_uri.lstrip('/'),
    )

    # Synchronously cull any existing, inactive containers, and pre-launch a set number of
    # containers, ready to serve.
    ioloop.run_sync(pool.heartbeat)

    if opts.static_files:
        ioloop.run_sync(pool.copy_static)

    # Periodically execute a heartbeat function to cull used containers and regenerate failed
    # ones, self-healing the cluster.
    cull_ms = opts.cull_period * 1e3
    app_log.info("Culling containers unused for %i seconds every %i seconds.",
                 opts.cull_timeout, opts.cull_period)
    culler = tornado.ioloop.PeriodicCallback(pool.heartbeat, cull_ms)
    culler.start()

    app_log.info("Listening on {}:{}".format(opts.ip or '*', opts.port))
    application = tornado.web.Application(handlers, **settings)
    application.listen(opts.port, opts.ip)
    ioloop.start()
Code Example #3
File: orchestrate.py  Project: utsavkesharwani/tmpnb
def main():
    tornado.options.define('cull_period',
                           default=600,
                           help="Interval (s) for culling idle containers.")
    tornado.options.define('cull_timeout',
                           default=3600,
                           help="Timeout (s) for culling idle containers.")
    tornado.options.define('cull_max',
                           default=14400,
                           help=dedent("""
        Maximum age of a container (s), regardless of activity.

        Default: 14400 (4 hours)

        A container that has been running for this long will be culled,
        even if it is not idle.
        """))
    tornado.options.define(
        'container_ip',
        default='127.0.0.1',
        help="""Host IP address for containers to bind to. If host_network=True,
the host IP address for notebook servers to bind to.""")
    tornado.options.define(
        'container_port',
        default='8888',
        help="""Within container port for notebook servers to bind to.
If host_network=True, the starting port assigned to notebook servers on the host."""
    )
    tornado.options.define(
        'use_tokens',
        default=False,
        help="""Enable token-authentication of notebook servers.
If host_network=True, the starting port assigned to notebook servers on the host."""
    )

    command_default = ('jupyter notebook --no-browser'
                       ' --port {port} --ip=0.0.0.0'
                       ' --NotebookApp.base_url=/{base_path}'
                       ' --NotebookApp.port_retries=0'
                       ' --NotebookApp.token="{token}"')

    tornado.options.define(
        'command',
        default=command_default,
        help="""Command to run when booting the image. A placeholder for
{base_path} should be provided. Placeholders for {port} and {ip} may also be provided."""
    )
    tornado.options.define('port',
                           default=9999,
                           help="port for the main server to listen on")
    tornado.options.define(
        'ip',
        default=None,
        help="ip for the main server to listen on [default: all interfaces]")
    tornado.options.define('admin_port',
                           default=10000,
                           help="port for the admin server to listen on")
    tornado.options.define(
        'admin_ip',
        default='127.0.0.1',
        help="ip for the admin server to listen on [default: 127.0.0.1]")
    tornado.options.define('max_dock_workers',
                           default=2,
                           help="Maximum number of docker workers")
    tornado.options.define('mem_limit',
                           default="512m",
                           help="Limit on Memory, per container")
    tornado.options.define('cpu_shares',
                           default=None,
                           type=int,
                           help="Limit CPU shares, per container")
    tornado.options.define('cpu_quota',
                           default=None,
                           type=int,
                           help=dedent("""
        Limit CPU quota, per container.

        Units are CPU-µs per 100ms, so 1 CPU/container would be:

            --cpu-quota=100000

        """))
    tornado.options.define(
        'image',
        default="jupyter/minimal-notebook",
        help=
        "Docker container to spawn for new users. Must be on the system already"
    )
    tornado.options.define('docker_version',
                           default="auto",
                           help="Version of the Docker API to use")
    tornado.options.define(
        'redirect_uri',
        default="/tree",
        help="URI to redirect users to upon initial notebook launch")
    tornado.options.define(
        'pool_size',
        default=10,
        help=
        "Capacity for containers on this system. Will be prelaunched at startup."
    )
    tornado.options.define(
        'pool_name',
        default=None,
        help=
        "Container name fragment used to identity containers that belong to this instance."
    )
    tornado.options.define(
        'static_files',
        default=None,
        help="Static files to extract from the initial container launch")
    tornado.options.define(
        'allow_origin',
        default=None,
        help=
        "Set the Access-Control-Allow-Origin header. Use '*' to allow any origin to access."
    )
    tornado.options.define(
        'expose_headers',
        default=None,
        help="Sets the Access-Control-Expose-Headers header.")
    tornado.options.define('max_age',
                           default=None,
                           help="Sets the Access-Control-Max-Age header.")
    tornado.options.define(
        'allow_credentials',
        default=None,
        help="Sets the Access-Control-Allow-Credentials header.")
    tornado.options.define(
        'allow_methods',
        default=None,
        help="Sets the Access-Control-Allow-Methods header.")
    tornado.options.define(
        'allow_headers',
        default=None,
        help="Sets the Access-Control-Allow-Headers header.")
    tornado.options.define('assert_hostname',
                           default=False,
                           help="Verify hostname of Docker daemon.")
    tornado.options.define('container_user',
                           default=None,
                           help="User to run container command as")
    tornado.options.define(
        'host_network',
        default=False,
        help="""Attaches the containers to the host networking instead of the
default docker bridge. Affects the semantics of container_port and container_ip."""
    )
    tornado.options.define(
        'docker_network',
        default=None,
        help="""Attaches the containers to the specified docker network.
        For use when the proxy, tmpnb, and containers are all in docker.""")
    tornado.options.define('host_directories',
                           default=None,
                           help=dedent("""
        Mount the specified directory as a data volume. Multiple directories
        can be specified as a comma-delimited string. Each directory path must
        be provided in full (e.g. /home/steve/data/:r); permissions default to
        rw."""))
    tornado.options.define(
        'user_length',
        default=12,
        help="Length of the unique /user/:id path generated per container")
    tornado.options.define('extra_hosts',
                           default=[],
                           multiple=True,
                           help=dedent("""
        Extra hosts for the containers. Multiple hosts can be specified as a
        comma-delimited string, each in the form hostname:IP."""))

    tornado.options.parse_command_line()
    opts = tornado.options.options

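    # API_AUTH_TOKEN and ADMIN_AUTH_TOKEN are optional; CONFIGPROXY_AUTH_TOKEN
    # must be set (os.environ[...] raises KeyError otherwise).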
    api_token = os.getenv('API_AUTH_TOKEN')
    admin_token = os.getenv('ADMIN_AUTH_TOKEN')
    proxy_token = os.environ['CONFIGPROXY_AUTH_TOKEN']
    proxy_endpoint = os.environ.get('CONFIGPROXY_ENDPOINT',
                                    "http://127.0.0.1:8001")
    docker_host = os.environ.get('DOCKER_HOST', 'unix://var/run/docker.sock')

    handlers = [
        (r"/api/spawn/?", APISpawnHandler),
        (r"/api/stats/?", APIStatsHandler),
        (r"/stats/?", RedirectHandler, {
            "url": "/api/stats"
        }),
    ]

    # Only add human-facing handlers if there's no spawn API key set
    if api_token is None:
        handlers.extend([
            (r"/", LoadingHandler),
            (r"/spawn/?(/user/\w+(?:/.*)?)?", SpawnHandler),
            (r"/spawn/((?:notebooks|tree|files)(?:/.*)?)", SpawnHandler),
            (r"/(user/\w+)(?:/.*)?", LoadingHandler),
            (r"/((?:notebooks|tree|files)(?:/.*)?)", LoadingHandler),
            (r"/info/?", InfoHandler),
        ])

    admin_handlers = [(r"/api/pool/?", APIPoolHandler)]

    max_idle = datetime.timedelta(seconds=opts.cull_timeout)
    max_age = datetime.timedelta(seconds=opts.cull_max)
    pool_name = opts.pool_name
    if pool_name is None:
        # Derive a valid container name from the image name by default.
        pool_name = re.sub('[^a-zA-Z0-9_.-]+', '', opts.image.split(':')[0])

    container_config = dockworker.ContainerConfig(
        image=opts.image,
        command=opts.command,
        use_tokens=opts.use_tokens,
        mem_limit=opts.mem_limit,
        cpu_quota=opts.cpu_quota,
        cpu_shares=opts.cpu_shares,
        container_ip=opts.container_ip,
        container_port=opts.container_port,
        container_user=opts.container_user,
        host_network=opts.host_network,
        docker_network=opts.docker_network,
        host_directories=opts.host_directories,
        extra_hosts=opts.extra_hosts,
    )

    spawner = dockworker.DockerSpawner(
        docker_host,
        timeout=30,
        version=opts.docker_version,
        max_workers=opts.max_dock_workers,
        assert_hostname=opts.assert_hostname,
    )

    static_path = os.path.join(os.path.dirname(__file__), "static")

    pool = spawnpool.SpawnPool(proxy_endpoint=proxy_endpoint,
                               proxy_token=proxy_token,
                               spawner=spawner,
                               container_config=container_config,
                               capacity=opts.pool_size,
                               max_idle=max_idle,
                               max_age=max_age,
                               static_files=opts.static_files,
                               static_dump_path=static_path,
                               pool_name=pool_name,
                               user_length=opts.user_length)

    ioloop = tornado.ioloop.IOLoop.current()

    settings = dict(
        default_handler_class=BaseHandler,
        static_path=static_path,
        cookie_secret=uuid.uuid4(),
        xsrf_cookies=False,
        debug=True,
        cull_period=opts.cull_period,
        allow_origin=opts.allow_origin,
        expose_headers=opts.expose_headers,
        max_age=opts.max_age,
        allow_credentials=opts.allow_credentials,
        allow_methods=opts.allow_methods,
        allow_headers=opts.allow_headers,
        spawner=spawner,
        pool=pool,
        autoescape=None,
        proxy_token=proxy_token,
        api_token=api_token,
        template_path=os.path.join(os.path.dirname(__file__), 'templates'),
        proxy_endpoint=proxy_endpoint,
        redirect_uri=opts.redirect_uri.lstrip('/'),
    )

    admin_settings = dict(pool=pool, admin_token=admin_token)

    # Cleanup on a fresh state (likely a restart)
    ioloop.run_sync(pool.cleanout)

    # Synchronously cull any existing, inactive containers, and pre-launch a set number of
    # containers, ready to serve.
    ioloop.run_sync(pool.heartbeat)

    if opts.static_files:
        ioloop.run_sync(pool.copy_static)

    # Periodically execute a heartbeat function to cull used containers and regenerate failed
    # ones, self-healing the cluster.
    cull_ms = opts.cull_period * 1e3
    app_log.info("Culling containers unused for %i seconds every %i seconds.",
                 opts.cull_timeout, opts.cull_period)
    culler = tornado.ioloop.PeriodicCallback(pool.heartbeat, cull_ms)
    culler.start()

    app_log.info("Listening on {}:{}".format(opts.ip or '*', opts.port))
    application = tornado.web.Application(handlers, **settings)
    application.listen(opts.port, opts.ip)

    app_log.info("Admin listening on {}:{}".format(opts.admin_ip or '*',
                                                   opts.admin_port))
    admin_application = tornado.web.Application(admin_handlers,
                                                **admin_settings)
    admin_application.listen(opts.admin_port, opts.admin_ip)

    ioloop.start()
Code Example #4
    # TODO: read from env / config file
    container_config = dockworker.ContainerConfig(command=command_default,
                                                  image=container_image,
                                                  mem_limit=container_memlimit,
                                                  cpu_shares=None,
                                                  container_ip=container_ip,
                                                  container_port='8888',
                                                  container_user='******',
                                                  host_network=False,
                                                  host_directories=None,
                                                  extra_hosts=[])

    spawner = dockworker.DockerSpawner(
        docker_host,
        timeout=30,
        version="auto",
        max_workers=4,
        assert_hostname=False,
    )

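    # The proxy token and endpoint below fall back to local development
    # defaults when the CONFIGPROXY_* environment variables are not set.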
    settings = dict(
        spawner=spawner,
        container_name_pattern=re.compile(r'tmp\.([^.]+)\.(.+)\Z'),
        pool_name="tmpnb",
        container_config=container_config,
        proxy_token=os.environ.get('CONFIGPROXY_AUTH_TOKEN', "devtoken"),
        proxy_endpoint=os.environ.get('CONFIGPROXY_ENDPOINT',
                                      "http://127.0.0.1:8001"),
    )
    app = tornado.web.Application(handlers, **settings)
    app.listen(9005)
Code Example #5
def main():
    tornado.options.define('cull_timeout',
                           default=3600,
                           help="Timeout (s) for culling idle")
    tornado.options.define('container_ip',
                           default='127.0.0.1',
                           help="IP address for containers to bind to")
    tornado.options.define('container_port',
                           default='8888',
                           help="Port for containers to bind to")
    tornado.options.define(
        'ipython_executable',
        default='ipython3',
        help="IPython Notebook startup (e.g. ipython, ipython2, ipython3)")
    tornado.options.define('port',
                           default=9999,
                           help="port for the main server to listen on")
    tornado.options.define('max_dock_workers',
                           default=24,
                           help="Maximum number of docker workers")
    tornado.options.define('mem_limit',
                           default="512m",
                           help="Limit on Memory, per container")
    tornado.options.define('cpu_shares',
                           default=None,
                           help="Limit CPU shares, per container")
    tornado.options.define(
        'image',
        default="jupyter/demo",
        help=
        "Docker container to spawn for new users. Must be on the system already"
    )
    tornado.options.define('docker_version',
                           default="1.13",
                           help="Version of the Docker API to use")
    tornado.options.define(
        'redirect_uri',
        default="/tree",
        help="URI to redirect users to upon initial notebook launch")

    tornado.options.parse_command_line()
    opts = tornado.options.options

    handlers = [
        (r"/", LoadingHandler),
        (r"/spawn/?(/user-\w+/.+)?", SpawnHandler),
        (r"/(user-\w+)/.*", LoadingHandler),
    ]

    proxy_token = os.environ['CONFIGPROXY_AUTH_TOKEN']
    proxy_endpoint = os.environ.get('CONFIGPROXY_ENDPOINT',
                                    "http://127.0.0.1:8001")
    docker_host = os.environ.get('DOCKER_HOST', 'unix://var/run/docker.sock')

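    # Wrap the blocking docker-py client in a thread pool so Docker calls do
    # not block the Tornado IOLoop.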
    blocking_docker_client = docker.Client(base_url=docker_host,
                                           version=opts.docker_version,
                                           timeout=20)

    executor = ThreadPoolExecutor(max_workers=opts.max_dock_workers)

    async_docker_client = AsyncDockerClient(blocking_docker_client, executor)

    spawner = dockworker.DockerSpawner(docker_host,
                                       version=opts.docker_version,
                                       timeout=20,
                                       max_workers=opts.max_dock_workers)

    settings = dict(
        static_path=os.path.join(os.path.dirname(__file__), "static"),
        cookie_secret=uuid.uuid4(),
        xsrf_cookies=True,
        debug=True,
        spawner=spawner,
        autoescape=None,
        container_ip=opts.container_ip,
        container_port=opts.container_port,
        ipython_executable=opts.ipython_executable,
        proxy_token=proxy_token,
        template_path=os.path.join(os.path.dirname(__file__), 'templates'),
        proxy_endpoint=proxy_endpoint,
        mem_limit=opts.mem_limit,
        cpu_shares=opts.cpu_shares,
        image=opts.image,
        redirect_uri=opts.redirect_uri,
    )

    # check for idle containers and cull them
    cull_timeout = opts.cull_timeout

    if cull_timeout:
        delta = datetime.timedelta(seconds=cull_timeout)
        cull_ms = cull_timeout * 1e3
        app_log.info("Culling every %i seconds", cull_timeout)
        culler = tornado.ioloop.PeriodicCallback(
            lambda: cull_idle(async_docker_client, proxy_endpoint, proxy_token,
                              delta), cull_ms)
        culler.start()
    else:
        app_log.info("Not culling idle containers")

    app_log.info("Listening on {}".format(opts.port))

    application = tornado.web.Application(handlers, **settings)
    application.listen(opts.port)
    tornado.ioloop.IOLoop.instance().start()
Code Example #6
File: orchestrate.py  Project: smashwilson/tmpnb
def main():
    tornado.options.define('cull_period',
                           default=600,
                           help="Interval (s) for culling idle containers.")
    tornado.options.define('cull_timeout',
                           default=3600,
                           help="Timeout (s) for culling idle containers.")
    tornado.options.define('container_ip',
                           default='127.0.0.1',
                           help="IP address for containers to bind to")
    tornado.options.define('container_port',
                           default='8888',
                           help="Port for containers to bind to")
    tornado.options.define(
        'ipython_executable',
        default='ipython3',
        help="IPython Notebook startup (e.g. ipython, ipython2, ipython3)")
    tornado.options.define('port',
                           default=9999,
                           help="port for the main server to listen on")
    tornado.options.define('max_dock_workers',
                           default=24,
                           help="Maximum number of docker workers")
    tornado.options.define('mem_limit',
                           default="512m",
                           help="Limit on Memory, per container")
    tornado.options.define('cpu_shares',
                           default=None,
                           help="Limit CPU shares, per container")
    tornado.options.define(
        'image',
        default="jupyter/demo",
        help=
        "Docker container to spawn for new users. Must be on the system already"
    )
    tornado.options.define('docker_version',
                           default="1.13",
                           help="Version of the Docker API to use")
    tornado.options.define(
        'redirect_uri',
        default="/tree",
        help="URI to redirect users to upon initial notebook launch")
    tornado.options.define(
        'pool_size',
        default=128,
        help=
        "Capacity for containers on this system. Will be prelaunched at startup."
    )

    tornado.options.parse_command_line()
    opts = tornado.options.options

    handlers = [
        (r"/", LoadingHandler),
        (r"/spawn/?(/user-\w+/.+)?", SpawnHandler),
        (r"/(user-\w+)/.*", LoadingHandler),
    ]

    proxy_token = os.environ['CONFIGPROXY_AUTH_TOKEN']
    proxy_endpoint = os.environ.get('CONFIGPROXY_ENDPOINT',
                                    "http://127.0.0.1:8001")
    docker_host = os.environ.get('DOCKER_HOST', 'unix://var/run/docker.sock')

    max_age = datetime.timedelta(seconds=opts.cull_timeout)

    container_config = dockworker.ContainerConfig(
        image=opts.image,
        ipython_executable=opts.ipython_executable,
        mem_limit=opts.mem_limit,
        cpu_shares=opts.cpu_shares,
        container_ip=opts.container_ip,
        container_port=opts.container_port)

    spawner = dockworker.DockerSpawner(docker_host,
                                       version=opts.docker_version,
                                       timeout=20,
                                       max_workers=opts.max_dock_workers)

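    # The pool pre-launches `pool_size` containers behind the proxy; the
    # heartbeat below replaces containers as they are used up or fail.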
    pool = spawnpool.SpawnPool(proxy_endpoint=proxy_endpoint,
                               proxy_token=proxy_token,
                               spawner=spawner,
                               container_config=container_config,
                               capacity=opts.pool_size,
                               max_age=max_age)

    ioloop = tornado.ioloop.IOLoop.instance()

    settings = dict(
        static_path=os.path.join(os.path.dirname(__file__), "static"),
        cookie_secret=uuid.uuid4(),
        xsrf_cookies=True,
        debug=True,
        cull_period=opts.cull_period,
        spawner=spawner,
        pool=pool,
        autoescape=None,
        proxy_token=proxy_token,
        template_path=os.path.join(os.path.dirname(__file__), 'templates'),
        proxy_endpoint=proxy_endpoint,
        redirect_uri=opts.redirect_uri.lstrip('/'),
    )

    # Synchronously cull any existing, inactive containers, and pre-launch a set number of
    # containers, ready to serve.
    ioloop.run_sync(pool.heartbeat)

    # Periodically execute a heartbeat function to cull used containers and regenerate failed
    # ones, self-healing the cluster.
    cull_ms = opts.cull_period * 1e3
    app_log.info("Culling containers unused for %i seconds every %i seconds.",
                 opts.cull_timeout, opts.cull_period)
    culler = tornado.ioloop.PeriodicCallback(pool.heartbeat, cull_ms)
    culler.start()

    app_log.info("Listening on {}".format(opts.port))
    application = tornado.web.Application(handlers, **settings)
    application.listen(opts.port)
    ioloop.start()