Example #1
 async def query_started(self) -> None:
     channel = Docker().events.subscribe()
     while channel:
         event = await channel.get()
         if event['Type'] == 'container' and event['status'] == 'start':
             container = Docker().containers.container(event['id'])
             await container.show()
             await self.handle(container)
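
The loop above never terminates on its own; a common variant checks for the subscriber yielding None, which is how aiodocker signals that the event stream has closed. A minimal, self-contained sketch of the same pattern that reuses a single client (the handler body here is illustrative, not part of the original example):

import asyncio
from aiodocker import Docker

async def watch_container_starts() -> None:
    # Sketch only: react to container "start" events with one shared client.
    docker = Docker()
    try:
        subscriber = docker.events.subscribe()
        while True:
            event = await subscriber.get()
            if event is None:  # event stream was closed
                break
            if event['Type'] == 'container' and event['status'] == 'start':
                container = docker.containers.container(event['id'])
                print(await container.show())
    finally:
        await docker.close()

# asyncio.run(watch_container_starts())
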
Example #2
 def __init__(
     self,
     rabbitmq_port: int,
 ) -> None:
     self._rabbitmq_port = rabbitmq_port
     self._running = False
     self._docker = Docker()
Example #3
async def list_containers():
    """
    Retrieve the list of local images being built by repo2docker.
    Images are built in a Docker container.
    """
    async with Docker() as docker:
        r2d_containers = await docker.containers.list(
            filters=json.dumps({"label": ["repo2docker.ref"]}))
    containers = [
        {
            "repo": container["Labels"]["repo2docker.repo"],
            "ref": container["Labels"]["repo2docker.ref"],
            "image_name": container["Labels"]["repo2docker.build"],
            "display_name": container["Labels"]["tljh_repo2docker.display_name"],
            "mem_limit": container["Labels"]["tljh_repo2docker.mem_limit"],
            "cpu_limit": container["Labels"]["tljh_repo2docker.cpu_limit"],
            "status": "building",
        }
        for container in r2d_containers
        if "repo2docker.build" in container["Labels"]
    ]
    return containers
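
The filters argument in this helper (and in the image-listing examples further down) is the JSON-encoded filter map that the Docker Engine API expects: a dict mapping each filter name to a list of values. A minimal sketch of the same call outside the helper, with an assumed label value:

import asyncio
import json
from aiodocker import Docker

async def containers_with_label(label: str):
    # Sketch only: list containers that carry the given label.
    async with Docker() as docker:
        return await docker.containers.list(
            filters=json.dumps({"label": [label]}))

# asyncio.run(containers_with_label("repo2docker.ref"))
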
Example #4
 async def service_start_(self, service_id):
     service_id = str(service_id)
     docker = Docker()
     try:
         service = self.application['services'][service_id]
         if not service.get('image_pulled'):
             logger.debug('Image pull started')
             auth = service['image'].get('auth')
             repository = (
                 service['image']['repository'] + service['image']['name']
                 if service['image'].get('repository')
                 else service['image']['name'])
             if auth:
                 logger.debug('Pulling {} with auth {}'.format(
                     repository, auth))
                 image = await docker.images.pull(repository, auth=auth)
             else:
                 logger.debug('Pulling {}'.format(repository))
                 image = await docker.images.pull(repository)
             logger.debug('Image pull ended')
             service['image_pulled'] = True
         logger.debug('Start service: {}'.format(service['name']))
         container = await docker.containers.create_or_replace(
             name=service['name'], config=service['container'])
         self.application['services'][service_id][
             'container_id'] = container._id
         logger.debug('service started ({}): container {}'.format(
             service['name'], container._id))
         await container.start()
         return True
     except KeyError as e:
         logger.exception(e)  # See error locally
         raise  # Return back RPC error
     finally:
         await docker.close()
Example #5
    async def _pull_image(self):
        docker_image = f"{config.DOCKER_REGISTRY}/{self.task.image['name']}:{self.task.image['tag']}"
        log.debug(
            "PULLING IMAGE %s as %s with pwd %s",
            docker_image,
            config.DOCKER_USER,
            config.DOCKER_PASSWORD,
        )
        async with Docker() as docker_client:
            await self._post_messages(
                LogType.LOG,
                f"[sidecar]Pulling {self.task.image['name']}:{self.task.image['tag']}...",
            )
            await docker_client.images.pull(
                docker_image,
                auth={
                    "username": config.DOCKER_USER,
                    "password": config.DOCKER_PASSWORD,
                },
            )

            # get integration version
            image_cfg = await docker_client.images.inspect(docker_image)
            # NOTE: old services did not have that label
            if "io.simcore.integration-version" in image_cfg["Config"][
                    "Labels"]:
                self.integration_version = version.parse(
                    json.loads(image_cfg["Config"]["Labels"]
                               ["io.simcore.integration-version"])
                    ["integration-version"])
Example #6
async def docker(loop):
    client = Docker()

    try:
        yield client
    finally:
        await client.close()
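
This reads like an async pytest fixture whose decorator was stripped by the excerpt. A hedged sketch of how such a fixture might be declared and consumed with pytest-asyncio (the decorator names and the test body are assumptions, not shown in the original):

import pytest
import pytest_asyncio
from aiodocker import Docker

@pytest_asyncio.fixture
async def docker():
    # Yield a shared client to the test, then close it during teardown.
    client = Docker()
    try:
        yield client
    finally:
        await client.close()

@pytest.mark.asyncio
async def test_daemon_version(docker):
    # Illustrative test body: the daemon reports an API version.
    version = await docker.version()
    assert "ApiVersion" in version
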
Example #7
    async def services_log(self):
        if not self.application.get('services'):
            logger.debug('No services to log')
            return
        docker = Docker()
        try:
            for service_id, service in self.application['services'].items():
                if not service.get('container_id'):
                    continue  # Container not yet started
                container = await docker.containers.get(
                    service['container_id'])
                logs = await container.log(stdout=True,
                                           stderr=True,
                                           details=True,
                                           since=self.last_logs)
                await self.device_log('\n'.join(logs), service_id=service_id)

        except Exception as e:
            logger.exception(e)

        finally:
            self.last_logs = datetime.now().timestamp()
            await docker.close()

        await asyncio.sleep(LOG_INTERVAL)
        await self.scheduler.spawn(self.services_log())
Example #8
    async def set_limits(self):
        """
        Set the user environment limits if they are defined in the image
        """
        imagename = self.user_options.get("image")
        async with Docker() as docker:
            image = await docker.images.inspect(imagename)

        mem_limit = image["ContainerConfig"]["Labels"].get(
            "tljh_repo2docker.mem_limit", None)
        cpu_limit = image["ContainerConfig"]["Labels"].get(
            "tljh_repo2docker.cpu_limit", None)

        # override the spawner limits if defined in the image
        if mem_limit:
            self.mem_limit = mem_limit
        if cpu_limit:
            self.cpu_limit = float(cpu_limit)

        if self.cpu_limit:
            self.extra_host_config.update({
                "cpu_period": CPU_PERIOD,
                "cpu_quota": int(float(CPU_PERIOD) * self.cpu_limit),
            })
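
As context for the cpu_quota computation: Docker's CFS settings allow the container cpu_quota microseconds of CPU time per cpu_period microseconds, so the quota is simply the period scaled by the fractional limit. A quick worked example (the CPU_PERIOD value is assumed here; the constant itself is defined elsewhere in the project):

CPU_PERIOD = 100_000  # microseconds, the conventional 100 ms period (assumed value)

cpu_limit = 0.5  # i.e. half a CPU
cpu_quota = int(float(CPU_PERIOD) * cpu_limit)
assert cpu_quota == 50_000  # 50 ms of CPU time allowed per 100 ms period
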
Example #9
def test_execute():
    client = Docker.local_client()
    conf = {
        'name': 'aio-1',
        'image': 'gliderlabs/alpine:3.1',
        'command': ['sleep', '60']
    }

    with pytest.raises(NotFound):
        yield from client.containers.get('aio-1')

    container_id = yield from client.containers.create(**conf)
    assert container_id

    started = yield from client.containers.start(container_id)
    assert started

    # the real test starts now

    exec_id = yield from client.executors.create(container_id, cmd=[
        'echo', 'FOOBAR'
    ])

    yield from client.executors.start(exec_id)

    results = yield from client.executors.inspect(exec_id)
    assert results['exit_code'] == 0, 'should be 0'

    # end of the test

    stopped = yield from client.containers.stop(container_id)
    assert stopped, 'Should be stopped'

    deleted = yield from client.containers.delete(container_id)
    assert deleted, 'Should be deleted'
Example #10
def test_delete():
    client = Docker.local_client()
    images = yield from client.images.items()
    for image in images:
        if '<none>:<none>' in image['repo_tags']:
            deleted = yield from client.images.delete(image['id'], force=True)
            assert deleted, 'cannot remove image'
Example #11
def events(bot):
    docker = Docker()
    events = docker.events
    events.saferun()

    stream = events.listen()
    while True:
        el = yield from stream.get()
        yield from bot.post("#cron", "`{}`".format(str(el)))
Example #12
    async def delete(self):
        data = self.get_json_body()
        name = data["name"]
        async with Docker() as docker:
            try:
                await docker.images.delete(name)
            except DockerError as e:
                raise web.HTTPError(e.status, e.message)

        self.set_status(200)
        self.finish(json.dumps({"status": "ok"}))
Example #13
def test_conflict():
    client = Docker.local_client()
    conf = {
        'name': 'aio-1',
        'image': 'gliderlabs/alpine:3.1',
        'command': ['sleep', '60']
    }

    with pytest.raises(NotFound):
        yield from client.containers.get('aio-1')

    container_id = yield from client.containers.create(**conf)
    assert container_id

    data_1 = yield from client.containers.get('aio-1')
    data_2 = yield from client.containers.get(container_id)

    assert data_1 == data_2, 'Fetch by name or id must return the same data'
    assert data_1['id'] == container_id

    # Cannot create container with the same name
    with pytest.raises(ConflictError):
        yield from client.containers.create(**conf)

    c1 = containers = yield from client.containers.items(status='all')
    assert {'id': container_id} in containers, 'Must match by id'

    c2 = containers = yield from client.containers.items(status='running')
    assert {'id': container_id} not in containers, 'Must not be running'

    c3 = containers = yield from client.containers.items(status='exited')
    assert {'id': container_id} in containers, 'Must be exited'

    started = yield from client.containers.start(container_id)
    assert started

    started = yield from client.containers.start(container_id)
    assert not started

    # Cannot delete running container
    with pytest.raises(ConflictError):
        yield from client.containers.delete(container_id)

    stopped = yield from client.containers.stop(container_id)
    assert stopped, 'Should be stopped'

    stopped = yield from client.containers.stop(container_id)
    assert not stopped, 'Should already be stopped'

    deleted = yield from client.containers.delete(container_id)
    assert deleted, 'Should be deleted'

    deleted = yield from client.containers.delete(container_id)
    assert not deleted, 'Should be already deleted'
Example #14
async def test_delete_environment(app, minimal_repo, image_name):
    name, ref = image_name.split(":")
    await add_environment(app, repo=minimal_repo, name=name, ref=ref)
    await wait_for_image(image_name=image_name)
    r = await remove_environment(app, image_name=image_name)
    assert r.status_code == 200

    # make sure the image does not exist anymore
    docker = Docker()
    with pytest.raises(DockerError):
        await docker.images.inspect(image_name)
    await docker.close()
Example #15
async def wait_for_image(*, image_name):
    """wait until an image is built"""
    count, retries = 0, 60 * 10
    image = None
    async with Docker() as docker:
        while count < retries:
            await asyncio.sleep(1)
            try:
                image = await docker.images.inspect(image_name)
            except DockerError:
                count += 1
                continue
            else:
                break
    return image
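
Note that the loop gives up after roughly 600 one-second attempts and the helper then returns None, so callers should check the result. A minimal usage sketch (the image tag is a placeholder):

import asyncio

async def main() -> None:
    image = await wait_for_image(image_name="my-repo:abc1234")  # placeholder tag
    if image is None:
        raise RuntimeError("image was never built")
    print(image["Id"])

# asyncio.run(main())
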
Example #16
    async def application_start_(self):
        docker = Docker()
        try:
            for service_id in self.application['services']:
                await self.service_start_(service_id)
            return True

        except (DockerError, ValueError) as e:
            await self.device_log('{}'.format(e))

        except Exception as e:
            logger.exception(e)
            await self.device_log('{}'.format(e))

        finally:
            await docker.close()
Example #17
    async def get(self, name):
        self.set_header("Content-Type", "text/event-stream")
        self.set_header("Cache-Control", "no-cache")

        async with Docker() as docker:
            containers = await docker.containers.list(
                filters=json.dumps({"label": [f"repo2docker.build={name}"]}))

            if not containers:
                raise web.HTTPError(404, f"No logs for image: {name}")

            async for line in containers[0].log(stdout=True,
                                                stderr=True,
                                                follow=True):
                await self._emit({"phase": "log", "message": line})

        await self._emit({"phase": "built", "message": "built"})
Example #18
def test_pull():
    client = Docker.local_client()

    ref = 'gliderlabs/alpine:2.6'

    pulled = yield from client.dockerhub.pull(ref)
    assert pulled, 'Should download gliderlabs/alpine:2.6'

    # is it present locally?
    image = yield from client.images.inspect(ref)
    assert image

    # start a new container with this image
    container_id = yield from client.containers.create(**{
        'name': 'aio-2',
        'image': 'gliderlabs/alpine:2.6',
        'command': ['sleep', '60']
    })
    assert container_id

    started = yield from client.containers.start(container_id)
    assert started

    pulled = yield from client.dockerhub.pull(ref)
    assert pulled, 'Should still download gliderlabs/alpine:2.6'

    with pytest.raises(ConflictError):
        destroyed = yield from client.images.delete(ref)
        assert not destroyed, 'Should not allow destroy'

    stopped = yield from client.containers.stop(container_id)
    assert stopped, 'Should stop container'

    removed = yield from client.containers.remove(container_id)
    assert removed, 'Should remove container'

    images = yield from client.images.items()
    assert {'repo_tag': ref} in images, 'Should be present'

    destroyed = yield from client.images.delete(ref, force=True)
    assert destroyed, 'Should be destroyed'

    images = yield from client.images.items()
    assert {'repo_tag': ref} not in images, 'Should be absent'
Example #19
 async def service_stop_(self, service_id):
     service_id = str(service_id)
     docker = Docker()
     try:
         service = self.application['services'][service_id]
         logger.debug('Stop service: {}'.format(service['name']))
         if not self.application['services'][service_id].get('container_id'):
             logger.warning(
                 'service stopped ({}): no container'.format(service['name']))
             return 'no container'
         container = await docker.containers.get(
             self.application['services'][service_id]['container_id'])
         await container.stop()
         return True
     except KeyError as e:
         logger.exception(e)  # See error locally
         raise  # Return back RPC error
     finally:
         await docker.close()
Example #20
 async def service_start_(self, service_id):
     service_id = str(service_id)
     service = self.application['services'][service_id]
     docker = Docker()
     try:
         await self.service_pull_(service_id)
         logger.debug('Start service: {}'.format(service['name']))
         container = await docker.containers.create_or_replace(
             name=service['name'], config=service['container'])
         self.application['services'][service_id][
             'container_id'] = container._id
         logger.debug('service started ({}): container {}'.format(
             service['name'], container._id))
         await container.start()
         return True
     except KeyError as e:
         logger.exception(e)  # See error locally
         raise  # Return back RPC error
     finally:
         await docker.close()
Example #21
async def list_images():
    """
    Retrieve local images built by repo2docker
    """
    async with Docker() as docker:
        r2d_images = await docker.images.list(filters=json.dumps({
            "dangling": ["false"],
            "label": ["repo2docker.ref"]
        }))
    images = [
        {
            "repo": image["Labels"]["repo2docker.repo"],
            "ref": image["Labels"]["repo2docker.ref"],
            "image_name": image["Labels"]["tljh_repo2docker.image_name"],
            "display_name": image["Labels"]["tljh_repo2docker.display_name"],
            "mem_limit": image["Labels"]["tljh_repo2docker.mem_limit"],
            "cpu_limit": image["Labels"]["tljh_repo2docker.cpu_limit"],
            "status": "built",
        }
        for image in r2d_images
        if "tljh_repo2docker.image_name" in image["Labels"]
    ]
    return images
Example #22
 async def service_status_(self, service_id):
     service_id = str(service_id)  # JSON keys are always strings
     docker = Docker()
     begin = self.loop.time()
     try:
         logger.debug('Service status for {}'.format(
             self.application['services'][service_id]['name']))
         if not self.application['services'][service_id].get('container_id'):
             return 'starting'
         container = await docker.containers.get(
             self.application['services'][service_id]['container_id'])
         data = await container.show()
         logger.debug('Service status took {}'.format(
             self.loop.time() - begin))
         return data['State']['Status']
     except KeyError:
         raise RPCError('Service not found')
     except Exception as e:
         logger.exception(e)
     finally:
         await docker.close()
Example #23
 async def service_pull_(self, service_id):
     service_id = str(service_id)
     docker = Docker()
     try:
         service = self.application['services'][service_id]
         if not service.get('image_pulled'):
             logger.debug('Image pull started')
             auth = service['image'].get('auth')
             name = service['image'].get('name')
             if auth:
                 logger.debug('Pulling {} with auth {}'.format(name, auth))
                 image = await docker.images.pull(name, auth=auth)
             else:
                 logger.debug('Pulling {}'.format(name))
                 image = await docker.images.pull(name)
             logger.debug('Image pull ended')
             service['image_pulled'] = True
             return True
     finally:
         await docker.close()
Example #24
    async def _pull_image(self) -> Tuple[SimcoreServiceSettingsLabel, Version]:
        docker_image = f"{config.DOCKER_REGISTRY}/{self.task.image['name']}:{self.task.image['tag']}"
        async with Docker() as docker_client:
            await self._post_messages(
                LogType.LOG,
                f"[sidecar]Pulling {self.task.image['name']}:{self.task.image['tag']}...",
            )
            await docker_client.images.pull(
                docker_image,
                auth={
                    "username": config.DOCKER_USER,
                    "password": config.DOCKER_PASSWORD,
                },
            )

            # get integration version
            image_cfg = await docker_client.images.inspect(docker_image)
            # NOTE: old services did not have that label
            integration_version = version.parse("0.0.0")
            if "io.simcore.integration-version" in image_cfg["Config"][
                    "Labels"]:
                integration_version = version.parse(
                    json.loads(image_cfg["Config"]["Labels"]
                               ["io.simcore.integration-version"])
                    ["integration-version"])
            # get service settings
            service_settings_labels = SimcoreServiceSettingsLabel.parse_raw(
                image_cfg["Config"]["Labels"].get("simcore.service.settings",
                                                  "[]"))
            log.debug(
                "found following service settings: %s",
                pformat(service_settings_labels),
            )
            await self._post_messages(
                LogType.LOG,
                f"[sidecar]Pulled {self.task.image['name']}:{self.task.image['tag']}",
            )
            return (service_settings_labels, integration_version)
Example #25
def test_pull():
    client = Docker.local_client()
    images = yield from client.images.items()
Example #26
async def docker():
    client = Docker()
    await yield_(client)
    await client.close()
Example #27
 def __init__(self):
     super(ContainerService, self).__init__()
     self._containers = weakref.WeakValueDictionary()
     self._docker = Docker()
     self._database = Service.resolve("moxie.cores.database.DatabaseService")
Example #28
class ContainerService(Service):
    """
    This provides an interface to run container jobs somewhere off in the
    ether somewhere.
    """

    identifier = "moxie.cores.container.ContainerService"

    def __init__(self):
        super(ContainerService, self).__init__()
        self._containers = weakref.WeakValueDictionary()
        self._docker = Docker()
        self._database = Service.resolve("moxie.cores.database.DatabaseService")

    def _check_container(self, name):
        job = yield from self._database.job.get(name)
        # Check if active
        if job is None:
            raise ValueError("Sorry, that's not something you can kill")

    @asyncio.coroutine
    def events(self, name):
        return (yield from self._docker.events)

    @asyncio.coroutine
    def pull(self, name):
        return (yield from self._docker.pull(name))

    def _purge_cache(self, name):
        if name in self._containers:
            self._containers.pop(name)

    @asyncio.coroutine
    def delete(self, name):
        yield from self._check_container(name)
        try:
            obj = yield from self.get(name)
        except ValueError:
            return

        self._purge_cache(name)
        yield from obj.delete()

    @asyncio.coroutine
    def create(self, config, **kwargs):
        return (yield from self._docker.containers.create(config, **kwargs))

    @asyncio.coroutine
    def start(self, name, config, **kwargs):
        yield from self._check_container(name)
        obj = yield from self.get(name)
        return (yield from obj.start(config, **kwargs))

    @asyncio.coroutine
    def kill(self, name, *args, **kwargs):
        yield from self._check_container(name)
        obj = yield from self.get(name)
        return (yield from obj.kill(*args, **kwargs))

    @asyncio.coroutine
    def get(self, name):
        yield from self._check_container(name)
        if name in self._containers:
            obj = self._containers[name]
            try:
                yield from obj.show()  # update cache
                return obj
            except ValueError:
                self._purge_cache(name)
        container = yield from self._docker.containers.get(name)
        self._containers[name] = container
        return container

    @asyncio.coroutine
    def list(self, **kwargs):
        containers = yield from self._docker.containers.list(**kwargs)
        return containers

    @asyncio.coroutine
    def __call__(self):
        pass
Example #29
    async def _run_container(self):
        start_time = time.perf_counter()
        docker_image = f"{config.DOCKER_REGISTRY}/{self.task.image['name']}:{self.task.image['tag']}"
        container_config = await self._create_container_config(docker_image)

        # volume paths for car container (w/o prefix)
        result = "FAILURE"
        log_processor_task = None
        try:
            async with Docker() as docker_client:
                await self._post_messages(
                    LogType.LOG,
                    f"[sidecar]Running {self.task.image['name']}:{self.task.image['tag']}...",
                )
                container = await docker_client.containers.create(
                    config=container_config)
                log_processor_task = await self._start_monitoring_container(
                    container)
                # start the container
                await container.start()
                # indicate container is started
                await self.rabbit_mq.post_instrumentation_message({
                    "metrics": "service_started",
                    "user_id": self.user_id,
                    "project_id": self.task.project_id,
                    "service_uuid": self.task.node_id,
                    "service_type": "COMPUTATIONAL",
                    "service_key": self.task.image["name"],
                    "service_tag": self.task.image["tag"],
                })

                # wait until the container finished, either success or fail or timeout
                container_data = await container.show()
                TIME_TO_NEXT_PERIODIC_CHECK_SECS = 2
                while container_data["State"]["Running"]:
                    await asyncio.sleep(TIME_TO_NEXT_PERIODIC_CHECK_SECS)
                    # reload container data
                    container_data = await container.show()
                    if ((time.perf_counter() - start_time) >
                            config.SERVICES_TIMEOUT_SECONDS
                            and config.SERVICES_TIMEOUT_SECONDS > 0):
                        log.error(
                            "Running container timed-out after %ss and will be stopped now\nlogs: %s",
                            config.SERVICES_TIMEOUT_SECONDS,
                            await container.log(stdout=True, stderr=True),
                        )
                        await container.stop()
                        break

                # reload container data to check the error code with latest info
                container_data = await container.show()
                if container_data["State"]["ExitCode"] > 0:
                    logs = await container.log(stdout=True,
                                               stderr=True,
                                               tail=10)
                    exc = exceptions.SidecarException(
                        f"{docker_image} completed with error code {container_data['State']['ExitCode']}:\n {container_data['State']['Error']}\n:Last logs:\n{logs}"
                    )
                    # clean up the container
                    await container.delete(force=True)
                    raise exc
                # clean up the container
                await container.delete(force=True)
                # ensure progress 1.0 is sent
                await self._post_messages(LogType.PROGRESS, "1.0")
                result = "SUCCESS"
                log.info("%s completed with successfully!", docker_image)
        except DockerContainerError:
            log.exception(
                "Error while running %s with parameters %s",
                docker_image,
                container_config,
            )
            raise
        except DockerError:
            log.exception(
                "Unknown error while trying to run %s with parameters %s",
                docker_image,
                container_config,
            )
            raise
        except asyncio.CancelledError:
            log.warning("Container run was cancelled")
            raise

        finally:
            stop_time = time.perf_counter()
            log.info("Running %s took %sseconds", docker_image,
                     stop_time - start_time)
            # stop monitoring logs now
            if log_processor_task:
                log_processor_task.cancel()
                await log_processor_task
            # instrumentation
            await self.rabbit_mq.post_instrumentation_message({
                "metrics": "service_stopped",
                "user_id": self.user_id,
                "project_id": self.task.project_id,
                "service_uuid": self.task.node_id,
                "service_type": "COMPUTATIONAL",
                "service_key": self.task.image["name"],
                "service_tag": self.task.image["tag"],
                "result": result,
            })
Example #30
def test_search():
    # TODO mock result
    client = Docker.local_client()
    info = yield from client.dockerhub.search('ubuntu')
    assert info[0].name == 'ubuntu'
Example #31
class ContainerService(Service):
    """
    This provides an interface to run container jobs somewhere off in the
    ether somewhere.
    """

    identifier = "moxie.cores.container.ContainerService"

    def __init__(self):
        super(ContainerService, self).__init__()
        self._containers = weakref.WeakValueDictionary()
        self._docker = Docker()
        self._database = Service.resolve(
            "moxie.cores.database.DatabaseService")

    def _check_container(self, name):
        job = yield from self._database.job.get(name)
        # Check if active
        if job is None:
            raise ValueError("Sorry, that's not something you can kill")

    @asyncio.coroutine
    def events(self, name):
        return (yield from self._docker.events)

    @asyncio.coroutine
    def pull(self, name):
        return (yield from self._docker.pull(name))

    def _purge_cache(self, name):
        if name in self._containers:
            self._containers.pop(name)

    @asyncio.coroutine
    def delete(self, name):
        yield from self._check_container(name)
        try:
            obj = yield from self.get(name)
        except ValueError:
            return

        self._purge_cache(name)
        yield from obj.delete()

    @asyncio.coroutine
    def create(self, config, **kwargs):
        return (yield from self._docker.containers.create(config, **kwargs))

    @asyncio.coroutine
    def start(self, name, config, **kwargs):
        yield from self._check_container(name)
        obj = yield from self.get(name)
        return (yield from obj.start(config, **kwargs))

    @asyncio.coroutine
    def kill(self, name, *args, **kwargs):
        yield from self._check_container(name)
        obj = yield from self.get(name)
        return (yield from obj.kill(*args, **kwargs))

    @asyncio.coroutine
    def get(self, name):
        yield from self._check_container(name)
        if name in self._containers:
            obj = self._containers[name]
            try:
                yield from obj.show()  # update cache
                return obj
            except ValueError:
                self._purge_cache(name)
        container = yield from self._docker.containers.get(name)
        self._containers[name] = container
        return container

    @asyncio.coroutine
    def list(self, **kwargs):
        containers = yield from self._docker.containers.list(**kwargs)
        return containers

    @asyncio.coroutine
    def __call__(self):
        pass
Example #32
def test_version():
    client = Docker.local_client()
    version = yield from client.version()
    assert client.api.version <= version['api_version'], 'client API version must not exceed the server API version'
Example #33
def test_ping():
    client = Docker.local_client()
    assert (yield from client.ping())
Example #34
def test_info():
    client = Docker.local_client()
    info = yield from client.info()
    assert 'containers' in info, 'must have containers key'
Example #35
async def docker(loop):
    client = Docker()
    yield client
    await client.close()
Example #36
File: d1.py  Project: whg517/study-python
async def list_images():
    client = Docker()
    images = await client.images.list()
    await client.close()
    return images
Example #37
File: bot.py  Project: maubot/dockerbot
 async def start(self) -> None:
     self.docker = Docker()
Example #38
 def __init__(self):
     super(ContainerService, self).__init__()
     self._containers = weakref.WeakValueDictionary()
     self._docker = Docker()
     self._database = Service.resolve(
         "moxie.cores.database.DatabaseService")
Example #39
async def build_image(repo, ref, name="", memory=None, cpu=None):
    """
    Build an image given a repo, ref and limits
    """
    ref = ref or "master"
    if len(ref) >= 40:
        ref = ref[:7]

    # default to the repo name if no name specified
    # and sanitize the name of the docker image
    name = name or urlparse(repo).path.strip("/")
    name = name.lower().replace("/", "-")
    image_name = f"{name}:{ref}"

    # memory is specified in GB
    memory = f"{memory}G" if memory else ""
    cpu = cpu or ""

    # add extra labels to set additional image properties
    labels = [
        f"LABEL tljh_repo2docker.display_name={name}",
        f"LABEL tljh_repo2docker.image_name={image_name}",
        f"LABEL tljh_repo2docker.mem_limit={memory}",
        f"LABEL tljh_repo2docker.cpu_limit={cpu}",
    ]
    cmd = [
        "jupyter-repo2docker",
        "--ref",
        ref,
        "--user-name",
        "jovyan",
        "--user-id",
        "1100",
        "--no-run",
        "--image-name",
        image_name,
        "--appendix",
        "\n".join(labels),
        repo,
    ]
    async with Docker() as docker:
        await docker.containers.run(
            config={
                "Cmd": cmd,
                "Image": "jupyter/repo2docker:master",
                "Labels": {
                    "repo2docker.repo": repo,
                    "repo2docker.ref": ref,
                    "repo2docker.build": image_name,
                    "tljh_repo2docker.display_name": name,
                    "tljh_repo2docker.mem_limit": memory,
                    "tljh_repo2docker.cpu_limit": cpu,
                },
                "Volumes": {
                    "/var/run/docker.sock": {"bind": "/var/run/docker.sock", "mode": "rw",}
                },
                "HostConfig": {"Binds": ["/var/run/docker.sock:/var/run/docker.sock"],},
                "Tty": False,
                "AttachStdout": False,
                "AttachStderr": False,
                "OpenStdin": False,
            }
        )
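
A hedged sketch of calling the helper above; the repository URL and limits are placeholders, and the call only launches the repo2docker build container, so pairing it with a polling helper such as wait_for_image from Example #15 is what actually confirms the image exists:

import asyncio

async def main() -> None:
    # Placeholder repository and limits; build_image is the helper defined above.
    await build_image(
        "https://github.com/binder-examples/requirements",
        ref="master",
        memory=2,  # GB
        cpu=1,
    )

# asyncio.run(main())
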
Example #40
async def remove_docker_image(image_name):
    async with Docker() as docker:
        try:
            await docker.images.delete(image_name, force=True)
        except DockerError:
            pass
Example #41
    async def run(self, command: List[str]) -> TaskOutputData:
        await self._publish_sidecar_state(RunningState.STARTED)
        await self._publish_sidecar_log(
            f"Starting task for {self.service_key}:{self.service_version} on {socket.gethostname()}..."
        )

        settings = Settings.create_from_envs()
        run_id = f"{uuid4()}"
        async with Docker() as docker_client, TaskSharedVolumes(
            Path(f"{settings.SIDECAR_COMP_SERVICES_SHARED_FOLDER}/{run_id}")
        ) as task_volumes:
            # PRE-PROCESSING
            await pull_image(
                docker_client,
                self.docker_auth,
                self.service_key,
                self.service_version,
                self._publish_sidecar_log,
            )

            integration_version = await get_integration_version(
                docker_client, self.docker_auth, self.service_key,
                self.service_version)
            computational_shared_data_mount_point = (
                await get_computational_shared_data_mount_point(docker_client))
            config = await create_container_config(
                docker_registry=self.docker_auth.server_address,
                service_key=self.service_key,
                service_version=self.service_version,
                command=command,
                comp_volume_mount_point=
                f"{computational_shared_data_mount_point}/{run_id}",
                boot_mode=self.boot_mode,
                task_max_resources=self.task_max_resources,
            )
            await self._write_input_data(task_volumes, integration_version)

            # PROCESSING
            async with managed_container(docker_client, config) as container:
                async with managed_monitor_container_log_task(
                        container=container,
                        service_key=self.service_key,
                        service_version=self.service_version,
                        progress_pub=self.task_publishers.progress,
                        logs_pub=self.task_publishers.logs,
                        integration_version=integration_version,
                        task_volumes=task_volumes,
                        log_file_url=self.log_file_url,
                        log_publishing_cb=self._publish_sidecar_log,
                ):
                    await container.start()
                    await self._publish_sidecar_log(
                        f"Container started as '{container.id}' on {socket.gethostname()}..."
                    )
                    # wait until the container finished, either success or fail or timeout
                    while (container_data := await container.show())["State"]["Running"]:
                        await asyncio.sleep(CONTAINER_WAIT_TIME_SECS)
                    if container_data["State"]["ExitCode"] > os.EX_OK:
                        await self._publish_sidecar_state(
                            RunningState.FAILED,
                            msg=
                            f"error while running container '{container.id}' for '{self.service_key}:{self.service_version}'",
                        )

                        raise ServiceRunError(
                            service_key=self.service_key,
                            service_version=self.service_version,
                            container_id=container.id,
                            exit_code=container_data["State"]["ExitCode"],
                            service_logs=await container.log(stdout=True,
                                                             stderr=True,
                                                             tail=20),
                        )
                    await self._publish_sidecar_log(
                        "Container ran successfully.")

            # POST-PROCESSING
            results = await self._retrieve_output_data(task_volumes,
                                                       integration_version)
            await self._publish_sidecar_log("Task completed successfully.")
            return results