async def nginx_job_async(
        nmrc_path: Path,
        loop: asyncio.AbstractEventLoop) -> AsyncIterator[Tuple[str, str]]:
    """Run a throwaway NGINX job serving a random secret.

    Yields ``(job_id, secret)`` once the job reaches RUNNING state; the
    job is killed (best effort) when the generator is closed.  ``loop``
    is accepted for signature compatibility but is not used directly.
    """
    async with api_get(path=nmrc_path) as client:
        secret = uuid4()
        # Drop the secret where NGINX serves static files, then run the
        # server in the foreground for at most 15 minutes.
        command = (
            f"bash -c \"echo -n '{secret}' > /usr/share/nginx/html/secret.txt; "
            f"timeout 15m /usr/sbin/nginx -g 'daemon off;'\"")
        container = Container(
            image=RemoteImage("nginx", "latest"),
            command=command,
            resources=Resources(20, 0.1, None, None, True, None, None),
        )

        job = await client.jobs.run(
            container,
            is_preemptible=False,
            description="test NGINX job",
        )
        try:
            # Poll for up to a minute until the job is running.
            for _attempt in range(60):
                current = await client.jobs.status(job.id)
                if current.status == JobStatus.RUNNING:
                    break
                await asyncio.sleep(1)
            else:
                raise AssertionError("Cannot start NGINX job")
            yield job.id, str(secret)
        finally:
            # Best-effort cleanup: the job may already be gone.
            with suppress(Exception):
                await client.jobs.kill(job.id)
async def test_parse_remote_registry_image(make_client: _MakeClient) -> None:
    """A fully-qualified ref in the configured registry yields owner + name."""
    async with make_client(
        "https://api.localhost.localdomain", registry_url="http://localhost:5000"
    ) as client:
        parsed = client.parse.remote_image("localhost:5000/bob/library/bananas:latest")
    expected = RemoteImage(
        "library/bananas", "latest", owner="bob", registry="localhost:5000"
    )
    assert parsed == expected
# --- Example 3 ---
 def test_as_docker_url_in_neuro_registry(self) -> None:
     """An image in the platform registry renders as registry/owner/name:tag."""
     image = RemoteImage(
         name="ubuntu",
         tag="v10.04",
         owner="me",
         cluster_name="test-cluster",
         registry="registry.io",
     )
     expected = "registry.io/me/ubuntu:v10.04"
     assert image.as_docker_url() == expected
# --- Example 4 ---
async def cp_s3(
    root: Root, src: URL, dst: URL, recursive: bool, update: bool, progress: bool
) -> None:
    """Copy files between local disk and platform storage via an S3 gateway.

    Launches a temporary MinIO server job that exposes the storage root as
    an S3 bucket, then runs the AWS CLI in a local Docker container against
    that endpoint to perform the actual copy.  Exactly one of *src*/*dst*
    must use the ``file:`` scheme and the other the ``storage:`` scheme.

    :param root: CLI context providing the platform client, TTY/color/quiet
        settings and verbosity level.
    :param src: source URL (``file:`` or ``storage:``).
    :param dst: destination URL (``file:`` or ``storage:``).
    :param recursive: pass ``--recursive`` to the AWS CLI command.
    :param update: use ``aws s3 sync`` instead of ``aws s3 cp``.
    :param progress: also stream the client container's stdout.
    :raises RuntimeError: for unsupported scheme combinations, or when the
        AWS CLI container exits with a non-zero code.
    """
    # Determine copy direction; only file<->storage transfers are supported.
    if src.scheme == "file" and dst.scheme == "storage":
        storage_uri = dst
        local_uri = src
        upload = True
    elif src.scheme == "storage" and dst.scheme == "file":
        storage_uri = src
        local_uri = dst
        upload = False
    else:
        raise RuntimeError(
            f"Copy operation of the file with scheme '{src.scheme}'"
            f" to the file with scheme '{dst.scheme}'"
            f" is not supported"
        )

    # Fresh credentials per invocation; shared between the MinIO server job
    # and the AWS CLI client container.
    access_key = secrets.token_urlsafe(nbytes=16)
    secret_key = secrets.token_urlsafe(nbytes=16)
    # Unique scratch directory on storage so concurrent runs don't collide.
    minio_dir = f"minio-{secrets.token_hex(nbytes=8)}"
    s3_uri = f"s3://bucket{storage_uri.path}"
    # The symlink makes the whole mounted storage root appear as a bucket
    # named "bucket" under MinIO's data directory.
    minio_script = f"""\
mkdir /mnt/{minio_dir}
ln -s /mnt /mnt/{minio_dir}/bucket
minio server /mnt/{minio_dir}
"""
    # Mount the storage root read-write into the server job at /mnt.
    volume = Volume(
        storage_uri=storage_uri.with_path(""), container_path="/mnt", read_only=False
    )
    server_container = Container(
        image=RemoteImage(MINIO_IMAGE_NAME, MINIO_IMAGE_TAG),
        entrypoint="sh",
        command=f"-c {shlex.quote(minio_script)}",
        # requires_auth=False: the AWS client authenticates with the MinIO
        # keys above, not with platform credentials.
        http=HTTPPort(port=9000, requires_auth=False),
        resources=Resources(
            memory_mb=1024,
            cpu=1,
            gpu=0,
            gpu_model=None,
            shm=True,
            tpu_type=None,
            tpu_software_version=None,
        ),
        env={"MINIO_ACCESS_KEY": access_key, "MINIO_SECRET_KEY": secret_key},
        volumes=[volume],
    )

    log.info(f"Launching Amazon S3 gateway for {str(storage_uri.with_path(''))!r}")
    job_name = f"neuro-upload-server-{secrets.token_hex(nbytes=8)}"
    job = await root.client.jobs.run(server_container, name=job_name)
    try:
        # Show start progress while waiting for the gateway job to leave
        # the PENDING state.
        jsprogress = JobStartProgress.create(
            tty=root.tty, color=root.color, quiet=root.quiet
        )
        while job.status == JobStatus.PENDING:
            await asyncio.sleep(0.2)
            job = await root.client.jobs.status(job.id)
            jsprogress(job)
        jsprogress.close()

        # If the local side is a single file, mount its parent directory
        # and address the file by name inside the container.
        local_path = "/data"
        if not os.path.isdir(local_uri.path):
            local_path = f"/data/{local_uri.name}"
            local_uri = local_uri.parent
        binding = f"{local_uri.path}:/data"
        if upload:
            # Uploads only read the local tree; mount it read-only.
            binding += ":ro"
        cp_cmd = ["sync" if update else "cp"]
        if recursive:
            cp_cmd.append("--recursive")
        if root.verbosity < 0:
            cp_cmd.append("--quiet")
        if upload:
            cp_cmd.append(local_path)
            cp_cmd.append(s3_uri)
        else:
            cp_cmd.append(s3_uri)
            cp_cmd.append(local_path)

        # Raise the AWS CLI's concurrency limits, then run the copy against
        # the MinIO gateway's public HTTP URL.
        aws_script = f"""\
aws configure set default.s3.max_concurrent_requests 100
aws configure set default.s3.max_queue_size 10000
aws --endpoint-url {job.http_url} s3 {" ".join(map(shlex.quote, cp_cmd))}
"""
        if root.verbosity >= 2:
            # Trace each shell command at high verbosity.
            aws_script = "set -x\n" + aws_script
        log.info(f"Launching Amazon S3 client for {local_uri.path!r}")
        docker = aiodocker.Docker()
        try:
            aws_image = f"{AWS_IMAGE_NAME}:{AWS_IMAGE_TAG}"
            async for info in await docker.images.pull(aws_image, stream=True):
                # TODO Use some of Progress classes
                log.debug(str(info))
            client_container = await docker.containers.create(
                config={
                    "Image": aws_image,
                    "Entrypoint": "sh",
                    "Cmd": ["-c", aws_script],
                    "Env": [
                        f"AWS_ACCESS_KEY_ID={access_key}",
                        f"AWS_SECRET_ACCESS_KEY={secret_key}",
                    ],
                    "HostConfig": {"Binds": [binding]},
                    "Tty": True,
                },
                name=f"neuro-upload-client-{secrets.token_hex(nbytes=8)}",
            )
            try:
                await client_container.start()
                tasks = [client_container.wait()]

                async def printlogs(err: bool) -> None:
                    # Follow one of the container's output streams and echo
                    # it to the matching local stream.
                    async for piece in await client_container.log(
                        stdout=not err,
                        stderr=err,
                        follow=True,
                        details=(root.verbosity > 1),
                    ):
                        click.echo(piece, nl=False, err=err)

                if not root.quiet:
                    tasks.append(printlogs(err=True))
                if root.verbosity > 0 or progress:
                    tasks.append(printlogs(err=False))
                await asyncio.gather(*tasks)
                # Propagate the AWS CLI's failure as a CLI error.
                exit_code = (await client_container.show())["State"]["ExitCode"]
                if exit_code:
                    raise RuntimeError(f"AWS copying failed with code {exit_code}")
            finally:
                await client_container.delete(force=True)
        finally:
            await docker.close()
    finally:
        try:
            await root.client.jobs.kill(job.id)
        finally:
            # The MinIO scratch dir may still be locked while the job shuts
            # down; retry removal with exponential backoff.
            attempts = 10
            delay = 0.2
            while True:
                try:
                    await root.client.storage.rm(
                        URL(f"storage:{minio_dir}"), recursive=True
                    )
                except IllegalArgumentError:
                    attempts -= 1
                    if not attempts:
                        raise
                    log.info(
                        "Failed attempt to remove the MinIO directory", exc_info=True
                    )
                    await asyncio.sleep(delay)
                    delay *= 2
                    continue
                break
async def test_parse_remote_public(make_client: _MakeClient) -> None:
    """A bare public ref parses with no owner and no registry."""
    async with make_client(
        "https://api.localhost.localdomain", registry_url="http://localhost:5000"
    ) as client:
        parsed = client.parse.remote_image("ubuntu:latest")
    assert parsed == RemoteImage("ubuntu", "latest", owner=None, registry=None)
async def test_parse_remote(make_client: _MakeClient) -> None:
    """An image:// URI resolves against the default platform registry."""
    async with make_client("https://api.localhost.localdomain") as client:
        parsed = client.parse.remote_image("image://bob/bananas:latest")
    expected = RemoteImage(
        "bananas", "latest", owner="bob", registry="registry-dev.neu.ro"
    )
    assert parsed == expected
# --- Example 7 ---
 def test_as_docker_url_not_in_neuro_registry(self) -> None:
     """An image outside the platform registry renders as plain name:tag."""
     image = RemoteImage(name="ubuntu", tag="v10.04", owner=None, registry=None)
     expected = "ubuntu:v10.04"
     assert image.as_docker_url() == expected