async def test_ls_repositories(
    self, aiohttp_server: _TestServerFactory, make_client: _MakeClient
) -> None:
    JSON = {"repositories": ["bob/alpine", "jill/bananas"]}

    async def handler(request: web.Request) -> web.Response:
        return web.json_response(JSON)

    app = web.Application()
    app.router.add_get("/v2/_catalog", handler)

    srv = await aiohttp_server(app)
    url = "http://platform"
    registry_url = srv.make_url("/v2/")

    async with make_client(url, registry_url=registry_url) as client:
        ret = await client.images.ls()

        registry = _get_url_authority(registry_url)
        assert registry is not None
        assert set(ret) == {
            RemoteImage.new_neuro_image(
                "alpine",
                tag=None,
                owner="bob",
                cluster_name="default",
                registry=registry,
            ),
            RemoteImage.new_neuro_image(
                "bananas",
                tag=None,
                owner="jill",
                cluster_name="default",
                registry=registry,
            ),
        }

def test_as_docker_url_in_neuro_registry(self) -> None:
    image = RemoteImage(
        name="ubuntu",
        tag="v10.04",
        owner="me",
        cluster_name="test-cluster",
        registry="registry.io",
    )
    assert image.as_docker_url() == "registry.io/me/ubuntu:v10.04"

async def nginx_job_async(
    nmrc_path: Path, loop: asyncio.AbstractEventLoop
) -> AsyncIterator[Tuple[str, str]]:
    async with api_get(path=nmrc_path) as client:
        secret = uuid4()
        command = (
            f"bash -c \"echo -n '{secret}' > /usr/share/nginx/html/secret.txt; "
            f"timeout 15m /usr/sbin/nginx -g 'daemon off;'\""
        )
        container = Container(
            image=RemoteImage.new_external_image(name="nginx", tag="latest"),
            command=command,
            resources=Resources(20, 0.1, None, None, True, None, None),
        )

        job = await client.jobs.run(
            container, is_preemptible=False, description="test NGINX job"
        )
        try:
            for i in range(60):
                status = await client.jobs.status(job.id)
                if status.status == JobStatus.RUNNING:
                    break
                await asyncio.sleep(1)
            else:
                raise AssertionError("Cannot start NGINX job")
            yield job.id, str(secret)
        finally:
            with suppress(Exception):
                await client.jobs.kill(job.id)

async def test_parse_remote_registry_image(make_client: _MakeClient) -> None:
    async with make_client(
        "https://api.localhost.localdomain", registry_url="http://localhost:5000"
    ) as client:
        result = client.parse.remote_image("localhost:5000/bob/library/bananas:latest")
        assert result == RemoteImage(
            "library/bananas", "latest", owner="bob", registry="localhost:5000"
        )

def test_quiet_save(self, capfd: Any) -> None:
    formatter = DockerImageProgress.create(tty=True, quiet=True)
    formatter.save(
        ImageProgressSave("job-id", RemoteImage.new_external_image(name="output"))
    )
    formatter.close()
    out, err = capfd.readouterr()
    assert err == ""
    assert out == ""

async def test_parse_remote(make_client: _MakeClient) -> None:
    async with make_client("https://api.localhost.localdomain") as client:
        result = client.parse.remote_image("image://test-cluster/bob/bananas:latest")
        assert result == RemoteImage.new_neuro_image(
            name="bananas",
            tag="latest",
            owner="bob",
            registry="registry-dev.neu.ro",
            cluster_name="test-cluster",
        )

def test_parse_as_neuro_image_with_scheme_with_user_no_tag_2(self) -> None:
    image = "image://other-cluster/bob/library/ubuntu"
    parsed = self.parser.parse_as_neuro_image(image)
    assert parsed == RemoteImage.new_neuro_image(
        name="library/ubuntu",
        tag="latest",
        owner="bob",
        cluster_name="other-cluster",
        registry="reg.neu.ro",
    )

def test_quiet_push(self, capfd: Any) -> None:
    formatter = DockerImageProgress.create(tty=True, quiet=True)
    formatter.push(
        ImageProgressPush(
            LocalImage("output"), RemoteImage.new_external_image(name="input")
        )
    )
    formatter.step(ImageProgressStep("message1", "layer1"))
    formatter.close()
    out, err = capfd.readouterr()
    assert err == ""
    assert out == ""

def test_convert_to_local_image(self) -> None:
    neuro_image = RemoteImage.new_neuro_image(
        name="ubuntu",
        tag="latest",
        owner="artem",
        cluster_name="test-cluster",
        registry="reg.com",
    )
    local_image = self.parser.convert_to_local_image(neuro_image)
    assert local_image == LocalImage(name="ubuntu", tag="latest")

def test_as_str_in_neuro_registry_tag_yes(self) -> None:
    image = RemoteImage.new_neuro_image(
        name="ubuntu",
        tag="v10.04",
        owner="me",
        cluster_name="test-cluster",
        registry="registry.io",
    )
    assert str(image) == "image://test-cluster/me/ubuntu:v10.04"
    assert _as_repo_str(image) == "registry.io/me/ubuntu:v10.04"

def test_quiet_commit_started(self, capfd: Any) -> None:
    formatter = DockerImageProgress.create(tty=True, quiet=True)
    formatter.commit_started(
        ImageCommitStarted(
            job_id="job-id", target_image=RemoteImage.new_external_image("img")
        )
    )
    formatter.close()
    out, err = capfd.readouterr()
    assert err == ""
    assert out == ""

def test_convert_to_neuro_image(self) -> None:
    local_image = LocalImage(name="ubuntu", tag="latest")
    neuro_image = self.parser.convert_to_neuro_image(local_image)
    assert neuro_image == RemoteImage.new_neuro_image(
        name="ubuntu",
        tag="latest",
        owner="alice",
        cluster_name="test-cluster",
        registry="reg.neu.ro",
    )

def test_parse_as_neuro_image_with_scheme_4_slash_no_cluster_no_tag(self) -> None:
    image = "image:////bob/ubuntu"
    parsed = self.parser.parse_as_neuro_image(image)
    assert parsed == RemoteImage.new_neuro_image(
        name="ubuntu",
        tag="latest",
        owner="bob",
        cluster_name="test-cluster",
        registry="reg.neu.ro",
    )

def test_parse_as_neuro_image_with_scheme_no_slash_no_user_with_tag(self) -> None:
    image = "image:ubuntu:v10.04"
    parsed = self.parser.parse_as_neuro_image(image)
    assert parsed == RemoteImage.new_neuro_image(
        name="ubuntu",
        tag="v10.04",
        owner="alice",
        cluster_name="test-cluster",
        registry="reg.neu.ro",
    )

def test_parse_as_neuro_image_with_scheme_3_slash_no_cluster_with_tag_2(self) -> None:
    image = "image:///bob/library/ubuntu:v10.04"
    parsed = self.parser.parse_as_neuro_image(image)
    assert parsed == RemoteImage.new_neuro_image(
        name="library/ubuntu",
        tag="v10.04",
        owner="bob",
        cluster_name="test-cluster",
        registry="reg.neu.ro",
    )

def test_tty_commit_started(self, capfd: Any, click_tty_emulation: Any) -> None:
    formatter = DockerImageProgress.create(tty=True, quiet=False)
    formatter.commit_started(
        ImageCommitStarted(
            job_id="job-id", target_image=RemoteImage.new_external_image(name="img")
        )
    )
    formatter.close()
    out, err = capfd.readouterr()
    assert err == ""
    assert "img" in out
    assert CSI in out

def test_parse_as_neuro_image_allow_tag_false_with_scheme_no_tag(self) -> None:
    image = "image:ubuntu"
    parsed = self.parser.parse_as_neuro_image(image, tag_option=TagOption.DENY)
    assert parsed == RemoteImage.new_neuro_image(
        name="ubuntu",
        tag=None,
        owner="alice",
        cluster_name="test-cluster",
        registry="reg.neu.ro",
    )

async def test_tags_bad_image_without_name(self, make_client: _MakeClient) -> None:
    url = URL("http://whatever")
    registry_url = URL("http://whatever-registry")
    async with make_client(url, registry_url=registry_url) as client:
        image = RemoteImage.new_neuro_image(
            name="",
            tag=None,
            owner="me",
            cluster_name="test-cluster",
            registry="reg",
        )
        with pytest.raises(ValueError, match="missing image name"):
            await client.images.tags(image)

def test_parse_remote__registry_has_port__image_in_other_repo(self) -> None:
    my_parser = _ImageNameParser(
        default_user="******",
        default_cluster="test-cluster",
        registry_url=URL("http://localhost:5000"),
    )
    image = "example.com:9999/bob/library/ubuntu:v10.04"
    parsed = my_parser.parse_remote(image)
    # NOTE: "owner" is parsed only for images in neuromation registry
    assert parsed == RemoteImage.new_external_image(
        name="bob/library/ubuntu",
        tag="v10.04",
        registry="example.com:9999",
    )

def test_parse_remote__registry_has_port__image_in_good_repo(self) -> None:
    my_parser = _ImageNameParser(
        default_user="******",
        default_cluster="test-cluster",
        registry_url=URL("http://localhost:5000"),
    )
    image = "localhost:5000/bob/library/ubuntu:v10.04"
    parsed = my_parser.parse_remote(image)
    assert parsed == RemoteImage.new_neuro_image(
        name="library/ubuntu",
        tag="v10.04",
        owner="bob",
        cluster_name="test-cluster",
        registry="localhost:5000",
    )

def test_no_tty_commit_started(self, capfd: Any) -> None:
    formatter = DockerImageProgress.create(tty=False, quiet=False)
    formatter.commit_started(
        ImageCommitStarted(
            job_id="job-id",
            target_image=RemoteImage.new_neuro_image(
                name="output",
                tag="stream",
                owner="bob",
                registry="https://registry-dev.neu.ro",
                cluster_name="test-cluster",
            ),
        )
    )
    formatter.close()
    out, err = capfd.readouterr()
    assert "Using remote image 'image://test-cluster/bob/output:stream'" in out
    assert "Creating image from the job container..." in out
    assert err == ""

def test_tty_save(self, capfd: Any, click_tty_emulation: Any) -> None:
    formatter = DockerImageProgress.create(tty=True, quiet=False)
    formatter.save(
        ImageProgressSave(
            "job-id",
            RemoteImage.new_neuro_image(
                name="output",
                tag="stream",
                owner="bob",
                registry="https://registry-dev.neu.ro",
                cluster_name="test-cluster",
            ),
        )
    )
    formatter.close()
    out, err = capfd.readouterr()
    assert err == ""
    assert "job-id" in out
    assert "image://test-cluster/bob/output:stream" in out
    assert CSI in out

def test_no_tty_save(self, capfd: Any) -> None:
    formatter = DockerImageProgress.create(tty=False, quiet=False)
    formatter.save(
        ImageProgressSave(
            "job-id",
            RemoteImage.new_neuro_image(
                name="output",
                tag="stream",
                owner="bob",
                registry="https://registry-dev.neu.ro",
                cluster_name="test-cluster",
            ),
        )
    )
    formatter.close()
    out, err = capfd.readouterr()
    assert (
        "Saving job 'job-id' to image 'image://test-cluster/bob/output:stream'" in out
    )
    assert err == ""

def test_tty_push(self, capfd: Any, click_tty_emulation: Any) -> None:
    formatter = DockerImageProgress.create(tty=True, quiet=False)
    formatter.push(
        ImageProgressPush(
            LocalImage("input", "latest"),
            RemoteImage.new_neuro_image(
                name="output",
                tag="stream",
                owner="bob",
                registry="https://registry-dev.neu.ro",
                cluster_name="test-cluster",
            ),
        )
    )
    formatter.step(ImageProgressStep("message1", "layer1"))
    formatter.step(ImageProgressStep("message2", "layer1"))
    formatter.close()
    out, err = capfd.readouterr()
    assert err == ""
    assert "input:latest" in out
    assert "image://test-cluster/bob/output:stream" in out
    assert "message1" in out
    assert "message2" in out
    assert CSI in out

async def test_parse_remote(make_client: _MakeClient) -> None:
    async with make_client("https://api.localhost.localdomain") as client:
        result = client.parse.remote_image("image://bob/bananas:latest")
        assert result == RemoteImage(
            "bananas", "latest", owner="bob", registry="registry-dev.neu.ro"
        )

async def cp_s3(
    root: Root, src: URL, dst: URL, recursive: bool, update: bool, progress: bool
) -> None:
    if src.scheme == "file" and dst.scheme == "storage":
        storage_uri = dst
        local_uri = src
        upload = True
    elif src.scheme == "storage" and dst.scheme == "file":
        storage_uri = src
        local_uri = dst
        upload = False
    else:
        raise RuntimeError(
            f"Copy operation of the file with scheme '{src.scheme}'"
            f" to the file with scheme '{dst.scheme}'"
            f" is not supported"
        )

    # One-off credentials shared by the MinIO server job and the AWS CLI client.
    access_key = secrets.token_urlsafe(nbytes=16)
    secret_key = secrets.token_urlsafe(nbytes=16)
    minio_dir = f"minio-{secrets.token_hex(nbytes=8)}"
    s3_uri = f"s3://bucket{storage_uri.path}"
    # The MinIO job mounts the platform storage at /mnt and exposes it
    # as an S3 bucket named "bucket".
    minio_script = f"""\
mkdir /mnt/{minio_dir}
ln -s /mnt /mnt/{minio_dir}/bucket
minio server /mnt/{minio_dir}
"""
    volume = Volume(
        storage_uri=storage_uri.with_path(""), container_path="/mnt", read_only=False
    )
    server_container = Container(
        image=RemoteImage(MINIO_IMAGE_NAME, MINIO_IMAGE_TAG),
        entrypoint="sh",
        command=f"-c {shlex.quote(minio_script)}",
        http=HTTPPort(port=9000, requires_auth=False),
        resources=Resources(
            memory_mb=1024,
            cpu=1,
            gpu=0,
            gpu_model=None,
            shm=True,
            tpu_type=None,
            tpu_software_version=None,
        ),
        env={"MINIO_ACCESS_KEY": access_key, "MINIO_SECRET_KEY": secret_key},
        volumes=[volume],
    )
    log.info(f"Launching Amazon S3 gateway for {str(storage_uri.with_path(''))!r}")
    job_name = f"neuro-upload-server-{secrets.token_hex(nbytes=8)}"
    job = await root.client.jobs.run(server_container, name=job_name)
    try:
        # Wait for the S3 gateway job to start.
        jsprogress = JobStartProgress.create(
            tty=root.tty, color=root.color, quiet=root.quiet
        )
        while job.status == JobStatus.PENDING:
            await asyncio.sleep(0.2)
            job = await root.client.jobs.status(job.id)
            jsprogress(job)
        jsprogress.close()

        # Bind the local path into the client container and build the
        # `aws s3 cp`/`sync` command line.
        local_path = "/data"
        if not os.path.isdir(local_uri.path):
            local_path = f"/data/{local_uri.name}"
            local_uri = local_uri.parent
        binding = f"{local_uri.path}:/data"
        if upload:
            binding += ":ro"
        cp_cmd = ["sync" if update else "cp"]
        if recursive:
            cp_cmd.append("--recursive")
        if root.verbosity < 0:
            cp_cmd.append("--quiet")
        if upload:
            cp_cmd.append(local_path)
            cp_cmd.append(s3_uri)
        else:
            cp_cmd.append(s3_uri)
            cp_cmd.append(local_path)
        aws_script = f"""\
aws configure set default.s3.max_concurrent_requests 100
aws configure set default.s3.max_queue_size 10000
aws --endpoint-url {job.http_url} s3 {" ".join(map(shlex.quote, cp_cmd))}
"""
        if root.verbosity >= 2:
            aws_script = "set -x\n" + aws_script
        log.info(f"Launching Amazon S3 client for {local_uri.path!r}")

        # Run the AWS CLI in a local Docker container against the MinIO endpoint.
        docker = aiodocker.Docker()
        try:
            aws_image = f"{AWS_IMAGE_NAME}:{AWS_IMAGE_TAG}"
            async for info in await docker.images.pull(aws_image, stream=True):
                # TODO Use some of Progress classes
                log.debug(str(info))
            client_container = await docker.containers.create(
                config={
                    "Image": aws_image,
                    "Entrypoint": "sh",
                    "Cmd": ["-c", aws_script],
                    "Env": [
                        f"AWS_ACCESS_KEY_ID={access_key}",
                        f"AWS_SECRET_ACCESS_KEY={secret_key}",
                    ],
                    "HostConfig": {"Binds": [binding]},
                    "Tty": True,
                },
                name=f"neuro-upload-client-{secrets.token_hex(nbytes=8)}",
            )
            try:
                await client_container.start()
                tasks = [client_container.wait()]

                async def printlogs(err: bool) -> None:
                    async for piece in await client_container.log(
                        stdout=not err,
                        stderr=err,
                        follow=True,
                        details=(root.verbosity > 1),
                    ):
                        click.echo(piece, nl=False, err=err)

                if not root.quiet:
                    tasks.append(printlogs(err=True))
                if root.verbosity > 0 or progress:
                    tasks.append(printlogs(err=False))

                await asyncio.gather(*tasks)

                exit_code = (await client_container.show())["State"]["ExitCode"]
                if exit_code:
                    raise RuntimeError(f"AWS copying failed with code {exit_code}")
            finally:
                await client_container.delete(force=True)
        finally:
            await docker.close()
    finally:
        # Clean up: kill the gateway job and remove the temporary MinIO directory.
        try:
            await root.client.jobs.kill(job.id)
        finally:
            attempts = 10
            delay = 0.2
            while True:
                try:
                    await root.client.storage.rm(
                        URL(f"storage:{minio_dir}"), recursive=True
                    )
                except IllegalArgumentError:
                    attempts -= 1
                    if not attempts:
                        raise
                    log.info(
                        "Failed attempt to remove the MinIO directory", exc_info=True
                    )
                    await asyncio.sleep(delay)
                    delay *= 2
                    continue
                break

async def test_parse_remote_public(make_client: _MakeClient) -> None:
    async with make_client(
        "https://api.localhost.localdomain", registry_url="http://localhost:5000"
    ) as client:
        result = client.parse.remote_image("ubuntu:latest")
        assert result == RemoteImage.new_external_image(name="ubuntu", tag="latest")

def test_as_str_not_in_neuro_registry_tag_yes(self) -> None:
    image = RemoteImage.new_external_image(name="ubuntu", tag="v10.04")
    assert str(image) == "ubuntu:v10.04"
    assert _as_repo_str(image) == "ubuntu:v10.04"

def test_as_docker_url_not_in_neuro_registry(self) -> None:
    image = RemoteImage(name="ubuntu", tag="v10.04", owner=None, registry=None)
    assert image.as_docker_url() == "ubuntu:v10.04"