def test_container_files(ds_path, local_file, url):
    """Add, list, and remove a container at a non-default configured location."""
    # Any file can serve as the "image": adding a container only registers
    # it without calling or otherwise using it.
    img_url = get_local_file_url(op.join(local_file, 'some_container.img'))

    # Prepare the dataset with a non-default container location.
    ds = Dataset(ds_path).create()
    ds.config.add("datalad.containers.location",
                  value=op.join(".datalad", "test-environments"),
                  where='dataset')
    ds.save(message="Configure container mountpoint")

    # Nothing registered yet.
    assert_result_count(ds.containers_list(**RAW_KWDS), 0)

    # The first "image" must end up at the configured default location.
    expected = op.join(ds.path, ".datalad", "test-environments",
                       "first", "image")
    add_res = ds.containers_add(name="first", url=img_url)
    ok_clean_git(ds.repo)
    assert_result_count(add_res, 1,
                        status="ok", type="file", path=expected,
                        action="containers_add")
    ok_(op.lexists(expected))

    listing = ds.containers_list(**RAW_KWDS)
    assert_result_count(listing, 1)
    assert_result_count(listing, 1,
                        name='first', type='file', action='containers',
                        status='ok', path=expected)

    # Removal requires a name ...
    assert_raises(TypeError, ds.containers_remove)
    # ... and with remove_image=True the image file goes away too.
    rm_res = ds.containers_remove('first', remove_image=True)
    assert_status('ok', rm_res)
    assert_result_count(ds.containers_list(**RAW_KWDS), 0)
    assert (not op.lexists(expected))
def test_container_files(path):
    """Register a real singularity image and run a command through it."""
    ds = Dataset(path).create()
    # Plug in a proper singularity image; the call format is auto-guessed,
    # so there is no need for an explicit
    # call_fmt='singularity exec {img} {cmd}'.
    ds.containers_add('mycontainer', url=testimg_url, image='righthere')
    assert_result_count(
        ds.containers_list(), 1,
        path=op.join(ds.path, 'righthere'),
        name='mycontainer',
        updateurl=testimg_url)
    ok_clean_git(path)

    # With a single registered container we do not even have to name it
    # when running.
    run_res = ds.containers_run(['dir'] if on_windows else ['ls'])
    # The image becomes an 'input' for `run` -> a get request, "notneeded".
    assert_result_count(
        run_res, 1,
        action='get', status='notneeded',
        path=op.join(ds.path, 'righthere'), type='file')
    # The command itself changed nothing.
    assert_result_count(
        run_res, 1,
        action='add', status='notneeded',
        path=ds.path, type='dataset')
def test_list_contains(path):
    """`contains` narrows recursive listings to matching subdatasets."""
    ds = Dataset(path).create()
    sub_a = ds.create("a")
    sub_b = ds.create("b")
    sub_a_c = sub_a.create("c")

    # One container per (sub)dataset, registered deepest-first.
    for target, cname in ((sub_a_c, "in-c"),
                          (sub_a, "in-a"),
                          (sub_b, "in-b"),
                          (ds, "in-top")):
        add_pyscript_image(target, cname, "img")
    ds.save(recursive=True)

    assert_result_count(ds.containers_list(recursive=True, **RAW_KWDS), 4)

    # A path matching no subdataset still yields the top-level container.
    assert_result_count(
        ds.containers_list(contains=["nowhere"], recursive=True, **RAW_KWDS),
        1, name="in-top", action='containers')

    # Subdataset "a" pulls in its own container, its child "c"'s container,
    # and the top-level one.
    res = ds.containers_list(contains=[sub_a.path], recursive=True,
                             **RAW_KWDS)
    assert_result_count(res, 3)
    for cname in ("in-top", "a/in-a", "a/c/in-c"):
        assert_in_results(res, name=cname)

    # Pointing into the deeper "c" yields the very same set.
    res = ds.containers_list(contains=[sub_a_c.path], recursive=True,
                             **RAW_KWDS)
    assert_result_count(res, 3)
    for cname in ("in-top", "a/in-a", "a/c/in-c"):
        assert_in_results(res, name=cname)

    # Subdataset "b" brings in only its own container plus the top level.
    res = ds.containers_list(contains=[sub_b.path], recursive=True,
                             **RAW_KWDS)
    assert_result_count(res, 2)
    for cname in ("in-top", "b/in-b"):
        assert_in_results(res, name=cname)
def test_docker(path):
    """Fetch a busybox image via Singularity's "docker://" scheme and run it."""
    ds = Dataset(path).create()
    ds.containers_add(
        "bb",
        url=("docker://busybox@sha256:"
             "7964ad52e396a6e045c39b5a44438424ac52e12e4d5a25d94895f2058cb863a0"
             ))
    img_path = op.join(ds.path, ".datalad", "environments", "bb", "image")
    assert_result_count(ds.containers_list(), 1, path=img_path, name="bb")
    ok_clean_git(path)
    # /singularity exists only inside a singularity container, so listing it
    # proves the command ran containerized.
    with swallow_outputs():
        ds.containers_run(["ls", "/singularity"])
def test_docker(path):
    """Fetch a "docker://" busybox image and run it via the CLI entry point."""
    ds = Dataset(path).create()
    ds.containers_add(
        "bb",
        url=("docker://busybox@sha256:"
             "7964ad52e396a6e045c39b5a44438424ac52e12e4d5a25d94895f2058cb863a0"
             ))
    img_path = op.join(ds.path, ".datalad", "environments", "bb", "image")
    assert_result_count(ds.containers_list(), 1, path=img_path, name="bb")
    ok_clean_git(path)
    # Exercise the command-line interface rather than the Python API.
    WitlessRunner(cwd=ds.path).run(
        ["datalad", "containers-run", "ls", "/singularity"],
        protocol=StdOutCapture)
def test_container_from_subdataset(ds_path, src_subds_path, local_file):
    """Containers registered in (nested) subdatasets are found recursively.

    Also checks that a known-but-not-installed subdataset does not break the
    listing, and that the default result renderer shows fully qualified
    container names only.
    """
    # Prepare a to-be subdataset with a registered container.
    src_subds = Dataset(src_subds_path).create()
    src_subds.containers_add(
        name="first",
        url=get_local_file_url(op.join(local_file, 'some_container.img')))

    # Install it as a subdataset of a super dataset ...
    ds = Dataset(ds_path).create()
    subds = ds.install("sub", source=src_subds_path)
    # ... and again one level down to see actual recursion.
    subds.install("subsub", source=src_subds_path)

    # We come up empty without recursion.
    res = ds.containers_list(recursive=False, **RAW_KWDS)
    assert_result_count(res, 0)

    # Query available containers from within the super dataset.
    res = ds.containers_list(recursive=True, **RAW_KWDS)
    assert_result_count(res, 2)
    assert_in_results(res, action="containers", refds=ds.path)

    # Default image location within the subdataset.
    target_path = op.join(
        subds.path, '.datalad', 'environments', 'first', 'image')
    assert_result_count(
        res, 1, name='sub/first', type='file', action='containers',
        status='ok', path=target_path, parentds=subds.path)

    # A known but not installed subdataset doesn't pose an issue.
    sub2 = ds.create("sub2")
    assert_result_count(ds.subdatasets(), 2, type="dataset")
    ds.uninstall("sub2")
    # Use the module-level ok_ helper (as the rest of this file does) rather
    # than importing assert_false mid-function.
    ok_(not sub2.is_installed())

    # Same results as before: the absent subdataset neither crashes nor
    # confuses the listing.
    res = ds.containers_list(recursive=True, **RAW_KWDS)
    assert_result_count(res, 2)
    assert_result_count(
        res, 1, name='sub/first', type='file', action='containers',
        status='ok', path=target_path, parentds=subds.path)

    # The default renderer includes the image names.
    with swallow_outputs() as out:
        ds.containers_list(recursive=True)
    lines = out.out.splitlines()
    assert_re_in("sub/first", lines)
    assert_re_in("sub/subsub/first", lines)
    # But we are careful not to render partial names from subdataset
    # traversals (i.e. we recurse with
    # containers_list(..., result_renderer=None)).
    with assert_raises(AssertionError):
        assert_re_in("subsub/first", lines)
def test_container_files(path, super_path):
    """Run a container addressed by name, by path, and from a superdataset.

    Currently skipped: the test image lived on SingularityHub.
    """
    raise SkipTest('SingularityHub is gone for now')
    ds = Dataset(path).create()
    cmd = ['dir'] if on_windows else ['ls']

    # plug in a proper singularity image
    ds.containers_add(
        'mycontainer',
        url=testimg_url,
        image='righthere',
        # the next one is auto-guessed
        #call_fmt='singularity exec {img} {cmd}'
    )
    assert_result_count(
        ds.containers_list(), 1,
        path=op.join(ds.path, 'righthere'),
        name='mycontainer')
    ok_clean_git(path)

    def assert_no_change(res, path):
        # this command changed nothing
        #
        # Avoid specifying the action because it will change from "add" to
        # "save" in DataLad v0.12.
        assert_result_count(res, 1, status='notneeded', path=path,
                            type='dataset')

    # now we can run stuff in the container
    # and because there is just one, we don't even have to name the container
    res = ds.containers_run(cmd)
    # container becomes an 'input' for `run` -> get request, but "notneeded"
    assert_result_count(res, 1, action='get', status='notneeded',
                        path=op.join(ds.path, 'righthere'), type='file')
    assert_no_change(res, ds.path)

    # same thing as we specify the container by its name:
    res = ds.containers_run(cmd, container_name='mycontainer')
    # container becomes an 'input' for `run` -> get request, but "notneeded"
    assert_result_count(res, 1, action='get', status='notneeded',
                        path=op.join(ds.path, 'righthere'), type='file')
    assert_no_change(res, ds.path)

    # we can also specify the container by its path:
    res = ds.containers_run(cmd,
                            container_name=op.join(ds.path, 'righthere'))
    # container becomes an 'input' for `run` -> get request, but "notneeded"
    assert_result_count(res, 1, action='get', status='notneeded',
                        path=op.join(ds.path, 'righthere'), type='file')
    assert_no_change(res, ds.path)

    # Now, test the same thing, but with this dataset being a subdataset of
    # another one:
    super_ds = Dataset(super_path).create()
    super_ds.install("sub", source=path)

    # When running, we don't discover containers in subdatasets
    with assert_raises(ValueError) as cm:
        super_ds.containers_run(cmd)
    assert_in("No known containers", text_type(cm.exception))
    # ... unless we need to specify the name
    res = super_ds.containers_run(cmd, container_name="sub/mycontainer")
    # container becomes an 'input' for `run` -> get request (needed this
    # time)
    assert_result_count(res, 1, action='get', status='ok',
                        path=op.join(super_ds.path, 'sub', 'righthere'),
                        type='file')
    assert_no_change(res, super_ds.path)
def test_container_files(ds_path, local_file, url):
    """Register containers from an explicit URL and from a configured one."""
    # Any file works as the "image": containers_add merely registers it
    # without calling or using it in any way.
    file_url = get_local_file_url(op.join(local_file, 'some_container.img'))
    remote_url = urljoin(url, 'some_container.img')

    # Prepare the dataset with a non-default container location.
    ds = Dataset(ds_path).create()
    ds.config.add("datalad.containers.location",
                  value=op.join(".datalad", "test-environments"),
                  where='dataset')
    ds.save(message="Configure container mountpoint")

    # No containers yet.
    assert_result_count(ds.containers_list(), 0)

    # Add the first "image" with the URL given in the call.
    res = ds.containers_add(name="first", url=file_url)
    ok_clean_git(ds.repo)
    first_path = op.join(ds.path, ".datalad", "test-environments", "first")
    assert_result_count(res, 1,
                        status="ok", type="file", path=first_path,
                        action="containers_add")
    ok_(op.lexists(first_path))
    eq_(file_url, ds.config.get("datalad.containers.first.url"))

    # Add a "remote" one: the URL comes from config, not from the call.
    ds.config.add("datalad.containers.second.url",
                  value=remote_url,
                  where='dataset')
    ds.save(message="Configure URL for container 'second'")
    res = ds.containers_add(name="second")
    ok_clean_git(ds.repo)
    second_path = op.join(ds.path, ".datalad", "test-environments", "second")
    assert_result_count(res, 1,
                        status="ok", type="file", path=second_path,
                        action="containers_add")
    ok_(op.lexists(second_path))
    # The config entry was left untouched.
    eq_(remote_url, ds.config.get("datalad.containers.second.url"))

    assert_result_count(ds.containers_list(), 2,
                        status='ok', type='file', action='containers_list')