예제 #1
0
def check_failon_no_permissions(use_ssh, src_path, target_path):
    """Verify create_sibling fails when the target path is not writable."""
    target_url = (
        "ssh://datalad-test" + opj(target_path, 'ds')
        if use_ssh
        else opj(target_path, 'ds')
    )
    ds = Dataset(src_path).create()
    # strip user write permission so sibling creation must fail
    chmod(target_path, stat.S_IREAD | stat.S_IEXEC)
    assert_raises(CommandError,
                  ds.create_sibling,
                  name='noperm',
                  sshurl=target_url)
    # give write permission back and confirm creation now succeeds
    chmod(target_path, stat.S_IREAD | stat.S_IWRITE | stat.S_IEXEC)
    assert_create_sshwebserver(name='goodperm', dataset=ds, sshurl=target_url)
예제 #2
0
def test_archive(path=None):
    """Exercise export_archive: default/custom names, checksums, contents."""
    ds = Dataset(opj(path, 'ds')).create(force=True)
    ds.save()
    committed_date = ds.repo.get_commit_date()
    # the default archive name encodes the dataset id
    default_outname = opj(path, 'datalad_{}.tar.gz'.format(ds.id))
    with chpwd(path):
        res = list(ds.export_archive())
        assert_status('ok', res)
        assert_result_count(res, 1)
        # reported path must be absolute even though we ran from inside `path`
        assert(isabs(res[0]['path']))
    assert_true(os.path.exists(default_outname))
    custom_outname = opj(path, 'myexport.tar.gz')
    # feed in without extension
    ds.export_archive(filename=custom_outname[:-7])
    assert_true(os.path.exists(custom_outname))
    custom1_md5 = md5sum(custom_outname)
    # encodes the original archive filename -> different checksum, despite
    # same content
    assert_not_equal(md5sum(default_outname), custom1_md5)
    # should really sleep so if they stop using time.time - we know
    time.sleep(1.1)
    ds.export_archive(filename=custom_outname)
    # should not encode mtime, so should be identical
    assert_equal(md5sum(custom_outname), custom1_md5)

    def check_contents(outname, prefix):
        # verify archive members: no symlinks, expected prefix, committed mtime
        with tarfile.open(outname) as tf:
            nfiles = 0
            for ti in tf:
                # any annex links resolved
                assert_false(ti.issym())
                ok_startswith(ti.name, prefix + '/')
                assert_equal(ti.mtime, committed_date)
                if '.datalad' not in ti.name:
                    # ignore any files in .datalad for this test to not be
                    # susceptible to changes in how much we generate a meta info
                    nfiles += 1
            # we have exactly four files (includes .gitattributes for default
            # MD5E backend), and expect no content for any directory
            assert_equal(nfiles, 4)
    check_contents(default_outname, 'datalad_%s' % ds.id)
    check_contents(custom_outname, 'myexport')

    # now lose some content
    ds.drop('file_up', reckless='kill')
    # without content, export must fail ...
    assert_raises(IOError, ds.export_archive, filename=opj(path, 'my'))
    # ... unless missing content is explicitly ignored
    ds.export_archive(filename=opj(path, 'partial'), missing_content='ignore')
    assert_true(os.path.exists(opj(path, 'partial.tar.gz')))
예제 #3
0
def test_name_with_underscore(path=None):
    """Procedure names with underscores need the JSON config override route."""
    ds = Dataset(path).create(force=True)

    call_format = '%s {script}' % sys.executable

    # we are using the presence of a managed branch as a proxy indicator
    # for a crippled FS, where we cannot trust the executable bit
    # which is our only indicator in the absence of a file extension
    if not ds.repo.is_managed_branch():
        # Procedure name with underscore can't be reached directly with a
        # DATALAD_ environment variable.
        env_patch = {
            "DATALAD_PROCEDURES_PRINT_ARGS_CALL__FORMAT": call_format,
        }
        with patch.dict("os.environ", env_patch):
            with assert_raises(ValueError):
                ds.run_procedure(spec=["print_args"])

    # But it can be set via DATALAD_CONFIG_OVERRIDES_JSON.
    overrides = json.dumps(
        {"datalad.procedures.print_args.call-format": call_format})
    with patch.dict("os.environ",
                    {"DATALAD_CONFIG_OVERRIDES_JSON": overrides}):
        ds.config.reload()
        ds.run_procedure(spec=["print_args"])
예제 #4
0
def test_testsui():
    """with_testsui: conflicting args, scripted responses, non-interactive UI."""
    # responses= and interactive=False are mutually exclusive
    with assert_raises(ValueError):

        @with_testsui(responses='some', interactive=False)
        def some_func():  # pragma: no cover
            pass

    from datalad.ui import ui

    @with_testsui(responses=['yes', "maybe so"])
    def answering(x):
        assert x == 1
        eq_(ui.yesno("title"), True)
        eq_(ui.question("title2"), "maybe so")
        # running out of scripted responses must blow up
        assert_raises(AssertionError, ui.question, "asking more than we know")
        return x * 2

    eq_(answering(1), 2)

    @with_testsui(interactive=False)
    def silent(x):
        assert_false(ui.is_interactive)
        return x * 3

    eq_(silent(2), 6)
예제 #5
0
def test_require_dataset(topdir=None, *, ds_path):
    """require_dataset: CWD-based resolution and check_installed behavior."""
    path = opj(topdir, ds_path)
    os.mkdir(path)
    with chpwd(path):
        # nothing here yet -> must refuse
        assert_raises(NoDatasetFound, require_dataset, None)
        create('.')
        # now resolves to the current directory by default
        assert_equal(require_dataset(None).path, path)

        # without an installation check any path is acceptable ...
        assert_equal(
            require_dataset('some', check_installed=False).path,
            abspath('some'))
        # ... with it, a non-dataset path raises
        assert_raises(NoDatasetFound,
                      require_dataset,
                      'some',
                      check_installed=True)
예제 #6
0
def test_search_non_dataset(tdir=None):
    """A plain git repo is no dataset; search must fail with a helpful hint."""
    from datalad.support.gitrepo import GitRepo
    GitRepo(tdir, create=True)
    with assert_raises(NoDatasetFound) as err:
        list(search('smth', dataset=tdir))
    # the error should instruct the user how to turn the repo into a dataset
    assert_in("datalad create --force", str(err.value))
예제 #7
0
def test_runner_failure(dir_=None):
    """A non-zero subprocess exit surfaces as CommandError carrying the code."""
    with assert_raises(CommandError) as err:
        Runner().run(py2cmd('import sys; sys.exit(53)'))
    # the exit status must be preserved on the exception
    eq_(53, err.value.code)
예제 #8
0
def test_ok_file_under_git_symlinks(path=None):
    """ok_file_under_git must work through a symlinked repository path."""
    repo = GitRepo(path)
    repo.add('ingit')
    repo.commit('msg')
    repo.add('staged')
    # will also be removed AFAIK by our tempfile handling
    lpath = path + "-symlink"
    Path(lpath).symlink_to(Path(path))
    ok_symlink(lpath)
    # a committed file is detected via both the real and the linked path
    for base in (path, lpath):
        ok_file_under_git(op.join(base, 'ingit'))
    # merely staged content also counts
    ok_file_under_git(op.join(lpath, 'staged'))
    # untracked or absent files must be rejected
    with assert_raises(AssertionError):
        ok_file_under_git(op.join(lpath, 'notingit'))
    with assert_raises(AssertionError):
        ok_file_under_git(op.join(lpath, 'nonexisting'))
예제 #9
0
def test_initremote(store_path=None, store_url=None, ds_path=None):
    """Initializing an ORA special remote: failure modes and remote.log content.

    Checks that ``init_remote``:
    - fails (without leaving a half-configured remote behind) when the URL
      does not point at an existing RIA store,
    - fails on a plain (non-``ria+``) URL,
    - succeeds once the store exists, recording the url, the common options
      and the archive id in ``git-annex:remote.log``.
    """
    ds = Dataset(ds_path).create()
    store_path = Path(store_path)
    url = "ria+" + store_url
    init_opts = common_init_opts + ['url={}'.format(url)]

    # fail when there's no RIA store at the destination
    assert_raises(CommandError,
                  ds.repo.init_remote,
                  'ora-remote',
                  options=init_opts)
    # Doesn't actually create a remote if it fails
    assert_not_in(
        'ora-remote',
        [cfg['name'] for uuid, cfg in ds.repo.get_special_remotes().items()])

    # now make it a store
    io = LocalIO()
    create_store(io, store_path, '1')
    create_ds_in_store(io, store_path, ds.id, '2', '1')

    # fails on non-RIA URL
    assert_raises(CommandError,
                  ds.repo.init_remote,
                  'ora-remote',
                  options=common_init_opts +
                  ['url={}'
                   ''.format(store_path.as_uri())])
    # Doesn't actually create a remote if it fails
    assert_not_in(
        'ora-remote',
        [cfg['name'] for uuid, cfg in ds.repo.get_special_remotes().items()])

    ds.repo.init_remote('ora-remote', options=init_opts)
    assert_in(
        'ora-remote',
        [cfg['name'] for uuid, cfg in ds.repo.get_special_remotes().items()])
    assert_repo_status(ds.path)
    # git-annex:remote.log should have:
    #   - url
    #   - common_init_opts
    #   - archive_id (which equals ds id)
    remote_log = ds.repo.call_git(['cat-file', 'blob', 'git-annex:remote.log'],
                                  read_only=True)
    assert_in("url={}".format(url), remote_log)
    # plain loop instead of a list comprehension used only for side effects
    for opt in common_init_opts:
        assert_in(opt, remote_log)
    assert_in("archive-id={}".format(ds.id), remote_log)
예제 #10
0
def test_get_file_annexinfo(path=None):
    """Check get_file_annexinfo(): single-file constraint, key/backend/size
    reporting, and the optional availability evaluation."""
    ds = Dataset(path).create(force=True)
    ds.save('ingit.txt', to_git=True)
    ds.save()
    # have some content-less component for testing
    ds.drop(ds.pathobj / 'dir1', reckless='kill')

    repo = ds.repo
    # only handles a single file at a time
    assert_raises(ValueError, repo.get_file_annexinfo, repo.pathobj / 'dir2')
    # however, it only functionally matters that there is only a single file to
    # report on not that the exact query path matches, the matching path is in
    # the report
    assert_equal(repo.pathobj / 'dir1' / 'dropped',
                 repo.get_file_annexinfo(repo.pathobj / 'dir1')['path'])

    # does not raise on a non-annex file, instead it returns no properties
    assert_equal(repo.get_file_annexinfo('ingit.txt'), {})

    # but does raise on a path that doesn't exist
    assert_raises(NoSuchPathError, repo.get_file_annexinfo, 'nothere')

    # check return properties for utility
    props = repo.get_file_annexinfo('inannex.txt')
    # to replace get_file_backend()
    assert_equal(props['backend'], 'MD5E')
    # to replace get_file_key()
    assert_equal(props['key'], 'MD5E-s7--3b158c5b0a18c247ebad28c09fc3e180.txt')
    # for size reporting
    assert_equal(props['bytesize'], 7)
    # all records have a pathobj
    assert_equal(props['path'], repo.pathobj / 'inannex.txt')
    # test if `eval_availability` has desired effect
    assert_not_in('has_content', props)

    # extended set of properties, after more expensive availability check
    props = repo.get_file_annexinfo('inannex.txt', eval_availability=True)
    # to replace file_has_content()
    assert_equal(props['has_content'], True)
    # to replace get_contentlocation()
    assert_equal(Path(props['objloc']).read_text(), 'inannex')

    # make sure has_content is not always True
    props = repo.get_file_annexinfo(ds.pathobj / 'dir1' / 'dropped',
                                    eval_availability=True)
    assert_equal(props['has_content'], False)
    # no object location is reported for content that is not present
    assert_not_in('objloc', props)
예제 #11
0
def test_ignore_nose_capturing_stdout():
    """The wrapper must re-raise AttributeErrors that merely mention StringIO."""
    # Just test the logic, not really a situation under overwritten stdout
    def boom():
        raise AttributeError('nose causes a message which includes words '
                             'StringIO and fileno')

    with assert_raises(AttributeError):
        ignore_nose_capturing_stdout(boom)()
예제 #12
0
def test_noannex_fail_if_has_annexed(path=None):
    """cfg_noannex must refuse to run on a dataset with annexed content."""
    dataset = Dataset(path).create(force=True)
    dataset.save()
    assert_true(isinstance(dataset.repo, AnnexRepo))
    # internally the procedure raises RuntimeError, but since we run it via
    # runner, we get CommandError here
    with assert_raises(CommandError):
        dataset.run_procedure('cfg_noannex')  # we are killing annex while ds.repo
예제 #13
0
def test_with_tempfile_content_raises_on_mkdir():
    """content= and mkdir=True are incompatible; the error comes at call time."""
    @with_tempfile(content="test", mkdir=True)
    def decorated():  # pragma: no cover
        raise AssertionError("must not be run")

    # after this commit, it will check when invoking, not when decorating
    with assert_raises(ValueError):
        decorated()
예제 #14
0
def test_drop_file_need_nocheck(path=None):
    """Dropping a sole copy fails suggesting --reckless; reckless drop works."""
    ds = Dataset(path).create(force=True)
    ds.save()
    with assert_raises(IncompleteResultsError) as err:
        ds.drop("foo")
    # The --force suggestion from git-annex-drop is translated to --reckless.
    assert_in("--reckless", str(err.value))
    assert_status("ok", ds.drop("foo", reckless='kill', on_failure="ignore"))
예제 #15
0
def test_too_long():
    """Exceeding the OS argument-length limit raises OSError and logs a hint."""
    cmd = [sys.executable, '-c', 'import sys; print(len(sys.argv))']
    cmd += [str(i) for i in range(CMD_MAX_ARG)]
    with swallow_logs(new_level=logging.ERROR) as cml:
        # we still raise an exception if we exceed too much
        with assert_raises(OSError):
            Runner().run(cmd, protocol=StdOutCapture)
        # the error log should point the user at ulimit
        cml.assert_logged('.*use.*ulimit.*')
예제 #16
0
def test_push_wanted(srcpath=None, dstpath=None):
    """push() must honor the sibling's `wanted` expression and skip files
    annotated as restricted, until the expression is reset."""
    src = Dataset(srcpath).create()
    (src.pathobj / 'data.0').write_text('0')
    (src.pathobj / 'secure.1').write_text('1')
    (src.pathobj / 'secure.2').write_text('2')
    src.save()

    # Dropping a file to mimic a case of simply not having it locally (thus not
    # to be "pushed")
    src.drop('secure.2', reckless='kill')

    # Annotate sensitive content, actual value "verysecure" does not matter in
    # this example
    src.repo.set_metadata(add={'distribution-restrictions': 'verysecure'},
                          files=['secure.1', 'secure.2'])

    # sibling is configured to not want restricted content
    src.create_sibling(
        dstpath,
        annex_wanted="not metadata=distribution-restrictions=*",
        name='target',
    )
    # check that wanted is obeyed, since set in sibling configuration
    res = src.push(to='target')
    assert_in_results(res,
                      action='copy',
                      path=str(src.pathobj / 'data.0'),
                      status='ok')
    for p in ('secure.1', 'secure.2'):
        assert_not_in_results(res, path=str(src.pathobj / p))
    # a second push has nothing left to do
    assert_status('notneeded', src.push(to='target'))

    # check the target to really make sure
    dst = Dataset(dstpath)
    # normal file, yes
    eq_((dst.pathobj / 'data.0').read_text(), '0')
    # secure file, no
    if dst.repo.is_managed_branch():
        neq_((dst.pathobj / 'secure.1').read_text(), '1')
    else:
        assert_raises(FileNotFoundError, (dst.pathobj / 'secure.1').read_text)

    # reset wanted config, which must enable push of secure file
    src.repo.set_preferred_content('wanted', '', remote='target')
    res = src.push(to='target')
    assert_in_results(res, path=str(src.pathobj / 'secure.1'))
    eq_((dst.pathobj / 'secure.1').read_text(), '1')
예제 #17
0
def test_addurls_nonannex_repo(path=None):
    """addurls on an annex-less dataset must fail with an informative error."""
    ds = Dataset(path).create(force=True, annex=False)
    with assert_raises(IncompleteResultsError) as err:
        ds.addurls("dummy_arg0",
                   "dummy_arg1",
                   "dummy_arg2",
                   result_renderer='disabled')
    # error message must explain why addurls is unsupported here
    assert_in("not an annex repo", str(err.value))
예제 #18
0
def test_probe_known_failure():
    """probe_known_failure must flag decorated callables that stopped failing."""
    # with probing on, a succeeding callable is an error (known failure gone)
    with patch_config({'datalad.tests.knownfailures.probe': True}):
        with assert_raises(Failed):
            probe_known_failure(lambda: True)()

    # with probing off the decorator just wraps the callable
    with patch_config({'datalad.tests.knownfailures.probe': False}):
        ok_(probe_known_failure(lambda: True))
예제 #19
0
def test_get_containingds_from_agginfo():
    """Resolve which aggregated dataset path contains a given query path."""
    # no aggregated info -> no containing dataset
    eq_(None, _get_containingds_from_agginfo({}, 'any'))
    # direct hit returns itself
    eq_('match', _get_containingds_from_agginfo({'match': {}, 'other': {}}, 'match'))
    # matches
    down = op.join('match', 'down')
    eq_('match', _get_containingds_from_agginfo({'match': {}}, down))
    # closest match
    down_under = op.join(down, 'under')
    eq_(down, _get_containingds_from_agginfo({'match': {}, down: {}}, down_under))
    # absolute works too
    eq_(op.abspath(down),
        _get_containingds_from_agginfo(
            {op.abspath('match'): {}, op.abspath(down): {}}, op.abspath(down_under)))
    # will not tolerate mixing absolute and relative paths
    assert_raises(ValueError, _get_containingds_from_agginfo, {'match': {}}, op.abspath(down))
    assert_raises(ValueError, _get_containingds_from_agginfo, {op.abspath('match'): {}}, down)
예제 #20
0
def test_addurls_unknown_placeholder(path=None):
    """Unknown placeholders produce errors that suggest close matches."""
    ds = Dataset(path).create(force=True)

    def expect_failure(url_fmt, fname_fmt, fragment, suggested=True):
        # run a dry addurls and check the error text for `fragment`
        with assert_raises(IncompleteResultsError) as exc:
            ds.addurls("in.csv",
                       url_fmt,
                       fname_fmt,
                       dry_run=True,
                       result_renderer='disabled')
        check = assert_in if suggested else assert_not_in
        check(fragment, str(exc.value))

    # Close but wrong URL placeholder
    expect_failure("{link}", "{abcd}", "linky")
    # Close but wrong file name placeholder
    expect_failure("{linky}", "{abc}", "abcd")
    # Out-of-bounds index.
    expect_failure("{linky}", "{3}", "index")
    # Suggestions also work for automatic file name placeholders
    expect_failure("{linky}", "{_url_hostnam}", "_url_hostname")
    # ... though if you whiff on the beginning prefix, we don't suggest
    # anything because we decide to generate those fields based on detecting
    # the prefix.
    expect_failure("{linky}", "{_uurl_hostnam}", "_url_hostname",
                   suggested=False)
예제 #21
0
def test_run_under_dir(d=None):
    """run_under_dir executes the function in `d` and restores the prior cwd."""
    orig_pwd = getpwd()
    orig_cwd = os.getcwd()

    @run_under_dir(d)
    def checker(arg, kwarg=None):
        eq_(arg, 1)
        eq_(kwarg, 2)
        eq_(getpwd(), d)

    checker(1, 2)
    eq_(getpwd(), orig_pwd)
    eq_(os.getcwd(), orig_cwd)

    # the cwd is restored even when the wrapped function fails
    assert_raises(AssertionError, checker, 1, 3)
    eq_(getpwd(), orig_pwd)
    eq_(os.getcwd(), orig_cwd)
예제 #22
0
def test_incorrect_msg_interpolation():
    """Broken %-interpolation in result messages surfaces as our TypeError."""
    with assert_raises(TypeError) as err:
        TestUtils2().__call__()
    # this must be our custom exception
    assert_re_in("Failed to render.*kaboom.*not enough arguments",
                 str(err.value))

    # there should be no exception if reported in the record path contains %
    TestUtils2().__call__("%eatthis")
예제 #23
0
def test_guess_dot_git(path=None, url=None, tdir=None, *, annex):
    """Installing over plain http works only after `git update-server-info`."""
    repo_cls = AnnexRepo if annex else GitRepo
    repo = repo_cls(path, create=True)
    repo.add('file.txt', git=not annex)
    repo.commit()

    # we need to prepare to be served via http, otherwise it must fail
    with swallow_logs():
        assert_raises(IncompleteResultsError, install, path=tdir, source=url)
    ok_(not exists(tdir))

    Runner(cwd=path).run(['git', 'update-server-info'])

    with swallow_logs() as cml:
        installed = install(tdir, source=url)
        assert_not_in("Failed to get annex.uuid", cml.out)
    eq_(installed.pathobj.resolve(), Path(tdir).resolve())
    ok_(exists(tdir))
    assert_repo_status(tdir, annex=annex)
예제 #24
0
def test_assert_re_in():
    """assert_re_in matches (not searches) against strings or collections."""
    assert_re_in(".*", "")
    assert_re_in(".*", ["any"])

    # should do match not search
    assert_re_in("ab", "abc")
    for regex, target in (("ab", "cab"), ("ab$", "abc")):
        assert_raises(AssertionError, assert_re_in, regex, target)

    # a single matching entry in a list or tuple suffices
    for matching in (["", "abc", "laskdjf"], ("", "abc", "laskdjf")):
        assert_re_in("ab", matching)
    for nonmatching in (["ddd", ""], ("ddd", "")):
        assert_raises(AssertionError, assert_re_in, "ab$", nonmatching)

    # shouldn't "match" the empty list
    assert_raises(AssertionError, assert_re_in, "", [])
예제 #25
0
def test_str():
    """EnsureStr passes strings through unchanged and rejects non-strings."""
    c = ct.EnsureStr()
    # this should always work
    assert_equal(c('hello'), 'hello')
    assert_equal(c('7.0'), '7.0')
    # this should always fail; pass the callable and its argument directly
    # to assert_raises instead of wrapping each call in a needless lambda
    assert_raises(ValueError, c, ['ab'])
    assert_raises(ValueError, c, ['a', 'b'])
    assert_raises(ValueError, c, ('a', 'b'))
    # no automatic conversion attempted
    assert_raises(ValueError, c, 7.0)
    assert_equal(c.short_description(), 'str')
예제 #26
0
def test_rerun_commit_message_check():
    """get_run_info must reject malformed run records and parse valid ones."""
    # record without a "cmd" key must be refused
    assert_raises(ValueError,
                  get_run_info,
                  None,
                  """\
[DATALAD RUNCMD] no command

=== Do not change lines below ===
{
 "pwd": ".",
 "exit": 0
}
^^^ Do not change lines above ^^^""")

    # record with syntactically invalid JSON must be refused
    assert_raises(ValueError,
                  get_run_info,
                  None,
                  """\
[DATALAD RUNCMD] junk json

=== Do not change lines below ===
{
 "pwd": ".,
 "cmd": "echo ok >okfile",
 "exit": 0
}
^^^ Do not change lines above ^^^""")

    # a well-formed record yields the subject line and the parsed record
    subject, info = get_run_info(
        None,
        """\
[DATALAD RUNCMD] fine

=== Do not change lines below ===
{
 "pwd": ".",
 "cmd": "echo ok >okfile",
 "exit": 0
}
^^^ Do not change lines above ^^^""")
    eq_(subject, "fine")
    assert_dict_equal(info,
                      {"pwd": ".", "cmd": "echo ok >okfile", "exit": 0})
예제 #27
0
def test_install_skip_list_arguments(src=None, path=None, path_outside=None):
    """Installing a mixed list of valid/invalid paths reports per-path results
    and raises IncompleteResultsError only when failures are not ignored."""
    _mk_submodule_annex(src, fname="test-annex.dat", fcontent="whatever")

    ds = install(path, source=src)
    ok_(ds.is_installed())

    # install a list with valid and invalid items:
    result = ds.install(path=['subm 1', 'not_existing', path_outside, '2'],
                        get_data=False,
                        on_failure='ignore',
                        result_xfm=None,
                        return_type='list')
    # good and bad results together
    ok_(isinstance(result, list))
    eq_(len(result), 4)
    # check that we have an 'impossible/error' status for both invalid args
    # but all the other tasks have been accomplished
    assert_result_count(result,
                        1,
                        status='impossible',
                        message="path does not exist",
                        path=opj(ds.path, 'not_existing'))
    assert_result_count(result,
                        1,
                        status='error',
                        message=("path not associated with dataset %s", ds),
                        path=path_outside)
    for sub in [Dataset(opj(path, 'subm 1')), Dataset(opj(path, '2'))]:
        assert_result_count(result,
                            1,
                            status='ok',
                            message=('Installed subdataset in order to get %s',
                                     sub.path))
        ok_(sub.is_installed())

    # return of get is always a list, by default, even if just one thing was gotten
    # in this case 'subm1' was already obtained above, so this will get this
    # content of the subdataset
    # without on_failure='ignore' the invalid path now makes both calls raise
    with assert_raises(IncompleteResultsError) as cme:
        ds.install(path=['subm 1', 'not_existing'])
    with assert_raises(IncompleteResultsError) as cme:
        ds.get(path=['subm 1', 'not_existing'])
예제 #28
0
 def test_addurls_invalid_input(self=None, path=None):
     """addurls must fail with "Failed to read" for every supported input type."""
     ds = Dataset(path).create(force=True)
     # NOTE(review): `in` presumably does not exist or is unreadable in the
     # fixture dir, so every read attempt below must fail — confirm fixture
     in_file = op.join(path, "in")
     for in_type in au.INPUT_TYPES:
         with assert_raises(IncompleteResultsError) as exc:
             ds.addurls(in_file,
                        "{url}",
                        "{name}",
                        input_type=in_type,
                        result_renderer='disabled')
         assert_in("Failed to read", str(exc.value))
예제 #29
0
def test_ok_generator():
    """ok_generator accepts only live generator objects."""
    def adder(a, b=1):
        return a + b

    def gen_adder(a, b=1):  # pragma: no cover
        yield a + b

    # a generator instance passes ...
    ok_generator(gen_adder(1))
    # ... while ranges, generator functions, plain functions, and their
    # return values are all rejected
    # not sure how to determine if xrange is a generator
    assert_raises(AssertionError, ok_generator, range(2))
    assert_raises(AssertionError, ok_generator, gen_adder)
    assert_raises(AssertionError, ok_generator, adder)
    assert_raises(AssertionError, ok_generator, adder(1))
예제 #30
0
def test_skip_if_no_network():
    """skip_if_no_network skips per config, both as decorator and plain call."""
    cleaned_env = os.environ.copy()
    cleaned_env.pop('DATALAD_TESTS_NONETWORK', None)
    # run under a cleaned env to make sure we actually test both conditions
    with patch('os.environ', cleaned_env):

        @skip_if_no_network
        def passthrough(a1):
            return a1

        # decorator: skip when networking is disabled, run normally otherwise
        with patch_config({'datalad.tests.nonetwork': '1'}):
            assert_raises(Skipped, passthrough, 1)
        with patch.dict('os.environ', {}):
            eq_(passthrough(1), 1)
        # and now if used as a function, not a decorator
        with patch_config({'datalad.tests.nonetwork': '1'}):
            assert_raises(Skipped, skip_if_no_network)
        with patch.dict('os.environ', {}):
            eq_(skip_if_no_network(), None)