Example #1
def test_install_known_subdataset(src=None, path=None):
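    # NOTE: in the original test module, `src` and `path` are temporary
    # directories injected by test decorators (e.g. @with_tempfile), and names
    # such as install, get, Dataset, AnnexRepo, chpwd and opj (os.path.join)
    # come from datalad's API and test utilities; those imports are stripped
    # in this listing.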

    _mk_submodule_annex(src, fname="test-annex.dat", fcontent="whatever")

    # get the superdataset:
    ds = install(path, source=src)
    # subdataset not installed:
    subds = Dataset(opj(path, 'subm 1'))
    assert_false(subds.is_installed())
    assert_in('subm 1', ds.subdatasets(state='absent', result_xfm='relpaths'))
    assert_not_in('subm 1',
                  ds.subdatasets(state='present', result_xfm='relpaths'))
    # install it:
    ds.install('subm 1')
    ok_(subds.is_installed())
    ok_(AnnexRepo.is_valid_repo(subds.path, allow_noninitialized=False))
    # Verify that the correct submodule was installed and not a
    # new repository initialized
    assert_in("test-annex.dat", subds.repo.get_indexed_files())
    assert_not_in('subm 1',
                  ds.subdatasets(state='absent', result_xfm='relpaths'))
    assert_in('subm 1', ds.subdatasets(state='present', result_xfm='relpaths'))

    # now, get the data by reinstalling with -g:
    ok_(subds.repo.file_has_content('test-annex.dat') is False)
    with chpwd(ds.path):
        result = get(path='subm 1', dataset=os.curdir)
        assert_in_results(result, path=opj(subds.path, 'test-annex.dat'))
        ok_(subds.repo.file_has_content('test-annex.dat') is True)
        ok_(subds.is_installed())
Example #2
def test_annexinfo_init(path=None):
    ds = Dataset(path).create()
    foo = ds.pathobj / "foo"
    foo_cont = b"foo content"
    foo.write_bytes(foo_cont)
    bar = ds.pathobj / "bar"
    bar.write_text(u"bar content")
    ds.save()

    # Custom init limits report, with original dict getting updated.
    cinfo_custom_init = ds.repo.get_content_annexinfo(
        init={foo: {
            "bytesize": 0,
            "this-is-surely-only-here": "right?"
        }})
    assert_not_in(bar, cinfo_custom_init)
    assert_in(foo, cinfo_custom_init)
    assert_equal(cinfo_custom_init[foo]["bytesize"], len(foo_cont))
    assert_equal(cinfo_custom_init[foo]["this-is-surely-only-here"], "right?")

    # "git" injects get_content_info() values.
    cinfo_init_git = ds.repo.get_content_annexinfo(init="git")
    assert_in("gitshasum", cinfo_init_git[foo])

    # init=None, on the other hand, does not.
    cinfo_init_none = ds.repo.get_content_annexinfo(init=None)
    assert_in(foo, cinfo_init_none)
    assert_in(bar, cinfo_init_none)
    assert_not_in("gitshasum", cinfo_init_none[foo])
Example #3
def test_install_list(path=None, top_path=None):

    _mk_submodule_annex(path, fname="test-annex.dat", fcontent="whatever")

    # we want to be able to install several things at once, provided they are
    # known subdatasets (passing `source` is not allowed then). Therefore
    # install the toplevel dataset first:
    ds = install(top_path, source=path, recursive=False)
    assert_not_in('annex.hardlink', ds.config)
    ok_(ds.is_installed())
    sub1 = Dataset(opj(top_path, 'subm 1'))
    sub2 = Dataset(opj(top_path, '2'))
    ok_(not sub1.is_installed())
    ok_(not sub2.is_installed())

    # fails, when `source` is passed:
    assert_raises(ValueError,
                  ds.install,
                  path=['subm 1', '2'],
                  source='something')

    # now should work:
    result = ds.install(path=['subm 1', '2'], result_xfm='paths')
    ok_(sub1.is_installed())
    ok_(sub2.is_installed())
    eq_(set(result), {sub1.path, sub2.path})
    # and if we request it again via get, result should be empty
    get_result = ds.get(path=['subm 1', '2'], get_data=False)
    assert_status('notneeded', get_result)
Example #4
def test_overrides():
    cfg = ConfigManager()
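    # the 'override' scope is held in memory only (exposed as cfg.overrides);
    # it shadows values from the git config files without writing to disk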
    # any sensible (and also our CI) test environment(s) should have this
    assert_in('user.name', cfg)
    # set
    cfg.set('user.name', 'myoverride', scope='override')
    assert_equal(cfg['user.name'], 'myoverride')
    # unset just removes override, not entire config
    cfg.unset('user.name', scope='override')
    assert_in('user.name', cfg)
    assert_not_equal(cfg['user.name'], 'myoverride')
    # add
    # the first add does not create a list yet
    cfg.add('user.name', 'myoverride', scope='override')
    assert_equal(cfg['user.name'], 'myoverride')
    # still a single value, not a list
    assert_equal(cfg['user.name'], 'myoverride')
    # but then there is
    cfg.add('user.name', 'myother', scope='override')
    assert_equal(cfg['user.name'], ['myoverride', 'myother'])
    # rename
    assert_not_in('ups.name', cfg)
    cfg.rename_section('user', 'ups', scope='override')
    # original variable still there
    assert_in('user.name', cfg)
    # rename of override in effect
    assert_equal(cfg['ups.name'], ['myoverride', 'myother'])
    # remove entirely by section
    cfg.remove_section('ups', scope='override')
    assert_not_in('ups.name', cfg, (
        cfg._stores,
        cfg.overrides,
    ))
Example #5
def test_no_leaks(path1=None, path2=None):
    ds1 = Dataset(path1).create()
    ds1.config.set('i.was.here', 'today', scope='local')
    assert_in('i.was.here', ds1.config.keys())
    ds1.config.reload()
    assert_in('i.was.here', ds1.config.keys())
    # now we move into this one repo, and create another
    # make sure that no config from ds1 leaks into ds2
    with chpwd(path1):
        ds2 = Dataset(path2)
        assert_not_in('i.was.here', ds2.config.keys())
        ds2.config.reload()
        assert_not_in('i.was.here', ds2.config.keys())

        ds2.create()
        assert_not_in('i.was.here', ds2.config.keys())

        # and that we do not track the wrong files
        assert_not_in(ds1.pathobj / '.git' / 'config',
                      ds2.config._stores['git']['files'])
        assert_not_in(ds1.pathobj / '.datalad' / 'config',
                      ds2.config._stores['branch']['files'])
        # these are the right ones
        assert_in(ds2.pathobj / '.git' / 'config',
                  ds2.config._stores['git']['files'])
        assert_in(ds2.pathobj / '.datalad' / 'config',
                  ds2.config._stores['branch']['files'])
Example #6
def test_add_mimetypes(path=None):
    ds = Dataset(path).create(force=True)
    ds.repo.add('.gitattributes')
    ds.repo.commit('added attributes to git explicitly')
    # now test that those files will go into git/annex correspondingly
    # WINDOWS FAILURE NEXT
    __not_tested__ = ds.save(['file.txt', 'empty'])
    assert_repo_status(path, untracked=['file2.txt'])
    # but we should be able to force a file into the annex when desired
    ds.save('file2.txt', to_git=False)
    # check annex file status
    annexinfo = ds.repo.get_content_annexinfo()
    for path, in_annex in (
            # the empty file is considered application/octet-stream,
            # i.e. non-text
            ('empty', True),
            ('file.txt', False),
            ('file2.txt', True)):
        # low-level API report -> repo path reference, no ds path
        p = ds.repo.pathobj / path
        assert_in(p, annexinfo)
        if in_annex:
            assert_in('key', annexinfo[p], p)
        else:
            assert_not_in('key', annexinfo[p], p)
Example #7
def test_audio(path=None):
    ds = Dataset(path).create()
    ds.config.add('datalad.metadata.nativetype', 'audio', scope='branch')
    copy(
        opj(dirname(dirname(dirname(__file__))), 'tests', 'data', 'audio.mp3'),
        path)
    ds.save()
    assert_repo_status(ds.path)
    res = ds.aggregate_metadata()
    assert_status('ok', res)
    res = ds.metadata('audio.mp3')
    assert_result_count(res, 1)

    # from this extractor
    meta = res[0]['metadata']['audio']
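    # `target` is a module-level dict of expected metadata key/value pairs
    # defined in the original test file (not shown in this listing)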
    for k, v in target.items():
        eq_(meta[k], v)

    assert_in('@context', meta)

    uniques = ds.metadata(reporton='datasets', return_type='item-or-list'
                          )['metadata']['datalad_unique_content_properties']
    # the test file has it, but uniques have it blanked out, because the
    # extractor considers it worthless for discovering whole datasets
    assert_in('bitrate', meta)
    eq_(uniques['audio']['bitrate'], None)

    # the 'date' field carries no value, hence it is excluded from the
    # unique report
    assert_in('date', meta)
    assert (not meta['date'])
    assert_not_in('date', uniques['audio'])
Example #8
def test_rerun_script(path=None):
    ds = Dataset(path).create()
    ds.run("echo a >foo")
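    # `touch_command` is defined at module level in the original test file
    # (a platform-appropriate file-creation command); not shown here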
    ds.run([touch_command + "bar"], message='BAR', sidecar=True)
    # a run record sidecar file was added with the last commit
    assert(any(d['path'].startswith(op.join(ds.path, '.datalad', 'runinfo'))
               for d in ds.rerun(report=True, return_type='item-or-list')['diff']))
    bar_hexsha = ds.repo.get_hexsha(DEFAULT_BRANCH)

    script_file = op.join(path, "commands.sh")

    ds.rerun(script=script_file)
    ok_exists(script_file)
    with open(script_file) as sf:
        lines = sf.readlines()
        assert_in(touch_command + "bar\n", lines)
        # The commit message is there too.
        assert_in("# BAR\n", lines)
        assert_in("# (record: {})\n".format(bar_hexsha), lines)
        assert_not_in("echo a >foo\n", lines)

    ds.rerun(since="", script=script_file)
    with open(script_file) as sf:
        lines = sf.readlines()
        assert_in(touch_command + "bar\n", lines)
        # Automatic commit messages aren't included.
        assert_not_in("# echo a >foo\n", lines)
        assert_in("echo a >foo\n", lines)

    # --script=- writes to stdout.
    with patch("sys.stdout", new_callable=StringIO) as cmout:
        ds.rerun(script="-")
        assert_in(touch_command + "bar",
                  cmout.getvalue().splitlines())
Example #9
def test_assert_cwd_unchanged_not_masking_exceptions():
    # Test that we are not masking out other "more important" exceptions

    orig_cwd = os.getcwd()

    @assert_cwd_unchanged
    def do_chdir_value_error():
        os.chdir(os.pardir)
        raise ValueError("error exception")

    with swallow_logs(new_level=logging.WARN) as cml:
        with assert_raises(ValueError):
            do_chdir_value_error()
        # inspect the state after the exception propagated
        eq_(orig_cwd, os.getcwd(),
            "assert_cwd_unchanged didn't return us back to %s" % orig_cwd)
        assert_in("Mitigating and changing back", cml.out)

    # and again but allowing to chdir
    @assert_cwd_unchanged(ok_to_chdir=True)
    def do_chdir_value_error():
        os.chdir(os.pardir)
        raise ValueError("error exception")

    with swallow_logs(new_level=logging.WARN) as cml:
        assert_raises(ValueError, do_chdir_value_error)
        eq_(orig_cwd, os.getcwd(),
            "assert_cwd_unchanged didn't return us back to %s" % orig_cwd)
        assert_not_in("Mitigating and changing back", cml.out)
Example #10
def test_gitrepo_call_git_methods(path=None):
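    # NOTE: the original test prepares `path` with the files 'foo' and 'bar'
    # (via a tree-creating decorator) before this function runs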
    gr = GitRepo(path).init()
    gr.call_git(['add', "foo", "bar"])
    gr.call_git(['commit', '-m', "foobar"])
    gr.call_git(["mv"], files=["foo", "foo.txt"])
    ok_((gr.pathobj / 'foo.txt').exists())

    for expect_fail, check in [(False, assert_in),
                               (True, assert_not_in)]:
        with swallow_logs(new_level=logging.DEBUG) as cml:
            with assert_raises(CommandError):
                gr.call_git(["mv"], files=["notthere", "dest"],
                            expect_fail=expect_fail)
            check("fatal: bad source", cml.out)

    eq_(list(gr.call_git_items_(["ls-files"], read_only=True)),
        ["bar", "foo.txt"])
    eq_(list(gr.call_git_items_(["ls-files", "-z"], sep="\0", read_only=True)),
        # Note: the custom separator produces a trailing empty item, but since
        # this is an arbitrary command with unknown output, it isn't safe to
        # trim it away.
        ["bar", "foo.txt", ""])

    with assert_raises(AssertionError):
        gr.call_git_oneline(["ls-files"], read_only=True)

    eq_(gr.call_git_oneline(["ls-files"], files=["bar"], read_only=True),
        "bar")

    ok_(gr.call_git_success(["rev-parse", "HEAD^{commit}"], read_only=True))
    with swallow_logs(new_level=logging.DEBUG) as cml:
        assert_false(gr.call_git_success(["rev-parse", "HEAD^{blob}"],
                                         read_only=True))
        assert_not_in("expected blob type", cml.out)
Example #11
def test_eval_results_plus_build_doc():
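    # NOTE: TestUtils is a demo Interface subclass defined in the original
    # test module; its __call__ is also exposed on Dataset as `fake_command`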

    # test docs

    # docstring was built already:
    with swallow_logs(new_level=logging.DEBUG) as cml:
        TestUtils().__call__(1)
        assert_not_in("Building doc for", cml.out)
    # docstring accessible both ways:
    doc1 = Dataset.fake_command.__doc__
    doc2 = TestUtils().__call__.__doc__

    # docstring was built from TestUtils' definition:
    assert_equal(doc1, doc2)
    assert_in("TestUtil's fake command", doc1)
    assert_in("Parameters", doc1)
    assert_in("It's a number", doc1)

    # docstring shows correct override values of defaults in eval_params
    assert_re_in("Default:\\s+'tailored'", doc1, match=False)
    assert_re_in("Default:\\s+'item-or-list'", doc1, match=False)

    # docstring also contains eval_result's parameters:
    assert_in("result_filter", doc1)
    assert_in("return_type", doc1)
    assert_in("list", doc1)
    assert_in("None", doc1)
    assert_in("return value behavior", doc1)
    assert_in("dictionary is passed", doc1)

    # test that eval_results is able to determine the class whose method it
    # is decorating:
    with swallow_logs(new_level=logging.DEBUG) as cml:
        Dataset('/does/not/matter').fake_command(3)
        assert_in(
            "Determined class of decorated function: {}"
            "".format(TestUtils().__class__), cml.out)

    # test results:
    result = TestUtils().__call__(2)
    assert_equal(len(list(result)), 2)
    result = Dataset('/does/not/matter').fake_command(3)
    assert_equal(len(list(result)), 3)

    # test absent side-effect of popping eval_defaults
    kwargs = dict(return_type='list')
    TestUtils().__call__(2, **kwargs)
    assert_equal(list(kwargs), ['return_type'])

    # test signature:
    from datalad.utils import getargspec
    assert_equal(
        getargspec(Dataset.fake_command)[0],
        ['number', 'dataset', 'result_fn'])
    assert_equal(
        getargspec(TestUtils.__call__)[0], ['number', 'dataset', 'result_fn'])
Example #12
def test_windows_incompatible_names(path=None):
    ds = Dataset(path).create()
    create_tree(
        path, {
            'imgood': 'Look what a nice name I have',
            'illegal:character.txt': 'strange choice of name',
            'spaceending ': 'who does these things?',
            'lookmumadot.': 'why would you do this?',
            'COM1.txt': 'I am a serial port',
            'dirs with spaces': {
                'seriously?': 'you are stupid',
                'why somuch?wrongstuff.': "I gave up"
            },
        })
    ds.repo.config.set('datalad.save.windows-compat-warning', 'error')
    ds.save('.datalad/config')
    res = ds.save(on_failure='ignore')
    # check that none of the 6 problematic files was saved, but the good one was
    assert_result_count(res, 6, status='impossible', action='save')
    assert_result_count(res, 1, status='ok', action='save')

    # check that the warning is emitted
    ds.repo.config.set('datalad.save.windows-compat-warning', 'warning')
    ds.save('.datalad/config')
    with swallow_logs(new_level=logging.WARN) as cml:
        ds.save()
        cml.assert_logged(
            "Some elements of your dataset are not compatible with Windows "
            "systems. Disable this check by changing "
            "datalad.save.windows-compat-warning or consider renaming the "
            "following elements:")
        assert_in("Elements using a reserved filename:", cml.out)
        assert_in("Elements with illegal characters:", cml.out)
        assert_in("Elements ending with a dot:", cml.out)
        assert_in("Elements ending with a space:", cml.out)

    # check that a setting of 'none' really does nothing
    ds.repo.config.set('datalad.save.windows-compat-warning', 'none')
    ds.save('.datalad/config')
    create_tree(
        path, {
            'more illegal:characters?.py':
            'My arch nemesis uses Windows and I will '
            'destroy them! Muahahaha'
        })
    with swallow_logs(new_level=logging.WARN) as cml:
        res = ds.save()
        # we shouldn't see warnings
        assert_not_in(
            "Some elements of your dataset are not compatible with Windows "
            "systems. Disable this check by changing "
            "datalad.save.windows-compat-warning or consider renaming the "
            "following elements:", cml.out)
        # make sure the file is saved successfully
        assert_result_count(res, 1, status='ok', action='save')
Example #13
def test_bare(src=None, path=None):
    # create a proper datalad dataset with all bells and whistles
    ds = Dataset(src).create()
    dlconfig_sha = ds.repo.call_git(['rev-parse', 'HEAD:.datalad/config'])
    # can we handle a bare repo version of it?
    gr = AnnexRepo.clone(src,
                         path,
                         clone_options=['--bare', '-b', DEFAULT_BRANCH])
    # we had to specifically checkout the standard branch, because on crippled
    # FS, HEAD will point to an adjusted branch by default, and the test logic
    # below does not account for this case.
    # this should just make sure the bare repo has the expected setup,
    # but it should still be bare. Let's check that to be sure
    assert_true(gr.bare)
    # do we read the correct local config?
    assert_in(gr.pathobj / 'config', gr.config._stores['git']['files'])
    # do we pick up the default branch config too?
    assert_in('blob:HEAD:.datalad/config',
              gr.config._stores['branch']['files'])
    # and track its reload stamp via its file shasum
    assert_equal(
        dlconfig_sha,
        gr.config._stores['branch']['stats']['blob:HEAD:.datalad/config'])
    # check that we can pick up the dsid from the commit branch config
    assert_equal(ds.id, gr.config.get('datalad.dataset.id'))
    # and it is coming from the correct source
    assert_equal(ds.id,
                 gr.config.get_from_source('branch', 'datalad.dataset.id'))
    assert_equal(None, gr.config.get_from_source('local',
                                                 'datalad.dataset.id'))
    # any sensible (and also our CI) test environment(s) should have this
    assert_in('user.name', gr.config)
    # now set something that wasn't there
    obscure_key = 'sec.reallyobscurename!@@.key'
    assert_not_in(obscure_key, gr.config)
    # to the local config, which is easily accessible
    gr.config.set(obscure_key, 'myvalue', scope='local')
    assert_equal(gr.config.get(obscure_key), 'myvalue')
    # now make sure the config is where we think it is
    assert_in(obscure_key.split('.')[1], (gr.pathobj / 'config').read_text())
    # update committed config and check update
    old_id = ds.id
    ds.config.set('datalad.dataset.id', 'surprise!', scope='branch')
    ds.save()
    # fetch into default branch (like `update`, but for bare-repos)
    gr.call_git(
        ['fetch', f'{DEFAULT_REMOTE}', f'{DEFAULT_BRANCH}:{DEFAULT_BRANCH}'])
    # without a reload, no state change, like with non-bare repos
    assert_equal(old_id,
                 gr.config.get_from_source('branch', 'datalad.dataset.id'))
    # a non-forced reload() must be enough, because state change
    # detection kicks in
    gr.config.reload()
    assert_equal('surprise!', gr.config.get('datalad.dataset.id'))
Example #14
File: test_log.py  Project: datalad/datalad
def check_filters(name):
    with swallow_logs(new_level=logging.DEBUG, name=name) as cml:
        lgr1 = logging.getLogger(name + '.goodone')
        lgr2 = logging.getLogger(name + '.anotherone')
        lgr3 = logging.getLogger(name + '.bad')
        lgr1.debug('log1')
        lgr2.info('log2')
        lgr3.info('log3')
        assert_in('log1', cml.out)
        assert_in('log2', cml.out)
        assert_not_in('log3', cml.out)
Example #15
def test_ensure_datalad_remote_maybe_enable(path=None, *, autoenable):
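    # NOTE: `autoenable` is supplied by test parametrization, and helpers such
    # as init_datalad_remote, ensure_datalad_remote and DATALAD_SPECIAL_REMOTE
    # come from the original module's imports (not shown here)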
    path = Path(path)
    ds_a = Dataset(path / "a").create(force=True)
    init_datalad_remote(ds_a.repo, DATALAD_SPECIAL_REMOTE,
                        autoenable=autoenable)

    ds_b = clone(source=ds_a.path, path=path / "b")
    repo = ds_b.repo
    if not autoenable:
        assert_not_in("datalad", repo.get_remotes())
    ensure_datalad_remote(repo)
    assert_in("datalad", repo.get_remotes())
Example #16
def check_python_exec(cmd, path):
    ds = Dataset(path).create()

    # 'exec' produces no 'result' key in its report
    res = ds.foreach_dataset(cmd, cmd_type='exec')
    assert_not_in('result', res[0])
    # but allows for more complete/interesting setups in which we could import modules etc
    res = ds.foreach_dataset('import sys; print("DIR: %s" % str(dir()))',
                             output_streams='capture',
                             cmd_type='exec')
    assert_in('ds', res[0]['stdout'])
    assert_in('sys', res[0]['stdout'])
    eq_(res[0]['stderr'], '')
Example #17
def test_dataset_local_mode(path=None):
    ds = create(path)
    # any sensible (and also our CI) test environment(s) should have this
    assert_in('user.name', ds.config)
    # from .datalad/config
    assert_in('datalad.dataset.id', ds.config)
    # from .git/config
    assert_in('annex.version', ds.config)
    # now check that branch-local mode doesn't have the global piece
    cfg = ConfigManager(ds, source='branch-local')
    assert_not_in('user.name', cfg)
    assert_in('datalad.dataset.id', cfg)
    assert_in('annex.version', cfg)
Example #18
def test__version__():
    # in released stage, version in the last CHANGELOG entry
    # should correspond to the one in datalad
    CHANGELOG_filename = op.join(
        op.dirname(__file__), op.pardir, op.pardir, 'CHANGELOG.md')
    if not op.exists(CHANGELOG_filename):
        raise SkipTest("no %s found" % CHANGELOG_filename)
    regex = re.compile(r'^# '
                       r'(?P<version>[0-9]+\.[0-9.abcrc~]+)\s+'
                       r'\((?P<date>.*)\)'
                       # optional release codename; this group is referenced
                       # below but was missing from the pattern as listed
                       # (assuming a "<version> (<date>) -- <codename>" header)
                       r'(?:\s+--\s+(?P<codename>.*))?'
                       )
    with open(CHANGELOG_filename, 'rb') as f:
        for line in f:
            line = line.rstrip()
            if not line.startswith(b'# '):
                # skip until the first top-level section header
                continue
            reg = regex.match(ensure_unicode(line))
            if not reg:
                # the first header at that level must be our changelog entry
                raise AssertionError(
                    "The following line should have matched our regex: %r"
                    % line)
            regd = reg.groupdict()
            changelog_version = regd['version']
            lv_changelog_version = Version(changelog_version)
            # we might have a ".dirty" suffix - sanitize with replace(),
            # since rstrip() would strip a *set* of characters
            san__version__ = __version__.replace('.dirty', '')
            lv__version__ = Version(san__version__)
            if '???' in regd['date'] \
                    and 'will be better than ever' in (regd['codename'] or ''):
                # we only have our template
                # we can only assert that its version should be higher than
                # the one we have now
                assert_greater(lv_changelog_version, lv__version__)
            else:
                # should be a "release" record
                assert_not_in('???', regd['date'])
                ok_startswith(__version__, changelog_version)
                if lv__version__ != lv_changelog_version:
                    # It was not tagged yet and Changelog has no new records
                    # (they are composed by auto upon release)
                    assert_greater(lv__version__, lv_changelog_version)
                    assert_in('+', san__version__)  # we have build suffix
                else:
                    # all is good, tagged etc
                    assert_equal(lv_changelog_version, lv__version__)
                    assert_equal(changelog_version, san__version__)
            return

    raise AssertionError(
        "No log line matching our regex found in %s" % CHANGELOG_filename
    )
Example #19
def test_crazy_cfg(path=None):
    cfg = ConfigManager(GitRepo(opj(path, 'ds'), create=True), source='branch')
    assert_in('crazy.padry', cfg)
    # make sure crazy config is not read when in local mode
    cfg = ConfigManager(Dataset(opj(path, 'ds')), source='local')
    assert_not_in('crazy.padry', cfg)
    # it does make it in under 'any' mode though
    cfg = ConfigManager(Dataset(opj(path, 'ds')), source='any')
    assert_in('crazy.padry', cfg)
    # typos in the source mode arg will not have silent side-effects
    assert_raises(ValueError,
                  ConfigManager,
                  Dataset(opj(path, 'ds')),
                  source='locale')
Example #20
def test_update_docstring_with_parameters_no_kwds():
    from datalad.support.param import Parameter

    def fn(pos0):
        "fn doc"

    assert_not_in("3", fn.__doc__)
    # Call doesn't crash when there are no keyword arguments.
    update_docstring_with_parameters(
        fn,
        dict(pos0=Parameter(doc="pos0 param doc"),
             pos1=Parameter(doc="pos1 param doc")),
        add_args={"pos1": 3})
    assert_in("3", fn.__doc__)
Example #21
def test_alter_interface_docs_for_cmdline():
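    # NOTE: demo_doc, demo_argdoc and demo_paramdoc are sample docstrings
    # defined at module level in the original test file (not shown here)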
    alt = alter_interface_docs_for_cmdline(demo_doc)
    alt_l = alt.split('\n')
    # dedented
    assert_false(alt_l[0].startswith(' '))
    assert_false(alt_l[-1].startswith(' '))
    assert_not_in('PY', alt)
    assert_not_in('CMD', alt)
    assert_not_in('REFLOW', alt)
    assert_in('a b', alt)
    assert_in('not\n   reflowed', alt)
    assert_in("Something for the cmdline only Multiline!", alt)
    # args
    altarg = alter_interface_docs_for_cmdline(demo_argdoc)
    # RST role markup
    eq_(alter_interface_docs_for_cmdline(':murks:`me and my buddies`'),
        'me and my buddies')
    # spread across lines
    eq_(alter_interface_docs_for_cmdline(':term:`Barbara\nStreisand`'),
        'Barbara\nStreisand')
    # multiple on one line
    eq_(
        alter_interface_docs_for_cmdline(
            ':term:`one` bla bla :term:`two` bla'), 'one bla bla two bla')

    altpd = alter_interface_docs_for_cmdline(demo_paramdoc)
    assert_not_in('python', altpd)
    assert_in('in between', altpd)
    assert_in('appended', altpd)
    assert_in('cmdline', altpd)
Example #22
def test_surprise_subds(path=None):
    # https://github.com/datalad/datalad/issues/3139
    ds = create(path, force=True)
    # a lonely repo without any commit
    somerepo = AnnexRepo(path=op.join(path, 'd1', 'subrepo'), create=True)
    # a proper subdataset
    subds = create(op.join(path, 'd2', 'subds'), force=True)

    # If subrepo is an adjusted branch, it would have a commit, making most of
    # this test irrelevant because it is about the unborn branch edge case.
    adjusted = somerepo.is_managed_branch()
    # This edge case goes away with Git v2.22.0.
    fixed_git = somerepo.git_version >= '2.22.0'

    # save non-recursive
    res = ds.save(recursive=False, on_failure='ignore')
    if not adjusted and fixed_git:
        # We get an appropriate error about no commit being checked out.
        assert_in_results(res, action='add_submodule', status='error')

    # the content of both subds and subrepo are not added to their
    # respective parent as no --recursive was given
    assert_repo_status(subds.path, untracked=['subfile'])
    assert_repo_status(somerepo.path, untracked=['subfile'])

    if adjusted or fixed_git:
        if adjusted:
            # adjusted branch: #datalad/3178 (that would have a commit)
            modified = [subds.repo.pathobj, somerepo.pathobj]
            untracked = []
        else:
            # Newer Git versions refuse to add a sub-repository with no commits
            # checked out.
            modified = [subds.repo.pathobj]
            untracked = ['d1']
        assert_repo_status(ds.path, modified=modified, untracked=untracked)
        assert_not_in(ds.repo.pathobj / 'd1' / 'subrepo' / 'subfile',
                      ds.repo.get_content_info())
    else:
        # however, while the subdataset is added (and reported as modified
        # because its content is still untracked) the subrepo
        # cannot be added (it has no commit)
        # worse: its untracked file has been added to the superdataset
        assert_repo_status(ds.path, modified=['d2/subds'])
        assert_in(ds.repo.pathobj / 'd1' / 'subrepo' / 'subfile',
                  ds.repo.get_content_info())
    # with proper subdatasets, all evil is gone
    assert_not_in(ds.repo.pathobj / 'd2' / 'subds' / 'subfile',
                  ds.repo.get_content_info())
Example #23
def test_dataset_systemglobal_mode(path=None):
    ds = create(path)
    # any sensible (and also our CI) test environment(s) should have this
    assert_in('user.name', ds.config)
    # from .datalad/config
    assert_in('datalad.dataset.id', ds.config)
    # from .git/config
    assert_in('annex.version', ds.config)
    with chpwd(path):
        # now check that no config from a random dataset at PWD is picked up
        # if no dataset instance was provided
        cfg = ConfigManager(dataset=None, source='any')
        assert_in('user.name', cfg)
        assert_not_in('datalad.dataset.id', cfg)
        assert_not_in('annex.version', cfg)
Example #24
File: test_log.py  Project: datalad/datalad
def test_log_progress_noninteractive_filter():
    name = "dl-test"
    lgr = LoggerHelper(name).get_initialized_logger()
    pbar_id = "lp_test"
    with swallow_logs(new_level=logging.INFO, name=name) as cml:
        log_progress(lgr.info, pbar_id, "Start", label="testing", total=3)
        log_progress(lgr.info, pbar_id, "THERE0", update=1)
        log_progress(lgr.info, pbar_id, "NOT", update=1,
                     noninteractive_level=logging.DEBUG)
        log_progress(lgr.info, pbar_id, "THERE1", update=1,
                     noninteractive_level=logging.INFO)
        log_progress(lgr.info, pbar_id, "Done")
        for present in ["Start", "THERE0", "THERE1", "Done"]:
            assert_in(present, cml.out)
        assert_not_in("NOT", cml.out)
Example #25
def test_cmdline_example_to_rst():
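    # NOTE: `fmt` (the docs formatter module), SIO (a StringIO alias) and the
    # module-level `demo_example` text come from the original test's imports,
    # which are not shown in this listing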
    # don't puke on nothing
    out = fmt.cmdline_example_to_rst(SIO(''))
    out.seek(0)
    ok_startswith(out.read(), '.. AUTO-GENERATED')
    out = fmt.cmdline_example_to_rst(SIO(''), ref='dummy')
    out.seek(0)
    assert_in('.. dummy:', out.read())
    # full scale test
    out = fmt.cmdline_example_to_rst(SIO(demo_example), ref='mydemo')
    out.seek(0)
    out_text = out.read()
    assert_in('.. code-block:: sh', out_text)
    assert_not_in('shame', out_text)  # SKIP'ed content is dropped
    assert_not_in('#', out_text)  # comments are dropped
Example #26
def test_aggregate_query(path=None):
    ds = Dataset(path).create(force=True)
    # no magic change to actual dataset metadata due to presence of
    # aggregated metadata
    res = ds.metadata(reporton='datasets', on_failure='ignore')
    assert_result_count(res, 1)
    assert_not_in('metadata', res[0])
    # but we can now ask for metadata of stuff that is unknown on disk
    res = ds.metadata(opj('sub', 'deep', 'some'), reporton='datasets')
    assert_result_count(res, 1)
    eq_({'homepage': 'http://top.example.com'}, res[0]['metadata'])
    # (the dataset the method is called on serves as the reference dataset
    # and metadata provider for such queries)
Example #27
def test_gh1597(path=None):
    ds = Dataset(path).create()
    sub = ds.create('sub')
    res = ds.subdatasets()
    assert_result_count(res, 1, path=sub.path)
    # now modify .gitmodules with another command
    ds.subdatasets(contains=sub.path, set_property=[('this', 'that')])
    # now modify low-level
    with open(op.join(ds.path, '.gitmodules'), 'a') as f:
        f.write('\n')
    assert_repo_status(ds.path, modified=['.gitmodules'])
    ds.save('.gitmodules')
    # must not come under annex management
    assert_not_in(
        'key',
        ds.repo.get_content_annexinfo(paths=['.gitmodules']).popitem()[1])
Example #28
def test_download_url_archive(toppath=None, topurl=None, path=None):
    ds = Dataset(path).create()
    ds.download_url([topurl + "archive.tar.gz"], archive=True)
    ok_(ds.repo.file_has_content(opj("archive", "file1.txt")))
    assert_not_in(opj(ds.path, "archive.tar.gz"), ds.repo.format_commit("%B"))
    # we should yield an 'impossible' result from add-archive-content when
    # there is untracked content (gh-6170)
    create_tree(ds.path, {'this': 'dirty'})
    assert_in_results(
        ds.download_url([topurl + "archive.tar.gz"],
                        archive=True,
                        on_failure='ignore'),
        status='impossible',
        action='add-archive-content',
        message='clean dataset required. Use `datalad status` to inspect '
        'unsaved changes')
Example #29
def test_call_from_parser_result_filter():
    class DummyOne(Interface):
        @staticmethod
        def __call__(**kwargs):
            yield kwargs

    # call_from_parser doesn't add result_filter to the keyword arguments
    assert_not_in("result_filter", call_from_parser(DummyOne, _new_args())[0])
    # with dissolution of _OLD_STYLE_COMMANDS and just relying on having
    # @eval_results, no result_filter is added, since those commands are
    # not guaranteed to return/yield any record suitable for filtering.
    # The effect is the same -- those "common" options are not really applicable
    # to Interface's which do not return/yield expected records
    assert_not_in(
        "result_filter",
        call_from_parser(DummyOne, _new_args(common_report_type="dataset"))[0])
Example #30
def test_initremote(store_path=None, store_url=None, ds_path=None):
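    # NOTE: LocalIO, create_store, create_ds_in_store and the module-level
    # common_init_opts are RIA/ORA store helpers imported in the original
    # test module (imports not shown in this listing)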
    ds = Dataset(ds_path).create()
    store_path = Path(store_path)
    url = "ria+" + store_url
    init_opts = common_init_opts + ['url={}'.format(url)]

    # fail when there's no RIA store at the destination
    assert_raises(CommandError,
                  ds.repo.init_remote,
                  'ora-remote',
                  options=init_opts)
    # Doesn't actually create a remote if it fails
    assert_not_in(
        'ora-remote',
        [cfg['name'] for uuid, cfg in ds.repo.get_special_remotes().items()])

    # now make it a store
    io = LocalIO()
    create_store(io, store_path, '1')
    create_ds_in_store(io, store_path, ds.id, '2', '1')

    # fails on non-RIA URL
    assert_raises(CommandError,
                  ds.repo.init_remote,
                  'ora-remote',
                  options=common_init_opts + [
                      'url={}'.format(store_path.as_uri())])
    # Doesn't actually create a remote if it fails
    assert_not_in(
        'ora-remote',
        [cfg['name'] for uuid, cfg in ds.repo.get_special_remotes().items()])

    ds.repo.init_remote('ora-remote', options=init_opts)
    assert_in(
        'ora-remote',
        [cfg['name'] for uuid, cfg in ds.repo.get_special_remotes().items()])
    assert_repo_status(ds.path)
    # git-annex:remote.log should have:
    #   - url
    #   - common_init_opts
    #   - archive_id (which equals ds id)
    remote_log = ds.repo.call_git(['cat-file', 'blob', 'git-annex:remote.log'],
                                  read_only=True)
    assert_in("url={}".format(url), remote_log)
    for c in common_init_opts:
        assert_in(c, remote_log)
    assert_in("archive-id={}".format(ds.id), remote_log)