def test_ds_extraction(path=None):
    skip_if_no_module('libxmp')

    ds = Dataset(path).create()
    copy(testpath, path)
    ds.save()
    assert_repo_status(ds.path)

    res = extract_metadata(
        types=['xmp'],
        dataset=ds,
        # artificially disable extraction from any file in the dataset
        files=[])
    assert_result_count(
        res, 1,
        type='dataset', status='ok', action='metadata', path=path,
        refds=ds.path)
    assert_in('xmp', res[0]['metadata'])

    # now the more useful case: getting everything for xmp from a dataset
    res = extract_metadata(
        types=['xmp'],
        dataset=ds)
    assert_result_count(res, 2)
    assert_result_count(
        res, 1,
        type='dataset', status='ok', action='metadata', path=path,
        refds=ds.path)
    assert_result_count(
        res, 1,
        type='file', status='ok', action='metadata',
        path=opj(path, 'xmp.pdf'), parentds=ds.path)
    for r in res:
        assert_in('xmp', r['metadata'])
def test_completion(out_fn=None):
    skip_if_no_module('argcomplete')
    from datalad.cmd import WitlessRunner
    runner = WitlessRunner()

    def get_completions(s: str, expected) -> list:
        """Run the 'datalad' external command and collect completions

        Parameters
        ----------
        s: str
          What to append to the 'datalad ' invocation
        expected: iterable of str
          Entries to expect -- an AssertionError is raised if any of them
          is not present in the output

        Returns
        -------
        list of str
          Entries output
        """
        if os.path.exists(out_fn):
            # reuse but ensure it is gone
            os.unlink(out_fn)
        comp_line = f'datalad {s}'
        runner.run(
            comp_line.split(' '),
            env=dict(
                os.environ,
                _ARGCOMPLETE='1',
                _ARGCOMPLETE_STDOUT_FILENAME=out_fn,
                COMP_LINE=comp_line,
                # without -1 we seem to get "finished completion";
                # someone can investigate more
                COMP_POINT=str(len(comp_line) - 1),  # always at the end ATM
            ))
        with open(out_fn, 'rb') as f:
            entries = f.read().split(b'\x0b')
        entries = [e.decode() for e in entries]
        diff = set(expected).difference(entries)
        if diff:
            raise AssertionError(
                f"Entries {sorted(diff)} were expected but not found in "
                f"the completion output: {entries}"
            )
        return entries  # for extra analyses if so desired

    all_commands = get_all_commands()
    get_completions('i', {'install'})
    get_completions(' ', ['--dbg', '-c'] + all_commands)
    # if a command already matches -- we get only that hit ATM,
    # not others which begin with it
    get_completions('create', ['create '])
    get_completions('create -', ['--dataset'])
    # but for an incomplete one we do get all create* commands
    get_completions('creat', [c for c in all_commands if c.startswith('create')])
def test_file_extraction(path=None):
    skip_if_no_module('libxmp')
    # go into virgin dir to avoid detection of any dataset
    with chpwd(path):
        res = extract_metadata(
            types=['xmp'],
            files=[testpath])
        assert_result_count(
            res, 1,
            type='file', status='ok', action='metadata', path=testpath)
        assert_in('xmp', res[0]['metadata'])
def test_check_dates_invalid_date():
    skip_if_no_module("dateutil")

    with swallow_outputs() as cmo:
        assert_raises(IncompleteResultsError,
                      check_dates, [],
                      reference_date="not a valid date",
                      return_type="list")
        out = cmo.out
    # The error makes it through the standard renderer.
    assert_in('"status": "error"', out)
def test_check_dates(path=None):
    skip_if_no_module("dateutil")

    ref_ts = 1218182889  # Fri, 08 Aug 2008 04:08:09 -0400
    refdate = "@{}".format(ref_ts)

    repo = os.path.join(path, "repo")
    with set_date(ref_ts + 5000):
        ar = AnnexRepo(repo)
        ar.add(".")
        ar.commit()

    # The standard renderer outputs json.
    with swallow_outputs() as cmo:
        # Set level to WARNING to avoid the progress bar when
        # DATALAD_TESTS_UI_BACKEND=console.
        with swallow_logs(new_level=logging.WARNING):
            check_dates([repo],
                        reference_date=refdate,
                        return_type="list")
        assert_in("report", json.loads(cmo.out).keys())

    # We find the newer objects.
    newer = call([path], reference_date=refdate)
    eq_(len(newer), 1)
    ok_(newer[0]["report"]["objects"])

    # There are no older objects to find.
    older = call([repo], reference_date=refdate, older=True)
    assert_false(older[0]["report"]["objects"])

    # We can pass the date in RFC 2822 format.
    assert_dict_equal(
        newer[0],
        call([path], reference_date="08 Aug 2008 04:08:09 -0400")[0])

    # paths=None defaults to the current directory.
    with chpwd(path):
        assert_dict_equal(
            newer[0]["report"],
            call(paths=None, reference_date=refdate)[0]["report"])

    # Only commit type is present when annex='none'.
    newer_noannex = call([path], reference_date=refdate, annex="none")
    for entry in newer_noannex[0]["report"]["objects"].values():
        ok_(entry["type"] == "commit")
def testish2():
    # skip_if_no_module is a no-op for a module that is present,
    # so the return value is reached
    skip_if_no_module("datalad")
    return "magic"
def testish():
    # skip_if_no_module should raise SkipTest for a surely missing module,
    # so the ValueError below is never reached
    skip_if_no_module("nonexistingforsuremodule")
    raise ValueError
def test_wtf(topdir=None):
    path = opj(topdir, OBSCURE_FILENAME)
    # smoke test for now
    with swallow_outputs() as cmo:
        wtf(dataset=path, on_failure="ignore")
        assert_not_in('## dataset', cmo.out)
        assert_in('## configuration', cmo.out)
        # Those sections get censored out by default now
        assert_not_in('user.name: ', cmo.out)
    with chpwd(path):
        with swallow_outputs() as cmo:
            wtf()
            assert_not_in('## dataset', cmo.out)
            assert_in('## configuration', cmo.out)

    # now with a dataset
    ds = create(path)
    with swallow_outputs() as cmo:
        wtf(dataset=ds.path)
        assert_in('## configuration', cmo.out)
        assert_in('## dataset', cmo.out)
        assert_in(u'path: {}'.format(ds.path), ensure_unicode(cmo.out))
        assert_in('branches', cmo.out)
        assert_in(DEFAULT_BRANCH + '@', cmo.out)
        assert_in('git-annex@', cmo.out)

    # and if we run with all sensitive
    for sensitive in ('some', True):
        with swallow_outputs() as cmo:
            wtf(dataset=ds.path, sensitive=sensitive)
            # we fake those for tests anyways, but we do show cfg in this mode
            # and explicitly not showing them
            assert_in('user.name: %s' % _HIDDEN, cmo.out)

    with swallow_outputs() as cmo:
        wtf(dataset=ds.path, sensitive='all')
        assert_not_in(_HIDDEN, cmo.out)  # all is shown
        assert_in('user.name: ', cmo.out)

    # Sections selection
    #
    # If we ask for no sections and there is no dataset
    with chpwd(path):
        with swallow_outputs() as cmo:
            wtf(sections=[])
            assert_not_in('## dataset', cmo.out)
            for s in SECTION_CALLABLES:
                assert_not_in('## %s' % s.lower(), cmo.out.lower())

    # ask for a selected set
    secs = ['git-annex', 'configuration']
    with chpwd(path):
        with swallow_outputs() as cmo:
            wtf(sections=secs)
            for s in SECTION_CALLABLES:
                (assert_in if s in secs else assert_not_in)(
                    '## %s' % s.lower(), cmo.out.lower())
            # order should match our desired one, not alphabetical
            # but because of https://github.com/datalad/datalad/issues/3915
            # alphanum is now desired
            assert cmo.out.index('## git-annex') > cmo.out.index(
                '## configuration')

    # passing an empty list of sections is not achievable from the cmdline
    with chpwd(path):
        with swallow_outputs() as cmo:
            wtf(sections=[])
            eq_(cmo.out.rstrip(), '# WTF')

    # and we could decorate it nicely for embedding e.g. into github issues
    with swallow_outputs() as cmo:
        wtf(sections=['dependencies'], decor='html_details')
        ok_startswith(cmo.out, '<details><summary>DataLad %s WTF' % __version__)
        assert_in('## dependencies', cmo.out)

    # short flavor
    with swallow_outputs() as cmo:
        wtf(flavor='short')
        assert_in("- datalad: version=%s" % __version__, cmo.out)
        assert_in("- dependencies: ", cmo.out)
        eq_(len(cmo.out.splitlines()), 4)  # #WTF, datalad, dependencies, trailing newline

    with swallow_outputs() as cmo:
        wtf(flavor='short', sections='*')
        assert_greater(len(cmo.out.splitlines()), 10)  # many more

    # check that wtf of an unavailable section yields impossible result (#6712)
    res = wtf(sections=['murkie'], on_failure='ignore')
    eq_(res[0]["status"], "impossible")  # should result only in '# WTF'

    skip_if_no_module('pyperclip')

    # verify that it works correctly in the env/platform
    import pyperclip
    with swallow_outputs() as cmo:
        try:
            pyperclip.copy("xxx")
            pyperclip_works = pyperclip.paste().strip() == "xxx"
            wtf(dataset=ds.path, clipboard=True)
        except (AttributeError, pyperclip.PyperclipException) as exc:
            # AttributeError could come from pyperclip if no DISPLAY
            raise SkipTest(str(exc))
        assert_in("WTF information of length", cmo.out)
        assert_not_in('user.name', cmo.out)
        if not pyperclip_works:
            # Sometimes it does not throw but just fails to work
            raise SkipTest(
                "Pyperclip seems to be not functioning here correctly")
        assert_not_in('user.name', pyperclip.paste())
        assert_in(_HIDDEN, pyperclip.paste())  # by default no sensitive info
        assert_in("cmd:annex:", pyperclip.paste())  # but the content is there