def test_smoke_convertall(tmpdir):
    """Smoke test: run the convertall heuristic with datalad on one fmap subject."""
    cmdline = (
        "-f convertall -c dcm2niix -o %s -b --datalad "
        "-s fmap_acq-3mm -d %s/{subject}/*" % (tmpdir, TESTS_DATA_PATH)
    )
    runner(cmdline.split(' '))
def test_ls(stdout):
    """The `ls` command over the test data should print study session info."""
    cmdline = "-f reproin --command ls --files %s" % (TESTS_DATA_PATH)
    runner(cmdline.split(' '))
    output = stdout.getvalue()
    assert 'StudySessionInfo(locator=' in output
    assert 'Halchenko/Yarik/950_bids_test4' in output
def test_reproin_largely_smoke(tmpdir, heuristic, invocation):
    """Largely a smoke test of a full datalad-backed conversion run.

    Checks safeguards (multiple --subjects rejected; non-reproin heuristics
    lack infotoids), then verifies a rerun fails without --overwrite and that
    --overwrite leaves the dataset clean and at the same commit.
    """
    is_bids = True if heuristic == 'reproin' else False
    arg = "--random-seed 1 -f %s -c dcm2niix -o %s" \
        % (heuristic, tmpdir)
    if is_bids:
        arg += " -b"
    arg += " --datalad "
    args = (
        arg + invocation
    ).split(' ')

    # Test some safeguards
    if invocation == "--files %s" % TESTS_DATA_PATH:
        # Multiple subjects must not be specified -- only a single one could
        # be overridden from the command line
        with pytest.raises(ValueError):
            runner(args + ['--subjects', 'sub1', 'sub2'])

        if heuristic != 'reproin':
            # none other heuristic has mighty infotoids atm
            with pytest.raises(NotImplementedError):
                runner(args)
            return

    runner(args)
    ds = Dataset(str(tmpdir))
    assert ds.is_installed()
    assert not ds.repo.dirty
    head = ds.repo.get_hexsha()

    # and if we rerun -- should fail
    lgr.info(
        "RERUNNING, expecting to FAIL since the same everything "
        "and -c specified so we did conversion already"
    )
    with pytest.raises(RuntimeError):
        runner(args)

    # but there should be nothing new
    assert not ds.repo.dirty
    assert head == ds.repo.get_hexsha()

    # unless we pass 'overwrite' flag
    runner(args + ['--overwrite'])
    # but result should be exactly the same, so it still should be clean
    # and at the same commit
    assert ds.is_installed()
    assert not ds.repo.dirty
    assert head == ds.repo.get_hexsha()
def test_scout_conversion(tmpdir):
    """Scout series must be stashed under sourcedata, not converted into anat/."""
    outdir = tmpdir.strpath
    argv = ("-b -f reproin --files %s" % (TESTS_DATA_PATH)).split(' ')
    runner(argv + ['-o', outdir])

    # no converted scout in the BIDS anat/ directory
    anat_dir = pjoin(
        outdir,
        'Halchenko/Yarik/950_bids_test4/sub-phantom1sid1/ses-localizer/anat')
    assert not op.exists(anat_dir)

    # raw scout DICOMs are tarballed into sourcedata
    scout_tgz = pjoin(
        outdir,
        'Halchenko/Yarik/950_bids_test4/sourcedata/sub-phantom1sid1/'
        'ses-localizer/anat/sub-phantom1sid1_ses-localizer_scout.dicom.tgz')
    assert op.exists(scout_tgz)
def test_scans_keys_reproin(tmpdir, invocation):
    """Check the produced _scans.tsv: expected header, 4 columns per row,
    each data row pointing at an existing file with an ISO-8601 acq_time.
    """
    args = "-f reproin -c dcm2niix -o %s -b " % (tmpdir)
    args += invocation
    runner(args.split())
    # for now check it exists
    scans_keys = glob(pjoin(tmpdir.strpath, '*/*/*/*/*/*.tsv'))
    assert len(scans_keys) == 1
    with open(scans_keys[0]) as f:
        reader = csv.reader(f, delimiter='\t')
        for i, row in enumerate(reader):
            if i == 0:
                # header row carries exactly these four columns
                assert row == ['filename', 'acq_time', 'operator', 'randstr']
            assert len(row) == 4
            if i != 0:
                # filename column references an existing sibling file
                assert os.path.exists(pjoin(dirname(scans_keys[0]), row[0]))
                # acq_time must be ISO-8601; raw string so \d is a regex
                # class, not an invalid Python string escape
                assert re.match(
                    r'^[\d]{4}-[\d]{2}-[\d]{2}T[\d]{2}:[\d]{2}:[\d]{2}$',
                    row[1])
def test_scout_conversion(tmpdir):
    """Verify scout series end up tarballed in sourcedata rather than in anat/.

    NOTE(review): this redefines test_scout_conversion declared earlier in
    this file -- pytest only collects the last definition of the name.
    """
    out = tmpdir.strpath
    argv = (
        "-b -f reproin --files %s" % (TESTS_DATA_PATH)
    ).split(' ')
    argv += ['-o', out]
    runner(argv)

    assert not op.exists(pjoin(
        out,
        'Halchenko/Yarik/950_bids_test4/sub-phantom1sid1/ses-localizer/anat'))
    assert op.exists(pjoin(
        out,
        'Halchenko/Yarik/950_bids_test4/sourcedata/sub-phantom1sid1/'
        'ses-localizer/anat/sub-phantom1sid1_ses-localizer_scout.dicom.tgz'))
def test_populate_bids_templates(tmpdir):
    """populate_bids_templates should seed TODO-laden top-level BIDS files,
    honoring provided defaults, both as a function and via the CLI command.
    """
    populate_bids_templates(
        str(tmpdir),
        defaults={'Acknowledgements': 'something'})
    # Just test that we have created them and they all have stuff TODO
    for fname in ("README", "dataset_description.json", "CHANGES"):
        assert "TODO" in tmpdir.join(fname).read()
    description_file = tmpdir.join('dataset_description.json')
    assert "something" in description_file.read()

    # it should also be available as a command
    os.unlink(str(description_file))
    runner([
        '--command', 'populate-templates',
        '-f', 'convertall',
        '--files', str(tmpdir)
    ])
    # regenerated without defaults: placeholder text only
    regenerated = description_file.read()
    assert "something" not in regenerated
    assert "TODO" in regenerated
def test_notop(tmpdir, bidsoptions):
    """With the 'notop' bids option, top-level BIDS files must be absent;
    otherwise they must all be produced.
    """
    tmppath = tmpdir.strpath
    argv = ("-f reproin --files %s" % (TESTS_DATA_PATH)).split(' ')
    argv += ['-o', tmppath]
    argv += ['-b']
    argv += bidsoptions
    runner(argv)

    study_dir = pjoin(tmppath, 'Halchenko/Yarik/950_bids_test4')
    assert op.exists(study_dir)
    top_level = [
        'CHANGES', 'dataset_description.json', 'participants.tsv',
        'README', 'participants.json'
    ]
    for fname in top_level:
        present = op.exists(pjoin(study_dir, fname))
        if 'notop' in bidsoptions:
            assert not present
        else:
            assert present
def test_scans_keys_reproin(tmpdir, invocation):
    """Check the produced _scans.tsv: expected header, 4 columns per row,
    each data row pointing at an existing file with an ISO-8601 acq_time.

    NOTE(review): this redefines test_scans_keys_reproin declared earlier in
    this file -- pytest only collects the last definition of the name.
    """
    args = "-f reproin -c dcm2niix -o %s -b " % (tmpdir)
    args += invocation
    runner(args.split())
    # for now check it exists
    scans_keys = glob(pjoin(tmpdir.strpath, '*/*/*/*/*/*.tsv'))
    assert len(scans_keys) == 1
    with open(scans_keys[0]) as f:
        reader = csv.reader(f, delimiter='\t')
        for i, row in enumerate(reader):
            if i == 0:
                # header row carries exactly these four columns
                assert row == ['filename', 'acq_time', 'operator', 'randstr']
            assert len(row) == 4
            if i != 0:
                # filename column references an existing sibling file
                assert os.path.exists(pjoin(dirname(scans_keys[0]), row[0]))
                # acq_time must be ISO-8601; raw string so \d is a regex
                # class, not an invalid Python string escape
                assert re.match(
                    r'^[\d]{4}-[\d]{2}-[\d]{2}T[\d]{2}:[\d]{2}:[\d]{2}$',
                    row[1])
def test_reproin_largely_smoke(tmpdir, heuristic, invocation):
    """Largely a smoke test of a full datalad-backed conversion run.

    Checks safeguards (multiple --subjects rejected; non-reproin heuristics
    lack infotoids), then verifies a rerun fails without --overwrite and that
    --overwrite leaves the dataset clean and at the same commit.
    """
    is_bids = True if heuristic == 'reproin' else False
    arg = "--random-seed 1 -f %s -c dcm2niix -o %s" \
        % (heuristic, tmpdir)
    if is_bids:
        arg += " -b"
    arg += " --datalad "
    args = (arg + invocation).split(' ')

    # Test some safeguards
    if invocation == "--files %s" % TESTS_DATA_PATH:
        # Multiple subjects must not be specified -- only a single one could
        # be overridden from the command line
        with pytest.raises(ValueError):
            runner(args + ['--subjects', 'sub1', 'sub2'])

        if heuristic != 'reproin':
            # none other heuristic has mighty infotoids atm
            with pytest.raises(NotImplementedError):
                runner(args)
            return

    runner(args)
    ds = Dataset(str(tmpdir))
    assert ds.is_installed()
    assert not ds.repo.dirty
    head = ds.repo.get_hexsha()

    # and if we rerun -- should fail
    lgr.info("RERUNNING, expecting to FAIL since the same everything "
             "and -c specified so we did conversion already")
    with pytest.raises(RuntimeError):
        runner(args)

    # but there should be nothing new
    assert not ds.repo.dirty
    assert head == ds.repo.get_hexsha()

    # unless we pass 'overwrite' flag
    runner(args + ['--overwrite'])
    # but result should be exactly the same, so it still should be clean
    # and at the same commit
    assert ds.is_installed()
    assert not ds.repo.dirty
    assert head == ds.repo.get_hexsha()
def test_conversion(tmpdir, subject, heuristic):
    """End-to-end conversion of fetched data; compare functionals and key
    JSON metadata fields between the original and converted datasets.
    """
    tmpdir.chdir()
    datadir = fetch_data(tmpdir.strpath, subject)
    outdir = tmpdir.mkdir('out').strpath
    args = gen_heudiconv_args(datadir, outdir, subject, heuristic)
    runner(args)  # run conversion

    # verify functionals were converted
    converted = glob('{}/{}/func/*'.format(outdir, subject))
    expected = glob('{}/{}/func/*'.format(datadir, subject))
    assert converted == expected

    # compare some json metadata
    json_ = '{}/task-rest_acq-24mm64sl1000tr32te600dyn_bold.json'.format
    orig = json.load(open(json_(datadir)))
    conv = json.load(open(json_(outdir)))
    for key in ('EchoTime', 'MagneticFieldStrength', 'Manufacturer',
                'SliceTiming'):
        assert orig[key] == conv[key]
def test_scout_conversion(tmpdir):
    """Scouts must be tarballed into sourcedata; converted fmap JSONs must
    record the producing HeuDiConv version.

    NOTE(review): this redefines test_scout_conversion declared earlier in
    this file -- pytest only collects the last definition of the name.
    """
    tmppath = tmpdir.strpath
    argv = ("-b -f reproin --files %s" % (TESTS_DATA_PATH)).split(' ')
    runner(argv + ['-o', tmppath])

    dspath = Path(tmppath) / 'Halchenko/Yarik/950_bids_test4'
    sespath = dspath / 'sub-phantom1sid1/ses-localizer'

    assert not (sespath / 'anat').exists()
    scout_tgz = (dspath /
                 'sourcedata/sub-phantom1sid1/ses-localizer/'
                 'anat/sub-phantom1sid1_ses-localizer_scout.dicom.tgz')
    assert scout_tgz.exists()

    # Let's do some basic checks on produced files
    j = load_json(
        sespath / 'fmap/sub-phantom1sid1_ses-localizer_acq-3mm_phasediff.json')
    # We store HeuDiConv version in each produced .json file
    # TODO: test that we are not somehow overwritting that version in existing
    # files which we have not produced in a particular run.
    assert j[HEUDICONV_VERSION_JSON_KEY] == __version__
def test_cache(tmpdir):
    """A conversion run must populate the .heudiconv cache with the expected
    per-subject info files and a dicominfo table ending in a 'time' column.
    """
    tmppath = tmpdir.strpath
    argv = (
        "-f convertall --files %s/axasc35.dcm -s S01" % (TESTS_DATA_PATH)
    ).split(' ')
    runner(argv + ['-o', tmppath])

    cachedir = tmpdir / '.heudiconv' / 'S01' / 'info'
    assert cachedir.exists()

    # check individual files
    for cached in ('heuristic.py', 'filegroup.json', 'dicominfo.tsv',
                   'S01.auto.txt', 'S01.edit.txt'):
        assert (cachedir / cached).exists()

    # check dicominfo has "time" as last column:
    with open(str(cachedir / 'dicominfo.tsv'), 'r') as f:
        cols = f.readline().split()
    assert cols[26] == "time"
def test_conversion(tmpdir, subject, heuristic):
    """End-to-end conversion of fetched data, skipping when the fetch fails.

    NOTE(review): this redefines test_conversion declared earlier in this
    file -- pytest only collects the last definition of the name.
    """
    tmpdir.chdir()
    try:
        datadir = fetch_data(tmpdir.strpath, subject)
    except IncompleteResultsError as exc:
        # data fetch is network-dependent; not a conversion failure
        pytest.skip("Failed to fetch test data: %s" % str(exc))
    outdir = tmpdir.mkdir('out').strpath
    runner(gen_heudiconv_args(datadir, outdir, subject, heuristic))  # run conversion

    # verify functionals were converted
    assert (glob('{}/{}/func/*'.format(outdir, subject))
            == glob('{}/{}/func/*'.format(datadir, subject)))

    # compare some json metadata
    json_ = '{}/task-rest_acq-24mm64sl1000tr32te600dyn_bold.json'.format
    orig = json.load(open(json_(datadir)))
    conv = json.load(open(json_(outdir)))
    for key in ('EchoTime', 'MagneticFieldStrength', 'Manufacturer',
                'SliceTiming'):
        assert orig[key] == conv[key]
def test_grouping(tmpdir, subject):
    """DICOMs from conflicting studies under one subject must abort by
    default, and convert fully when grouping 'all' is requested.
    """
    dicoms = [op.join(TESTS_DATA_PATH, fl)
              for fl in ('axasc35.dcm', 'phantom.dcm')]
    # ensure DICOMs are different studies
    studyuids = {
        dcm.read_file(fl, stop_before_pixels=True).StudyInstanceUID
        for fl in dicoms
    }
    assert len(studyuids) == len(dicoms)

    # symlink to common location
    outdir = tmpdir.mkdir('out')
    datadir = tmpdir.mkdir(subject)
    for fl in dicoms:
        os.symlink(fl, (datadir / op.basename(fl)).strpath)

    template = op.join("{subject}/*.dcm")
    hargs = gen_heudiconv_args(
        tmpdir.strpath, outdir.strpath, subject, 'convertall.py',
        template=template)

    # mixed studies under one subject must be refused by default
    with pytest.raises(AssertionError):
        runner(hargs)

    # group all found DICOMs under subject, despite conflicts
    hargs += ["-g", "all"]
    runner(hargs)
    assert len(list(outdir.visit(fil='run0*'))) == 4

    tsv = outdir / 'participants.tsv'
    assert tsv.check()
    lines = tsv.open().readlines()
    assert len(lines) == 2
    assert lines[1].split('\t')[0] == 'sub-{}'.format(subject)
def test_dbic_bids_largely_smoke(tmpdir, heuristic, invocation):
    """Largely a smoke test of a full datalad-backed conversion run across
    heuristics and invocations, verifying rerun/--overwrite leave the
    dataset clean and at the same commit.
    """
    is_bids = True if heuristic == 'dbic_bids' else False
    arg = "-f heuristics/%s.py -c dcm2niix -o %s" % (heuristic, tmpdir)
    if is_bids:
        arg += " -b"
    arg += " --datalad "
    args = (
        arg + invocation
    ).split(' ')

    if heuristic != 'dbic_bids' and invocation == '--files tests/data':
        # none other heuristic has mighty infotoids atm
        with pytest.raises(NotImplementedError):
            runner(args)
        return

    runner(args)
    ds = Dataset(str(tmpdir))
    assert ds.is_installed()
    assert not ds.repo.dirty
    head = ds.repo.get_hexsha()

    # and if we rerun -- should fail
    if heuristic != 'dbic_bids' and invocation != '--files tests/data':
        # those guys -- they just plow through it ATM without failing, i.e.
        # the logic is to reprocess
        runner(args)  # don't raise error by rerun...
    # else:
    #     with pytest.raises(RuntimeError):
    #         runner(args)

    # but there should be nothing new
    assert not ds.repo.dirty
    assert head == ds.repo.get_hexsha()

    # unless we pass 'overwrite' flag
    runner(args + ['--overwrite'])
    # but result should be exactly the same, so it still should be clean
    # and at the same commit
    assert ds.is_installed()
    assert not ds.repo.dirty
    assert head == ds.repo.get_hexsha()
def test_ls(stdout):
    """The `ls` command with the dbic_bids heuristic should report sessions.

    NOTE(review): this redefines test_ls declared earlier in this file --
    pytest only collects the last definition of the name.
    """
    cmdline = "-f heuristics/dbic_bids.py --command ls --files tests/data"
    runner(cmdline.split(' '))
    captured = stdout.getvalue()
    assert 'StudySessionInfo(locator=' in captured
    assert 'Halchenko/Yarik/950_bids_test4' in captured
def test_smoke_convertall(tmpdir):
    """Smoke test: convertall heuristic with datalad on one fmap subject.

    NOTE(review): this redefines test_smoke_convertall declared earlier in
    this file -- pytest only collects the last definition of the name.
    """
    cmdline = (
        "-f convertall -c dcm2niix -o %s -b --datalad "
        "-s fmap_acq-3mm -d %s/{subject}/*" % (tmpdir, TESTS_DATA_PATH)
    )
    runner(cmdline.split(' '))
def test_main_help(stdout):
    """--help should exit (SystemExit) after printing a usage message."""
    with pytest.raises(SystemExit):
        runner(['--help'])
    captured = stdout.getvalue()
    assert captured.startswith("usage: ")
def test_main_version(std):
    """--version should exit (SystemExit) after printing the package version."""
    with pytest.raises(SystemExit):
        runner(['--version'])
    printed = std.getvalue().rstrip()
    assert printed == __version__
def test_smoke_converall(tmpdir):
    """Smoke test: convertall heuristic (referenced by path) with datalad.

    NOTE(review): function name has a typo ('converall'); kept as-is since
    renaming would change which test pytest collects.
    """
    cmdline = (
        "-f heuristics/convertall.py -c dcm2niix -o %s -b --datalad "
        "-s fmap_acq-3mm -d tests/data/{subject}/*" % tmpdir
    )
    runner(cmdline.split(' '))