def test_create_1test_dataset():
    # and just a single dataset
    from datalad.api import create_test_dataset
    with swallow_outputs():
        dss = create_test_dataset()
    eq_(len(dss), 1)
    assert_repo_status(dss[0], annex=False)
def test_create_1test_dataset():
    # and just a single dataset
    from datalad.api import create_test_dataset
    with swallow_outputs():
        dss = create_test_dataset()
    eq_(len(dss), 1)
    ok_clean_git(dss[0], annex=False)
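The two variants above differ only in the repo-status helper: assert_repo_status superseded the deprecated ok_clean_git. For reference, a minimal stand-alone sketch of the same call; the swallow_outputs import path and the .git check are assumptions, the call itself mirrors the tests:

import os.path as op
from datalad.api import create_test_dataset
from datalad.utils import swallow_outputs

# Without `spec`, a single plain-git dataset is generated (hence
# annex=False above); the call returns the list of created paths.
with swallow_outputs():
    dss = create_test_dataset()
assert len(dss) == 1
assert op.isdir(op.join(dss[0], '.git'))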
def test_new_relpath(topdir):
    from datalad.api import create_test_dataset
    with swallow_logs(), chpwd(topdir), swallow_outputs():
        dss = create_test_dataset('testds', spec='1')
    eq_(dss[0], opj(topdir, 'testds'))
    eq_(len(dss), 2)  # 1 top + 1 sub-dataset as demanded
    for ds in dss:
        ok_clean_git(ds, annex=False)
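A relative path is resolved against the working directory (hence the chpwd), and the top dataset comes first in the returned list. A sketch with tempfile standing in for the fixture-provided topdir (the datalad helper import paths are assumptions):

import os.path as op
import tempfile
from datalad.api import create_test_dataset
from datalad.utils import chpwd, swallow_logs, swallow_outputs

topdir = tempfile.mkdtemp()
with swallow_logs(), chpwd(topdir), swallow_outputs():
    dss = create_test_dataset('testds', spec='1')
assert dss[0] == op.join(topdir, 'testds')  # top dataset first
assert len(dss) == 2                        # plus its one subdataset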
def test_create_test_dataset():
    # rudimentary smoke test
    from datalad.api import create_test_dataset
    with swallow_logs(), swallow_outputs():
        dss = create_test_dataset(spec='2/1-2')
    ok_(5 <= len(dss) <= 7)  # at least five - 1 top, two on top level, 1 in each
    for ds in dss:
        ok_clean_git(ds, annex=None)  # some of them are annex but we just don't check
        ok_(len(glob(opj(ds, 'file*'))))
def test_create_test_dataset():
    # rudimentary smoke test
    from datalad.api import create_test_dataset
    with swallow_logs(), swallow_outputs():
        dss = create_test_dataset(spec='2/1-2')
    ok_(5 <= len(dss) <= 7)  # at least five - 1 top, two on top level, 1 in each
    for ds in dss:
        assert_repo_status(ds, annex=None)  # some of them are annex but we just don't check
        ok_(len(glob(opj(ds, 'file*'))))
def test_create_test_dataset():
    # rudimentary smoke test
    from datalad.api import create_test_dataset
    with swallow_logs():
        dss = create_test_dataset(spec='2/1-2')
    ok_(4 <= len(dss) <= 6)  # at least four - two on top level, 1 in each
    for ds in dss:
        ok_clean_git(ds, annex=False)  # some of them are annex but we just don't check
        ok_(len(glob(opj(ds, 'file*'))))
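Taken together, the assertions in these tests pin down the spec mini-language: '/'-separated levels, a plain number for an exact count, 'N-M' for a random range drawn per dataset (seeded via seed). A sketch of the count arithmetic; reading an open lower bound like '-2' (used in the benchmarks below) as 0..N is an assumption, and the oldest variant above apparently predates counting the enclosing top-level dataset:

def expected_counts(spec):
    # Mirrors the arithmetic asserted in the tests:
    #   '1'     -> 1 top + 1            = 2 datasets
    #   '1/1'   -> 1 top + 1 + 1        = 3 datasets
    #   '2/1-2' -> 1 top + 2 + 2*(1..2) = 5..7 datasets
    lo_total = hi_total = 1    # the top-level dataset itself
    lo_level = hi_level = 1    # datasets on the previous level
    for level in spec.split('/'):
        lo, _, hi = level.partition('-')
        lo_n = int(lo) if lo else 0  # '-2' read as 0..2 (assumption)
        hi_n = int(hi) if hi else lo_n
        lo_level *= lo_n
        hi_level *= hi_n
        lo_total += lo_level
        hi_total += hi_level
    return lo_total, hi_total

assert expected_counts('1') == (2, 2)
assert expected_counts('1/1') == (3, 3)
assert expected_counts('2/1-2') == (5, 7)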
def test_hierarchy(topdir):
    # GH 1178
    from datalad.api import create_test_dataset
    with swallow_logs(), swallow_outputs():
        dss = create_test_dataset(topdir, spec='1/1')

    eq_(len(dss), 3)
    eq_(dss[0], topdir)
    for ids, ds in enumerate(dss):
        ok_clean_git(ds, annex=False)
        # each one but the last should have 2 commits -- one for the file
        # and another for the sub-dataset
        repo = GitRepo(ds)
        eq_(len(list(repo.get_branch_commits())), 1 + int(ids < 2))
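The commit counts can be cross-checked without DataLad's GitRepo wrapper; a hedged sketch using plain git on the returned paths (assumes git on PATH):

import subprocess

def commit_count(repo_path):
    # Commits reachable from HEAD, like the get_branch_commits()
    # length check above.
    out = subprocess.check_output(
        ['git', '-C', repo_path, 'rev-list', '--count', 'HEAD'])
    return int(out.decode().strip())

# For spec='1/1': the top and middle datasets carry 2 commits each (one
# for the file, one registering the subdataset), the leaf only 1.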
Example #10
def setup_cache(self):
    ds_path = create_test_dataset(self.dsname, spec='2/-2/-2', seed=0)[0]
    self.log("Setup cache ds path %s. CWD: %s", ds_path, getpwd())
    # Will store into a tarfile since otherwise install -r is way too slow
    # to be invoked for every benchmark
    # Store full path since apparently setup is not run in that directory
    self.tarfile = op.realpath(SampleSuperDatasetBenchmarks.tarfile)
    with tarfile.open(self.tarfile, "w") as tar:
        # F.CK -- Python tarfile can't later extract those because key dirs are
        # read-only.  For now just a workaround - make it all writeable
        from datalad.utils import rotree
        rotree(self.dsname, ro=False, chmod_files=False)
        tar.add(self.dsname, recursive=True)
    rmtree(self.dsname)
Example #12
    def setup_cache(self):
        # creating in CWD so things get removed when ASV is done
        ds_path = create_test_dataset("testds1", spec='2/-2/-2', seed=0)[0]
        # Will store into a tarfile since otherwise install -r is way too slow
        # to be invoked for every benchmark
        tarfile_path = opj(osp.dirname(ds_path), 'testds1.tar')
        with tarfile.open(tarfile_path, "w") as tar:
            # F.CK -- Python tarfile can't later extract those because key dirs are
            # read-only.  For now just a workaround - make it all writeable
            from datalad.utils import rotree
            rotree('testds1', ro=False, chmod_files=False)
            tar.add('testds1', recursive=True)
        rmtree('testds1')

        return tarfile_path
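Per asv's setup_cache convention, the method runs once and its return value is passed back to setup and the benchmark methods; a hypothetical counterpart that restores the tree for each run:

import tarfile

def setup(self, tarfile_path):
    # Hypothetical: unpack the cached dataset tree before each
    # benchmark instead of regenerating it with create_test_dataset.
    with tarfile.open(tarfile_path) as tar:
        tar.extractall()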
def test_hierarchy(topdir):
    # GH 1178
    from datalad.api import create_test_dataset
    with swallow_logs(), swallow_outputs():
        dss = create_test_dataset(topdir, spec='1/1')

    eq_(len(dss), 3)
    eq_(dss[0], topdir)
    for ids, ds in enumerate(dss):
        assert_repo_status(ds, annex=False)
        # each one but the last should have 2 commits -- one for the file
        # and another for the sub-dataset
        repo = repo_from_path(ds)
        if not hasattr(repo,
                       'is_managed_branch') or not repo.is_managed_branch():
            eq_(len(list(repo.get_branch_commits_())), 1 + int(ids < 2))
Example #16
def time_create_test_dataset1(self):
    create_test_dataset(spec='1', seed=0)
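asv times any method named time_*; a rough manual equivalent using timeit (asv adds repetition, calibration, and environment management):

import timeit

t = timeit.timeit(
    "create_test_dataset(spec='1', seed=0)",
    setup="from datalad.api import create_test_dataset",
    number=1)
print("one run: %.2fs" % t)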
def test_create_test_dataset_new_relpath(topdir):
    from datalad.api import create_test_dataset
    with swallow_logs(), chpwd(topdir):
        dss = create_test_dataset('testds', spec='1')
    eq_(len(dss), 1)
Example #18
def time_create_test_dataset2x2(self):
    self.remove_paths.extend(create_test_dataset(spec='2/2', seed=0))
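The remove_paths bookkeeping implies a cleanup step that asv runs after the benchmark; a hypothetical teardown sketch (datalad.utils.rmtree also copes with read-only annexed files):

def teardown(self):
    # Hypothetical cleanup matching the remove_paths list above.
    from datalad.utils import rmtree
    while self.remove_paths:
        rmtree(self.remove_paths.pop())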
Example #19
def time_create_test_dataset2x2(self):
    create_test_dataset(spec='2/2', seed=0)