Example #1
def test_resolve_path(path):
    if str(Path(path).resolve()) != path:
        raise SkipTest("Test assumptions require non-symlinked parent paths")
    # initially ran into this on OSX: https://github.com/datalad/datalad/issues/2406
    opath = op.join(path, "origin")
    os.makedirs(opath)
    if not on_windows:
        lpath = op.join(path, "linked")
        os.symlink('origin', lpath)

    ds_global = Dataset(path)
    # path resolution of absolute paths is not influenced by symlinks
    # ignore the linked path on windows, it is not a symlink in the POSIX sense
    for d in (opath, ) if on_windows else (opath, lpath):
        ds_local = Dataset(d)
        # no symlink resolution
        eq_(str(resolve_path(d)), d)
        # list comes out as a list
        eq_(resolve_path([d]), [Path(d)])
        # multiple OK
        eq_(resolve_path([d, d]), [Path(d), Path(d)])

        with chpwd(d):
            # be aware: .cwd() reports the current directory, but with symlinks resolved
            eq_(str(resolve_path(d).cwd()), opath)
            # using pathlib's `resolve()` will resolve any
            # symlinks
            # also resolve `opath`, as on old windows systems the path might
            # come in crippled (e.g. C:\Users\MIKE~1/...)
            # and the comparison would fail for no good reason
            eq_(resolve_path('.').resolve(), ut.Path(opath).resolve())
            # no norming, but absolute paths, without resolving links
            eq_(resolve_path('.'), ut.Path(d))
            eq_(str(resolve_path('.')), d)

            # there is no concept of an "explicit" relative path anymore
            # relative is relative, regardless of the specific syntax
            eq_(resolve_path(op.join(os.curdir, 'bu'), ds=ds_global),
                ds_global.pathobj / 'bu')
            # there is no full normpath-ing or other funky resolution of
            # parent directory back-reference
            eq_(str(resolve_path(op.join(os.pardir, 'bu'), ds=ds_global)),
                op.join(ds_global.path, os.pardir, 'bu'))

        # resolve against a dataset given as a path/str
        # (cmdline input scenario)
        eq_(resolve_path('bu', ds=ds_local.path), Path.cwd() / 'bu')
        eq_(resolve_path('bu', ds=ds_global.path), Path.cwd() / 'bu')
        # resolve against a dataset given as a dataset instance
        # (object method scenario)
        eq_(resolve_path('bu', ds=ds_local), ds_local.pathobj / 'bu')
        eq_(resolve_path('bu', ds=ds_global), ds_global.pathobj / 'bu')
        # not being inside a dataset doesn't change the resolution result
        eq_(resolve_path(op.join(os.curdir, 'bu'), ds=ds_global),
            ds_global.pathobj / 'bu')
        eq_(str(resolve_path(op.join(os.pardir, 'bu'), ds=ds_global)),
            op.join(ds_global.path, os.pardir, 'bu'))
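
A side note on the pathlib behavior this test leans on: Path.cwd() (backed by os.getcwd()) reports the working directory with symlinks already resolved on POSIX, while a Path built from a string keeps the literal text until resolve() is called. A minimal stand-alone sketch using only the standard library (the temporary-directory layout is invented for illustration):

import os
import tempfile
from pathlib import Path

base = tempfile.mkdtemp()
origin = os.path.join(base, 'origin')
linked = os.path.join(base, 'linked')
os.makedirs(origin)
os.symlink('origin', linked)  # relative symlink, POSIX only

os.chdir(linked)
# a Path object keeps the literal, unresolved text
print(Path(linked))
# resolve() follows the symlink down to 'origin'
print(Path(linked).resolve())
# Path.cwd() already comes back symlink-resolved on POSIX
print(Path.cwd())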
Example #2
def _describe_system():
    import platform as pl
    from datalad import get_encoding_info
    from datalad.utils import get_linux_distribution
    try:
        dist = get_linux_distribution()
    except Exception as exc:
        ce = CapturedException(exc)
        lgr.warning("Failed to get distribution information: %s", ce)
        dist = tuple()

    return {
        'type': os.name,
        'name': pl.system(),
        'release': pl.release(),
        'version': pl.version(),
        'distribution': ' '.join([
            _t2s(dist),
            _t2s(pl.mac_ver()),
            _t2s(pl.win32_ver()),
        ]).rstrip(),
        'max_path_length': get_max_path_length(getpwd()),
        'encoding': get_encoding_info(),
        'filesystem': {
            label: _get_fs_type(label, location)
            for label, location in [('CWD', Path.cwd()),
                                    ('TMP', Path(tempfile.gettempdir())),
                                    ('HOME', Path.home())]
        },
    }
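
_get_fs_type() is datalad-internal, but the probing pattern generalizes. A rough stdlib-only stand-in that inspects the same three locations (reporting disk usage instead of the filesystem type, which the standard library does not expose portably):

import shutil
import tempfile
from pathlib import Path

for label, location in [('CWD', Path.cwd()),
                        ('TMP', Path(tempfile.gettempdir())),
                        ('HOME', Path.home())]:
    usage = shutil.disk_usage(location)  # total/used/free in bytes
    print(f'{label}: {location} ({usage.free} bytes free)')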
Example #3
def _uninstall_dataset(ds, check, has_super, **kwargs):
    cwd = Path.cwd()
    if ds.pathobj == cwd or ds.pathobj in cwd.parents:
        yield get_status_dict(
            status='error',
            ds=ds,
            message='refusing to uninstall a dataset at or above the '
            'current working directory',
            **kwargs)
        return
    if check and ds.is_installed():
        # if the checks are on, we need to make sure to exit this function
        # whenever any drop failed, because we cannot rely on the error
        # to actually cause a stop in the calling code
        bad_things_happened = False
        for r in _drop_files(ds,
                             op.curdir,
                             check=True,
                             noannex_iserror=False,
                             **kwargs):
            yield r
            if r['action'] == 'drop' and \
                    not r.get('status', None) in ('ok', 'notneeded'):
                bad_things_happened = True
        if bad_things_happened:
            # error reporting already happened, we can just stop here
            return

    # TODO: uninstall of a subdataset that has a local URL
    #       (e.g. ./anything) cannot be undone; decide how, and
    #       whether, to check for that
    # TODO: check that the relevant branches are pushed to a remote
    if ds.subdatasets(fulfilled=True):
        yield get_status_dict(
            status='error',
            ds=ds,
            message=(
                'dataset %s to be uninstalled has present subdatasets, '
                'forgot --recursive?',
                ds),
            **kwargs)
        return
    # Close any process etc. possibly associated with the underlying repo.
    # Otherwise rmtree could fail to remove it, e.g. under NFS, where files
    # still opened by such processes leave .nfs00000xxxx files behind,
    # preventing rmdir from working within rmtree
    ds.close()
    if ds.is_installed():
        rmtree(ds.path)
    if has_super and not op.exists(ds.path):
        # recreate an empty mountpoint to make Git happier
        os.makedirs(ds.path)
    # invalidate loaded ConfigManager:
    ds._cfg = None
    yield get_status_dict(status='ok', ds=ds, **kwargs)
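
The safety guard at the top of this function relies on PurePath.parents supporting membership tests. A quick, self-contained illustration of that check (the paths are invented):

from pathlib import Path

cwd = Path('/home/me/ds/sub/dir')
ds_path = Path('/home/me/ds')

# True: cwd is at or below the dataset, so uninstalling it would pull
# the rug out from under the current process
print(ds_path == cwd or ds_path in cwd.parents)

# False: an unrelated sibling is not among cwd's ancestors
print(Path('/home/me/other') in cwd.parents)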
Example #4
def check_create_path_semantics(cwd, create_ds, path_arg, base_path,
                                other_path):
    ds = Dataset(base_path).create()
    os.makedirs(op.join(ds.path, 'some'))
    target_path = ds.pathobj / "some" / "what" / "deeper"
    with chpwd(other_path if cwd == 'elsewhere'
               else base_path if cwd == 'parentds'
               else str(ds.pathobj / 'some') if cwd == 'subdir'
               else str(Path.cwd())):
        subds = create(
            dataset=ds.path if create_ds == 'abspath'
            else str(ds.pathobj.relative_to(cwd)) if create_ds == 'relpath'
            else ds if create_ds == 'instance'
            else create_ds,
            path=str(target_path) if path_arg == 'abspath'
            else str(target_path.relative_to(ds.pathobj)) if path_arg == 'relpath'
            else op.join('what', 'deeper') if path_arg == 'subdir_relpath'
            else path_arg)
        eq_(subds.pathobj, target_path)
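
The 'relpath' branches above hinge on PurePath.relative_to(), which only succeeds when its argument is an ancestor of the path. A small illustration with invented paths:

from pathlib import Path

base = Path('/tmp/ds')
target = base / 'some' / 'what' / 'deeper'

print(target.relative_to(base))  # some/what/deeper
try:
    target.relative_to('/tmp/elsewhere')
except ValueError as exc:
    print(exc)  # target is not in the subpath of /tmp/elsewhere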
Example #5
    def _flyweight_preproc_path(cls, path):
        """Custom handling for a few special abbreviations for datasets"""
        path_ = path
        if path in ('^', '^.'):
            dsroot = get_dataset_root(curdir)
            if dsroot is None:
                raise NoDatasetFound('No dataset contains path: {}'.format(
                    str(Path.cwd())))
            if path == '^':
                # get the topmost dataset from the current location. Note that
                # 'zsh' might have its own ideas on what to do with ^, so better
                # use it as -d^
                path_ = Dataset(dsroot).get_superdataset(topmost=True).path
            elif path == '^.':
                # the dataset containing the current directory
                path_ = dsroot
        elif path == '///':
            # TODO: logic/UI on installing a default dataset could move here
            # from search?
            path_ = cfg.obtain('datalad.locations.default-dataset')
        if path != path_:
            lgr.debug("Resolved dataset alias %r to path %r", path, path_)
        return path_
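
get_dataset_root() is again datalad-internal; conceptually it walks upward from the current directory until it finds a dataset marker. A hypothetical stand-in (the '.datalad' marker directory is an assumption made for illustration):

from pathlib import Path

def find_dataset_root(start=None):
    """Walk upward from `start` (default: CWD) to the first directory
    containing a '.datalad' marker; return None if there is none."""
    p = Path(start) if start else Path.cwd()
    for candidate in (p, *p.parents):
        if (candidate / '.datalad').is_dir():
            return candidate
    return None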
Example #6
    def __call__(*,
                 dataset=None,
                 sensitive=None,
                 sections=None,
                 flavor="full",
                 decor=None,
                 clipboard=None):
        from datalad.distribution.dataset import require_dataset
        from datalad.support.exceptions import NoDatasetFound
        from datalad.interface.results import get_status_dict

        ds = None
        try:
            ds = require_dataset(dataset,
                                 check_installed=False,
                                 purpose='report')
        except NoDatasetFound:
            # failure is already logged
            pass
        if ds and not ds.is_installed():
            # warn that the dataset is bogus
            yield dict(
                action='wtf',
                path=ds.path,
                status='impossible',
                message=('No dataset found at %s. Reporting on the dataset is '
                         'not attempted.', ds.path),
                logger=lgr)
            # we don't deal with absent datasets
            ds = None
        if sensitive:
            if ds is None:
                from datalad import cfg
            else:
                cfg = ds.config
        else:
            cfg = None

        from datalad.ui import ui
        from datalad.support.external_versions import external_versions

        infos = OrderedDict()
        res = get_status_dict(
            action='wtf',
            path=ds.path if ds else ensure_unicode(op.abspath(op.curdir)),
            type='dataset' if ds else 'directory',
            status='ok',
            logger=lgr,
            decor=decor,
            infos=infos,
            flavor=flavor,
        )

        # Define section callables which require variables;
        # copy, so there is no side effect on the module-level original
        section_callables = SECTION_CALLABLES.copy()
        section_callables['location'] = partial(_describe_location, res)
        section_callables['configuration'] = \
            partial(_describe_configuration, cfg, sensitive)
        if ds:
            section_callables['dataset'] = \
                partial(_describe_dataset, ds, sensitive)
        else:
            section_callables.pop('dataset')
        assert all(section_callables.values())  # check if none was missed

        asked_for_all_sections = sections is not None and any(
            s == '*' for s in sections)
        if sections is None or asked_for_all_sections:
            if flavor == 'full' or asked_for_all_sections:
                sections = sorted(list(section_callables))
            elif flavor == 'short':
                sections = ['datalad', 'dependencies']
            else:
                raise ValueError(flavor)

        for s in sections:
            try:
                infos[s] = section_callables[s]()
            except KeyError:
                yield get_status_dict(
                    action='wtf',
                    path=Path.cwd(),
                    status='impossible',
                    message=('Requested section <%s> was not found among the '
                             'available infos. Skipping report.', s),
                    logger=lgr)
        if clipboard:
            external_versions.check(
                'pyperclip', msg="It is required to use the clipboard")
            import pyperclip
            report = _render_report(res)
            pyperclip.copy(report)
            ui.message("WTF information of length %s copied to clipboard" %
                       len(report))
        yield res
        return
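
The section_callables pattern above, binding per-call state into a copied registry with functools.partial, reduces to a few lines. A stripped-down sketch with invented names:

from functools import partial

def describe_location(res):
    return {'path': res['path']}

SECTION_CALLABLES = {'location': None}

def report(res):
    # copy, so the module-level registry is never mutated
    section_callables = SECTION_CALLABLES.copy()
    section_callables['location'] = partial(describe_location, res)
    return {name: fn() for name, fn in section_callables.items()}

print(report({'path': '/tmp/somewhere'}))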