Example #1
def test_magic_number():
    # we hard code the magic SHA1 that represents the state of a Git repo
    # prior to the first commit -- used to diff from scratch to a specific
    # commit
    # given the level of dark magic, we better test whether this stays
    # constant across Git versions (it should!)
    out, err = GitRunner().run('git hash-object -t tree /dev/null')
    eq_(out.strip(), PRE_INIT_COMMIT_SHA)
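
For reference, the constant under test is the well-known SHA-1 of Git's empty tree. A minimal standalone sketch of the same check (the constant name below is ours; the original uses PRE_INIT_COMMIT_SHA):

from datalad.cmd import GitRunner

# SHA-1 of Git's empty tree -- stable across Git versions
EMPTY_TREE_SHA = '4b825dc642cb6eb9a060e54bf8d69288fbee4904'

out, err = GitRunner().run('git hash-object -t tree /dev/null')
assert out.strip() == EMPTY_TREE_SHA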
Example #2
    def setup(self):
        self.runner = Runner()
        # older versions might not have it
        try:
            from datalad.cmd import GitRunner
            self.git_runner = GitRunner()
        except ImportError:
            pass
Example #3
def _get_untracked_content(dspath, report_untracked, paths=None):
    cmd = [
        'git',
        '--work-tree=.',
        'status',
        '--porcelain',
        # file names NULL terminated
        '-z',
        # we never want to touch submodules, they cannot be untracked
        '--ignore-submodules=all',
        # fully untracked dirs as such, the rest as files
        '--untracked={}'.format(report_untracked)
    ]
    try:
        stdout, stderr = GitRunner(cwd=dspath).run(cmd,
                                                   log_stderr=True,
                                                   log_stdout=True,
                                                   log_online=False,
                                                   expect_stderr=False,
                                                   shell=False,
                                                   expect_fail=True)
    except CommandError as e:
        # TODO should we catch any and handle them in here?
        raise e

    if paths:
        paths = [r['path'] for r in paths]
        if len(paths) == 1 and paths[0] == dspath:
            # nothing to filter
            paths = None

    from datalad.utils import assure_unicode

    for line in stdout.split('\0'):
        if not line:
            continue
        line = assure_unicode(line)
        if not line.startswith('?? '):
            # nothing untracked, ignore, task of `diff`
            continue
        apath = opj(
            dspath,
            # strip state marker
            line[3:])
        norm_apath = normpath(apath)
        if paths and not any(norm_apath == p or path_startswith(apath, p)
                             for p in paths):
            # we got a whitelist for paths, don't report any other
            continue
        ap = dict(path=norm_apath,
                  parentds=dspath,
                  state='untracked',
                  type='directory' if isdir(apath) else 'file')
        yield ap
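
A hedged usage sketch for the generator above (the dataset path and the report mode are illustrative assumptions, not taken from the example):

# hypothetical driver: print every untracked path in a dataset
for rec in _get_untracked_content('/tmp/some-dataset', 'normal'):
    print(rec['state'], rec['type'], rec['path'])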
Example #4
def _get_untracked_content(dspath, report_untracked, paths=None):
    cmd = ['git', '--work-tree=.', 'status', '--porcelain',
           # file names NULL terminated
           '-z',
           # we never want to touch submodules, they cannot be untracked
           '--ignore-submodules=all',
           # fully untracked dirs as such, the rest as files
           '--untracked={}'.format(report_untracked)]
    try:
        stdout, stderr = GitRunner(cwd=dspath).run(
            cmd,
            log_stderr=True,
            log_stdout=True,
            log_online=False,
            expect_stderr=False,
            shell=False,
            expect_fail=True)
    except CommandError as e:
        # TODO should we catch any and handle them in here?
        raise e

    if paths:
        paths = [r['path'] for r in paths]
        if len(paths) == 1 and paths[0] == dspath:
            # nothing to filter
            paths = None

    from datalad.utils import assure_unicode

    for line in stdout.split('\0'):
        if not line:
            continue
        line = assure_unicode(line)
        if not line.startswith('?? '):
            # nothing untracked, ignore, task of `diff`
            continue
        apath = opj(
            dspath,
            # strip state marker
            line[3:])
        norm_apath = normpath(apath)
        if paths and not any(norm_apath == p or path_startswith(apath, p)
                             for p in paths):
            # we got a whitelist for paths, don't report any other
            continue
        ap = dict(
            path=norm_apath,
            parentds=dspath,
            state='untracked',
            type='directory' if isdir(apath) else 'file')
        yield ap
Example #5
    def __init__(self, dataset=None, dataset_only=False, overrides=None):
        # store in a simple dict
        # no subclassing, because we want to be largely read-only, and implement
        # config writing separately
        self._store = {}
        self._cfgfiles = set()
        self._cfgmtimes = None
        # public dict to store variables that always override any setting
        # read from a file
        # `hasattr()` is needed because `datalad.cfg` is generated upon first module
        # import, hence when this code runs first, there cannot be any config manager
        # to inherit from
        self.overrides = datalad.cfg.overrides.copy() if hasattr(
            datalad, 'cfg') else {}
        if overrides is not None:
            self.overrides.update(overrides)
        if dataset is None:
            self._dataset_path = None
            self._dataset_cfgfname = None
            self._repo_cfgfname = None
        else:
            self._dataset_path = dataset.path
            self._dataset_cfgfname = opj(self._dataset_path, '.datalad',
                                         'config')
            if not dataset_only:
                self._repo_cfgfname = opj(self._dataset_path, '.git', 'config')
        self._dataset_only = dataset_only
        # Since configs could contain sensitive information, to prevent
        # any "facilitated" leakage -- just disable logging of outputs for
        # this runner
        run_kwargs = dict(log_outputs=False)
        if dataset is not None:
            # make sure we run the git config calls in the dataset
            # to pick up the right config files
            run_kwargs['cwd'] = dataset.path
        self._runner = GitRunner(**run_kwargs)
        try:
            self._gitconfig_has_showorgin = \
                LooseVersion(get_git_version(self._runner)) >= '2.8.0'
        except Exception:
            # no git, or something else broken -- assume git is present anyway
            # so as not to delay this, but assume it is old
            self._gitconfig_has_showorgin = False

        self.reload(force=True)
Example #6
class runner(SuprocBenchmarks):
    """Some rudimentary tests to see if there is no major slowdowns from Runner
    """
    def setup(self):
        from datalad.cmd import Runner
        self.runner = Runner()
        # older versions might not have it
        try:
            from datalad.cmd import GitRunner
            self.git_runner = GitRunner()
        except ImportError:
            pass

    def time_echo(self):
        self.runner.run("echo")

    def time_echo_gitrunner(self):
        self.git_runner.run("echo")
Example #7
def _parse_git_submodules(dspath, recursive):
    """All known ones with some properties"""
    if not exists(opj(dspath, ".gitmodules")):
        # easy way out. if there is no .gitmodules file
        # we cannot have (functional) subdatasets
        return

    # this will not work in direct mode, need better way #1422
    cmd = ['git', '--work-tree=.', 'submodule', 'status']
    if recursive:
        cmd.append('--recursive')

    # need to go rogue  and cannot use proper helper in GitRepo
    # as they also pull in all of GitPython's magic
    try:
        stdout, stderr = GitRunner(cwd=dspath).run(
            cmd,
            log_stderr=True,
            log_stdout=True,
            # not sure why exactly, but log_online has to be false!
            log_online=False,
            expect_stderr=False,
            shell=False,
            # we don't want it to scream on stdout
            expect_fail=True)
    except CommandError as e:
        raise InvalidGitRepositoryError(exc_str(e))

    for line in stdout.split('\n'):
        if not line:
            continue
        sm = {}
        sm['state'] = status_map[line[0]]
        props = submodule_full_props.match(line[1:])
        if props:
            sm['revision'] = props.group(1)
            sm['path'] = opj(dspath, props.group(2))
            sm['revision_descr'] = props.group(3)
        else:
            props = submodule_nodescribe_props.match(line[1:])
            sm['revision'] = props.group(1)
            sm['path'] = opj(dspath, props.group(2))
        yield sm
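
For orientation, a sketch of the `git submodule status` lines this parser consumes (values illustrative): the leading character is the state marker looked up in status_map, the remainder is matched by the submodule_*_props regexes.

#  <sha1> sub/ds (0.1-3-gdeadbee)   in sync; trailing describe-output present
# -<sha1> other/ds                  not initialized (no describe-output)
# +<sha1> third/ds (v2.0)           checked-out commit differs from the index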
Example #8
class RunnerSuite(SuprocBenchmarks):
    """Some rudimentary tests to see if there is no major slowdowns from Runner
    """

    def setup(self):
        from datalad.cmd import Runner
        self.runner = Runner()
        # older versions might not have it
        try:
            from datalad.cmd import GitRunner
            self.git_runner = GitRunner()
        except ImportError:
            pass

    def time_echo(self):
        self.runner.run("echo")

    def time_echo_gitrunner(self):
        self.git_runner.run("echo")
Example #9
def test_publish_no_fetch_refspec_configured(path):
    from datalad.cmd import GitRunner

    path = Path(path)
    GitRunner(cwd=str(path)).run(["git", "init", "--bare", "empty-remote"])
    ds = Dataset(path / "ds").create()
    ds.repo.add_remote("origin", str(ds.pathobj.parent / "empty-remote"))
    # Mimic a situation that can happen with an LFS remote. See gh-4199.
    ds.repo.config.unset("remote.origin.fetch", where="local")
    (ds.repo.pathobj / "foo").write_text("a")
    ds.save()
    ds.publish(to="origin")
Example #10
File: wtf.py  Project: hanke/datalad
def _describe_annex():
    from datalad.cmd import GitRunner

    runner = GitRunner()
    try:
        out, err = runner.run(['git', 'annex', 'version'])
    except CommandError as e:
        return dict(
            version='not available',
            message=exc_str(e),
        )
    info = {}
    for line in out.split(os.linesep):
        key = line.split(':')[0]
        if not key:
            continue
        value = line[len(key) + 2:].strip()
        key = key.replace('git-annex ', '')
        if key.endswith('s'):
            value = value.split()
        info[key] = value
    return info
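
An illustrative input/output pair for the parser above (field names vary across git-annex versions; these lines are assumptions for demonstration):

# given `git annex version` output such as
#   git-annex version: 8.20200226
#   key/value backends: SHA256E SHA256 WORM URL
# the loop yields
#   {'version': '8.20200226',
#    'key/value backends': ['SHA256E', 'SHA256', 'WORM', 'URL']}
# keys ending in 's' are split into lists; the 'git-annex ' prefix is stripped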
Example #11
def _parse_git_submodules(dspath):
    """All known ones with some properties"""
    if not exists(opj(dspath, ".gitmodules")):
        # easy way out. if there is no .gitmodules file
        # we cannot have (functional) subdatasets
        return

    # this will not work in direct mode, need better way #1422
    cmd = ['git', 'ls-files', '--stage', '-z']

    # need to go rogue  and cannot use proper helper in GitRepo
    # as they also pull in all of GitPython's magic
    try:
        stdout, stderr = GitRunner(cwd=dspath).run(
            cmd,
            log_stderr=True,
            log_stdout=True,
            # not sure why exactly, but log_online has to be false!
            log_online=False,
            expect_stderr=False,
            shell=False,
            # we don't want it to scream on stdout
            expect_fail=True)
    except CommandError as e:
        raise InvalidGitRepositoryError(exc_str(e))

    for line in stdout.split('\0'):
        if not line or not line.startswith('160000'):
            continue
        sm = {}
        props = submodule_full_props.match(line)
        sm['revision'] = props.group(2)
        subpath = _path_(dspath, props.group(4))
        sm['path'] = subpath
        if not exists(subpath) or not GitRepo.is_valid_repo(subpath):
            sm['state'] = 'absent'
        yield sm
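
The '160000' filter above matches Git's gitlink mode. A sketch of the `git ls-files --stage -z` records involved (values illustrative):

# 160000 <40-hex sha> 0\tsub/ds      a submodule (gitlink) entry -- kept
# 100644 <40-hex sha> 0\tREADME.md   a regular file -- skipped by the filter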
Example #12
def test_gitannex(osf_id, dspath):
    from datalad.cmd import (GitRunner, WitlessRunner)
    dspath = Path(dspath)

    ds = Dataset(dspath).create()

    # add remote parameters here
    init_remote_opts = ["project={}".format(osf_id)]

    # add special remote
    init_opts = common_init_opts + init_remote_opts
    ds.repo.init_remote('osfproject', options=init_opts)

    # run git-annex-testremote
    # note, that we don't want to capture output. If something goes wrong we
    # want to see it in test build's output log.
    WitlessRunner(cwd=dspath, env=GitRunner.get_git_environ_adjusted()).run(
        ['git', 'annex', 'testremote', 'osfproject', "--fast"])
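
The env= argument above is what makes the subprocess see the same (possibly bundled) git that datalad itself uses. The same pattern for an arbitrary git call, as a sketch under that assumption:

from datalad.cmd import GitRunner, WitlessRunner

# run plain `git version` with datalad's adjusted git environment
WitlessRunner(env=GitRunner.get_git_environ_adjusted()).run(['git', 'version'])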
Example #13
    def __init__(self, dataset=None, dataset_only=False, overrides=None):
        # store in a simple dict
        # no subclassing, because we want to be largely read-only, and implement
        # config writing separately
        self._store = {}
        self._cfgfiles = set()
        self._cfgmtimes = None
        # public dict to store variables that always override any setting
        # read from a file
        # `hasattr()` is needed because `datalad.cfg` is generated upon first module
        # import, hence when this code runs first, there cannot be any config manager
        # to inherit from
        self.overrides = datalad.cfg.overrides.copy() if hasattr(datalad, 'cfg') else {}
        if overrides is not None:
            self.overrides.update(overrides)
        if dataset is None:
            self._dataset_path = None
            self._dataset_cfgfname = None
            self._repo_cfgfname = None
        else:
            self._dataset_path = dataset.path
            self._dataset_cfgfname = opj(self._dataset_path, DATASET_CONFIG_FILE)
            if not dataset_only:
                self._repo_cfgfname = opj(self._dataset_path, '.git', 'config')
        self._dataset_only = dataset_only
        # Since configs could contain sensitive information, to prevent
        # any "facilitated" leakage -- just disable logging of outputs for
        # this runner
        run_kwargs = dict(log_outputs=False)
        if dataset is not None:
            # make sure we run the git config calls in the dataset
            # to pick up the right config files
            run_kwargs['cwd'] = dataset.path
        self._runner = GitRunner(**run_kwargs)
        try:
            self._gitconfig_has_showorgin = \
                LooseVersion(get_git_version(self._runner)) >= '2.8.0'
        except Exception:
            # no git, or something else broken -- assume git is present anyway
            # so as not to delay this, but assume it is old
            self._gitconfig_has_showorgin = False

        self.reload(force=True)
Example #14
class ConfigManager(object):
    """Thin wrapper around `git-config` with support for a dataset configuration.

    The general idea is to have an object that is primarily used to read/query
    configuration options.  Upon creation, current configuration is read via one
    (or max two, in the case of the presence of dataset-specific configuration)
    calls to `git config`.  If this class is initialized with a Dataset
    instance, it supports reading and writing configuration from
    ``.datalad/config`` inside a dataset too. This file is committed to Git and
    hence useful to ship certain configuration items with a dataset.

    The API aims to provide the most significant read-access API of a
    dictionary, the Python ConfigParser, and GitPython's config parser
    implementations.

    This class is presently not capable of efficiently writing multiple
    configuration items at once.  Instead, each modification results in a
    dedicated call to `git config`. This author thinks this is OK, as he
    cannot think of a situation where a large number of items need to be
    written during normal operation. If such need arises, various solutions are
    possible (via GitPython, or an independent writer).

    Each instance carries a public `overrides` attribute. This dictionary
    contains variables that override any setting read from a file. The overrides
    are persistent across reloads, and are not modified by any of the
    manipulation methods, such as `set` or `unset`.

    Any DATALAD_* environment variable is also presented as a configuration
    item. Settings read from environment variables are not stored in any of the
    configuration files, but are read dynamically from the environment at each
    `reload()` call. Their values take precedence over any specification in
    configuration files, and even overrides.

    Parameters
    ----------
    dataset : Dataset, optional
      If provided, all `git config` calls are executed in this dataset's
      directory. Moreover, any modifications are, by default, directed to
      this dataset's configuration file (which will be created on demand)
    dataset_only : bool
      If True, configuration items are only read from a datasets persistent
      configuration file, if any present (the one in ``.datalad/config``, not
      ``.git/config``).
    overrides : dict, optional
      Variable overrides, see general class documentation for details.
    """
    def __init__(self, dataset=None, dataset_only=False, overrides=None):
        # store in a simple dict
        # no subclassing, because we want to be largely read-only, and implement
        # config writing separately
        self._store = {}
        self._cfgfiles = set()
        self._cfgmtimes = None
        # public dict to store variables that always override any setting
        # read from a file
        # `hasattr()` is needed because `datalad.cfg` is generated upon first module
        # import, hence when this code runs first, there cannot be any config manager
        # to inherit from
        self.overrides = datalad.cfg.overrides.copy() if hasattr(
            datalad, 'cfg') else {}
        if overrides is not None:
            self.overrides.update(overrides)
        if dataset is None:
            self._dataset_path = None
            self._dataset_cfgfname = None
            self._repo_cfgfname = None
        else:
            self._dataset_path = dataset.path
            self._dataset_cfgfname = opj(self._dataset_path, '.datalad',
                                         'config')
            if not dataset_only:
                self._repo_cfgfname = opj(self._dataset_path, '.git', 'config')
        self._dataset_only = dataset_only
        # Since configs could contain sensitive information, to prevent
        # any "facilitated" leakage -- just disable logging of outputs for
        # this runner
        run_kwargs = dict(log_outputs=False)
        if dataset is not None:
            # make sure we run the git config calls in the dataset
            # to pick up the right config files
            run_kwargs['cwd'] = dataset.path
        self._runner = GitRunner(**run_kwargs)
        try:
            self._gitconfig_has_showorgin = \
                LooseVersion(get_git_version(self._runner)) >= '2.8.0'
        except Exception:
            # no git, or something else broken -- assume git is present anyway
            # so as not to delay this, but assume it is old
            self._gitconfig_has_showorgin = False

        self.reload(force=True)

    def reload(self, force=False):
        """Reload all configuration items from the configured sources

        If `force` is False, all files the configuration was previously read
        from are checked for differences in their modification times. If no
        difference is found for any file, no reload is performed. This
        mechanism will not detect newly created global configuration files;
        use `force` in that case.
        """
        if not force and self._cfgmtimes:
            # we aren't forcing and we have read files before
            # check if any file we read from has changed
            current_time = time()
            curmtimes = {c: getmtime(c) for c in self._cfgfiles if exists(c)}
            if all(curmtimes[c] == self._cfgmtimes.get(c) and
                   # protect against low-res mtimes (FAT32 has 2s, EXT3 has 1s!)
                   # if mtime age is less than worst resolution assume modified
                   (current_time - curmtimes[c]) > 2.0 for c in curmtimes):
                # all the same, nothing to do, except for
                # superimpose overrides, could have changed in the meantime
                self._store.update(self.overrides)
                # reread env, is quick
                self._store = _parse_env(self._store)
                return

        self._store = {}
        # 2-step strategy:
        #   - load datalad dataset config from dataset
        #   - load git config from all supported by git sources
        # in doing so we always stay compatible with where Git gets its
        # config from, but also allow to override persistent information
        # from dataset locally or globally
        run_args = ['-z', '-l']
        if self._gitconfig_has_showorgin:
            run_args.append('--show-origin')

        if self._dataset_cfgfname:
            if exists(self._dataset_cfgfname):
                stdout, stderr = self._run(run_args +
                                           ['--file', self._dataset_cfgfname],
                                           log_stderr=True)
                # overwrite existing value, do not amend to get multi-line
                # values
                self._store, self._cfgfiles = _parse_gitconfig_dump(
                    stdout, self._store, self._cfgfiles, replace=False)

        if self._dataset_only:
            # superimpose overrides
            self._store.update(self.overrides)
            return

        stdout, stderr = self._run(run_args, log_stderr=True)
        self._store, self._cfgfiles = _parse_gitconfig_dump(stdout,
                                                            self._store,
                                                            self._cfgfiles,
                                                            replace=True)

        # always monitor the dataset cfg location, we know where it is in all cases
        if self._dataset_cfgfname:
            self._cfgfiles.add(self._dataset_cfgfname)
            self._cfgfiles.add(self._repo_cfgfname)
        self._cfgmtimes = {c: getmtime(c) for c in self._cfgfiles if exists(c)}

        # superimpose overrides
        self._store.update(self.overrides)

        # override with environment variables
        self._store = _parse_env(self._store)

    @_where_reload
    def obtain(self,
               var,
               default=None,
               dialog_type=None,
               valtype=None,
               store=False,
               where=None,
               reload=True,
               **kwargs):
        """
        Convenience method to obtain settings interactively, if needed

        A UI will be used to ask for user input in interactive sessions.
        Questions to ask, and additional explanations can be passed directly
        as arguments, or retrieved from a list of pre-configured items.

        Additionally, this method allows for type conversion and storage
        of obtained settings. Both aspects can also be pre-configured.

        Parameters
        ----------
        var : str
          Variable name including any section like `git config` expects them,
          e.g. 'core.editor'
        default : any type
          In interactive sessions and if `store` is True, this default value
          will be presented to the user for confirmation (or modification).
          In all other cases, this value will be silently assigned unless
          there is an existing configuration setting.
        dialog_type : {'question', 'yesno', None}
          Which dialog type to use in interactive sessions. If `None`,
          pre-configured UI options are used.
        store : bool
          Whether to store the obtained value (or default)
        %s
        `**kwargs`
          Additional arguments for the UI function call, such as a question
          `text`.
        """
        # do local import, as this module is imported prominently and could
        # theoretically import all kinds of weird things for type conversion
        from datalad.interface.common_cfg import definitions as cfg_defs
        # fetch what we know about this variable
        cdef = cfg_defs.get(var, {})
        # type conversion setup
        if valtype is None and 'type' in cdef:
            valtype = cdef['type']
        if valtype is None:
            valtype = lambda x: x

        # any default?
        if default is None and 'default' in cdef:
            default = cdef['default']

        _value = None
        if var in self:
            # nothing needs to be obtained, it is all here already
            _value = self[var]
        elif store is False and default is not None:
            # nothing will be stored, and we have a default -> no user confirmation
            # we cannot use logging, because we want to use the config to configure
            # the logging
            #lgr.debug('using default {} for config setting {}'.format(default, var))
            _value = default

        if _value is not None:
            # we got everything we need and can exit early
            try:
                return valtype(_value)
            except Exception as e:
                raise ValueError(
                    "value '{}' of existing configuration for '{}' cannot be "
                    "converted to the desired type '{}' ({})".format(
                        _value, var, valtype, exc_str(e)))

        # now we need to try to obtain something from the user
        from datalad.ui import ui

        # configure UI
        dialog_opts = kwargs
        if dialog_type is None:  # no override
            # check for common knowledge on how to obtain a value
            if 'ui' in cdef:
                dialog_type = cdef['ui'][0]
                # pull standard dialog settings
                dialog_opts = cdef['ui'][1]
                # update with input
                dialog_opts.update(kwargs)

        if (not ui.is_interactive or dialog_type is None) and default is None:
            raise RuntimeError(
                "cannot obtain value for configuration item '{}', "
                "not preconfigured, no default, no UI available".format(var))

        if not hasattr(ui, dialog_type):
            raise ValueError(
                "UI '{}' does not support dialog type '{}'".format(
                    ui, dialog_type))

        # configure storage destination, if needed
        if store:
            if where is None and 'destination' in cdef:
                where = cdef['destination']
            if where is None:
                raise ValueError(
                    "request to store configuration item '{}', but no "
                    "storage destination specified".format(var))

        # obtain via UI
        dialog = getattr(ui, dialog_type)
        _value = dialog(default=default, **dialog_opts)

        if _value is None:
            # we got nothing
            if default is None:
                raise RuntimeError(
                    "could not obtain value for configuration item '{}', "
                    "not preconfigured, no default".format(var))
            # XXX maybe we should return default here, even it was returned
            # from the UI -- if that is even possible

        # execute type conversion before storing to check that we got
        # something that looks like what we want
        try:
            value = valtype(_value)
        except Exception as e:
            raise ValueError(
                "cannot convert user input `{}` to desired type ({})".format(
                    _value, exc_str(e)))
            # XXX we could consider "looping" until we have a value of proper
            # type in case of a user typo...

        if store:
            # store value as it was before any conversion, needs to be str
            # anyway
            # needs string conversion nevertheless, because default could come
            # in as something else
            self.add(var, '{}'.format(_value), where=where, reload=reload)
        return value

    #
    # Compatibility with dict API
    #
    def __len__(self):
        return len(self._store)

    def __getitem__(self, key):
        return self._store.__getitem__(key)

    def __contains__(self, key):
        return self._store.__contains__(key)

    def keys(self):
        """Returns list of configuration item names"""
        return self._store.keys()

    # XXX should this be *args?
    def get(self, key, default=None):
        """D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None."""
        return self._store.get(key, default)

    #
    # Compatibility with ConfigParser API
    #
    def sections(self):
        """Returns a list of the sections available"""
        return list(
            set([cfg_section_regex.match(k).group(1) for k in self._store]))

    def options(self, section):
        """Returns a list of options available in the specified section."""
        opts = []
        for k in self._store:
            sec, opt = cfg_sectionoption_regex.match(k).groups()
            if sec == section:
                opts.append(opt)
        return opts

    def has_section(self, section):
        """Indicates whether a section is present in the configuration"""
        for k in self._store:
            if k.startswith(section):
                return True
        return False

    def has_option(self, section, option):
        """If the given section exists, and contains the given option"""
        for k in self._store:
            sec, opt = cfg_sectionoption_regex.match(k).groups()
            if sec == section and opt == option:
                return True
        return False

    def getint(self, section, option):
        """A convenience method which coerces the option value to an integer"""
        return int(self.get_value(section, option))

    def getbool(self, section, option, default=None):
        """A convenience method which coerces the option value to a bool

        Values "on", "yes", "true" and any int!=0 are considered True
        Values which evaluate to bool False, "off", "no", "false" are considered
        False
        TypeError is raised for other values.
        """
        val = self.get_value(section, option, default=default)
        return anything2bool(val)

    def getfloat(self, section, option):
        """A convenience method which coerces the option value to a float"""
        return float(self.get_value(section, option))

    # this is a hybrid of ConfigParser and dict API
    def items(self, section=None):
        """Return a list of (name, value) pairs for each option

        Optionally limited to a given section.
        """
        if section is None:
            return self._store.items()
        return [(k, v) for k, v in self._store.items()
                if cfg_section_regex.match(k).group(1) == section]

    #
    # Compatibility with GitPython's ConfigParser
    #
    def get_value(self, section, option, default=None):
        """Like `get()`, but with an optional default value

        If the default is not None, the given default value will be returned in
        case the option did not exist. This behavior imitates GitPython's
        config parser.
        """
        try:
            return self['.'.join((section, option))]
        except KeyError as e:
            # this strange dance is needed because gitpython does it this way
            if default is not None:
                return default
            else:
                raise e

    #
    # Modify configuration (proxy respective git-config call)
    #
    @_where_reload
    def _run(self, args, where=None, reload=False, **kwargs):
        """Centralized helper to run "git config" calls

        Parameters
        ----------
        args : list
          Arguments to pass for git config
        %s
        **kwargs
          Keywords arguments for Runner's call
        """
        if where:
            args = self._get_location_args(where) + args
        out = self._runner.run(['git', 'config'] + args, **kwargs)
        if reload:
            self.reload()
        return out

    def _get_location_args(self, where, args=None):
        if args is None:
            args = []
        cfg_labels = ('dataset', 'local', 'global')
        if where not in cfg_labels:
            raise ValueError(
                "unknown configuration label '{}' (not in {})".format(
                    where, cfg_labels))
        if where == 'dataset':
            if not self._dataset_cfgfname:
                raise ValueError(
                    'ConfigManager cannot store configuration to dataset, '
                    'none specified')
            # create an empty config file if none exists, `git config` will
            # fail otherwise
            dscfg_dirname = opj(self._dataset_path, '.datalad')
            if not exists(dscfg_dirname):
                os.makedirs(dscfg_dirname)
            if not exists(self._dataset_cfgfname):
                open(self._dataset_cfgfname, 'w').close()
            args.extend(['--file', self._dataset_cfgfname])
        elif where == 'global':
            args.append('--global')
        elif where == 'local':
            args.append('--local')
        return args

    @_where_reload
    def add(self, var, value, where='dataset', reload=True):
        """Add a configuration variable and value

        Parameters
        ----------
        var : str
          Variable name including any section like `git config` expects them, e.g.
          'core.editor'
        value : str
          Variable value
        %s"""
        self._run(['--add', var, value],
                  where=where,
                  reload=reload,
                  log_stderr=True)

    @_where_reload
    def set(self, var, value, where='dataset', reload=True, force=False):
        """Set a variable to a value.

        In opposition to `add`, this replaces the value of `var` if there is
        one already.

        Parameters
        ----------
        var : str
          Variable name including any section like `git config` expects them, e.g.
          'core.editor'
        value : str
          Variable value
        force: bool
          if set, replaces all occurrences of `var` by a single one with the
          given `value`. Otherwise raise if multiple entries for `var` exist
          already
        %s"""
        from datalad.support.gitrepo import to_options

        self._run(to_options(replace_all=force) + [var, value],
                  where=where,
                  reload=reload,
                  log_stderr=True)

    @_where_reload
    def rename_section(self, old, new, where='dataset', reload=True):
        """Rename a configuration section

        Parameters
        ----------
        old : str
          Name of the section to rename.
        new : str
          Name of the section to rename to.
        %s"""
        self._run(['--rename-section', old, new], where=where, reload=reload)

    @_where_reload
    def remove_section(self, sec, where='dataset', reload=True):
        """Rename a configuration section

        Parameters
        ----------
        sec : str
          Name of the section to remove.
        %s"""
        self._run(['--remove-section', sec], where=where, reload=reload)

    @_where_reload
    def unset(self, var, where='dataset', reload=True):
        """Remove all occurrences of a variable

        Parameters
        ----------
        var : str
          Name of the variable to remove
        %s"""
        # use unset all as it is simpler for now
        self._run(['--unset-all', var], where=where, reload=reload)
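
A hedged usage sketch for the ConfigManager above (the dataset path is an illustrative assumption):

from datalad.distribution.dataset import Dataset

cfg = ConfigManager(dataset=Dataset('/tmp/some-dataset'))  # hypothetical path
editor = cfg.get('core.editor', 'vi')      # dict-style read with a default
cfg.set('datalad.demo.flag', 'yes',        # proxied `git config` write,
        where='dataset')                   # lands in .datalad/config
assert cfg.getbool('datalad', 'demo.flag')  # 'yes' -> True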
Example #15
        if other is self:
            return 0
        raise TypeError("UNKNOWN version is not comparable")


#
# Custom handlers
#
from datalad.cmd import Runner
from datalad.cmd import GitRunner
from datalad.support.exceptions import (
    MissingExternalDependency,
    OutdatedExternalDependency,
)
_runner = Runner()
_git_runner = GitRunner()


def _get_annex_version():
    """Return version of available git-annex"""
    try:
        return _runner.run('git annex version --raw'.split())[0]
    except CommandError:
        # fall back on method that could work with older installations
        out, err = _runner.run(['git', 'annex', 'version'])
        return out.split('\n')[0].split(':')[1].strip()


def _get_git_version():
    """Return version of git we use (might be bundled)"""
    return __get_git_version(_git_runner)
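
The fallback branch in _get_annex_version() parses the human-readable output; an illustrative trace (version string assumed):

# `git annex version --raw` prints just the version, e.g. '8.20200226'.
# Older installations lack --raw, so the fallback takes the first line of
# `git annex version`:
#   'git-annex version: 8.20200226' -> split(':')[1].strip() -> '8.20200226'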
Example #16
try:
    # this will fix the rendering of ANSI escape sequences
    # for colored terminal output on windows
    # it will do nothing on any other platform, hence it
    # is safe to call unconditionally
    import colorama
    colorama.init()
    atexit.register(colorama.deinit)
except ImportError as e:
    pass

# Other imports are interspersed with lgr.debug to ease troubleshooting startup
# delays etc.

# If there is a bundled git, make sure GitPython uses it too:
from datalad.cmd import GitRunner
GitRunner._check_git_path()
if GitRunner._GIT_PATH:
    import os
    os.environ['GIT_PYTHON_GIT_EXECUTABLE'] = \
        os.path.join(GitRunner._GIT_PATH, 'git')

from .config import ConfigManager
cfg = ConfigManager()

from .log import lgr
from datalad.utils import get_encoding_info, get_envvars_info, getpwd

# To analyze/initiate our decision making on what current directory to return
getpwd()

lgr.log(5, "Instantiating ssh manager")
Example #17
class runner(SuprocBenchmarks):
    """Some rudimentary tests to see if there is no major slowdowns from Runner
    """
    def setup(self):
        self.runner = Runner()
        # older versions might not have it
        try:
            from datalad.cmd import GitRunner
            self.git_runner = GitRunner()
        except ImportError:
            pass

    def time_echo(self):
        self.runner.run("echo")

    def time_echo_gitrunner(self):
        self.git_runner.run("echo")

    # Following "track" measures computing overhead comparing to the simplest
    # os.system call on the same command without caring about in/out

    unit = "% overhead"

    def _get_overhead(self, cmd, nrepeats=3, **run_kwargs):
        """Estimate overhead over running command via the simplest os.system
        and to not care about any output
        """
        # asv does not repeat tracking ones I think, so nrepeats
        overheads = []
        for _ in range(nrepeats):
            t0 = time()
            os.system(cmd + " >/dev/null 2>&1")
            t1 = time()
            self.runner.run(cmd, **run_kwargs)
            t2 = time()
            overhead = 100 * ((t2 - t1) / (t1 - t0) - 1.0)
            # print("O :", t1 - t0, t2 - t0, overhead)
            overheads.append(overhead)
        overhead = round(sum(overheads) / len(overheads), 2)
        #overhead = round(min(overheads), 2)
        return overhead

    def track_overhead_echo(self):
        return self._get_overhead("echo")

    # 100ms chosen below as providing some sensible stability for me.
    # at 10ms -- too much variability
    def track_overhead_100ms(self):
        return self._get_overhead("sleep 0.1")

    def track_overhead_heavyout(self):
        # run a busy loop for 100 ms, outputting as much as it can
        return self._get_overhead(heavyout_cmd)

    def track_overhead_heavyout_online_through(self):
        return self._get_overhead(
            heavyout_cmd,
            log_stderr='offline',  # needed, otherwise it would get stuck
            log_online=True)

    def track_overhead_heavyout_online_process(self):
        return self._get_overhead(
            heavyout_cmd,
            log_stdout=lambda s: '',
            log_stderr='offline',  # needed, otherwise it would get stuck
            log_online=True)
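
To make the overhead arithmetic in _get_overhead() concrete, a worked example with assumed timings:

# assume for one repeat:
#   os.system(cmd)     takes t1 - t0 = 0.10 s
#   self.runner.run()  takes t2 - t1 = 0.12 s
# overhead = 100 * ((t2 - t1) / (t1 - t0) - 1.0)
#          = 100 * (0.12 / 0.10 - 1.0) = 20.0   # i.e. 20% slower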
Example #18
class runner(SuprocBenchmarks):
    """Some rudimentary tests to see if there is no major slowdowns from Runner
    """

    def setup(self):
        self.runner = Runner()
        # older versions might not have it
        try:
            from datalad.cmd import GitRunner
            self.git_runner = GitRunner()
        except ImportError:
            pass

    def time_echo(self):
        self.runner.run("echo")

    def time_echo_gitrunner(self):
        self.git_runner.run("echo")

    # Following "track" measures computing overhead comparing to the simplest
    # os.system call on the same command without caring about in/out

    unit = "% overhead"

    def _get_overhead(self, cmd, nrepeats=3, **run_kwargs):
        """Estimate overhead over running command via the simplest os.system
        and to not care about any output
        """
        # asv does not repeat tracking ones I think, so nrepeats
        overheads = []
        for _ in range(nrepeats):
            t0 = time()
            os.system(cmd + " >/dev/null 2>&1")
            t1 = time()
            self.runner.run(cmd, **run_kwargs)
            t2 = time()
            overhead = 100 * ((t2 - t1) / (t1 - t0) - 1.0)
            # print("O :", t1 - t0, t2 - t0, overhead)
            overheads.append(overhead)
        overhead = round(sum(overheads) / len(overheads), 2)
        #overhead = round(min(overheads), 2)
        return overhead

    def track_overhead_echo(self):
        return self._get_overhead("echo")

    # 100ms chosen below as providing some sensible stability for me.
    # at 10ms -- too much variability
    def track_overhead_100ms(self):
        return self._get_overhead("sleep 0.1")

    def track_overhead_heavyout(self):
        # run a busy loop for 100 ms, outputting as much as it can
        return self._get_overhead(heavyout_cmd)

    def track_overhead_heavyout_online_through(self):
        return self._get_overhead(heavyout_cmd,
                                  log_stderr='offline',  # needed, otherwise it would get stuck
                                  log_online=True)

    def track_overhead_heavyout_online_process(self):
        return self._get_overhead(heavyout_cmd,
                                  log_stdout=lambda s: '',
                                  log_stderr='offline',  # needed, otherwise it would get stuck
                                  log_online=True)

    # # Probably not really interesting, and good lord wobbles around 0
    # def track_overhead_heavyout_offline(self):
    #     return self._get_overhead(heavyout_cmd,
    #                               log_stdout='offline',
    #                               log_stderr='offline')

    # TODO: track the one with in/out, i.e. for those BatchedProcesses
Example #19
def _parse_git_diff(dspath, diff_thingie=None, paths=None,
                    ignore_submodules='none', staged=False):
    # use '--work-tree=.' to get direct mode to cooperate
    cmd = ['git', '--work-tree=.', 'diff', '--raw',
           # file names NULL terminated
           '-z',
           # how to treat submodules (see git diff docs)
           '--ignore-submodules={}'.format(ignore_submodules),
           # never abbreviate sha sums
           '--abbrev=40']
    if staged:
        cmd.append('--staged')
    if diff_thingie:
        cmd.append(diff_thingie)
    if paths:
        cmd.append('--')
        cmd.extend(ap['path'] for ap in paths if ap.get('raw_input', False))

    try:
        stdout, stderr = GitRunner(cwd=dspath).run(
            cmd,
            log_stderr=True,
            log_stdout=True,
            log_online=False,
            expect_stderr=False,
            shell=False,
            expect_fail=True)
    except CommandError as e:
        if 'bad revision' in e.stderr:
            yield dict(
                path=dspath,
                type='dataset',
                status='impossible',
                message=e.stderr.strip())
            return
        raise

    ap = None
    for line in stdout.split('\0'):
        if not line:
            continue
        if line.startswith(':'):
            # a new path
            # yield any existing one
            if ap:
                yield ap
                ap = None
            # start new record
            m_src, m_dst, sha_src, sha_dst, status = \
                line[1:].split()
            ap = dict(
                mode_src=int(m_src, base=8),
                mode=int(m_dst, base=8),
                revision_src=sha_src if sha_src != '0' * 40 else None,
                revision=sha_dst if sha_dst != '0' * 40 else None,
                parentds=dspath)
            _translate_status(status, ap)
            _translate_type(ap['mode'], ap, 'type')
            _translate_type(ap['mode_src'], ap, 'type_src')
        else:
            # a filename
            if 'path' in ap:
                ap['path_src'] = ap['path']
            ap['path'] = opj(dspath, line)
    if ap:
        yield ap
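
For orientation, the `git diff --raw -z --abbrev=40` records walked above look like this (values illustrative; fields are NUL-separated, the filename following the colon-prefixed header):

# :100644 100644 <40-hex src sha> <40-hex dst sha> M\0some/file\0
# -> ap = {'mode_src': 0o100644, 'mode': 0o100644,
#          'revision_src': '<src sha>', 'revision': '<dst sha>',
#          'parentds': dspath, 'path': opj(dspath, 'some/file'), ...}
# an all-zero sha (40 * '0') is translated to None (no blob on that side)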
Example #20
    def __init__(self,
                 dataset=None,
                 dataset_only=False,
                 overrides=None,
                 source='any'):
        if source not in ('any', 'local', 'dataset', 'dataset-local'):
            raise ValueError(
                'Unknown ConfigManager(source=) setting: {}'.format(source))
        # legacy compat
        if dataset_only:
            if source != 'any':
                raise ValueError(
                    'Refuse to combine legacy dataset_only flag, with '
                    'source setting')
            source = 'dataset'
        # store in a simple dict
        # no subclassing, because we want to be largely read-only, and implement
        # config writing separately
        self._store = {}
        self._cfgfiles = set()
        self._cfgmtimes = None
        self._dataset_path = None
        self._dataset_cfgfname = None
        self._repo_cfgfname = None
        self._config_cmd = ['git', 'config']
        # public dict to store variables that always override any setting
        # read from a file
        # `hasattr()` is needed because `datalad.cfg` is generated upon first module
        # import, hence when this code runs first, there cannot be any config manager
        # to inherit from
        self.overrides = datalad.cfg.overrides.copy() if hasattr(
            datalad, 'cfg') else {}
        if overrides is not None:
            self.overrides.update(overrides)
        if dataset is None:
            if source in ('dataset', 'dataset-local'):
                raise ValueError(
                    'ConfigManager configured to read dataset only, '
                    'but no dataset given')
            # The caller didn't specify a repository. Unset the git directory
            # when calling 'git config' to prevent a repository in the current
            # working directory from leaking configuration into the output.
            self._config_cmd = ['git', '--git-dir=', 'config']
        else:
            self._dataset_path = dataset.path
            if source != 'local':
                self._dataset_cfgfname = opj(self._dataset_path,
                                             DATASET_CONFIG_FILE)
            if source != 'dataset':
                self._repo_cfgfname = opj(self._dataset_path, '.git', 'config')
        self._src_mode = source
        # Since configs could contain sensitive information, to prevent
        # any "facilitated" leakage -- just disable logging of outputs for
        # this runner
        run_kwargs = dict(log_outputs=False)
        if dataset is not None:
            # make sure we run the git config calls in the dataset
            # to pick up the right config files
            run_kwargs['cwd'] = dataset.path
        self._runner = GitRunner(**run_kwargs)
        try:
            self._gitconfig_has_showorgin = \
                LooseVersion(get_git_version()) >= '2.8.0'
        except Exception:
            # no git, or something else broken -- assume git is present anyway
            # so as not to delay this, but assume it is old
            self._gitconfig_has_showorgin = False

        self.reload(force=True)
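
The `--git-dir=` trick above can be exercised in isolation: with an empty git dir, `git config -l` reports only the global/system scopes. A sketch, assuming only that git is on PATH:

import subprocess

# no repository-local scope leaks in, even when run inside a repo
subprocess.run(['git', '--git-dir=', 'config', '-l'], check=False)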
Example #21
def _parse_git_diff(dspath,
                    diff_thingie=None,
                    paths=None,
                    ignore_submodules='none',
                    staged=False):
    # use '--work-tree=.' to get direct mode to cooperate
    cmd = [
        'git',
        '--work-tree=.',
        'diff',
        '--raw',
        # file names NULL terminated
        '-z',
        # how to treat submodules (see git diff docs)
        '--ignore-submodules={}'.format(ignore_submodules),
        # never abbreviate sha sums
        '--abbrev=40'
    ]
    if staged:
        cmd.append('--staged')
    if diff_thingie:
        cmd.append(diff_thingie)
    if paths:
        cmd.append('--')
        cmd.extend(ap['path'] for ap in paths if ap.get('raw_input', False))

    try:
        stdout, stderr = GitRunner(cwd=dspath).run(cmd,
                                                   log_stderr=True,
                                                   log_stdout=True,
                                                   log_online=False,
                                                   expect_stderr=False,
                                                   shell=False,
                                                   expect_fail=True)
    except CommandError as e:
        if 'bad revision' in e.stderr:
            yield dict(path=dspath,
                       type='dataset',
                       status='impossible',
                       message=e.stderr.strip())
            return
        raise e

    ap = None
    for line in stdout.split('\0'):
        if not line:
            continue
        if line.startswith(':'):
            # a new path
            # yield any existing one
            if ap:
                yield ap
                ap = None
            # start new record
            m_src, m_dst, sha_src, sha_dst, status = \
                line[1:].split()
            ap = dict(mode_src=int(m_src, base=8),
                      mode=int(m_dst, base=8),
                      revision_src=sha_src if sha_src != '0' * 40 else None,
                      revision=sha_dst if sha_dst != '0' * 40 else None,
                      parentds=dspath)
            _translate_status(status, ap)
            _translate_type(ap['mode'], ap, 'type')
            _translate_type(ap['mode_src'], ap, 'type_src')
        else:
            # a filename
            if 'path' in ap:
                ap['path_src'] = ap['path']
            ap['path'] = opj(dspath, line)
    if ap:
        yield ap
Example #22
def postclonecfg_ria(ds, props):
    """Configure a dataset freshly cloned from a RIA store"""
    repo = ds.repo
    # RIA uses hashdir mixed, copying data to it via git-annex (if cloned via
    # ssh) would make it see a bare repo and establish a hashdir lower annex
    # object tree.
    # Moreover, we want the ORA remote to receive all data for the store, so its
    # objects could be moved into archives (the main point of a RIA store).
    RIA_REMOTE_NAME = 'origin'  # don't hardcode everywhere
    ds.config.set('remote.{}.annex-ignore'.format(RIA_REMOTE_NAME),
                  'true',
                  where='local')

    # chances are that if this dataset came from a RIA store, its subdatasets
    # may live there too. Place a subdataset source candidate config that makes
    # get probe this RIA store when obtaining subdatasets
    ds.config.set(
        # we use the label 'origin' for this candidate in order to not have to
        # generate a complicated name from the actual source specification.
        # we pick a cost of 200 to sort it before datalad's default candidates
        # for non-RIA URLs, because they prioritize hierarchical layouts that
        # cannot be found in a RIA store
        'datalad.get.subdataset-source-candidate-200origin',
        # use the entire original URL, up to the fragment + plus dataset ID
        # placeholder, this should make things work with any store setup we
        # support (paths, ports, ...)
        props['source'].split('#', maxsplit=1)[0] + '#{id}',
        where='local')

    # setup publication dependency, if a corresponding special remote exists
    # and was enabled (there could be RIA stores that actually only have repos)
    # make this function be a generator
    ora_remotes = [
        s for s in ds.siblings('query', result_renderer='disabled')
        if s.get('annex-externaltype') == 'ora'
    ]
    if not ora_remotes and any(
            r.get('externaltype') == 'ora' for r in (repo.get_special_remotes(
            ).values() if hasattr(repo, 'get_special_remotes') else [])):
        # no ORA remote autoenabled, but configuration known about at least one.
        # Let's check origin's config for datalad.ora-remote.uuid as stored by
        # create-sibling-ria and try enabling that one.
        lgr.debug("Found no autoenabled ORA special remote. Trying to look it "
                  "up in source config ...")

        # First figure whether we cloned via SSH, HTTP or local path and then
        # get that config file the same way:
        config_content = None
        scheme = props['giturl'].split(':', 1)[0]
        if scheme == 'http':

            try:
                response = requests.get("{}{}config".format(
                    props['giturl'],
                    '/' if not props['giturl'].endswith('/') else ''))
                config_content = response.text
            except requests.RequestException as e:
                lgr.debug("Failed to get config file from source:\n%s",
                          exc_str(e))

        elif scheme == 'ssh':
            # TODO: switch the following to proper command abstraction:
            # SSHRemoteIO ignores the path part ATM. No remote CWD! (To be
            # changed with command abstractions). So we need to get that part to
            # have a valid path to origin's config file:
            cfg_path = PurePosixPath(URL(props['giturl']).path) / 'config'
            op = SSHRemoteIO(props['giturl'])
            try:
                config_content = op.read_file(cfg_path)
            except RIARemoteError as e:
                lgr.debug("Failed to get config file from source: %s",
                          exc_str(e))

        elif scheme == 'file':
            # TODO: switch the following to proper command abstraction:
            op = LocalIO()
            cfg_path = Path(URL(props['giturl']).localpath) / 'config'
            try:
                config_content = op.read_file(cfg_path)
            except (RIARemoteError, OSError) as e:
                lgr.debug("Failed to get config file from source: %s",
                          exc_str(e))
        else:
            lgr.debug(
                "Unknown URL-Scheme in %s. Can handle SSH, HTTP or "
                "FILE scheme URLs.", props['source'])

        # Now read the retrieved config content
        org_uuid = None
        if config_content:
            # TODO: We might be able to spare the saving to a file.
            #       "git config -f -" is not explicitly documented but happens
            #       to work and would read from stdin. Make sure we know this
            #       works for required git versions and on all platforms.
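            # The stdin variant that TODO refers to would look like
            #   git config -f - --get datalad.ora-remote.uuid
            # with the config file content piped in on stdin (an untested
            # assumption, not what runs here).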
            with make_tempfile(content=config_content) as cfg_file:
                runner = WitlessRunner(
                    env=GitRunner.get_git_environ_adjusted())
                try:
                    result = runner.run(
                        ['git', 'config', '-f', cfg_file,
                         'datalad.ora-remote.uuid'],
                        protocol=StdOutCapture)
                    org_uuid = result['stdout'].strip()
                except CommandError as e:
                    # doesn't contain what we are looking for
                    lgr.debug(
                        "Found no UUID for ORA special remote at "
                        "'%s' (%s)", RIA_REMOTE_NAME, exc_str(e))

        # Now, enable it. Had annex-init succeeded in enabling it as stored,
        # we wouldn't have ended up here, so enable it with the store URL
        # suggested by the URL we cloned from.
        if org_uuid:
            srs = repo.get_special_remotes()
            if org_uuid in srs.keys():
                # TODO: - Double-check autoenable value and only do this when
                #         true?
                #       - What if still fails? -> Annex shouldn't change config
                #         in that case

                # we only need the store:
                new_url = props['source'].split('#')[0]
                try:
                    repo.enable_remote(srs[org_uuid]['name'],
                                       options=['url={}'.format(new_url)])
                    lgr.info("Reconfigured %s for %s", srs[org_uuid]['name'],
                             new_url)
                    # update ora_remotes for considering publication dependency
                    # below
                    ora_remotes = [
                        s for s in ds.siblings('query',
                                               result_renderer='disabled')
                        if s.get('annex-externaltype', None) == 'ora'
                    ]
                except CommandError as e:
                    lgr.debug("Failed to reconfigure ORA special remote: %s",
                              exc_str(e))
            else:
                lgr.debug("Unknown ORA special remote uuid at '%s': %s",
                          RIA_REMOTE_NAME, org_uuid)
    if ora_remotes:
        if len(ora_remotes) == 1:
            yield from ds.siblings('configure',
                                   name=RIA_REMOTE_NAME,
                                   publish_depends=ora_remotes[0]['name'],
                                   result_filter=None,
                                   result_renderer='disabled')
        else:
            lgr.warning(
                "Found multiple ORA remotes. Couldn't decide which "
                "publishing to 'origin' should depend on: %s. Consider "
                "running 'datalad siblings configure -s origin "
                "--publish-depends ORAREMOTENAME' to set publication "
                "dependency manually.", [r['name'] for r in ora_remotes])
Example No. 27
def get_git_version(runner=None):
    """Return version of available git"""
    runner = runner or GitRunner()
    return runner.run('git version'.split())[0].split()[2]
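# A minimal usage sketch for the helper above (mirroring how ConfigManager
# compares versions further below; the threshold is just an example):
from distutils.version import LooseVersion

if LooseVersion(get_git_version()) >= '2.8.0':
    # e.g. `git config --show-origin` was added in 2.8.0
    print("git config --show-origin is available")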
Example No. 28
def setup_package():
    import os
    from datalad import consts
    _test_states['HOME'] = os.environ.get('HOME', None)
    _test_states['DATASETS_TOPURL_ENV'] = os.environ.get('DATALAD_DATASETS_TOPURL', None)
    _test_states['DATASETS_TOPURL'] = consts.DATASETS_TOPURL
    os.environ['DATALAD_DATASETS_TOPURL'] = consts.DATASETS_TOPURL = 'http://datasets-tests.datalad.org/'

    # pybuild overrides HOME, but we may want our own HOME where we
    # pre-set up git for testing (name, email)
    if 'GIT_HOME' in os.environ:
        os.environ['HOME'] = os.environ['GIT_HOME']
    else:
        # we set up our own new HOME, the BEST and HUGE one
        from datalad.utils import make_tempfile
        from datalad.tests import _TEMP_PATHS_GENERATED
        # TODO: split into a function + context manager
        with make_tempfile(mkdir=True) as new_home:
            os.environ['HOME'] = new_home
        # the context manager removed the temporary directory on exit, so
        # re-create it; it is cleaned up later via _TEMP_PATHS_GENERATED
        if not os.path.exists(new_home):
            os.makedirs(new_home)
        with open(os.path.join(new_home, '.gitconfig'), 'w') as f:
            f.write("""\
[user]
	name = DataLad Tester
	email = [email protected]
""")
        _TEMP_PATHS_GENERATED.append(new_home)

    # If there is a bundled git, make sure GitPython uses it too
    # (some parts of the test utilities still rely on GitPython)
    from datalad.cmd import GitRunner
    GitRunner._check_git_path()
    if GitRunner._GIT_PATH:
        os.environ['GIT_PYTHON_GIT_EXECUTABLE'] = \
            os.path.join(GitRunner._GIT_PATH, 'git')

    # Re-load ConfigManager, since otherwise it won't consider global config
    # from the new $HOME (see gh-4153)
    cfg.reload(force=True)

    # pybuild by default defines http{,s}_proxy; to overcome that we would
    # need to set them to e.g. an empty value so it would not touch them.
    # But haskell libraries do not digest empty values nicely, so we just
    # pop them from the environment
    for ev in ('http_proxy', 'https_proxy'):
        if ev in os.environ and not os.environ[ev]:
            lgr.debug("Removing %s from the environment since it is empty", ev)
            os.environ.pop(ev)

    # During tests we allow "insecure" access to local file:// and
    # http://localhost URLs, since all of them are either generated as test
    # fixtures or cloned from trusted sources
    from datalad.support.annexrepo import AnnexRepo
    AnnexRepo._ALLOW_LOCAL_URLS = True

    DATALAD_LOG_LEVEL = os.environ.get('DATALAD_LOG_LEVEL', None)
    if DATALAD_LOG_LEVEL is None:
        # very very silent.  Tests introspecting logs should use
        # swallow_logs(new_level=...)
        _test_states['loglevel'] = lgr.getEffectiveLevel()
        lgr.setLevel(100)

        # And we should also set it within environ so underlying commands also stay silent
        _test_states['DATALAD_LOG_LEVEL'] = DATALAD_LOG_LEVEL
        os.environ['DATALAD_LOG_LEVEL'] = '100'
    else:
        # We do not override it, since we were explicitly asked for some log level
        _test_states['loglevel'] = None

    # Set to non-interactive UI
    from datalad.ui import ui
    _test_states['ui_backend'] = ui.backend
    # use obtain() since it consults the pre-configured default value
    ui.set_backend(cfg.obtain('datalad.tests.ui.backend'))

    # Monkey patch nose so it does not ERROR out whenever code asks for fileno
    # of the output. See https://github.com/nose-devs/nose/issues/6
    from io import StringIO as OrigStringIO

    class StringIO(OrigStringIO):
        fileno = lambda self: 1
        encoding = None

    from nose.ext import dtcompat
    from nose.plugins import capture, multiprocess, plugintest
    dtcompat.StringIO = StringIO
    capture.StringIO = StringIO
    multiprocess.StringIO = StringIO
    plugintest.StringIO = StringIO
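# A hypothetical counterpart restoring what setup_package() stashed in
# _test_states above (a sketch only; the real test suite may differ):
def teardown_package():
    import os
    from datalad import consts
    # None means the variable was not set originally
    if _test_states['HOME'] is not None:
        os.environ['HOME'] = _test_states['HOME']
    if _test_states['DATASETS_TOPURL_ENV'] is None:
        os.environ.pop('DATALAD_DATASETS_TOPURL', None)
    else:
        os.environ['DATALAD_DATASETS_TOPURL'] = \
            _test_states['DATASETS_TOPURL_ENV']
    consts.DATASETS_TOPURL = _test_states['DATASETS_TOPURL']
    if _test_states['loglevel'] is not None:
        lgr.setLevel(_test_states['loglevel'])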
Example No. 29
import atexit

try:
    # this will fix the rendering of ANSI escape sequences
    # for colored terminal output on windows
    # it will do nothing on any other platform, hence it
    # is safe to call unconditionally
    import colorama
    colorama.init()
    atexit.register(colorama.deinit)
except ImportError:
    pass

# Other imports are interspersed with lgr.debug to ease troubleshooting startup
# delays etc.

# If there is a bundled git, make sure GitPython uses it too:
from datalad.cmd import GitRunner
GitRunner._check_git_path()
if GitRunner._GIT_PATH:
    import os
    os.environ['GIT_PYTHON_GIT_EXECUTABLE'] = \
        os.path.join(GitRunner._GIT_PATH, 'git')

from .config import ConfigManager
cfg = ConfigManager()

from .log import lgr
from datalad.utils import get_encoding_info, get_envvars_info

lgr.log(5, "Instantiating ssh manager")
from .support.sshconnector import SSHManager
ssh_manager = SSHManager()
atexit.register(ssh_manager.close, allow_fail=False)
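# A quick sketch of what the GIT_PYTHON_GIT_EXECUTABLE setting above
# achieves (the path is hypothetical; GitPython consults the variable when
# the `git` module is first imported):
import os
os.environ['GIT_PYTHON_GIT_EXECUTABLE'] = '/opt/datalad/bin/git'
import git  # must happen after the variable is set
print(git.Git().version())  # runs the configured executable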
Example No. 30
class ConfigManager(object):
    """Thin wrapper around `git-config` with support for a dataset configuration.

    The general idea is to have an object that is primarily used to read/query
    configuration options.  Upon creation, the current configuration is read
    via one (or at most two, in the presence of dataset-specific configuration)
    calls to `git config`.  If this class is initialized with a Dataset
    instance, it supports reading and writing configuration from
    ``.datalad/config`` inside a dataset too. This file is committed to Git and
    hence useful for shipping certain configuration items with a dataset.

    The API aims to provide the most significant read-access API of a
    dictionary, the Python ConfigParser, and GitPython's config parser
    implementations.

    This class is presently not capable of efficiently writing multiple
    configuration items at once.  Instead, each modification results in a
    dedicated call to `git config`. This author thinks this is OK, as he
    cannot think of a situation where a large number of items needs to be
    written during normal operation. If such a need arises, various solutions
    are possible (via GitPython, or an independent writer).

    Each instance carries a public `overrides` attribute. This dictionary
    contains variables that override any setting read from a file. The overrides
    are persistent across reloads, and are not modified by any of the
    manipulation methods, such as `set` or `unset`.

    Any DATALAD_* environment variable is also presented as a configuration
    item. Settings read from environment variables are not stored in any of
    the configuration files, but are read dynamically from the environment at
    each `reload()` call. Their values take precedence over any specification
    in configuration files, and even over overrides.

    Parameters
    ----------
    dataset : Dataset, optional
      If provided, all `git config` calls are executed in this dataset's
      directory. Moreover, any modifications are, by default, directed to
      this dataset's configuration file (which will be created on demand)
    dataset_only : bool
      If True, configuration items are only read from a dataset's persistent
      configuration file, if present (the one in ``.datalad/config``, not
      ``.git/config``).
    overrides : dict, optional
      Variable overrides, see general class documentation for details.
    """
    def __init__(self, dataset=None, dataset_only=False, overrides=None):
        # store in a simple dict
        # no subclassing, because we want to be largely read-only, and implement
        # config writing separately
        self._store = {}
        self._cfgfiles = set()
        self._cfgmtimes = None
        # public dict to store variables that always override any setting
        # read from a file
        # `hasattr()` is needed because `datalad.cfg` is generated upon first module
        # import, hence when this code runs first, there cannot be any config manager
        # to inherit from
        self.overrides = datalad.cfg.overrides.copy() if hasattr(datalad, 'cfg') else {}
        if overrides is not None:
            self.overrides.update(overrides)
        if dataset is None:
            self._dataset_path = None
            self._dataset_cfgfname = None
            self._repo_cfgfname = None
        else:
            self._dataset_path = dataset.path
            self._dataset_cfgfname = opj(self._dataset_path, DATASET_CONFIG_FILE)
            # stays None in dataset_only mode; reload() returns early before
            # using it in that case
            self._repo_cfgfname = None
            if not dataset_only:
                self._repo_cfgfname = opj(self._dataset_path, '.git', 'config')
        self._dataset_only = dataset_only
        # Since configs could contain sensitive information, to prevent
        # any "facilitated" leakage -- just disable logging of outputs for
        # this runner
        run_kwargs = dict(log_outputs=False)
        if dataset is not None:
            # make sure we run the git config calls in the dataset
            # to pick up the right config files
            run_kwargs['cwd'] = dataset.path
        self._runner = GitRunner(**run_kwargs)
        try:
            self._gitconfig_has_showorigin = \
                LooseVersion(get_git_version(self._runner)) >= '2.8.0'
        except Exception:
            # no git, or something else broken; assume git is present anyway
            # (to not delay this), but assume it is old
            self._gitconfig_has_showorigin = False

        self.reload(force=True)

    def reload(self, force=False):
        """Reload all configuration items from the configured sources

        If `force` is False, all files configuration was previously read from
        are checked for differences in the modification times. If no difference
        is found for any file no reload is performed. This mechanism will not
        detect newly created global configuration files, use `force` in this case.
        """
        if not force and self._cfgmtimes:
            # we aren't forcing and we have read files before
            # check if any file we read from has changed
            current_time = time()
            curmtimes = {c: getmtime(c) for c in self._cfgfiles if exists(c)}
            if all(curmtimes[c] == self._cfgmtimes.get(c) and
                   # protect against low-res mtimes (FAT32 has 2s, EXT3 has 1s!)
                   # if mtime age is less than worst resolution assume modified
                   (current_time - curmtimes[c]) > 2.0
                   for c in curmtimes):
                # all the same, nothing to do, except for
                # superimpose overrides, could have changed in the meantime
                self._store.update(self.overrides)
                # reread env, is quick
                self._store = _parse_env(self._store)
                return

        self._store = {}
        # 2-step strategy:
        #   - load datalad dataset config from dataset
        #   - load git config from all sources supported by git
        # in doing so we always stay compatible with where Git gets its
        # config from, but also allow to override persistent information
        # from dataset locally or globally
        run_args = ['-z', '-l']
        if self._gitconfig_has_showorigin:
            run_args.append('--show-origin')

        if self._dataset_cfgfname:
            if exists(self._dataset_cfgfname):
                stdout, stderr = self._run(
                    run_args + ['--file', self._dataset_cfgfname],
                    log_stderr=True
                )
                # overwrite existing value, do not amend to get multi-line
                # values
                self._store, self._cfgfiles = _parse_gitconfig_dump(
                    stdout, self._store, self._cfgfiles, replace=False)

        if self._dataset_only:
            # superimpose overrides
            self._store.update(self.overrides)
            return

        stdout, stderr = self._run(run_args, log_stderr=True)
        self._store, self._cfgfiles = _parse_gitconfig_dump(
            stdout, self._store, self._cfgfiles, replace=True)

        # always monitor the dataset cfg location, we know where it is in all cases
        if self._dataset_cfgfname:
            self._cfgfiles.add(self._dataset_cfgfname)
            self._cfgfiles.add(self._repo_cfgfname)
        self._cfgmtimes = {c: getmtime(c) for c in self._cfgfiles if exists(c)}

        # superimpose overrides
        self._store.update(self.overrides)

        # override with environment variables
        self._store = _parse_env(self._store)

    @_where_reload
    def obtain(self, var, default=None, dialog_type=None, valtype=None,
               store=False, where=None, reload=True, **kwargs):
        """
        Convenience method to obtain settings interactively, if needed

        A UI will be used to ask for user input in interactive sessions.
        Questions to ask, and additional explanations can be passed directly
        as arguments, or retrieved from a list of pre-configured items.

        Additionally, this method allows for type conversion and storage
        of obtained settings. Both aspects can also be pre-configured.

        Parameters
        ----------
        var : str
          Variable name including any section like `git config` expects them,
          e.g. 'core.editor'
        default : any type
          In interactive sessions and if `store` is True, this default value
          will be presented to the user for confirmation (or modification).
          In all other cases, this value will be silently assigned unless
          there is an existing configuration setting.
        dialog_type : {'question', 'yesno', None}
          Which dialog type to use in interactive sessions. If `None`,
          pre-configured UI options are used.
        store : bool
          Whether to store the obtained value (or default)
        %s
        `**kwargs`
          Additional arguments for the UI function call, such as a question
          `text`.
        """
        # do a local import, as this module is imported prominently, and the
        # import below could theoretically pull in all kinds of weird things
        # for type conversion
        from datalad.interface.common_cfg import definitions as cfg_defs
        # fetch what we know about this variable
        cdef = cfg_defs.get(var, {})
        # type conversion setup
        if valtype is None and 'type' in cdef:
            valtype = cdef['type']
        if valtype is None:
            valtype = lambda x: x

        # any default?
        if default is None and 'default' in cdef:
            default = cdef['default']

        _value = None
        if var in self:
            # nothing needs to be obtained, it is all here already
            _value = self[var]
        elif store is False and default is not None:
            # nothing will be stored, and we have a default -> no user confirmation
            # we cannot use logging, because we want to use the config to
            # configure the logging
            #lgr.debug('using default {} for config setting {}'.format(default, var))
            _value = default

        if _value is not None:
            # we got everything we need and can exit early
            try:
                return valtype(_value)
            except Exception as e:
                raise ValueError(
                    "value '{}' of existing configuration for '{}' cannot be "
                    "converted to the desired type '{}' ({})".format(
                        _value, var, valtype, exc_str(e)))

        # now we need to try to obtain something from the user
        from datalad.ui import ui

        # configure UI
        dialog_opts = kwargs
        if dialog_type is None:  # no override
            # check for common knowledge on how to obtain a value
            if 'ui' in cdef:
                dialog_type = cdef['ui'][0]
                # pull standard dialog settings
                dialog_opts = cdef['ui'][1]
                # update with input
                dialog_opts.update(kwargs)

        if (not ui.is_interactive or dialog_type is None) and default is None:
            raise RuntimeError(
                "cannot obtain value for configuration item '{}', "
                "not preconfigured, no default, no UI available".format(var))

        if not hasattr(ui, dialog_type):
            raise ValueError("UI '{}' does not support dialog type '{}'".format(
                ui, dialog_type))

        # configure storage destination, if needed
        if store:
            if where is None and 'destination' in cdef:
                where = cdef['destination']
            if where is None:
                raise ValueError(
                    "request to store configuration item '{}', but no "
                    "storage destination specified".format(var))

        # obtain via UI
        dialog = getattr(ui, dialog_type)
        _value = dialog(default=default, **dialog_opts)

        if _value is None:
            # we got nothing
            if default is None:
                raise RuntimeError(
                    "could not obtain value for configuration item '{}', "
                    "not preconfigured, no default".format(var))
            # XXX maybe we should return the default here, even if it was
            # returned from the UI -- if that is even possible

        # execute type conversion before storing to check that we got
        # something that looks like what we want
        try:
            value = valtype(_value)
        except Exception as e:
            raise ValueError(
                "cannot convert user input `{}` to desired type ({})".format(
                    _value, exc_str(e)))
            # XXX we could consider "looping" until we have a value of proper
            # type in case of a user typo...

        if store:
            # store the value as it was before any type conversion; it needs
            # an explicit string conversion nevertheless, because the default
            # could come in as something else
            self.add(var, '{}'.format(_value), where=where, reload=reload)
        return value
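
    # A usage sketch for obtain() (on a `cfg` instance like the one created
    # in the previous example, with a config item the test setup above also
    # consults):
    #   backend = cfg.obtain('datalad.tests.ui.backend')
    # This returns an existing setting or its preconfigured default, and only
    # falls back to an interactive dialog when neither is available.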

    #
    # Compatibility with dict API
    #
    def __len__(self):
        return len(self._store)

    def __getitem__(self, key):
        return self._store.__getitem__(key)

    def __contains__(self, key):
        return self._store.__contains__(key)

    def keys(self):
        """Returns list of configuration item names"""
        return self._store.keys()

    # XXX should this be *args?
    def get(self, key, default=None):
        """D.get(k[,d]) -> D[k] if k in D, else d.  d defaults to None."""
        return self._store.get(key, default)

    #
    # Compatibility with ConfigParser API
    #
    def sections(self):
        """Returns a list of the sections available"""
        return list(set([cfg_section_regex.match(k).group(1) for k in self._store]))

    def options(self, section):
        """Returns a list of options available in the specified section."""
        opts = []
        for k in self._store:
            sec, opt = cfg_sectionoption_regex.match(k).groups()
            if sec == section:
                opts.append(opt)
        return opts

    def has_section(self, section):
        """Indicates whether a section is present in the configuration"""
        for k in self._store:
            if k.startswith(section + '.'):
                return True
        return False

    def has_option(self, section, option):
        """If the given section exists, and contains the given option"""
        for k in self._store:
            sec, opt = cfg_sectionoption_regex.match(k).groups()
            if sec == section and opt == option:
                return True
        return False

    def getint(self, section, option):
        """A convenience method which coerces the option value to an integer"""
        return int(self.get_value(section, option))

    def getbool(self, section, option, default=None):
        """A convenience method which coerces the option value to a bool

        Values "on", "yes", "true" and any int!=0 are considered True
        Values which evaluate to bool False, "off", "no", "false" are considered
        False
        TypeError is raised for other values.
        """
        val = self.get_value(section, option, default=default)
        return anything2bool(val)

    def getfloat(self, section, option):
        """A convenience method which coerces the option value to a float"""
        return float(self.get_value(section, option))

    # this is a hybrid of ConfigParser and dict API
    def items(self, section=None):
        """Return a list of (name, value) pairs for each option

        Optionally limited to a given section.
        """
        if section is None:
            return self._store.items()
        return [(k, v) for k, v in self._store.items()
                if cfg_section_regex.match(k).group(1) == section]

    #
    # Compatibility with GitPython's ConfigParser
    #
    def get_value(self, section, option, default=None):
        """Like `get()`, but with an optional default value

        If the default is not None, the given default value will be returned in
        case the option did not exist. This behavior imitates GitPython's
        config parser.
        """
        try:
            return self['.'.join((section, option))]
        except KeyError as e:
            # this strange dance is needed because gitpython does it this way
            if default is not None:
                return default
            else:
                raise e

    #
    # Modify configuration (proxy respective git-config call)
    #
    @_where_reload
    def _run(self, args, where=None, reload=False, **kwargs):
        """Centralized helper to run "git config" calls

        Parameters
        ----------
        args : list
          Arguments to pass for git config
        %s
        **kwargs
          Keywords arguments for Runner's call
        """
        if where:
            args = self._get_location_args(where) + args
        out = self._runner.run(['git', 'config'] + args, **kwargs)
        if reload:
            self.reload()
        return out

    def _get_location_args(self, where, args=None):
        if args is None:
            args = []
        cfg_labels = ('dataset', 'local', 'global')
        if where not in cfg_labels:
            raise ValueError(
                "unknown configuration label '{}' (not in {})".format(
                    where, cfg_labels))
        if where == 'dataset':
            if not self._dataset_cfgfname:
                raise ValueError(
                    'ConfigManager cannot store configuration to dataset, '
                    'none specified')
            # create an empty config file if none exists, `git config` will
            # fail otherwise
            dscfg_dirname = opj(self._dataset_path, DATALAD_DOTDIR)
            if not exists(dscfg_dirname):
                os.makedirs(dscfg_dirname)
            if not exists(self._dataset_cfgfname):
                open(self._dataset_cfgfname, 'w').close()
            args.extend(['--file', self._dataset_cfgfname])
        elif where == 'global':
            args.append('--global')
        elif where == 'local':
            args.append('--local')
        return args

    @_where_reload
    def add(self, var, value, where='dataset', reload=True):
        """Add a configuration variable and value

        Parameters
        ----------
        var : str
          Variable name including any section like `git config` expects them, e.g.
          'core.editor'
        value : str
          Variable value
        %s"""
        self._run(['--add', var, value], where=where, reload=reload, log_stderr=True)

    @_where_reload
    def set(self, var, value, where='dataset', reload=True, force=False):
        """Set a variable to a value.

        In contrast to `add`, this replaces the value of `var` if there is
        one already.

        Parameters
        ----------
        var : str
          Variable name including any section like `git config` expects them, e.g.
          'core.editor'
        value : str
          Variable value
        force: bool
          if set, replaces all occurrences of `var` by a single one with the
          given `value`. Otherwise, raise an error if multiple entries for
          `var` exist already
        %s"""
        from datalad.support.gitrepo import to_options

        self._run(to_options(replace_all=force) + [var, value],
                  where=where, reload=reload, log_stderr=True)

    @_where_reload
    def rename_section(self, old, new, where='dataset', reload=True):
        """Rename a configuration section

        Parameters
        ----------
        old : str
          Name of the section to rename.
        new : str
          Name of the section to rename to.
        %s"""
        self._run(['--rename-section', old, new], where=where, reload=reload)

    @_where_reload
    def remove_section(self, sec, where='dataset', reload=True):
        """Rename a configuration section

        Parameters
        ----------
        sec : str
          Name of the section to remove.
        %s"""
        self._run(['--remove-section', sec], where=where, reload=reload)

    @_where_reload
    def unset(self, var, where='dataset', reload=True):
        """Remove all occurrences of a variable

        Parameters
        ----------
        var : str
          Name of the variable to remove
        %s"""
        # use --unset-all as it is simpler for now
        self._run(['--unset-all', var], where=where, reload=reload)
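
# A minimal usage sketch for ConfigManager (hypothetical dataset path and
# variable names; assumes a DataLad installation providing Dataset at this
# import path):
from datalad.distribution.dataset import Dataset

ds = Dataset('/tmp/some-dataset')  # hypothetical path
cfg = ConfigManager(dataset=ds)

# dict-style read access with a default
editor = cfg.get('core.editor', default='vi')

# write to the committed dataset configuration (.datalad/config)
cfg.set('datalad.example.item', 'some-value', where='dataset')

# and remove it again from the same location
cfg.unset('datalad.example.item', where='dataset')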