Example #1
0
    def __call__(self, parser, namespace, values, option_string=None):
        # Prefer the manpage on mature systems, but only for subcommands --
        # --help should behave similar to how git does it:
        # regular --help for "git" but man pages for specific commands.
        # This matters because subcommands are discovered from entry points
        # at run time, so any static manpage would likely be out of date.
        interactive = is_interactive()
        is_subcommand = ' ' in parser.prog
        if interactive and option_string == '--help' and is_subcommand:
            self._try_manpage(parser)

        # -h gets the terse variant, anything else the long one
        if option_string == '-h':
            helpstr = self._get_short_help(parser)
        else:
            helpstr = self._get_long_help(parser)

        # normalize capitalization to what we "always" had
        helpstr = 'Usage:' + helpstr[6:]

        if interactive and option_string == '--help':
            import pydoc
            pydoc.pager(helpstr)
        else:
            print(helpstr)
        sys.exit(0)
Example #2
0
def test_is_interactive(fout):
    # must not fail if one of the streams is no longer open:
    # https://github.com/datalad/datalad/issues/3267
    from datalad.cmd import (
        KillOutput,
        NoCapture,
        StdOutErrCapture,
        WitlessRunner,
    )
    from datalad.support.gitrepo import GitProgress
    from datalad.support.annexrepo import (
        AnnexInitOutput,
        AnnexJsonProtocol,
    )

    bools = ["False", "True"]

    def get_interactive(py_pre="", **run_kwargs):
        # Run a child python that records is_interactive() into `fout`,
        # then read the verdict back from that file.
        script = ('from datalad.utils import is_interactive; '
                  'f = open(%r, "w"); '
                  'f.write(str(is_interactive())); '
                  'f.close()') % fout
        WitlessRunner().run(
            [sys.executable, "-c", py_pre + script],
            **run_kwargs)
        with open(fout) as f:
            reported = f.read()
        assert_in(reported, bools)
        return bool(bools.index(reported))

    # verify that NoCapture can make fully interactive execution
    # happen, also test the core protocols
    # (we can only be interactive in a runner, if the test execution
    # itself happens in an interactive environment)
    expectations = [
        (
            NoCapture,
            # It is unclear why (on travis only) a child
            # process can report to be interactive
            # whenever the parent process is not.
            # Maintain this test exception until
            # someone can provide insight. The point of
            # this test is to ensure that NoCapture
            # in an interactive parent also keeps the
            # child interactive, so this oddity is not
            # relevant.
            True if on_travis else is_interactive()),
        (KillOutput, False),
        (StdOutErrCapture, False),
        (GitProgress, False),
        (AnnexInitOutput, False),
        (AnnexJsonProtocol, False),
    ]
    for proto, interactive in expectations:
        eq_(get_interactive(protocol=proto),
            interactive,
            msg='{} -> {}'.format(str(proto), interactive))
    # and it must not crash if smth is closed
    for stream in ('stderr', 'stdin', 'stdout'):
        eq_(get_interactive("import sys; sys.%s.close(); " % stream), False)
Example #3
0
def _ls_dataset(loc, fast=False, recursive=False, all_=False, long_=False):
    # Relative input locations are reported relative to the current directory
    topdir = '' if isabs(loc) else abspath(curdir)

    topds = Dataset(loc)
    dss = [topds]
    if recursive:
        dss.extend(
            Dataset(opj(loc, sm))
            for sm in topds.subdatasets(recursive=recursive,
                                        result_xfm='relpaths'))

    def _to_model(ds):
        # NOTE(review): AnnexRepo is checked before GitRepo -- presumably
        # the more specific type; verify if touching this ordering.
        if not ds.is_installed():
            return AbsentRepoModel(ds.path)
        if isinstance(ds.repo, AnnexRepo):
            return AnnexModel(ds.repo)
        if isinstance(ds.repo, GitRepo):
            return GitModel(ds.repo)
        raise RuntimeError("Got some dataset which don't know how to handle %s"
                           % ds)

    dsms = [_to_model(ds) for ds in dss]

    # adjust path strings
    for model in dsms:
        shown = relpath(model.path, topdir) if topdir else model.path
        model.path = shown if shown else '.'
    dsms.sort(key=lambda m: m.path)

    maxpath = max(len(m.path) for m in dsms)
    # + to accommodate ansi codes
    path_fmt = u"{ds.path!U:<%d}" % (maxpath + (11 if is_interactive() else 0))
    pathtype_fmt = path_fmt + u"  [{ds.type}]"
    full_fmt = pathtype_fmt + u"  {ds.branch!N}  {ds.describe!N} {ds.date!D}"
    if long_ or not fast:
        full_fmt += u"  {ds.clean!X}"

    fmts = {
        AbsentRepoModel: pathtype_fmt,
        GitModel: full_fmt,
        AnnexModel: full_fmt,
    }
    if long_:
        fmts[AnnexModel] += u"  {ds.annex_local_size!S}/{ds.annex_worktree_size!S}"

    formatter = LsFormatter()
    # NOTE: a parallel (joblib) rendering variant showed weird problems
    # -- TODO: figure it out before reintroducing
    for dsm in dsms:
        rendered = format_ds_model(formatter, dsm, fmts[dsm.__class__],
                                   format_exc=path_fmt + u"  {msg!R}")
        safe_print(rendered)
Example #4
0
def _ls_dataset(loc, fast=False, recursive=False, all_=False, long_=False):
    """Print a one-line-per-dataset listing of the dataset at `loc`.

    Parameters
    ----------
    loc : str
      Path to the top dataset to list.
    fast : bool, optional
      If True (and `long_` is not set), skip the clean/dirty column.
    recursive : bool, optional
      Also list all subdatasets discovered under `loc`.
    all_ : bool, optional
      NOTE(review): appears unused in this implementation -- presumably
      kept for interface compatibility; confirm against callers.
    long_ : bool, optional
      Add the clean-state column and annex local/worktree size columns.
    """
    isabs_loc = isabs(loc)
    # relative input locations are reported relative to the current directory
    topdir = '' if isabs_loc else abspath(curdir)

    topds = Dataset(loc)
    dss = [topds] + (
        [Dataset(opj(loc, sm))
         for sm in topds.get_subdatasets(recursive=recursive)]
        if recursive else [])

    # wrap each dataset into the model matching its repository flavor
    dsms = []
    for ds in dss:
        if not ds.is_installed():
            dsm = AbsentRepoModel(ds.path)
        elif isinstance(ds.repo, AnnexRepo):
            # NOTE(review): AnnexRepo is tested before GitRepo -- presumably
            # the more specific type; verify before reordering
            dsm = AnnexModel(ds.repo)
        elif isinstance(ds.repo, GitRepo):
            dsm = GitModel(ds.repo)
        else:
            raise RuntimeError("Got some dataset which don't know how to handle %s"
                               % ds)
        dsms.append(dsm)

    # adjust path strings: strip the `topdir` prefix (plus the path
    # separator) so paths print relative to the invocation directory
    for ds_model in dsms:
        path = ds_model.path[len(topdir) + 1 if topdir else 0:]
        if not path:
            # the top dataset itself reduces to an empty string
            path = '.'
        ds_model.path = path
    dsms = sorted(dsms, key=lambda m: m.path)

    # pad the path column to the widest entry
    maxpath = max(len(ds_model.path) for ds_model in dsms)
    path_fmt = u"{ds.path!U:<%d}" % (maxpath + (11 if is_interactive() else 0))  # + to accommodate ansi codes
    pathtype_fmt = path_fmt + u"  [{ds.type}]"
    full_fmt = pathtype_fmt + u"  {ds.branch!N}  {ds.describe!N} {ds.date!D}"
    if (not fast) or long_:
        full_fmt += u"  {ds.clean!X}"

    # absent repos get only path and type; git/annex get the full line
    fmts = {
        AbsentRepoModel: pathtype_fmt,
        GitModel: full_fmt,
        AnnexModel: full_fmt
    }
    if long_:
        fmts[AnnexModel] += u"  {ds.annex_local_size!S}/{ds.annex_worktree_size!S}"

    formatter = LsFormatter()
    # weird problems happen in the parallel run -- TODO - figure it out
    # for out in Parallel(n_jobs=1)(
    #         delayed(format_ds_model)(formatter, dsm, full_fmt, format_exc=path_fmt + "  {msg!R}")
    #         for dsm in dss):
    #     print(out)
    for dsm in dsms:
        fmt = fmts[dsm.__class__]
        ds_str = format_ds_model(formatter, dsm, fmt, format_exc=path_fmt + u"  {msg!R}")
        print(ds_str)
Example #5
0
class LsFormatter(string.Formatter):
    """string.Formatter with ls-specific conversions (colors, dates, sizes).

    Custom conversion codes (used as ``{field!C}`` in format strings):
      D    timestamp -> "YYYY-mm-dd/HH:MM:SS" local time, "-" for None
      S    byte count -> human-readable size, "-" for None
      X    bool -> green OK mark / red cross
      N    pass value through, rendering None as a red placeholder
      B/R/U  wrap value in blue / red / underline ANSI sequences
    """
    # condition by interactive: emit ANSI sequences only on interactive runs
    if is_interactive():
        BLUE = ansi_colors.COLOR_SEQ % ansi_colors.BLUE
        RED = ansi_colors.COLOR_SEQ % ansi_colors.RED
        GREEN = ansi_colors.COLOR_SEQ % ansi_colors.GREEN
        RESET = ansi_colors.RESET_SEQ
        DATASET = ansi_colors.COLOR_SEQ % ansi_colors.UNDERLINE
    else:
        BLUE = RED = GREEN = RESET = DATASET = u""

    # http://stackoverflow.com/questions/9932406/unicodeencodeerror-only-when-running-as-a-cron-job
    # reveals that Python uses ascii encoding when stdout is a pipe, so we shouldn't force it to be
    # unicode then
    # TODO: we might want to just ignore and force utf8 while explicitly .encode()'ing output!
    if sys.getdefaultencoding() == 'ascii':
        OK = 'OK'   # u"✓"
        NOK = 'X'  # u"✗"
        NONE = '-'  # u"✗"
    else:
        # unicode versions which look better but which blow during tests etc
        OK = u"✓"
        NOK = u"✗"
        NONE = u"✗"

    def convert_field(self, value, conversion):
        """Apply one of the custom conversion codes (see class docstring)."""
        if conversion == 'D':  # Date
            if value is not None:
                return time.strftime(u"%Y-%m-%d/%H:%M:%S", time.localtime(value))
            else:
                return u'-'
        elif conversion == 'S':  # Human size
            if value is not None:
                return humanize.naturalsize(value)
            else:
                return u'-'
        elif conversion == 'X':  # colored bool
            # renamed from `chr` -- do not shadow the builtin
            symbol, col = (self.OK, self.GREEN) if value else (self.NOK, self.RED)
            return u"%s%s%s" % (col, symbol, self.RESET)
        elif conversion == 'N':  # colored Red - if None
            if value is None:
                return u"%s%s%s" % (self.RED, self.NONE, self.RESET)
            return value
        elif conversion in {'B', 'R', 'U'}:
            return u"%s%s%s" % ({'B': self.BLUE, 'R': self.RED, 'U': self.DATASET}[conversion], value, self.RESET)

        # unknown codes fall through to the stock Formatter behavior
        return super(LsFormatter, self).convert_field(value, conversion)

    def format_field(self, value, format_spec):
        # TODO: move all the "coloring" into formatting, so we could correctly indent
        # given the format and only then color it up
        return super(LsFormatter, self).format_field(value, format_spec)
Example #6
0
def _ls_dataset(loc, fast=False, recursive=False, all=False):
    """Print a one-line-per-dataset listing of the dataset at `loc`.

    Parameters
    ----------
    loc : str
      Path to the top dataset to list.
    fast : bool, optional
      If True (and `all` is not set), skip the clean/dirty column.
    recursive : bool, optional
      Also list dataset handles discovered under `loc`.
    all : bool, optional
      Add the clean-state column and annex local/worktree size columns.
      NOTE(review): this parameter shadows the `all` builtin inside the
      function; renaming would break keyword callers.
    """
    from ..distribution.dataset import Dataset
    isabs_loc = isabs(loc)
    # relative input locations are reported relative to the current directory
    topdir = '' if isabs_loc else abspath(curdir)

    topds = Dataset(loc)
    dss = [topds] + ([
        Dataset(opj(loc, sm))
        for sm in topds.get_dataset_handles(recursive=recursive)
    ] if recursive else [])
    # every dataset is wrapped into the single model flavor of this version
    dsms = list(map(DsModel, dss))

    # adjust path strings: strip the `topdir` prefix (plus the path
    # separator) so paths print relative to the invocation directory
    for ds_model in dsms:
        path = ds_model.path[len(topdir) + 1 if topdir else 0:]
        if not path:
            # the top dataset itself reduces to an empty string
            path = '.'
        ds_model.path = path

    # pad the path column to the widest entry
    maxpath = max(len(ds_model.path) for ds_model in dsms)
    path_fmt = u"{ds.path!B:<%d}" % (maxpath + (11 if is_interactive() else 0)
                                     )  # + to accommodate ansi codes
    pathtype_fmt = path_fmt + u"  [{ds.type}]"
    full_fmt = pathtype_fmt + u"  {ds.branch!N}  {ds.describe!N} {ds.date!D}"
    if (not fast) or all:
        full_fmt += u"  {ds.clean!X}"
    if all:
        full_fmt += u"  {ds.annex_local_size!S}/{ds.annex_worktree_size!S}"

    formatter = LsFormatter()
    # weird problems happen in the parallel run -- TODO - figure it out
    # for out in Parallel(n_jobs=1)(
    #         delayed(format_ds_model)(formatter, dsm, full_fmt, format_exc=path_fmt + "  {msg!R}")
    #         for dsm in dss):
    #     print(out)
    for dsm in dsms:
        ds_str = format_ds_model(formatter,
                                 dsm,
                                 full_fmt,
                                 format_exc=path_fmt + u"  {msg!R}")
        print(ds_str)
Example #7
0
def _ls_dataset(loc, fast=False, recursive=False, all=False):
    from ..distribution.dataset import Dataset
    # Relative input locations are reported relative to the current directory
    topdir = '' if isabs(loc) else abspath(curdir)

    topds = Dataset(loc)
    children = []
    if recursive:
        children = [Dataset(opj(loc, sm))
                    for sm in topds.get_dataset_handles(recursive=recursive)]
    dsms = [DsModel(ds) for ds in [topds] + children]

    # adjust path strings: drop the topdir prefix (and separator) so the
    # listing shows relative paths; the top dataset itself becomes "."
    start = len(topdir) + 1 if topdir else 0
    for model in dsms:
        model.path = model.path[start:] or '.'

    maxpath = max(len(model.path) for model in dsms)
    # + to accommodate ansi codes
    path_fmt = u"{ds.path!B:<%d}" % (maxpath + (11 if is_interactive() else 0))
    pathtype_fmt = path_fmt + u"  [{ds.type}]"
    full_fmt = pathtype_fmt + u"  {ds.branch!N}  {ds.describe!N} {ds.date!D}"
    if all or not fast:
        full_fmt += u"  {ds.clean!X}"
    if all:
        full_fmt += u"  {ds.annex_local_size!S}/{ds.annex_worktree_size!S}"

    formatter = LsFormatter()
    # NOTE: a parallel (joblib) rendering variant showed weird problems
    # -- TODO: figure it out before reintroducing
    for dsm in dsms:
        rendered = format_ds_model(formatter, dsm, full_fmt,
                                   format_exc=path_fmt + u"  {msg!R}")
        print(rendered)
Example #8
0
class LsFormatter(string.Formatter):
    """string.Formatter with ls-specific conversions (colors, dates, sizes).

    Conversion codes: D (date), S (human size), X (colored bool),
    N (None rendered red), B/R/U (blue/red/underline coloring).
    """
    # condition by interactive: ANSI sequences only for interactive sessions
    if is_interactive():
        BLUE = ansi_colors.COLOR_SEQ % ansi_colors.BLUE
        RED = ansi_colors.COLOR_SEQ % ansi_colors.RED
        GREEN = ansi_colors.COLOR_SEQ % ansi_colors.GREEN
        RESET = ansi_colors.RESET_SEQ
        DATASET = ansi_colors.COLOR_SEQ % ansi_colors.UNDERLINE
    else:
        BLUE = RED = GREEN = RESET = DATASET = u""

    # TODO: we might want to just ignore and force utf8 while explicitly
    # .encode()'ing output!
    # Safe ASCII fallbacks; the constructor may upgrade them to the
    # unicode versions which look better but which blow during tests etc
    OK = 'OK'  # u"✓"
    NOK = 'X'  # u"✗"
    NONE = '-'  # u"✗"

    def __init__(self, *args, **kwargs):
        super(LsFormatter, self).__init__(*args, **kwargs)
        # Upgrade to unicode symbols only when stdout is an interactive
        # terminal whose encoding can actually represent them.
        encoding = sys.stdout.encoding
        if encoding is None:
            lgr.debug("encoding not set, using safe alternatives")
            return
        if not sys.stdout.isatty():
            lgr.debug("stdout is not a tty, using safe alternatives")
            return
        try:
            u"✓".encode(encoding)
        except UnicodeEncodeError:
            lgr.debug(
                "encoding %s does not support unicode, "
                "using safe alternatives", encoding)
        else:
            self.OK = u"✓"
            self.NOK = u"✗"
            self.NONE = u"✗"

    def convert_field(self, value, conversion):
        """Apply one of the custom conversion codes (see class docstring)."""
        if conversion == 'D':  # Date
            if value is None:
                return u'-'
            return time.strftime(u"%Y-%m-%d/%H:%M:%S", time.localtime(value))
        if conversion == 'S':  # Human size
            if value is None:
                return u'-'
            return humanize.naturalsize(value)
        if conversion == 'X':  # colored bool
            if value:
                symbol, color = self.OK, self.GREEN
            else:
                symbol, color = self.NOK, self.RED
            return u"%s%s%s" % (color, symbol, self.RESET)
        if conversion == 'N':  # colored Red - if None
            if value is None:
                return u"%s%s%s" % (self.RED, self.NONE, self.RESET)
            return value
        if conversion in ('B', 'R', 'U'):
            color = {'B': self.BLUE,
                     'R': self.RED,
                     'U': self.DATASET}[conversion]
            return u"%s%s%s" % (color, value, self.RESET)

        # unknown codes fall through to the stock Formatter behavior
        return super(LsFormatter, self).convert_field(value, conversion)

    def format_field(self, value, format_spec):
        # TODO: move all the "coloring" into formatting, so we could correctly
        # indent given the format and only then color it up
        return super(LsFormatter, self).format_field(value, format_spec)