def teardown_package():
    import os

    if os.environ.get('DATALAD_TESTS_NOTEARDOWN'):
        return
    from datalad.ui import ui
    ui.set_backend(_test_states['ui_backend'])

    if _test_states['loglevel'] is not None:
        lgr.setLevel(_test_states['loglevel'])
        if _test_states['DATALAD_LOG_LEVEL'] is None:
            os.environ.pop('DATALAD_LOG_LEVEL')
        else:
            os.environ['DATALAD_LOG_LEVEL'] = _test_states['DATALAD_LOG_LEVEL']

    from datalad.tests import _TEMP_PATHS_GENERATED
    from datalad.tests.utils import rmtemp
    if len(_TEMP_PATHS_GENERATED):
        msg = "Removing %d dirs/files: %s" % (
            len(_TEMP_PATHS_GENERATED), ', '.join(_TEMP_PATHS_GENERATED))
    else:
        msg = "Nothing to remove"
    lgr.debug("Teardown tests. " + msg)
    for path in _TEMP_PATHS_GENERATED:
        rmtemp(path, ignore_errors=True)

    lgr.debug("Printing versioning information collected so far")
    from datalad.support.external_versions import external_versions as ev
    print(ev.dumps(query=True))
def teardown_package():
    import os

    if os.environ.get('DATALAD_TESTS_NOTEARDOWN'):
        return
    from datalad.ui import ui
    ui.set_backend(_test_states['ui_backend'])

    if _test_states['loglevel'] is not None:
        lgr.setLevel(_test_states['loglevel'])
        if _test_states['DATALAD_LOG_LEVEL'] is None:
            os.environ.pop('DATALAD_LOG_LEVEL')
        else:
            os.environ['DATALAD_LOG_LEVEL'] = _test_states['DATALAD_LOG_LEVEL']

    from datalad.tests import _TEMP_PATHS_GENERATED
    from datalad.tests.utils import rmtemp
    if len(_TEMP_PATHS_GENERATED):
        msg = "Removing %d dirs/files: %s" % (
            len(_TEMP_PATHS_GENERATED), ', '.join(_TEMP_PATHS_GENERATED))
    else:
        msg = "Nothing to remove"
    lgr.debug("Teardown tests. " + msg)
    for path in _TEMP_PATHS_GENERATED:
        rmtemp(path, ignore_errors=True)

    lgr.debug("Printing versioning information collected so far")
    from datalad.support.external_versions import external_versions as ev
    # request versions for a few others which we do not check at runtime
    for m in ('git', 'system-ssh'):
        try:
            # Let's make sure to not blow up when we are almost done
            ev[m]
        except Exception:
            pass
    print(ev.dumps(query=True))
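# The teardown variants above restore state from a module-level _test_states
# dict. The sketch below is a hypothetical illustration of the counterpart
# setup (the real setup_package() does much more), showing how the keys read
# back above would get populated before the tests run.
_test_states = {
    'loglevel': None,           # logger level active before the test run
    'DATALAD_LOG_LEVEL': None,  # env var value before the test run, None if unset
    'ui_backend': None,         # UI backend active before the test run
}


def _sketch_setup_package():
    # hypothetical helper, not part of the original module
    import os
    from datalad.ui import ui
    _test_states['DATALAD_LOG_LEVEL'] = os.environ.get('DATALAD_LOG_LEVEL')
    _test_states['loglevel'] = lgr.getEffectiveLevel()
    _test_states['ui_backend'] = ui.backend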
def teardown_package():
    import os
    from datalad.tests.utils import rmtemp, OBSCURE_FILENAME

    lgr.debug("Printing versioning information collected so far")
    from datalad.support.external_versions import external_versions as ev
    print(ev.dumps(query=True))
    try:
        print("Obscure filename: str=%s repr=%r" %
              (OBSCURE_FILENAME.encode('utf-8'), OBSCURE_FILENAME))
    except UnicodeEncodeError as exc:
        from .dochelpers import exc_str
        print("Obscure filename failed to print: %s" % exc_str(exc))

    def print_dict(d):
        return " ".join("%s=%r" % v for v in d.items())

    print("Encodings: %s" % print_dict(get_encoding_info()))
    print("Environment: %s" % print_dict(get_envvars_info()))

    if os.environ.get('DATALAD_TESTS_NOTEARDOWN'):
        return
    from datalad.ui import ui
    from datalad import consts
    ui.set_backend(_test_states['ui_backend'])

    if _test_states['loglevel'] is not None:
        lgr.setLevel(_test_states['loglevel'])

    from datalad.tests import _TEMP_PATHS_GENERATED
    if len(_TEMP_PATHS_GENERATED):
        msg = "Removing %d dirs/files: %s" % (
            len(_TEMP_PATHS_GENERATED), ', '.join(_TEMP_PATHS_GENERATED))
    else:
        msg = "Nothing to remove"
    lgr.debug("Teardown tests. " + msg)
    for path in _TEMP_PATHS_GENERATED:
        rmtemp(path, ignore_errors=True)

    # restore all the env variables
    for v, val in _test_states['env'].items():
        if val is not None:
            os.environ[v] = val
        else:
            os.environ.pop(v)

    # Re-establish correct global config after changing $HOME.
    # Might be superfluous, since after teardown datalad.cfg shouldn't be
    # needed. However, maintaining a consistent state seems a good thing
    # either way.
    cfg.reload(force=True)

    consts.DATASETS_TOPURL = _test_states['DATASETS_TOPURL']

    from datalad.support.cookies import cookies_db
    cookies_db.close()
    from datalad.support.annexrepo import AnnexRepo
    AnnexRepo._ALLOW_LOCAL_URLS = False  # stay safe!
def teardown_package():
    import os
    from datalad.tests.utils import rmtemp, OBSCURE_FILENAME

    lgr.debug("Printing versioning information collected so far")
    from datalad.support.external_versions import external_versions as ev
    print(ev.dumps(query=True))
    try:
        print("Obscure filename: str=%s repr=%r" %
              (OBSCURE_FILENAME.encode('utf-8'), OBSCURE_FILENAME))
    except UnicodeEncodeError as exc:
        from .dochelpers import exc_str
        print("Obscure filename failed to print: %s" % exc_str(exc))

    def print_dict(d):
        return " ".join("%s=%r" % v for v in d.items())

    print("Encodings: %s" % print_dict(get_encoding_info()))
    print("Environment: %s" % print_dict(get_envvars_info()))

    if os.environ.get('DATALAD_TESTS_NOTEARDOWN'):
        return
    from datalad.ui import ui
    from datalad import consts
    ui.set_backend(_test_states['ui_backend'])

    if _test_states['loglevel'] is not None:
        lgr.setLevel(_test_states['loglevel'])
        if _test_states['DATALAD_LOG_LEVEL'] is None:
            os.environ.pop('DATALAD_LOG_LEVEL')
        else:
            os.environ['DATALAD_LOG_LEVEL'] = _test_states['DATALAD_LOG_LEVEL']

    from datalad.tests import _TEMP_PATHS_GENERATED
    if len(_TEMP_PATHS_GENERATED):
        msg = "Removing %d dirs/files: %s" % (
            len(_TEMP_PATHS_GENERATED), ', '.join(_TEMP_PATHS_GENERATED))
    else:
        msg = "Nothing to remove"
    lgr.debug("Teardown tests. " + msg)
    for path in _TEMP_PATHS_GENERATED:
        rmtemp(path, ignore_errors=True)

    if _test_states['HOME'] is not None:
        os.environ['HOME'] = _test_states['HOME']

    if _test_states['DATASETS_TOPURL_ENV']:
        os.environ['DATALAD_DATASETS_TOPURL'] = \
            _test_states['DATASETS_TOPURL_ENV']
    consts.DATASETS_TOPURL = _test_states['DATASETS_TOPURL']

    from datalad.support.cookies import cookies_db
    cookies_db.close()
    from datalad.support.annexrepo import AnnexRepo
    AnnexRepo._ALLOW_LOCAL_URLS = False  # stay safe!
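# Usage sketch: every teardown_package() variant above short-circuits when
# DATALAD_TESTS_NOTEARDOWN is set to any non-empty value, which is useful when
# debugging -- temporary paths and the tweaked environment are left in place
# for inspection (the later variants still print version/encoding diagnostics
# first, since that happens before the check).
import os

os.environ['DATALAD_TESTS_NOTEARDOWN'] = '1'
teardown_package()  # returns without removing _TEMP_PATHS_GENERATED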
def __call__(dataset=None):
    from datalad.distribution.dataset import require_dataset
    from datalad.support.exceptions import NoDatasetArgumentFound
    ds = None
    try:
        ds = require_dataset(dataset, check_installed=False,
                             purpose='reporting')
    except NoDatasetArgumentFound:
        # failure is already logged
        pass
    if ds and not ds.is_installed():
        # we don't deal with absent datasets
        ds = None
    if ds is None:
        from datalad import cfg
    else:
        cfg = ds.config

    from datalad.ui import ui
    from datalad.api import metadata
    from datalad.metadata import extractors as metaextractors
    from datalad.support.external_versions import external_versions
    import os
    import platform as pl
    import json

    # formatting helper
    def _t2s(t):
        res = []
        for e in t:
            if isinstance(e, tuple):
                es = _t2s(e)
                if es != '':
                    res += ['(%s)' % es]
            elif e != '':
                res += [e]
        return '/'.join(res)

    report_template = """\
System
======
{system}

Environment
===========
{env}

Externals
=========
{externals}

Available metadata extractors
=============================
{metaextractors}

Configuration
=============
{cfg}
{dataset}
"""

    dataset_template = """\

Dataset information
===================
{basic}

Metadata
--------
{meta}
"""
    ds_meta = None
    if ds and ds.is_installed():
        ds_meta = metadata(
            dataset=ds, reporton='datasets', return_type='list',
            result_filter=lambda x: x['action'] == 'metadata',
            result_renderer='disabled')
        if ds_meta:
            ds_meta = [dm['metadata'] for dm in ds_meta]
            if len(ds_meta) == 1:
                ds_meta = ds_meta.pop()

    ui.message(report_template.format(
        system='\n'.join(
            '{}: {}'.format(*i) for i in (
                ('OS ', ' '.join([
                    os.name, pl.system(), pl.release(),
                    pl.version()]).rstrip()),
                ('Distribution', ' '.join([
                    _t2s(pl.dist()), _t2s(pl.mac_ver()),
                    _t2s(pl.win32_ver())]).rstrip()))),
        env='\n'.join(
            '{}: {}'.format(k, v) for k, v in os.environ.items()
            if k.startswith('PYTHON')
            or k.startswith('GIT')
            or k.startswith('DATALAD')),
        dataset='' if not ds else dataset_template.format(
            basic='\n'.join(
                '{}: {}'.format(k, v) for k, v in (
                    ('path', ds.path),
                    ('repo',
                     ds.repo.__class__.__name__ if ds.repo else '[NONE]'),
                )),
            meta=json.dumps(ds_meta, indent=1)
            if ds_meta else '[no metadata]'),
        externals=external_versions.dumps(preamble=None, indent='',
                                          query=True),
        metaextractors='\n'.join(
            p for p in dir(metaextractors) if not p.startswith('_')),
        cfg='\n'.join(
            '{}: {}'.format(
                k,
                '<HIDDEN>' if 'user' in k or 'token' in k or 'passwd' in k
                else v)
            for k, v in sorted(cfg.items(), key=lambda x: x[0])),
    ))
    yield
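# Standalone check of the _t2s() helper defined inside __call__() above: it
# joins non-empty elements with '/' and parenthesizes nested tuples, so the
# empty fields of platform.mac_ver()/platform.win32_ver()-style tuples drop
# out of the report. _t2s_demo is a hypothetical copy for demonstration only.
def _t2s_demo(t):
    res = []
    for e in t:
        if isinstance(e, tuple):
            es = _t2s_demo(e)
            if es != '':
                res += ['(%s)' % es]
        elif e != '':
            res += [e]
    return '/'.join(res)


assert _t2s_demo(('10.15.7', ('', '', ''), 'x86_64')) == '10.15.7/x86_64'
assert _t2s_demo(('Ubuntu', ('20.04', 'focal'))) == 'Ubuntu/(20.04/focal)'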
def dlplugin(dataset=None):
    """Generate a report about the DataLad installation and configuration

    IMPORTANT: Sharing this report with untrusted parties (e.g. on the web)
    should be done with care, as it may include identifying information,
    and/or credentials or access tokens.

    Parameters
    ----------
    dataset : Dataset, optional
      If a dataset is given or found, information on this dataset is provided
      (if it exists), and its active configuration is reported.
    """
    ds = dataset
    if ds and not ds.is_installed():
        # we don't deal with absent datasets
        ds = None
    if ds is None:
        from datalad import cfg
    else:
        cfg = ds.config

    from datalad.ui import ui
    from datalad.api import metadata
    from datalad.support.external_versions import external_versions
    import os
    import platform as pl

    # formatting helper
    def _t2s(t):
        res = []
        for e in t:
            if isinstance(e, tuple):
                es = _t2s(e)
                if es != '':
                    res += ['(%s)' % es]
            elif e != '':
                res += [e]
        return '/'.join(res)

    report_template = """\
System
======
{system}

Environment
===========
{env}
{dataset}
Externals
=========
{externals}

Configuration
=============
{cfg}
"""

    dataset_template = """\

Dataset information
===================
{basic}

Metadata
--------
{meta}
"""
    ds_meta = None
    if ds and ds.is_installed():
        ds_meta = metadata(
            dataset=ds, dataset_global=True, return_type='item-or-list',
            result_filter=lambda x: x['action'] == 'metadata')
        if ds_meta:
            ds_meta = ds_meta['metadata']

    ui.message(report_template.format(
        system='\n'.join(
            '{}: {}'.format(*i) for i in (
                ('OS ', ' '.join([
                    os.name, pl.system(), pl.release(),
                    pl.version()]).rstrip()),
                ('Distribution', ' '.join([
                    _t2s(pl.dist()), _t2s(pl.mac_ver()),
                    _t2s(pl.win32_ver())]).rstrip()))),
        env='\n'.join(
            '{}: {}'.format(k, v) for k, v in os.environ.items()
            if k.startswith('PYTHON')
            or k.startswith('GIT')
            or k.startswith('DATALAD')),
        dataset='' if not ds else dataset_template.format(
            basic='\n'.join(
                '{}: {}'.format(k, v) for k, v in (
                    ('path', ds.path),
                    ('repo',
                     ds.repo.__class__.__name__ if ds.repo else '[NONE]'),
                )),
            meta='\n'.join(
                '{}: {}'.format(k, v) for k, v in ds_meta)
            if ds_meta else '[no metadata]'),
        externals=external_versions.dumps(preamble=None, indent='',
                                          query=True),
        cfg='\n'.join(
            '{}: {}'.format(
                k,
                '<HIDDEN>'
                if k.startswith('user.') or 'token' in k or 'user' in k
                else v)
            for k, v in sorted(cfg.items(), key=lambda x: x[0])),
    ))
    yield
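# Usage sketch for dlplugin() above: it is a generator (note the trailing
# yield), so the report is only printed once the generator is advanced.
# Passing dataset=None reports on the global configuration; an installed
# Dataset instance would add the dataset section as well.
for _ in dlplugin(dataset=None):
    pass  # ui.message() has emitted the report by the time we get here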
def setup_package():
    import tempfile
    from pathlib import Path

    from datalad import consts
    from datalad.support.annexrepo import AnnexRepo
    from datalad.support.cookies import cookies_db
    from datalad.support.external_versions import external_versions
    from datalad.tests import _TEMP_PATHS_GENERATED
    from datalad.tests.utils_pytest import (
        DEFAULT_BRANCH,
        DEFAULT_REMOTE,
        OBSCURE_FILENAME,
        HTTPPath,
        rmtemp,
    )
    from datalad.ui import ui
    from datalad.utils import (
        make_tempfile,
        on_osx,
    )

    if on_osx:
        # enforce honoring TMPDIR (see gh-5307)
        tempfile.tempdir = os.environ.get('TMPDIR', tempfile.gettempdir())

    with pytest.MonkeyPatch().context() as m:
        m.setattr(consts, "DATASETS_TOPURL",
                  'https://datasets-tests.datalad.org/')
        m.setenv('DATALAD_DATASETS_TOPURL', consts.DATASETS_TOPURL)

        m.setenv(
            "GIT_CONFIG_PARAMETERS",
            "'init.defaultBranch={}' 'clone.defaultRemoteName={}'".format(
                DEFAULT_BRANCH, DEFAULT_REMOTE))

        def prep_tmphome():
            # re core.askPass:
            # Don't let git ask for credentials in CI runs. Note that this
            # variable technically is not a flag, but an executable (which is
            # why name and value are a bit confusing here - we just want a
            # no-op basically). The environment variable GIT_ASKPASS
            # overwrites this, but neither env var nor this config are
            # supported by git-credential on all systems and git versions
            # (most recent ones should work either way, though). Hence use
            # both across CI builds.
            gitconfig = """\
[user]
        name = DataLad Tester
        email = [email protected]
[core]
        askPass =
[datalad "log"]
        exc = 1
[annex "security"]
        # from annex 6.20180626 file:/// and http://localhost access isn't
        # allowed by default
        allowed-url-schemes = http https file
        allowed-http-addresses = all
"""
            # TODO: split into a function + context manager
            with make_tempfile(mkdir=True) as new_home:
                pass
            # register for clean-up on exit
            _TEMP_PATHS_GENERATED.append(new_home)
            # populate default config
            new_home = Path(new_home)
            new_home.mkdir(parents=True, exist_ok=True)
            cfg_file = new_home / '.gitconfig'
            cfg_file.write_text(gitconfig)
            return new_home, cfg_file

        if external_versions['cmd:git'] < "2.32":
            # To overcome pybuild overriding HOME but us possibly wanting our
            # own HOME where we pre-setup git for testing (name, email)
            if 'GIT_HOME' in os.environ:
                m.setenv('HOME', os.environ['GIT_HOME'])
            else:
                # we setup our own new HOME, the BEST and HUGE one
                new_home, _ = prep_tmphome()
                for v, val in get_home_envvars(new_home).items():
                    m.setenv(v, val)
        else:
            _, cfg_file = prep_tmphome()
            m.setenv('GIT_CONFIG_GLOBAL', str(cfg_file))

        # Re-load ConfigManager, since otherwise it won't consider global
        # config from new $HOME (see gh-4153)
        cfg.reload(force=True)

        # datalad.locations.sockets has likely changed. Discard any cached
        # values.
        ssh_manager._socket_dir = None

        # To overcome pybuild by default defining http{,s}_proxy we would need
        # to define them to e.g. empty value so it wouldn't bother touching
        # them. But then haskell libraries do not digest empty value nicely,
        # so we just pop them out from the environment
        for ev in ('http_proxy', 'https_proxy'):
            if ev in os.environ and not (os.environ[ev]):
                lgr.debug("Removing %s from the environment since it is empty",
                          ev)
                os.environ.pop(ev)

        # Prevent interactive credential entry (note "true" is the command
        # to run). See also the core.askPass setting above
        m.setenv('GIT_ASKPASS', 'true')

        # Set to non-interactive UI
        _test_states['ui_backend'] = ui.backend
        # obtain() since that one consults for the default value
        ui.set_backend(cfg.obtain('datalad.tests.ui.backend'))

        # in order to avoid having to fiddle with rather uncommon
        # file:// URLs in the tests, have a standard HTTP server
        # that serves an 'httpserve' directory in the test HOME
        # the URL will be available from datalad.test_http_server.url
        global test_http_server
        # Start the server only if not running already
        # Relevant: we have test_misc.py:test_test which runs datalad.test but
        # not doing teardown, so the original server might never get stopped
        if test_http_server is None:
            serve_path = tempfile.mkdtemp(
                dir=cfg.get("datalad.tests.temp.dir"),
                prefix='httpserve',
            )
            test_http_server = HTTPPath(serve_path)
            test_http_server.start()
            _TEMP_PATHS_GENERATED.append(serve_path)

        if cfg.obtain('datalad.tests.setup.testrepos'):
            lgr.debug("Pre-populating testrepos")
            from datalad.tests.utils_pytest import with_testrepos
            with_testrepos()(lambda repo: 1)()

        yield

        lgr.debug("Printing versioning information collected so far")
        # Query for version of datalad, so it is included in ev.dumps below -
        # useful while testing extensions where version of datalad might
        # differ in the environment.
        external_versions['datalad']
        print(external_versions.dumps(query=True))
        try:
            print("Obscure filename: str=%s repr=%r" %
                  (OBSCURE_FILENAME.encode('utf-8'), OBSCURE_FILENAME))
        except UnicodeEncodeError as exc:
            ce = CapturedException(exc)
            print("Obscure filename failed to print: %s" % ce)

        def print_dict(d):
            return " ".join("%s=%r" % v for v in d.items())

        print("Encodings: %s" % print_dict(get_encoding_info()))
        print("Environment: %s" % print_dict(get_envvars_info()))

        if os.environ.get('DATALAD_TESTS_NOTEARDOWN'):
            return
        ui.set_backend(_test_states['ui_backend'])

        if test_http_server:
            test_http_server.stop()
            test_http_server = None
        else:
            lgr.debug(
                "For some reason global http_server was not set/running, "
                "thus not stopping")

        if len(_TEMP_PATHS_GENERATED):
            msg = "Removing %d dirs/files: %s" % (
                len(_TEMP_PATHS_GENERATED), ', '.join(_TEMP_PATHS_GENERATED))
        else:
            msg = "Nothing to remove"
        lgr.debug("Teardown tests. " + msg)
        for path in _TEMP_PATHS_GENERATED:
            rmtemp(str(path), ignore_errors=True)

        # Re-establish correct global config after changing $HOME.
        # Might be superfluous, since after teardown datalad.cfg shouldn't be
        # needed. However, maintaining a consistent state seems a good thing
        # either way.
        cfg.reload(force=True)

        ssh_manager._socket_dir = None
        cookies_db.close()
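# A sketch of how a session-scoped generator like setup_package() above is
# typically wired up under pytest: everything before `yield` runs once at
# session start, everything after it runs at session end. The fixture
# registration below is an assumption about the surrounding conftest.py,
# shown for context only.
import pytest


@pytest.fixture(autouse=True, scope="session")
def _setup_package_fixture():
    yield from setup_package()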
def __call__(dataset=None, sensitive=None, clipboard=None):
    from datalad import get_encoding_info
    from datalad import get_envvars_info
    from datalad.distribution.dataset import require_dataset
    from datalad.support.exceptions import NoDatasetArgumentFound
    ds = None
    try:
        ds = require_dataset(dataset, check_installed=False,
                             purpose='reporting')
    except NoDatasetArgumentFound:
        # failure is already logged
        pass
    if ds and not ds.is_installed():
        # we don't deal with absent datasets
        ds = None
    if sensitive:
        if ds is None:
            from datalad import cfg
        else:
            cfg = ds.config
    else:
        cfg = None

    from pkg_resources import iter_entry_points
    from datalad.ui import ui
    from datalad.api import metadata
    from datalad.support.external_versions import external_versions
    from datalad.dochelpers import exc_str
    from datalad.interface.results import success_status_map
    import os
    import platform as pl
    import json

    extractors = {}
    for ep in iter_entry_points('datalad.metadata.extractors'):
        try:
            ep.load()
            status = 'OK'
        except Exception as e:
            status = 'BROKEN ({})'.format(exc_str(e))
        extractors[ep.name] = status

    # formatting helper
    def _t2s(t):
        res = []
        for e in t:
            if isinstance(e, tuple):
                es = _t2s(e)
                if es != '':
                    res += ['(%s)' % es]
            elif e != '':
                res += [e]
        return '/'.join(res)

    report_template = """\
DataLad
=======
{datalad}

System
======
{system}

Locale/Encoding
===============
{loc}

Environment
===========
{env}

Externals
=========
{externals}

Installed extensions
====================
{extensions}

Known metadata extractors
=========================
{metaextractors}

Configuration
=============
{cfg}
{dataset}
"""

    dataset_template = """\

Dataset information
===================
{basic}

Metadata
--------
{meta}
"""
    ds_meta = None
    if not sensitive:
        ds_meta = _HIDDEN
    elif ds and ds.is_installed() and ds.id:
        ds_meta = metadata(
            dataset=ds, reporton='datasets', return_type='list',
            result_filter=lambda x: x['action'] == 'metadata' and
            success_status_map[x['status']] == 'success',
            result_renderer='disabled', on_failure='ignore')
        if ds_meta:
            ds_meta = [dm['metadata'] for dm in ds_meta]
            if len(ds_meta) == 1:
                ds_meta = ds_meta.pop()

    if cfg is not None:
        # make it into a dict to be able to reassign
        cfg = dict(cfg.items())
    if sensitive != 'all' and cfg:
        # filter out some of the entries which are known to be highly sensitive
        for k in cfg.keys():
            if 'user' in k or 'token' in k or 'passwd' in k:
                cfg[k] = _HIDDEN

    from datalad.version import __version__, __full_version__
    text = report_template.format(
        datalad=_format_dict([
            ('Version', __version__),
            ('Full version', __full_version__)
        ], indent=True),
        system=_format_dict([
            ('OS', ' '.join([
                os.name, pl.system(), pl.release(), pl.version()]).rstrip()),
            ('Distribution',
             ' '.join([_t2s(pl.dist()),
                       _t2s(pl.mac_ver()),
                       _t2s(pl.win32_ver())]).rstrip())
        ], indent=True),
        loc=_format_dict(get_encoding_info(), indent=True),  # , fmt="{}={!r}"),
        env=_format_dict(get_envvars_info(), fmt="{}={!r}"),
        dataset='' if not ds else dataset_template.format(
            basic=_format_dict([
                ('path', ds.path),
                ('repo', ds.repo.__class__.__name__ if ds.repo else '[NONE]'),
            ]),
            meta=_HIDDEN if not sensitive
            else json.dumps(ds_meta, indent=1) if ds_meta
            else '[no metadata]'
        ),
        externals=external_versions.dumps(preamble=None, indent='',
                                          query=True),
        extensions='\n'.join(
            ep.name for ep in iter_entry_points('datalad.extensions')),
        metaextractors=_format_dict(extractors),
        cfg=_format_dict(sorted(cfg.items(), key=lambda x: x[0]))
            if cfg else _HIDDEN,
    )
    if clipboard:
        from datalad.support.external_versions import external_versions
        external_versions.check(
            'pyperclip', msg="It is needed to be able to use clipboard")
        import pyperclip
        pyperclip.copy(text)
        ui.message("WTF information of length %s copied to clipboard"
                   % len(text))
    else:
        ui.message(text)
    yield
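# Usage sketch for the __call__() above (the body behind DataLad's `wtf`
# report command): like the other variants it is a generator, so it must be
# consumed for the report to be produced. With sensitive left at None,
# configuration and metadata are replaced by the _HIDDEN marker;
# sensitive='all' disables masking entirely, while any other truthy value
# still masks keys containing 'user', 'token', or 'passwd'. clipboard=True
# routes the text through pyperclip instead of ui.message().
for _ in __call__(dataset=None, sensitive=None, clipboard=None):
    pass  # report printed with all sensitive sections hidden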