class CLIConfig(scfg.Config): """ Make the COCO file conform to the spec. Populates inferable information such as image size, annotation area, etc. """ epilog = """ Example Usage: kwcoco conform --help kwcoco conform --src=special:shapes8 --dst conformed.json """ default = { 'src': scfg.Value(None, help=('Path to the coco dataset'), position=1), 'ensure_imgsize': scfg.Value(True, help=ub.paragraph(''' ensure each image has height and width attributes ''')), 'pycocotools_info': scfg.Value(True, help=ub.paragraph(''' ensure information needed for pycocotools ''')), 'legacy': scfg.Value(False, help='if True tries to convert to the ' 'original ms-coco format'), 'workers': scfg.Value(8, help='number of background workers for bigger checks'), 'dst': scfg.Value(None, help=('Save the modified dataset to a new file')), }
class CLIConfig(scfg.Config): """ Create COCO toydata """ default = { 'key': scfg.Value('shapes8', help='special demodata code', position=1), 'dst': scfg.Value(None, help=ub.paragraph(''' Output path for the final kwcoco json file. Note, that even when given, a data.kwcoco.json file will also be generated in a bundle_dpath. ''')), 'bundle_dpath': scfg.Value(None, help=ub.paragraph(''' Creates a bundled dataset in the specified location. If unspecified, a bundle name is generated based on the toydata config. ''')), 'use_cache': scfg.Value(True) } epilog = """
class SedCLI(scfg.Config):
    name = 'sed'
    description = 'Search and replace text in files'
    default = {
        'regexpr': scfg.Value('', position=1, help=ub.paragraph(
            '''
            The pattern to search for.
            ''')),
        'repl': scfg.Value('', position=2, help=ub.paragraph(
            '''
            The pattern to replace with.
            ''')),
        'dpath': scfg.Value(None, position=3, help=ub.paragraph(
            '''
            The directory to recursively search or a file pattern to match.
            ''')),
        'dry': scfg.Value('ask', position=4, help=ub.paragraph(
            '''
            If 1, show what would be done. If 0, execute the change. If
            "ask", show the dry run and then ask for confirmation.
            ''')),
        'include': scfg.Value(None),
        'exclude': scfg.Value(None),
        'recursive': scfg.Value(True),
        'verbose': scfg.Value(1),
    }

    @classmethod
    def main(cls, cmdline=False, **kwargs):
        from xdev import search_replace
        config = cls(cmdline=cmdline, data=kwargs)
        if config['dry'] in {'ask', 'auto'}:
            from rich.prompt import Confirm
            config['dry'] = True
            search_replace.sed(**config)
            flag = Confirm.ask('Do you want to execute this sed?')
            if flag:
                config['dry'] = False
                search_replace.sed(**config)
        else:
            search_replace.sed(**config)
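# A hedged usage sketch: calling SedCLI.main programmatically with
# cmdline=False routes kwargs through the config the same way the CLI would.
# The keyword names mirror the `default` dict above; the values here are
# purely illustrative.
SedCLI.main(cmdline=False, regexpr='Lorem', repl='Ipsum', dpath='.', dry=1)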
class FileHashConfig(scfg.Config):
    """
    The docstring will be the description in the CLI help
    """
    default = {
        'fpath': scfg.Value(None, position=1, help=ub.paragraph(
            '''
            a path to a file to hash
            ''')),
        'hasher': scfg.Value('sha1', choices=['sha1', 'sha512'],
                             help=ub.paragraph(
            '''
            the name of a hashlib hasher
            ''')),
    }
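# A minimal usage sketch for FileHashConfig (this main() is illustrative,
# not part of the original snippet): parse the config, then hash the file it
# points to with hashlib.
import hashlib

def main(cmdline=True, **kwargs):
    config = FileHashConfig(cmdline=cmdline, data=kwargs)
    hasher = hashlib.new(config['hasher'])  # 'sha1' or 'sha512' per choices
    with open(config['fpath'], 'rb') as file:
        hasher.update(file.read())
    print(hasher.hexdigest())

# e.g. main(cmdline=False, fpath='setup.py', hasher='sha512')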
def schedule_deprecation(deprecate=None, error=None, remove=None):  # nocover
    """
    Deprecation machinery to help provide users with a smoother transition
    """
    import warnings
    import ubelt as ub
    from distutils.version import LooseVersion
    current = LooseVersion(ub.__version__)
    deprecate = None if deprecate is None else LooseVersion(deprecate)
    remove = None if remove is None else LooseVersion(remove)
    error = None if error is None else LooseVersion(error)
    if deprecate is None or current >= deprecate:
        import inspect
        prev_frame = inspect.currentframe().f_back
        # the_class = prev_frame.f_locals["self"].__class__
        caller = prev_frame.f_code.co_name
        # the_method = prev_frame.f_code.co_name
        # stack = inspect.stack()
        # the_class = stack[1][0].f_locals["self"].__class__.__name__
        # the_method = stack[1][0].f_code.co_name
        # caller = str(str(inspect.currentframe())).split(' ')[-1][:-1]
        msg = ub.paragraph(
            '''
            The "{caller}" function was deprecated in {deprecate}, will cause
            an error in {error} and will be removed in {remove}. The current
            version is {current}.
            ''').format(**locals())
        if remove is not None and current >= remove:
            raise AssertionError('forgot to remove a deprecated function')
        if error is not None and current >= error:
            raise DeprecationWarning(msg)
        else:
            warnings.warn(msg, DeprecationWarning)
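# A hypothetical call site for schedule_deprecation; the function name and
# version strings are invented for illustration. This warns starting at
# 1.0.0, raises at 1.2.0, and trips the AssertionError reminder if the
# function is still around at 1.4.0.
def old_function():  # hypothetical deprecated API
    schedule_deprecation(deprecate='1.0.0', error='1.2.0', remove='1.4.0')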
class DocstrStubgenCLI(scfg.Config): name = 'doctypes' description = 'Generate Typed Stubs from Docstrings' default = { 'module': scfg.Value(None, position=1, help=ub.paragraph(''' The name of a module in the PYTHONPATH or an explicit path to that module. ''')), } @classmethod def main(cls, cmdline=False, **kwargs): from xdev.cli import docstr_stubgen import ubelt as ub config = cls(cmdline=cmdline, data=kwargs) print(f'config={config}') modname_or_path = config['module'] print(f'modname_or_path={modname_or_path}') if modname_or_path is None: raise ValueError('Must specify the module') modpath = docstr_stubgen.modpath_coerce(modname_or_path) modpath = ub.Path(modpath) generated = docstr_stubgen.generate_typed_stubs(modpath) for fpath, text in generated.items(): fpath = ub.Path(fpath) print(f'Write fpath={fpath}') fpath.write_text(text) # Generate a py.typed file to mark the package as typed if modpath.is_dir(): pytyped_fpath = (modpath / 'py.typed') print(f'touch pytyped_fpath={pytyped_fpath}') pytyped_fpath.touch()
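# A hedged programmatic invocation of the stub generator above; the module
# name is chosen only for illustration. Note this writes .pyi files next to
# the target module's sources.
DocstrStubgenCLI.main(cmdline=False, module='ubelt')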
def schedule_deprecation2(migration='', name='?', type='?', deprecate=None,
                          error=None, remove=None):  # nocover
    """
    Deprecation machinery to help provide users with a smoother transition.
    New version for kwargs, todo: rectify with function version
    """
    import warnings
    import ubelt as ub
    from distutils.version import LooseVersion
    current = LooseVersion(ub.__version__)
    deprecate = None if deprecate is None else LooseVersion(deprecate)
    remove = None if remove is None else LooseVersion(remove)
    error = None if error is None else LooseVersion(error)
    if deprecate is None or current >= deprecate:
        if migration is None:
            migration = ''
        msg = ub.paragraph(
            '''
            The "{name}" {type} was deprecated in {deprecate}, will cause
            an error in {error} and will be removed in {remove}. The current
            version is {current}. {migration}
            ''').format(**locals()).strip()
        if remove is not None and current >= remove:
            raise AssertionError('forgot to remove a deprecated function')
        if error is not None and current >= error:
            raise DeprecationWarning(msg)
        else:
            # print(msg)
            warnings.warn(msg, DeprecationWarning)
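# A hypothetical call site for the kwargs-oriented variant; the argument
# names and versions are invented for illustration.
def new_function(new_arg=None, **kwargs):  # hypothetical API
    if 'old_arg' in kwargs:
        schedule_deprecation2(
            migration='Use new_arg instead.', name='old_arg',
            type='argument', deprecate='1.0.0', error='1.2.0',
            remove='1.4.0')
        new_arg = kwargs.pop('old_arg')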
class CLIConfig(scfg.Config): """ Compute summary statistics about a COCO dataset """ default = { 'src': scfg.Value(['special:shapes8'], nargs='+', help='path to dataset', position=1), 'basic': scfg.Value(True, help='show basic stats'), 'extended': scfg.Value(True, help='show extended stats'), 'catfreq': scfg.Value(True, help='show category frequency stats'), 'boxes': scfg.Value(False, help=ub.paragraph(''' show bounding box stats in width-height format. ''')), 'annot_attrs': scfg.Value(False, help='show annotation attribute information'), 'image_attrs': scfg.Value(False, help='show image attribute information'), 'video_attrs': scfg.Value(False, help='show video attribute information'), 'embed': scfg.Value(False, help='embed into interactive shell'), } epilog = """
def _create_test_filesystem(): dpath = ub.ensure_app_cache_dir('xdev/test_search_replace') text1 = ub.paragraph(''' Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt mollit anim id est laborum. ''') text2 = ub.codeblock(''' def fib(n): a, b = 0, 1 while a < n: print(a, end=' ') a, b = b, a+b print() fib(1000) ''') text3 = ub.codeblock(''' This file contains Lorem and fib Newlines fib lorem fib ''') text4 = '' fpath1 = join(dpath, 'lorium.txt') fpath2 = join(dpath, 'fib.py') fpath3 = join(dpath, 'foo.txt') fpath4 = join(ub.ensuredir((dpath, 'subdir')), 'foo.txt') with open(fpath1, 'w') as file: file.write(text1) with open(fpath2, 'w') as file: file.write(text2) with open(fpath3, 'w') as file: file.write(text3) with open(fpath4, 'w') as file: file.write(text4) info = { 'root': dpath, 'contents': [fpath1, fpath2, fpath3], } return info
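# A hedged sketch tying this fixture to the sed wrapper used elsewhere in
# this module (keyword names assumed from SedCLI.default above): build the
# demo tree, then dry-run a search/replace over it.
from xdev import search_replace
info = _create_test_filesystem()
search_replace.sed(regexpr='Lorem', repl='Larem', dpath=info['root'], dry=True)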
class CLIConfig(scfg.Config): """ Validate that a coco file conforms to the json schema, that assets exist, and potentially fix corrupted assets by removing them. """ default = { 'src': scfg.Value(['special:shapes8'], nargs='+', help='path to datasets', position=1), 'schema': scfg.Value(True, help='If True check the json schema'), 'missing': scfg.Value(True, help='If True check if all assets (e.g. images) exist'), 'corrupted': scfg.Value(False, help='If True check the assets can be read'), 'fix': scfg.Value(None, help=ub.paragraph(''' Code indicating strategy to attempt to fix the dataset. If None, do nothing. If remove, removes missing / corrupted images. Other strategies may be added in the future. This is a hueristic and does not always work. dst must be specified. And only one src dataset can be given. ''')), 'dst': scfg.Value(None, help=ub.paragraph(''' Location to write a "fixed" coco file if a fix strategy is given. ''')) } epilog = """
class CLIConfig(scfg.Config): """ Visualize a COCO image using matplotlib or opencv, optionally writing it to disk """ epilog = """ Example Usage: kwcoco show --help kwcoco show --src=special:shapes8 --gid=1 kwcoco show --src=special:shapes8 --gid=1 --dst out.png """ default = { 'src': scfg.Value(None, help=('Path to the coco dataset'), position=1), 'gid': scfg.Value( None, help=( 'Image id to show, if unspecified the first image is shown' )), 'aid': scfg.Value( None, help=('Annotation id to show, mutually exclusive with gid')), 'dst': scfg.Value( None, help=('Save the image to the specified file. ' 'If unspecified, the image is shown with pyplot')), 'mode': scfg.Value('matplotlib', choices=['matplotlib', 'opencv'], help='method used to draw the image'), 'channels': scfg.Value(None, type=str, help=ub.paragraph(''' By default uses the default channels (usually this is rgb), otherwise specify the name of an auxiliary channels ''')), 'show_annots': scfg.Value(True, help=('Overlay annotations on dispaly')), }
def print_facts(): """ Print facts with rich """ from rich.panel import Panel from rich.console import Console fact_data = load_facts() console = Console() for fact in fact_data['facts']: text = ub.codeblock(''' {} References: {} ''').format( ub.paragraph(fact['text']), ub.indent(fact['references']), ) fact_panel = Panel(text, title='FACT') console.print(fact_panel)
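# For reference, a hedged sketch of the data shape print_facts expects,
# inferred from the loop above (the real load_facts source is not shown
# here): a dict with a 'facts' list of {'text', 'references'} records.
fact_data = {
    'facts': [
        {
            'text': 'An example fact body that ub.paragraph will re-wrap.',
            'references': 'https://example.com/source',
        },
    ],
}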
def convert_argparse(parser):
    """
    Helper for converting an existing argparse object to a scriptconfig
    definition.
    """
    import argparse
    import ubelt as ub
    value_template1 = '{dest!r}: scfg.Value({default!r}, help={help!r})'
    value_template2 = '{dest!r}: scfg.Value({default!r})'
    lines = []
    for action in parser._actions:
        if action.default == argparse.SUPPRESS:
            continue
        if action.help is None:
            value_text = value_template2.format(
                dest=action.dest,
                default=action.default,
            )
        else:
            value_text = value_template1.format(
                dest=action.dest,
                default=action.default,
                help=ub.paragraph(action.help))
        lines.append(value_text + ',')

    class_template = ub.codeblock(
        '''
        import scriptconfig as scfg

        class MyConfig(scfg.Config):
            """{desc}"""
            default = {{
        {body}
            }}
        ''')
    body = ub.indent('\n'.join(lines), ' ' * 8)
    text = class_template.format(body=body, desc=parser.description)
    print(text)
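# A usage sketch (the parser is invented for illustration): feed a small
# argparse parser through the converter and it prints a scriptconfig class
# skeleton to stdout. The built-in help action has a SUPPRESS default, so
# it is skipped by the loop above.
import argparse
parser = argparse.ArgumentParser(description='demo converter input')
parser.add_argument('--count', type=int, default=1, help='number of items')
parser.add_argument('--name', default='foo')
convert_argparse(parser)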
def render_facts():
    """
    Render facts to a latex document
    """
    import pylatex
    from pylatex.base_classes.command import Options  # NOQA
    import pyqrcode

    fact_data = load_facts()

    class MDFramed(pylatex.base_classes.Environment):
        _latex_name = 'mdframed'
        packages = [pylatex.Package('mdframed')]

    class SamePage(pylatex.base_classes.Environment):
        _latex_name = 'samepage'

    class ComposeContexts:
        def __init__(self, *contexts):
            self.contexts = contexts

        def __enter__(self):
            return [c.__enter__() for c in self.contexts]

        def __exit__(self, a, b, c):
            return [c.__exit__(a, b, c) for c in self.contexts[::-1]]

    # class NewUnicodeChar(pylatex.base_classes.CommandBase):
    #     pass

    # Don't use fontenc, lmodern, or textcomp
    # https://tex.stackexchange.com/questions/179778/xelatex-under-ubuntu
    doc = pylatex.Document('fact_document', inputenc=None, page_numbers=False,
                           indent=False, fontenc=None, lmodern=False,
                           textcomp=False)

    doc.preamble.append(pylatex.Package('graphicx'))  # For PNG images
    # doc.preamble.append(pylatex.Package('svg', options=dict(inkscapearea='page')))
    # doc.preamble.append(pylatex.Command('title', 'Facts'))
    # doc.preamble.append(pylatex.Command('author', 'Anonymous author'))
    # doc.preamble.append(pylatex.Command('date', pylatex.NoEscape(r'\today')))
    # doc.append(pylatex.NoEscape(r'\maketitle'))
    # doc.preamble.append(pylatex.Package('newunicodechar'))
    # doc.preamble.append(pylatex.NoEscape(r'\newunicodechar{±}{$\pm$}'))
    # doc.append(pylatex.NoEscape('13.787±0.020'))
    # print(doc.dumps())
    # doc.generate_pdf(clean_tex=False, compiler='xelatex')
    # return

    QR_REFERENCE = True
    stop_flag = 0

    image_dpath = ub.Path('~/misc/facts/images').expand().ensuredir()

    for fact in ub.ProgIter(fact_data['facts']):
        contexts = ComposeContexts(
            # doc.create(SamePage()),
            doc.create(MDFramed()),
            doc.create(pylatex.MiniPage(width=r'0.99\textwidth')))
        # with doc.create(pylatex.MiniPage(width=r'\textwidth')):
        with contexts:
            doc.append(pylatex.NoEscape(r'\paragraph{Fact:}'))
            text = ub.paragraph(fact['text'])
            if r'\[' in text:
                found = list(re.finditer(
                    '(' + re.escape(r'\[') + '|' + re.escape(r'\]') + ')',
                    text))
                prev_x = 0
                for a, b in ub.iter_window(found, step=2):
                    part = text[prev_x:a.span()[0]]
                    doc.append(part)
                    ax, bx = a.span()[1], b.span()[0]
                    part = pylatex.NoEscape(r'$' + text[ax:bx] + r'$ ')
                    doc.append(part)
                    prev_x = b.span()[1]
                part = text[prev_x:]
                doc.append(part)
            else:
                # if '$' in text:
                #     parts = text.split('$')
                #     for idx, p in enumerate(parts):
                #         if idx % 2 == 1:
                #             doc.append(pylatex.NoEscape('$' + p + '$ '))
                #         else:
                #             doc.append(p)
                # else:
                doc.append(text)

            if QR_REFERENCE:
                doc.append('\n')
                num_refs = 0
                for refline in fact['references'].split('\n'):
                    if refline.startswith('http'):
                        found = refline
                        image_fname = ub.hash_data(found, base='abc')[0:16] + '.png'
                        image_fpath = image_dpath / image_fname
                        if not image_fpath.exists():
                            # pyqrcode.create(found).svg(fpath, scale=6)
                            pyqrcode.create(found).png(str(image_fpath), scale=2)
                        doc.append(pylatex.NoEscape(
                            r'\includegraphics[width=90px]{' + str(image_fpath) + '}'))
                        # doc.append(pylatex.NoEscape(r'\includesvg[width=120px]{' + fpath + '}'))
                        num_refs += 1
                        if num_refs > 3:
                            break
            else:
                doc.append(pylatex.NoEscape(r'\paragraph{References:}'))
                with doc.create(pylatex.Itemize()) as itemize:
                    for refline in fact['references'].split('\n'):
                        if refline:
                            refline = refline.strip()
                            itemize.add_item(refline)

        doc.append(pylatex.NoEscape(r'\bigskip'))
        if stop_flag:
            break

    print(doc.dumps())
    print('generate pdf')
    doc.generate_pdf(
        str(ub.Path('~/misc/facts/fact_document').expand()), clean_tex=True)
def main():
    # TODO: find a better place for root
    ROOT = join(os.getcwd())
    # ROOT = '.'
    os.chdir(ROOT)

    NAME = 'pyhesaff'
    VERSION = '0.1.2'
    DOCKER_TAG = '{}-{}'.format(NAME, VERSION)
    QUAY_REPO = 'quay.io/erotemic/manylinux-for'
    DOCKER_URI = '{QUAY_REPO}:{DOCKER_TAG}'.format(**locals())

    dockerfile_fpath = join(ROOT, 'Dockerfile')
    # This docker code is very specific for building linux binaries.
    # We will need to do a bit of refactoring to handle OSX and windows.
    # But the goal is to get at least one OS working end-to-end.
    """
    Notes:
        docker run --rm -it quay.io/pypa/manylinux2010_x86_64 /bin/bash
        ---
        ls /opt/python
    """
    BASE_IMAGE = 'quay.io/pypa/manylinux2010_x86_64'
    docker_code = ub.codeblock(
        f'''
        FROM {BASE_IMAGE}

        RUN yum install lz4-devel -y

        RUN MB_PYTHON_TAG=cp27-cp27m && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m pip install setuptools pip virtualenv -U && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m virtualenv ./venv-$MB_PYTHON_TAG && \
            source ./venv-$MB_PYTHON_TAG/bin/activate && \
            pip install scikit-build cmake ninja

        RUN MB_PYTHON_TAG=cp27-cp27mu && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m pip install setuptools pip virtualenv -U && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m virtualenv ./venv-$MB_PYTHON_TAG && \
            source ./venv-$MB_PYTHON_TAG/bin/activate && \
            pip install scikit-build cmake ninja

        RUN MB_PYTHON_TAG=cp35-cp35m && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m pip install setuptools pip virtualenv -U && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m virtualenv ./venv-$MB_PYTHON_TAG && \
            source ./venv-$MB_PYTHON_TAG/bin/activate && \
            pip install scikit-build cmake ninja

        RUN MB_PYTHON_TAG=cp36-cp36m && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m pip install setuptools pip virtualenv -U && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m virtualenv ./venv-$MB_PYTHON_TAG && \
            source ./venv-$MB_PYTHON_TAG/bin/activate && \
            pip install scikit-build cmake ninja

        RUN MB_PYTHON_TAG=cp37-cp37m && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m pip install setuptools pip virtualenv -U && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m virtualenv ./venv-$MB_PYTHON_TAG && \
            source ./venv-$MB_PYTHON_TAG/bin/activate && \
            pip install scikit-build cmake ninja

        RUN MB_PYTHON_TAG=cp38-cp38 && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m pip install setuptools pip virtualenv -U && \
            /opt/python/$MB_PYTHON_TAG/bin/python -m virtualenv ./venv-$MB_PYTHON_TAG && \
            source ./venv-$MB_PYTHON_TAG/bin/activate && \
            pip install scikit-build cmake ninja
        ''')

    docker_code2 = '\n\n'.join(
        [ub.paragraph(p) for p in docker_code.split('\n\n')])
    try:
        print(ub.color_text('\n--- DOCKER CODE ---', 'white'))
        print(ub.highlight_code(docker_code2, 'docker'))
        print(ub.color_text('--- END DOCKER CODE ---\n', 'white'))
    except Exception:
        pass
    with open(dockerfile_fpath, 'w') as file:
        file.write(docker_code2)

    docker_build_cli = ' '.join([
        'docker', 'build',
        '--tag {}'.format(DOCKER_TAG),
        '-f {}'.format(dockerfile_fpath),
        '.',
    ])
    print('docker_build_cli = {!r}'.format(docker_build_cli))
    if ub.argflag('--dry'):
        print('DRY RUN')
        print('WOULD RUN')
        print(docker_build_cli)
    else:
        info = ub.cmd(docker_build_cli, verbose=3, shell=True)
        if info['ret'] != 0:
            print(ub.color_text('\n--- FAILURE ---', 'red'))
            print('Failed command:')
            print(info['command'])
            print(info['err'])
            print('NOTE: sometimes rerunning the command manually works')
            raise Exception(
                'Building docker failed with exit code {}'.format(info['ret']))
        else:
            print(ub.color_text('\n--- SUCCESS ---', 'green'))

    print(ub.highlight_code(ub.codeblock(
        r'''
        # Finished creating the docker image.
        # To test / export / publish you can do something like this:

        # Test that we can get a bash terminal
        docker run -it {DOCKER_TAG} /bin/bash

        # Create a tag for the docker image
        docker tag {DOCKER_TAG} {DOCKER_URI}

        # Export your docker image to a file
        docker save -o {ROOT}/{DOCKER_TAG}.docker.tar {DOCKER_TAG}

        # Login to a docker registry (we are using quay)

        # In some cases this works,
        docker login

        # But you may need to specify secret credentials
        load_secrets
        echo "QUAY_USERNAME = $QUAY_USERNAME"
        docker login -u $QUAY_USERNAME -p $QUAY_PASSWORD quay.io
        unload_secrets

        # Upload the docker image to quay.io
        docker push {DOCKER_URI}
        ''').format(
            NAME=NAME,
            ROOT=ROOT,
            DOCKER_TAG=DOCKER_TAG,
            DOCKER_URI=DOCKER_URI,
        ), 'bash'))

    PUBLISH = 0
    if PUBLISH:
        cmd1 = 'docker tag {DOCKER_TAG} {DOCKER_URI}'.format(**locals())
        cmd2 = 'docker push {DOCKER_URI}'.format(**locals())
        print('-- <push cmds> ---')
        print(cmd1)
        print(cmd2)
        print('-- </push cmds> ---')
def git_sync(host, remote=None, message='wip',
             forward_ssh_agent=False, dry=False, force=False):
    """
    Commit any changes in the current working directory, ssh into a remote
    machine, and then pull those changes.

    Args:
        host (str):
            The name of the host to sync to: e.g. [email protected]

        remote (str):
            The git remote used to push and pull from

        message (str, default='wip'):
            Default git commit message.

        forward_ssh_agent (bool):
            Enable forwarding of the ssh authentication agent connection

        force (bool, default=False):
            if True does a forced push and additionally forces the remote to
            do a hard reset to the remote state.

        dry (bool, default=False):
            Executes dry run mode.

    Example:
        >>> host = '[email protected]'
        >>> remote = 'origin'
        >>> message = 'this is the commit message'
        >>> git_sync(host, remote, message, dry=True)
        git commit -am "this is the commit message"
        git push origin
        ssh [email protected] "cd ... && git pull origin"
    """
    cwd = _getcwd()
    relcwd = relpath(cwd, expanduser('~'))

    """
    # How to check if a branch exists
    git branch --list ${branch}

    # Get current branch name
    if [[ "$(git rev-parse --abbrev-ref HEAD)" != "{branch}" ]]; then
        git checkout {branch}
    fi

    # git rev-parse --abbrev-ref HEAD

    if [[ -z $(git branch --list ${branch}) ]]; then
    else
    fi
    """
    # $(git branch --list ${branch})

    # Assume the remote directory is the same as the local one (relative to home)
    remote_cwd = relcwd

    # Build one command to execute on the remote
    remote_parts = [
        'cd {remote_cwd}',
    ]

    # Get branch name from the local
    local_branch_name = ub.cmd('git rev-parse --abbrev-ref HEAD')['out'].strip()
    # Assume the branches are the same between local / remote
    remote_branch_name = local_branch_name

    if force:
        if remote is None:
            # FIXME: might not work in all cases
            remote = git_default_push_remote_name()

        # Force the remote to the state of the remote
        remote_checkout_branch_force = ub.paragraph(
            '''
            git fetch {remote};
            if [[ "$(git rev-parse --abbrev-ref HEAD)" != "{branch}" ]]; then
                git checkout {branch};
            fi;
            git reset {remote}/{branch} --hard
            ''').format(remote=remote, branch=remote_branch_name)

        remote_parts += [
            'git fetch {remote}',
            remote_checkout_branch_force.replace('"', r'\"'),
        ]
    else:
        # ensure the remote is on the right branch
        # (this assumes no conflicts and will fail if anything bad
        #  might happen)
        remote_checkout_branch_simple = ub.paragraph(
            r'''
            if [[ "$(git rev-parse --abbrev-ref HEAD)" != "{branch}" ]]; then
                git checkout {branch};
            fi
            ''').format(branch=local_branch_name)

        remote_parts += [
            'git pull {remote}' if remote else 'git pull',
            remote_checkout_branch_simple.replace('"', r'\"'),
        ]

    remote_part = ' && '.join(remote_parts)

    # Build one command to execute locally
    commit_command = 'git commit -am "{}"'.format(message)

    push_args = ['git push']
    if remote:
        push_args.append('{remote}')
    if force:
        push_args.append('--force')
    push_command = ' '.join(push_args)

    sync_command = 'ssh {ssh_flags} {host} "' + remote_part + '"'

    local_parts = [
        commit_command,
        push_command,
        sync_command,
    ]

    ssh_flags = []
    if forward_ssh_agent:
        ssh_flags += ['-A']
    ssh_flags = ' '.join(ssh_flags)

    kw = dict(host=host, remote_cwd=remote_cwd, remote=remote,
              ssh_flags=ssh_flags)

    for part in local_parts:
        command = part.format(**kw)
        if not dry:
            result = ub.cmd(command, verbose=2)
            retcode = result['ret']
            if command.startswith('git commit') and retcode == 1:
                pass
            elif retcode != 0:
                print('git-sync cannot continue. retcode={}'.format(retcode))
                break
        else:
            print(command)
'population', 'gdp', 'primary_energy_consumption', 'consumption_co2', ] # header_info_fpath = ub.grabdata('https://github.com/owid/co2-data/raw/master/owid-co2-codebook.csv') from datetime import timedelta header_info_fpath = ub.grabdata('https://github.com/owid/co2-data/blob/master/owid-co2-codebook.csv', expires=timedelta(days=30)) # header_info_fpath = ub.grabdata('https://nyc3.digitaloceanspaces.com/owid-public/data/co2/owid-co2-data.csv') header_info = pd.read_csv(header_info_fpath).set_index('column').drop('source', axis=1) column_descriptions = header_info.loc[columns_of_interest] key_to_description = {} for key, row in column_descriptions.iterrows(): key_to_description[key] = ub.paragraph(row['description']).split('. ') print('key_to_description = {}'.format(ub.repr2(key_to_description, nl=2))) print(column_descriptions.to_string()) # Download carbon emission dataset # https://github.com/owid/co2-data # https://github.com/owid/co2-data/blob/master/owid-co2-codebook.csv # Annual production-based emissions of carbon dioxide (CO2), measured in # million tonnes. This is based on territorial emissions, which do not # account for emissions embedded in traded goods. # owid_co2_data_fpath = ub.grabdata('https://github.com/owid/co2-data/raw/master/owid-co2-data.json') owid_co2_data_fpath = ub.grabdata('https://nyc3.digitaloceanspaces.com/owid-public/data/co2/owid-co2-data.json', expires=timedelta(days=30)) with open(owid_co2_data_fpath, 'r') as file: co2_data = json.load(file) us_co2_data = co2_data['United States']['data']
class TemplateConfig(scfg.Config):
    default = {
        'repodir': scfg.Value(None, help='path to the new or existing repo', required=True),
        'repo_name': scfg.Value(None, help='repo name'),
        'setup_secrets': scfg.Value(False),
        'tags': scfg.Value([], nargs='*', help=ub.paragraph(
            '''
            Tags modify what parts of the template are used.
            Valid tags are:
                "binpy" - do we build binpy wheels?
                "graphics" - do we need opencv / opencv-headless?
                "erotemic" - this is an erotemic repo
                "kitware" - this is a kitware repo
                "pyutils" - this is a pyutils repo
                "purepy" - this is a pure python repo
            ''')),
    }

    def normalize(self):
        if self['tags']:
            if isinstance(self['tags'], str):
                self['tags'] = [self['tags']]
            new = []
            for t in self['tags']:
                new.extend([p.strip() for p in t.split(',')])
            self['tags'] = new

    @classmethod
    def main(cls, cmdline=0, **kwargs):
        """
        Ignore:
            repodir = ub.Path('~/code/pyflann_ibeis').expand()
            kwargs = {
                'repodir': repodir,
                'tags': ['binpy', 'erotemic', 'github'],
            }
            cmdline = 0

        Example:
            repodir = ub.Path.appdir('pypkg/demo/my_new_repo')
            import sys, ubelt
            sys.path.append(ubelt.expandpath('~/misc/templates/PYPKG'))
            from apply_template import *  # NOQA
            kwargs = {
                'repodir': repodir,
            }
            cmdline = 0
        """
        import ubelt as ub
        config = TemplateConfig(cmdline=cmdline, data=kwargs)

        repo_dpath = ub.Path(config['repodir'])
        repo_dpath.ensuredir()

        IS_NEW_REPO = 0

        create_new_repo_info = ub.codeblock(
            '''
            # TODO:
            # At least instructions on how to create a new repo, or maybe an
            # API call
            https://github.com/new
            git init
            ''')
        print(create_new_repo_info)

        if IS_NEW_REPO:
            # TODO: git init
            # TODO: github or gitlab register
            pass

        self = TemplateApplier(config)
        self.setup().gather_tasks()
        self.setup().apply()

        if config['setup_secrets']:
            setup_secrets_fpath = self.repo_dpath / 'dev/setup_secrets.sh'
            if 'erotemic' in self.config['tags']:
                environ_export = 'setup_package_environs_github_erotemic'
                upload_secret_cmd = 'upload_github_secrets'
            elif 'pyutils' in self.config['tags']:
                environ_export = 'setup_package_environs_github_pyutils'
                upload_secret_cmd = 'upload_github_secrets'
            elif 'kitware' in self.config['tags']:
                environ_export = 'setup_package_environs_gitlab_kitware'
                upload_secret_cmd = 'upload_gitlab_repo_secrets'
            else:
                raise Exception('no known secret setup for tags: {!r}'.format(
                    self.config['tags']))
            import cmd_queue
            script = cmd_queue.Queue.create()
            script.submit(ub.codeblock(
                f'''
                cd {self.repo_dpath}
                source {setup_secrets_fpath}
                {environ_export}
                load_secrets
                export_encrypted_code_signing_keys
                git commit -am "Updated secrets"
                {upload_secret_cmd}
                '''))
            script.rprint()
def ensure(repo, dry=False):
    """
    Ensure that the repo is checked out on your local machine, that the
    correct branch is checked out, and the upstreams are targeting the
    correct remotes.
    """
    if repo.verbose > 0:
        if dry:
            repo.debug(ub.color_text('Checking {}'.format(repo), 'blue'))
        else:
            repo.debug(ub.color_text('Ensuring {}'.format(repo), 'blue'))

    if not exists(repo.dpath):
        repo.debug('NEED TO CLONE {}: {}'.format(repo, repo.url))
        if dry:
            return
        repo.ensure_clone()

    repo._assert_clean()

    # Ensure we have the right remote
    remote = repo._registered_remote(dry=dry)

    if remote is not None:
        try:
            if not remote.exists():
                raise IndexError
            else:
                repo.debug('The requested remote={} name exists'.format(remote))
        except IndexError:
            repo.debug('WARNING: remote={} does not exist'.format(remote))
        else:
            if remote.exists():
                repo.debug('Requested remote does exist')
                remote_branchnames = [ref.remote_head for ref in remote.refs]
                if repo.branch not in remote_branchnames:
                    repo.info('Branch name not found in local remote. Attempting to fetch')
                    if dry:
                        repo.info('dry run, not fetching')
                    else:
                        repo._cmd('git fetch {}'.format(remote.name))
                        repo.info('Fetch was successful')
            else:
                repo.debug('Requested remote does NOT exist')

        # Ensure the remote points to the right place
        if repo.url not in list(remote.urls):
            repo.debug(ub.paragraph(
                '''
                WARNING: The requested url={} disagrees with remote urls={}
                ''').format(repo.url, list(remote.urls)))
            if dry:
                repo.info('Dry run, not updating remote url')
            else:
                repo.info('Updating remote url')
                repo._cmd('git remote set-url {} {}'.format(repo.remote, repo.url))

    # Ensure we are on the right branch
    try:
        active_branch_name = repo.pygit.active_branch.name
    except TypeError:
        # We may be on a tag, not a branch
        candidates = [tag for tag in repo.pygit.tags
                      if tag.name == repo.branch]
        if len(candidates) != 1:
            raise
        else:
            # branch is actually a tag
            assert len(candidates) == 1
            want_tag = candidates[0]
            is_on_correct_commit = (
                repo.pygit.head.commit.hexsha == want_tag.commit.hexsha)
            ref_is_tag = True
    else:
        ref_is_tag = False
        tracking_branch = repo.pygit.active_branch.tracking_branch()
        is_on_correct_commit = repo.branch == active_branch_name

    if not is_on_correct_commit:
        repo.debug('NEED TO SET BRANCH TO {} for {}'.format(repo.branch, repo))
        if dry:
            repo.info('Dry run, not setting branch')
        else:
            try:
                repo._cmd('git checkout {}'.format(repo.branch))
            except ShellException:
                repo.debug('Checkout failed. Branch name might be ambiguous. Trying again')
                try:
                    repo._cmd('git fetch {}'.format(remote.name))
                    repo._cmd('git checkout -b {} {}/{}'.format(
                        repo.branch, repo.remote, repo.branch))
                except ShellException:
                    raise Exception('does the branch exist on the remote?')

    if not ref_is_tag:
        if tracking_branch is None or tracking_branch.remote_name != repo.remote:
            repo.debug('NEED TO SET UPSTREAM FOR {}'.format(repo))

            try:
                remote = repo.pygit.remotes[repo.remote]
                if not remote.exists():
                    raise IndexError
            except IndexError:
                repo.debug('WARNING: remote={} does not exist'.format(remote))
            else:
                if remote.exists():
                    remote_branchnames = [ref.remote_head for ref in remote.refs]
                    if repo.branch not in remote_branchnames:
                        if dry:
                            repo.info('Branch name not found in local remote. '
                                      'Dry run, use ensure to attempt to fetch')
                        else:
                            repo.info('Branch name not found in local remote. '
                                      'Attempting to fetch')
                            repo._cmd('git fetch {}'.format(repo.remote))

                            remote_branchnames = [ref.remote_head
                                                  for ref in remote.refs]
                            if repo.branch not in remote_branchnames:
                                raise Exception('Branch name still does not exist')

                    if not dry:
                        repo._cmd(
                            'git branch --set-upstream-to={remote}/{branch} {branch}'.format(
                                remote=repo.remote, branch=repo.branch))
                    else:
                        repo.info('Would attempt to set upstream')

    # Check if the current head is tagged
    head_tags = [tag for tag in repo.pygit.tags
                 if tag.commit.hexsha == repo.pygit.head.commit.hexsha]

    # Print some status
    try:
        repo.debug(' * branch = {} -> {}'.format(
            repo.pygit.active_branch.name,
            repo.pygit.active_branch.tracking_branch(),
        ))
    except Exception:
        pass

    if head_tags:
        repo.debug(' * head_tags = {}'.format(head_tags))
def _run_consistency_test(sensor, sensor_alias, start_date, end_date, geojson_geom, expected_urls, expected_dates ): from fels.utils import FELS_DEFAULT_OUTPUTDIR outputcatalogs = FELS_DEFAULT_OUTPUTDIR start_date_iso = start_date.isoformat() end_date_iso = end_date.isoformat() wkt_geometry = geometry.shape(geojson_geom).wkt geojson_geom_text = json.dumps(geojson_geom) cloudcover = 30 # Test python invocation python_result1 = fels.run_fels( None, sensor, start_date_iso, end_date_iso, cloudcover=cloudcover, outputcatalogs=outputcatalogs, output='.', geometry=wkt_geometry, latest=True, list=True) # python with friendly aliases python_result2 = fels.run_fels( None, sensor_alias, start_date, end_date, cloudcover=cloudcover, outputcatalogs=outputcatalogs, output='.', geometry=geojson_geom, latest=True, list=True) python_dates_result = fels.run_fels( None, sensor_alias, start_date, end_date, cloudcover=cloudcover, outputcatalogs=outputcatalogs, output='.', geometry=geojson_geom, latest=True, dates=True, list=True) assert python_dates_result == expected_dates assert len(python_result1) == len(expected_urls), ( 'we expect {} results'.format(len(expected_urls))) assert python_result1 == python_result2 fmtdict = dict( sensor=sensor, sensor_alias=sensor_alias, start_date_iso=start_date_iso, end_date_iso=end_date_iso, outputcatalogs=outputcatalogs, cloudcover=cloudcover) # Test CLI invocation fmtdict1 = fmtdict.copy() fmtdict1['geometry'] = wkt_geometry cli_result1 = ub.cmd(ub.paragraph( ''' fels {sensor} {start_date_iso} {end_date_iso} -c {cloudcover} -o . -g '{geometry}' --latest --list --outputcatalogs {outputcatalogs} ''').format(**fmtdict1), verbose=3) # The last lines of the CLI output should be our expected results results = cli_result1['out'].strip().split('\n')[-(len(expected_urls) + 1):] assert not results[0].startswith('http') assert results[1:] == expected_urls fmtdict2 = fmtdict.copy() fmtdict2['geometry'] = geojson_geom_text cli_result2 = ub.cmd(ub.paragraph( ''' fels {sensor_alias} {start_date_iso} {end_date_iso} -c {cloudcover} -o . -g '{geometry}' --latest --list --outputcatalogs {outputcatalogs} ''').format(**fmtdict2), verbose=3) # The last lines of the CLI output should be our expected results results = cli_result2['out'].strip().split('\n')[-(len(expected_urls) + 1):] assert not results[0].startswith('http') assert results[1:] == expected_urls
def _rectify_class(arg, kw, lookup=None):
    """
    Helps normalize and serialize hyperparameter inputs.

    Args:
        arg (Tuple[type, dict] | type | object):
            Either a (cls, initkw) tuple, a class, or an instance.
            It is recommended that you don't pass an instance.

        kw (Dict[str, object]):
            augments initkw if arg is in tuple form otherwise becomes initkw

        lookup (func | None):
            transforms arg or arg[0] into the class type

    Returns:
        Dict: containing
            'cls' (type): the type of the object
            'cls_kw' (Dict): the initialization keyword args
            'instance': (object): None or the actual instantiated object

        We will use this cls and cls_kw to construct an instance unless one
        is already specified.

    Example:
        >>> # The ideal case is that we have a cls, initkw tuple
        >>> import netharn as nh
        >>> kw = {'lr': 0.1}
        >>> cls = torch.optim.SGD
        >>> rectified1 = _rectify_class(cls, kw.copy())
        >>> print('rectified1 = {!r}'.format(rectified1))
        >>> # But we can also take an instance of the object, however, you must
        >>> # now make sure to specify the _initkw attribute.
        >>> model = nh.models.ToyNet2d()
        >>> self = cls(model.parameters(), **kw)
        >>> self._initkw = kw
        >>> rectified2 = _rectify_class(self, {})
        >>> print('rectified2 = {!r}'.format(rectified2))
    """
    if lookup is None:
        lookup = ub.identity

    if arg is None:
        cls = None
        cls_kw = {}
        instance = None
    else:
        instance = None

        # Extract the part that identifies the class we care about
        if isinstance(arg, tuple):
            cls_key = arg[0]
            kw2 = arg[1]
        else:
            cls_key = arg
            kw2 = {}

        cls = lookup(cls_key)
        if not isinstance(cls, type):
            # The lookup returned an instance; recover its class
            cls = cls.__class__
            instance = None

        if isinstance(cls_key, cls):
            # We were passed an actual instance of the class. (for shame)
            instance = cls_key

        cls_kw = util_inspect.default_kwargs(cls).copy()
        if instance is not None:
            # Try and introspect the initkw, which is needed for model
            # deployment and proper hyperparam tracking
            for key in cls_kw:
                if hasattr(instance, key):
                    cls_kw[key] = getattr(instance, key)
            if hasattr(instance, '_initkw'):
                # Special attribute that allows safe instance specification
                # and suppresses the instance warning.
                cls_kw.update(instance._initkw)
            else:
                import warnings
                warnings.warn(ub.paragraph(  # _initkw warning
                    '''
                    netharn.HyperParams objects are expected to be specified
                    as (type, kw) tuples, but we received a preconstructed
                    instance. This is only ok if you know what you are doing.
                    To disable this warning set the _initkw instance
                    attribute to the correct keyword arguments needed to
                    reconstruct this class. Offending data is
                    arg={!r}, kw={!r}
                    ''').format(arg, kw))

        # Update with explicitly specified information
        cls_kw.update(kw2)
        for key in cls_kw:
            if key in kw:
                cls_kw[key] = kw.pop(key)

    cls_kw = util_json.ensure_json_serializable(cls_kw)
    rectified = {
        'cls': cls,
        'cls_kw': cls_kw,
        'instance': instance,
    }
    return rectified
        'id': INTEGER,
        'name': STRING,
        'caption': STRING,
    },
    required=['id', 'name'],
    title='VIDEO'
)

CHANNELS = STRING(title='CHANNEL_SPEC', description='experimental. todo: refine')

IMAGE = OBJECT(OrderedDict((
    ('id', INTEGER),
    ('file_name', PATH(description=ub.paragraph(
        '''
        A relative or absolute path to the main image file. If this
        file_name is unspecified, then a name and auxiliary file paths must
        be specified. This should only be unspecified for multispectral
        observations that don't have a clear default file.
        ''')) | NULL),
    ('name', STRING(description=ub.paragraph(
        '''
        Unique name for the image. If unspecified the file_name should be
        used as the default value for the name property.
        ''')) | NULL),
    ('width', INTEGER),
    ('height', INTEGER),
    # Extension
def soft_fill(self, image, coord_axes=None, radius=5):
    """
    Used for drawing keypoint truth in heatmaps

    Args:
        coord_axes (Tuple):
            specify which image axes each coordinate dim corresponds to.
            For 2D images, if you are storing r/c data, set to [0,1], if
            you are storing x/y data, set to [1,0].

            In other words the i-th entry in coord_axes specifies which
            row-major spatial dimension the i-th column of a coordinate
            corresponds to. The index is the coordinate dimension and the
            value is the axes dimension.

    Returns:
        ndarray: image with coordinates rasterized on it

    References:
        https://stackoverflow.com/questions/54726703/generating-keypoint-heatmaps-in-tensorflow

    Example:
        >>> from kwimage.structs.coords import *  # NOQA
        >>> s = 64
        >>> self = Coords.random(10, meta={'shape': (s, s)}).scale(s)
        >>> # Put points on edges to verify "edge cases"
        >>> self.data[1] = [0, 0]        # top left
        >>> self.data[2] = [s, s]        # bottom right
        >>> self.data[3] = [0, s + 10]   # bottom left
        >>> self.data[4] = [-3, s // 2]  # middle left
        >>> self.data[5] = [s + 1, -1]   # top right
        >>> # Put points in the middle to verify overlap blending
        >>> self.data[6] = [32.5, 32.5]  # middle
        >>> self.data[7] = [34.5, 34.5]  # middle
        >>> fill_value = 1
        >>> coord_axes = [1, 0]
        >>> radius = 10
        >>> image1 = np.zeros((s, s))
        >>> self.soft_fill(image1, coord_axes=coord_axes, radius=radius)
        >>> radius = 3.0
        >>> image2 = np.zeros((s, s))
        >>> self.soft_fill(image2, coord_axes=coord_axes, radius=radius)
        >>> # xdoc: +REQUIRES(--show)
        >>> # xdoc: +REQUIRES(module:kwplot)
        >>> import kwplot
        >>> kwplot.autompl()
        >>> kwplot.imshow(image1, pnum=(1, 2, 1))
        >>> kwplot.imshow(image2, pnum=(1, 2, 2))
    """
    import scipy.stats

    if radius <= 0:
        raise ValueError('radius must be positive')

    # OH! How I HATE the squeeze function!
    SCIPY_STILL_USING_SQUEEZE_FUNC = True

    blend_mode = 'maximum'
    image_ndims = len(image.shape)
    for pt in self.data:
        # Find a grid of coordinates on the image to fill for this point
        low = np.floor(pt - radius).astype(int)
        high = np.ceil(pt + radius).astype(int)
        grid = np.dstack(np.mgrid[tuple(
            slice(s, t) for s, t in zip(low, high))])

        # Flatten the grid into a list of coordinates to be filled
        rows_of_coords = grid.reshape(-1, grid.shape[-1])

        # Remove grid coordinates that are out of bounds
        lower_bound = np.array([0, 0])
        upper_bound = np.array([image.shape[i] for i in coord_axes])[None, :]
        in_bounds_flags1 = (rows_of_coords >= lower_bound).all(axis=1)
        rows_of_coords = rows_of_coords[in_bounds_flags1]
        in_bounds_flags2 = (rows_of_coords < upper_bound).all(axis=1)
        rows_of_coords = rows_of_coords[in_bounds_flags2]

        if len(rows_of_coords) > 0:
            # Create an index into the image and insert the columns of
            # coordinates to fill into the appropriate dimensions
            img_index = [slice(None)] * image_ndims
            for axes_idx, coord_col in zip(coord_axes, rows_of_coords.T):
                img_index[axes_idx] = coord_col
            img_index = tuple(img_index)

            # Note: Do we just use kwimage.gaussian_patch for the 2D case
            # instead?
            # TODO: is there a better method for making a "brush stroke"?
            # cov = 0.3 * ((extent - 1) * 0.5 - 1) + 0.8
            cov = radius
            rv = scipy.stats.multivariate_normal(mean=pt, cov=cov)
            new_values = rv.pdf(rows_of_coords)

            # the mean will be the maximum values of the normal
            # distribution, normalize by that.
max_val = float(rv.pdf(pt)) if SCIPY_STILL_USING_SQUEEZE_FUNC: # If multivariate_normal was implemented right we would not # need to check for scalar values # See: https://github.com/scipy/scipy/issues/7689 if len(rows_of_coords) == 1: if len(new_values.shape) != 0: import warnings warnings.warn( ub.paragraph(''' Scipy fixed the bug in multivariate_normal! We can remove this stupid hack! ''')) else: # Ensure new_values is always a list of scalars new_values = new_values[None] new_values = new_values / max_val # Blend the sampled values onto the existing pixels prev_values = image[img_index] # HACK: wont generalize? if len(prev_values.shape) != len(new_values.shape): new_values = new_values[:, None] if blend_mode == 'maximum': blended = np.maximum(prev_values, new_values) else: raise KeyError(blend_mode) # Draw the blended pixels inplace image[img_index] = blended return image
def _lookup_deprecated_attribute(key): import ubelt as ub # mapping from module name to the attributes that were moved there. refactored = { 'kwarray': [ 'ArrayAPI', 'DataFrameArray', 'DataFrameLight', 'FlatIndexer', 'LocLight', 'RunningStats', 'apply_grouping', 'arglexmax', 'argmaxima', 'argminima', 'atleast_nd', 'boolmask', 'ensure_rng', 'group_consecutive', 'group_consecutive_indices', 'group_indices', 'group_items', 'isect_flags', 'iter_reduce_ufunc', 'maxvalue_assignment', 'mincost_assignment', 'mindist_assignment', 'one_hot_embedding', 'one_hot_lookup', 'random_combinations', 'random_product', 'seed_global', 'setcover', 'shuffle', 'standard_normal', 'standard_normal32', 'standard_normal64', 'stats_dict', 'uniform', 'uniform32' ], 'kwimage': [ 'BASE_COLORS', 'Boxes', 'CSS4_COLORS', 'Color', 'Coords', 'Detections', 'Heatmap', 'Mask', 'MaskList', 'MultiPolygon', 'Points', 'PointsList', 'Polygon', 'PolygonList', 'Segmentation', 'SegmentationList', 'TABLEAU_COLORS', 'TORCH_GRID_SAMPLE_HAS_ALIGN', 'XKCD_COLORS', 'add_homog', 'atleast_3channels', 'available_nms_impls', 'convert_colorspace', 'daq_spatial_nms', 'decode_run_length', 'draw_boxes_on_image', 'draw_clf_on_image', 'draw_line_segments_on_image', 'draw_text_on_image', 'draw_vector_field', 'encode_run_length', 'ensure_alpha_channel', 'ensure_float01', 'ensure_uint255', 'fourier_mask', 'gaussian_patch', 'grab_test_image', 'grab_test_image_fpath', 'imread', 'imresize', 'imscale', 'imwrite', 'load_image_shape', 'make_channels_comparable', 'make_heatmask', 'make_orimask', 'make_vector_field', 'non_max_supression', 'normalize', 'num_channels', 'overlay_alpha_images', 'overlay_alpha_layers', 'radial_fourier_mask', 'remove_homog', 'rle_translate', 'smooth_prob', 'stack_images', 'stack_images_grid', 'subpixel_accum', 'subpixel_align', 'subpixel_getvalue', 'subpixel_maximum', 'subpixel_minimum', 'subpixel_set', 'subpixel_setvalue', 'subpixel_slice', 'subpixel_translate', 'warp_image', 'warp_points', 'warp_tensor', ], 'kwplot': [ 'BackendContext', 'Color', 'PlotNums', 'autompl', 'autoplt', 'distinct_colors', 'distinct_markers', 'draw_boxes', 'draw_boxes_on_image', 'draw_clf_on_image', 'draw_line_segments', 'draw_points', 'draw_text_on_image', 'ensure_fnum', 'figure', 'imshow', 'legend', 'make_conv_images', 'make_heatmask', 'make_legend_img', 'make_orimask', 'make_vector_field', 'multi_plot', 'next_fnum', 'plot_convolutional_features', 'plot_matrix', 'plot_surface3d', 'set_figtitle', 'set_mpl_backend', 'show_if_requested', ], 'ubelt': [ 'CacheStamp', ] } ERROR_ON_ACCESS = True for modname, attrs in refactored.items(): if key in attrs: text = ub.paragraph(''' The attribute `netharn.util.{key}` is deprecated. It was refactored and moved to `{modname}.{key}`. ''').format(key=key, modname=modname) if ERROR_ON_ACCESS: raise AttributeError(text) else: module = ub.import_module_from_name(modname) import warnings warnings.warn(text) return getattr(module, key) if key in ['SlidingIndexDataset', 'SlidingSlices']: raise AttributeError('Deprecated {}, but still available in ' 'netharn.util.util_slider_dep'.format(key)) raise AttributeError(key)
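# A hedged sketch of how such a helper is typically wired up: a PEP 562
# module-level __getattr__ (this hook is assumed here, not shown in the
# original file) routes unknown attribute accesses through the table above.
def __getattr__(key):
    return _lookup_deprecated_attribute(key)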
def autompl(verbose=0, recheck=False, force=None):
    """
    Uses platform heuristics to automatically set the matplotlib backend.
    If no display is available it will be set to `agg`, otherwise we will
    try to use the cross-platform `Qt5Agg` backend.

    Args:
        verbose (int, default=0): verbosity level
        recheck (bool, default=False): if False, this function will not run
            if it has already been called (this can save a significant
            amount of time).
        force (str, default=None): backend to force to or "auto"

    Checks:
        export QT_DEBUG_PLUGINS=1
        xdoctest -m kwplot.auto_backends autompl --check
        KWPLOT_UNSAFE=1 xdoctest -m kwplot.auto_backends autompl --check
        KWPLOT_UNSAFE=0 xdoctest -m kwplot.auto_backends autompl --check

    Example:
        >>> # xdoctest: +REQUIRES(--check)
        >>> plt = autoplt(verbose=1)
        >>> plt.figure()

    References:
        https://stackoverflow.com/questions/637005/check-if-x-server-is-running
    """
    global _AUTOMPL_WAS_RUN
    if force == 'auto':
        recheck = True
        force = None
    elif force is not None:
        set_mpl_backend(force)
        _AUTOMPL_WAS_RUN = True

    if recheck or not _AUTOMPL_WAS_RUN:
        if verbose:
            print('AUTOMPL')
        if sys.platform.startswith('win32'):
            # TODO: something reasonable
            pass
        else:
            DISPLAY = os.environ.get('DISPLAY', '')
            if DISPLAY:
                # Check if we can actually connect to X
                # NOTE: this call takes a significant amount of time
                info = ub.cmd('xdpyinfo', shell=True)
                if verbose:
                    print('xdpyinfo-info = {}'.format(ub.repr2(info)))
                if info['ret'] != 0:
                    DISPLAY = None

            if verbose:
                print(' * DISPLAY = {!r}'.format(DISPLAY))

            if not DISPLAY:
                backend = 'agg'
            else:
                """
                Note:

                    May encounter an error that crashes the program, not
                    sure why this happens yet. The current workaround is to
                    uninstall PyQt5, but that isn't sustainable.

                    QObject::moveToThread: Current thread (0x7fe8d965d030)
                    is not the object's thread (0x7fffb0f64340). Cannot move
                    to target thread (0x7fe8d965d030)

                    qt.qpa.plugin: Could not load the Qt platform plugin
                    "xcb" in "" even though it was found. This application
                    failed to start because no Qt platform plugin could be
                    initialized. Reinstalling the application may fix this
                    problem.

                    Available platform plugins are: eglfs, linuxfb, minimal,
                    minimalegl, offscreen, vnc, wayland-egl, wayland,
                    wayland-xcomposite-egl, wayland-xcomposite-glx, webgl,
                    xcb.

                UPDATE 2021-01-04:

                    By setting

                    export QT_DEBUG_PLUGINS=1

                    I was able to look at more debug information. It turns
                    out that it was grabbing the xcb plugin from the
                    opencv-python package. I uninstalled that package and
                    then installed opencv-python-headless, which does not
                    include an xcb binary. However, now it is missing
                    "libxcb-xinerama".

                    May be able to do something with:

                    conda install -c conda-forge xorg-libxinerama

                    # But that didn't work; I had to
                    pip uninstall PyQt5

                    # This seems to work correctly
                    conda install -c anaconda pyqt
                """
                if ub.modname_to_modpath('PyQt5'):
                    try:
                        import PyQt5  # NOQA
                        from PyQt5 import QtCore  # NOQA
                    except ImportError:
                        backend = 'agg'
                    else:
                        backend = 'Qt5Agg'

                        KWPLOT_UNSAFE = os.environ.get('KWPLOT_UNSAFE', '')
                        TRY_AVOID_CRASH = KWPLOT_UNSAFE.lower() not in [
                            '1', 'true', 'yes']

                        if TRY_AVOID_CRASH and ub.LINUX:
                            # HOLD UP. Let's try to avoid a crash.
                            if 'cv2' in sys.modules:
                                from os.path import dirname, join, exists
                                cv2 = sys.modules['cv2']
                                cv2_mod_dpath = dirname(cv2.__file__)
                                cv2_lib_dpath = join(cv2_mod_dpath, 'qt/plugins/platforms')
                                cv2_qxcb_fpath = join(cv2_lib_dpath, 'libqxcb.so')

                                qt_mod_dpath = dirname(QtCore.__file__)
                                qt_lib_dpath = join(qt_mod_dpath, 'Qt/plugins/platforms')
                                qt_qxcb_fpath = join(qt_lib_dpath, 'libqxcb.so')

                                if exists(cv2_qxcb_fpath) and exists(qt_qxcb_fpath):
                                    # Can we use ldd to make the test better?
                                    import warnings
                                    warnings.warn(ub.paragraph(
                                        '''
                                        Autompl has detected libqxcb in PyQt
                                        and cv2. Falling back to agg to avoid
                                        a potential crash. This can be worked
                                        around by installing
                                        opencv-python-headless instead of
                                        opencv-python. Disable this check by
                                        setting the environ KWPLOT_UNSAFE=1
                                        '''))
                                    backend = 'agg'
                elif ub.modname_to_modpath('PyQt4'):
                    try:
                        import PyQt4  # NOQA
                        from PyQt4 import QtCore  # NOQA
                    except ImportError:
                        backend = 'agg'
                    else:
                        backend = 'Qt4Agg'
                else:
                    backend = 'agg'

        set_mpl_backend(backend, verbose=verbose)

        if 0:
            # TODO:
            # IF IN A NOTEBOOK, BE SURE TO SET INLINE BEHAVIOR
            # THIS EFFECTIVELY REPRODUCES THE %matplotlib inline behavior
            # BUT USING AN ACTUAL PYTHON FUNCTION
            shell = _current_ipython_session()
            if shell:
                shell.enable_matplotlib('inline')

        _AUTOMPL_WAS_RUN = True