Exemplo n.º 1
0
 def test_unrelated_contents(self):
     """
     Two unrelated subpackages inside one zip must each report only
     their own resources. Ref python/importlib_resources#44.
     """
     expected = {
         'ziptestdata.one': {'__init__.py', 'resource1.txt'},
         'ziptestdata.two': {'__init__.py', 'resource2.txt'},
     }
     for package, names in expected.items():
         self.assertEqual(set(resources.contents(package)), names)
Exemplo n.º 2
0
 def test_unrelated_contents(self):
     # https://gitlab.com/python-devs/importlib_resources/issues/44
     #
     # A zip file holding two unrelated subpackages must not leak the
     # resources of one subpackage into the listing of the other.
     one = set(resources.contents('ziptestdata.one'))
     two = set(resources.contents('ziptestdata.two'))
     self.assertEqual(one, {'__init__.py', 'resource1.txt'})
     self.assertEqual(two, {'__init__.py', 'resource2.txt'})
Exemplo n.º 3
0
def listcommands():
    """
    Return an iterator over the names of all available commands.

    Command modules live in the ``command`` package; names starting with
    an underscore (e.g. ``__init__``) are skipped, and the remaining
    stems are filtered down to commands that are actually available.
    """
    paths = map(pathlib.Path, importlib_resources.contents(command))
    # Renamed from ``all`` so the builtin is not shadowed; ``.stem``
    # drops the file extension, leaving the bare command name.
    names = (path.stem for path in paths if not path.name.startswith('_'))
    return filter(_is_available, names)
Exemplo n.º 4
0
def ncp_fw_update():
    '''
    Compare the NCP firmware with the one available in the 'ncp_firmware'
    folder and update it if needed.

    Exits the process (sys.exit) when the required firmware file is not
    packaged or the flashing procedure fails.
    '''

    # Find the DFU file that matches the required fw version
    dfu_file_name = None
    ver_num = kibra.__kinosver__.split(' v')[-1]
    for file_name in importlib_resources.contents(NCP_FW_FOLDER):
        if ver_num in file_name:
            # TODO: This relies on the file name, we could also check the file
            # contents to make sure
            dfu_file_name = file_name
            break
    if not dfu_file_name:
        logging.error('Required NCP firmware not present.')
        sys.exit()

    # Flash the NCP and re-enable it
    with importlib_resources.path(NCP_FW_FOLDER, dfu_file_name) as dfu_path:
        # logging.warn() is deprecated; logging.warning() is the
        # supported spelling.
        logging.warning('NCP will be updated with firmware v%s' % ver_num)
        try:
            # Separate name for the parsed DFU object: the original code
            # reused `dfu_file` for both the file name and the object.
            dfu_file = kidfu.DfuFile(str(dfu_path))
            kifwu.dfu_find_and_flash(dfu_file, unattended=True)
        except Exception as exc:
            logging.error('Problem updating NCP firmware: %s' % exc)
            sys.exit()

    logging.info('NCP updated successfully.')
Exemplo n.º 5
0
 def test_submodule_contents_by_name(self):
     contents = set(resources.contents('namespacedata01'))
     # Drop import-cache cruft if present; discard() is a no-op when
     # the entry is absent, so no try/except is needed.
     contents.discard('__pycache__')
     self.assertEqual(contents, {'binary.file', 'utf-8.file', 'utf-16.file'})
Exemplo n.º 6
0
def copy_bootstrap(bootstrap_target: Path) -> None:
    """Copy bootstrap code from shiv into the pyz.

    :param bootstrap_target: The temporary directory where we are staging pyz contents.
    """
    for entry in importlib_resources.contents(bootstrap):
        # Only plain files are copied; subpackages/directories are skipped.
        if not importlib_resources.is_resource(bootstrap, entry):
            continue
        with importlib_resources.path(bootstrap, entry) as source:
            shutil.copyfile(source.absolute(), bootstrap_target / source.name)
Exemplo n.º 7
0
def copy_resources(package, destination):
    """Copy every plain-file resource of *package* into *destination*.

    The destination directory is created if necessary; package
    subdirectories are not copied.
    """
    makedirs(destination, exist_ok=True)

    for entry in resources.contents(package):
        if resources.is_resource(package, entry):
            with resources.path(package, entry) as resource_path:
                shutil.copy2(str(resource_path), destination)
Exemplo n.º 8
0
def copy_bootstrap(bootstrap_target: Path) -> None:
    """Copy bootstrap code from shiv into the pyz.

    :param bootstrap_target: The temporary directory where we are staging pyz contents.
    """
    # Collect the plain-file entries first, then copy each one.
    file_names = [
        name for name in importlib_resources.contents(bootstrap)
        if importlib_resources.is_resource(bootstrap, name)
    ]
    for name in file_names:
        with importlib_resources.path(bootstrap, name) as source:
            shutil.copyfile(source.absolute(), bootstrap_target / source.name)
Exemplo n.º 9
0
def test_all_exported_classes_have_slots():
    """All "public" classes that form part of the library's interface should have `__slots__`.

    When defining a subclass of AWSObject, it is possible to accidentally
    forget to add an empty __slots__ class variable. This means that
    unknown variables can accidentally be set on that class - violating
    the well-defined behaviour of our types.
    """
    # Modules to inspect
    public_modules = [flyingcircus.core, flyingcircus.intrinsic_function]

    # Packages to inspect
    public_packages = [flyingcircus.service]

    # Get public module from public packages
    for package in public_packages:
        for filename in importlib_resources.contents(package):
            # Ignore special filenames/modules (e.g. __init__.py)
            if filename.startswith("__"):
                continue

            modulename = package.__name__ + "." + filename.split(".")[0]
            module = importlib.import_module(modulename)
            public_modules.append(module)

    # Verify that every AWSObject-like class in a public module has
    # a well-defined attribute list
    for module in public_modules:
        for name, obj in module.__dict__.items():

            if not inspect.isclass(obj):
                continue
            if not issubclass(obj, AWSObject):
                continue
            if obj is flyingcircus.core.Resource:
                # Resource is abstract, so we can't check it directly
                continue

            full_class_name = f"{module.__name__}.{name}"

            try:
                instance = obj()
            except Exception as ex:
                # pytest.fail is clearer than `assert False` and is not
                # stripped when Python runs with -O.
                pytest.fail(f"Unable to instantiate {full_class_name}: {ex}")

            # The `message` keyword argument of pytest.raises was removed
            # in pytest 5.0 (it raised TypeError afterwards); `match`
            # carries the failure diagnostics instead.
            nonexistent_attrib_name = "ThisAttributeDoesNotExist"
            with pytest.raises(
                AttributeError,
                match=f"{obj.__name__}.*{nonexistent_attrib_name}",
            ):
                setattr(instance, nonexistent_attrib_name, 42)
Exemplo n.º 10
0
def _resolve_resource(
    package: str,
    *,
    name: t.Optional[str],
    support_extensions=(".yaml", ".yml")) -> dict:
    """Load a YAML resource from *package*.

    :param package: dotted package name to search for data files
    :param name: resource file name; when None, the first file whose
        extension matches *support_extensions* is used
    :param support_extensions: accepted file extensions
    :return: the parsed document, as returned by ``loading.load``
    """
    logger.info("resolve resource, find resource from %s", package)
    # Bug fix: the original loop ran unconditionally, clobbering an
    # explicitly passed ``name`` with the first matching file.
    if name is None:
        for fname in importlib_resources.contents(package):
            if os.path.splitext(fname)[1].endswith(support_extensions):
                name = fname
                break
    logger.info("resolve resource, load data from %s", name)
    with importlib_resources.open_text(package, name) as rf:
        return loading.load(rf)
Exemplo n.º 11
0
def _sorted_file_migrations():
    """Return (id, name, filename) tuples for migration files, ordered by id.

    Raises a ClickException when two files carry the same numeric id.
    """
    entries = {}
    for fname in importlib_resources.contents(migrations):
        match = re.search(MIGRATION_FILE_PATTERN, fname)
        if not match:
            continue
        idx = int(match.group("id"))
        # Duplicate ids indicate a broken package.
        if idx in entries:
            raise click.ClickException(
                f"Inconsistent package (multiples migrations with {idx} as id)"
            )
        entries[idx] = (idx, match.group("name"), fname)

    return [entries[key] for key in sorted(entries)]
Exemplo n.º 12
0
 def test_contents(self):
     contents = set(resources.contents(self.data))
     # The data directory may contain cruft such as a __pycache__
     # directory left behind by the test suite importing these modules;
     # it is irrelevant here, so drop it before comparing.
     contents.discard('__pycache__')
     expected = {
         '__init__.py',
         'subdirectory',
         'utf-8.file',
         'binary.file',
         'utf-16.file',
     }
     self.assertEqual(contents, expected)
Exemplo n.º 13
0
 def test_contents(self):
     contents = set(resources.contents(self.data))
     # Filter out import-cache artifacts: a __pycache__ directory under
     # Python 3, or .pyc/.pyo files under Python 2.  Both are written by
     # the test suite importing these modules and aren't germane here.
     for cruft in ('__pycache__', '__init__.pyc', '__init__.pyo'):
         contents.discard(cruft)
     self.assertEqual(
         contents, {
             '__init__.py',
             'subdirectory',
             'utf-8.file',
             'binary.file',
             'utf-16.file',
         })
Exemplo n.º 14
0
def copy_resource_folder(package: Package,
                         destination_path: str,
                         exclude: List[str] = None):
    """
    Copies the full content of provided package in destination folder.

    Names of files that should not be copied have to be listed in `exclude`.

    .. Warning ::

        As the resources in the folder are discovered by browsing
        through the folder, they are not explicitly listed in the Python code.
        Therefore, to have the install process run smoothly, these resources need
        to be listed in the MANIFEST.in file.

    :param package: name of resource package to copy
    :param destination_path: file system path of destination
    :param exclude: list of item names that should not be copied
    """
    excluded_names = ["__pycache__"] + (exclude or [])

    for resource_name in contents(package):
        if resource_name in excluded_names:
            continue
        target_path = pth.join(destination_path, resource_name)
        if is_resource(package, resource_name):
            # Plain file: copy it directly.
            copy_resource(package, resource_name, target_path)
        else:
            # Subfolder.  For folders only declared in MANIFEST.in,
            # getattr(package, "resource_name") would fail (is there
            # another way?), so fall back to the package name as a string.
            if isinstance(package, ModuleType):
                package_name = package.__name__
            else:  # str
                package_name = package
            copy_resource_folder(".".join([package_name, resource_name]),
                                 target_path,
                                 exclude=exclude)
Exemplo n.º 15
0
def retrieve_migrations() -> List[MigrationItem]:
    """Collect every migration from the migrations package, sorted by id.

    Raises AssertionError on duplicate ids or empty migration files.
    """
    items = []
    seen_ids = []
    for file_name in importlib_resources.contents(migrations_module):
        match = re.search(MIGRATION_FILE_PATTERN, file_name)
        if not match:
            continue
        idx = int(match.group("id"))
        # Sanity check: ids must be unique across the package.
        if idx in seen_ids:
            raise AssertionError(
                f"Inconsistent package (multiples migrations with {idx} as id)"
            )
        seen_ids.append(idx)
        sql = importlib_resources.read_text(migrations_module, file_name)
        if not sql:
            raise AssertionError(f"Empty migration file {file_name}")
        items.append(
            MigrationItem(idx=idx, name=match.group("name"), file_name=file_name, sql=sql)
        )

    return sorted(items, key=lambda item: item.idx)
Exemplo n.º 16
0
 def test_namespaces_cannot_have_resources(self):
     namespace = 'importlib_resources.tests.data03.namespace'
     self.assertFalse(list(resources.contents(namespace)))
     # Even though there is a file in the namespace directory, it is not
     # considered a resource, since namespace packages can't have them.
     self.assertFalse(resources.is_resource(namespace, 'resource1.txt'))
     # We should get an exception if we try to read it or open it,
     # regardless of which accessor is used.
     for accessor in (resources.open_text, resources.open_binary,
                      resources.read_text, resources.read_binary):
         self.assertRaises(FileNotFoundError, accessor, namespace,
                           'resource1.txt')
Exemplo n.º 17
0
def resource_context(*path, **kwargs):
    """Provide a context manager that yields a pathlib.Path object to a resource file or directory.

    If the resource does not already exist on its own on the file system,
    a temporary directory/file will be created. If the directory/file was created, it
    will be deleted upon exiting the context manager (no exception is
    raised if the directory was deleted prior to the context manager
    exiting).
    """
    if not path:
        raise TypeError("must provide a path")
    final_name = path[-1]
    package = ".".join([RESOURCE_MODULE] + list(path[:-1]))
    ignore = kwargs.pop("ignore", (".DS_Store", "__init__.py"))

    if importlib_resources.is_resource(package, final_name):
        # Single-file resource: hand back its absolute path directly.
        # (Renamed the target so the *path parameter is not shadowed.)
        with importlib_resources.path(package, final_name) as file_path:
            yield file_path.absolute()
    else:
        # Directory resource: stage its files into a temporary folder.
        package = "{}.{}".format(package, final_name)
        # TODO if the package folder exists on the file system it would be ideal to just return that
        # but importlib_resources doesn't provide a public API for that
        wanted = [
            entry for entry in importlib_resources.contents(package)
            if importlib_resources.is_resource(package, entry)
            and entry not in ignore
        ]
        staging_dir = pathlib.Path(tempfile.mkdtemp())
        try:
            for entry in wanted:
                data = importlib_resources.read_binary(package, entry)
                (staging_dir / entry).write_bytes(data)
            yield staging_dir
        finally:
            if staging_dir.exists():
                shutil.rmtree(str(staging_dir))
Exemplo n.º 18
0
def find_components(package, interface):
    """Find components which conform to a given interface.

    Search all the modules in a given package, returning an iterator over all
    items found that conform to the given interface, unless that object is
    decorated with `@abstract_component`.

    :param package: The package path to search.
    :type package: string
    :param interface: The interface that returned objects must conform to.
    :type interface: `Interface`
    :return: The sequence of matching components.
    :rtype: items implementing `interface`
    """
    for filename in contents(package):
        basename, extension = os.path.splitext(filename)
        # Only real Python modules are candidates; skip hidden files too.
        if extension != '.py' or basename.startswith('.'):
            continue
        module = import_module('{}.{}'.format(package, basename))
        # Modules without __all__ export nothing scannable.
        if hasattr(module, '__all__'):
            yield from scan_module(module, interface)
Exemplo n.º 19
0
def find_pluggable_components(subpackage, interface):
    """Find components which conform to a given interface.

    This finds components which can be implemented in a plugin.  It will
    search for the interface in the named subpackage, where the Python import
    path of the subpackage will be prepended by `mailman` for system
    components, and the various plugin names for any external components.

    :param subpackage: The subpackage to search.  This is prepended by
        'mailman' to search for system components, and each enabled plugin for
        external components.
    :type subpackage: str
    :param interface: The interface that returned objects must conform to.
    :type interface: `Interface`
    :return: The sequence of matching components.
    :rtype: Objects implementing `interface`
    """
    # This can't be imported at module level because of circular imports.
    from mailman.config import config
    # Return the system components first.
    yield from find_components('mailman.' + subpackage, interface)
    # Then every matching component in the subpackages of all enabled
    # plugins.  Only enabled and existing plugins appear here.
    for name, plugin_config in config.plugin_configs:
        # Use the components package from the plugin's configuration when
        # non-empty, falling back to the plugin's name.
        package = plugin_config['component_package'].strip() or name
        # The plugin may not ship a directory for this subpackage at all;
        # that's fine.
        has_subpackage = (subpackage in contents(package)
                          and not is_resource(package, subpackage))
        if has_subpackage:
            yield from find_components(
                '{}.{}'.format(package, subpackage), interface)
Exemplo n.º 20
0
import importlib_resources as resources
import subprocess
import shutil
import glob
import re

from pathlib import Path, PureWindowsPath

from ginjinn import config
from ginjinn.core import Configuration
from ginjinn.data_files import tf_config_templates, tf_script_templates

# TODO: maybe put those constants in another file
''' Model configuration files that are available out of the box'''
AVAILABLE_MODEL_CONFIGS = [
    f for f in resources.contents(tf_config_templates) if f.endswith('.config')
]
''' Models (names) that are available out of the box'''
AVAILABLE_MODELS = [
    # [:-7] removes '.config'
    f[:-7] for f in AVAILABLE_MODEL_CONFIGS
]
''' Mapping of model names to model configuration file names'''
MODEL_CONFIG_FILES = {m: f'{m}.config' for m in AVAILABLE_MODELS}
''' Mapping of model names (and configs) to urls where a coco-pretrained version can be downloaded '''
PRETRAINED_MODEL_URLS = {
    'faster_rcnn_resnet50_coco':
    'http://download.tensorflow.org/models/object_detection/faster_rcnn_resnet50_coco_2018_01_28.tar.gz',
    'rfcn_resnet101_coco':
    'http://download.tensorflow.org/models/object_detection/rfcn_resnet101_coco_2018_01_28.tar.gz',
    'faster_rcnn_inception_resnet_v2_atrous_coco':
Exemplo n.º 21
0
 def test_contents(self):
     # Every expected entry must appear among the package's contents.
     assert set(resources.contents(self.data)) >= self.expected
import runpy
import sys
from io import StringIO
from time import sleep

import importlib_resources
import pkg_resources
from benchmarker import Benchmarker
from importlib_resources import path

from benchmarks.benchmark_utils import swap_attr

for entry in [
        entry for entry in importlib_resources.contents('examples')
        if not pkg_resources.resource_isdir('examples', entry) and entry.
        endswith(".py") and '__init__' not in entry and 'DELETE' not in entry
]:
    print("*****************************************************************")
    print("Benchmarking {}".format(entry))
    print("*****************************************************************")

    with path('examples', entry) as file_in:
        with Benchmarker(cycle=20, extra=1) as bench:

            @bench(entry)
            def _(_):
                # prevent Benchmarker from doing "ZeroDivisionError: float division by zero:
                #    ratio = base_time / real_time"
                sleep(0.001)
                # In order to pipe input into examples that have input(),
                # we use the test package, which is meant for internal use by Python only internal and
Exemplo n.º 23
0
 def test_resource_contents(self):
     # A synthetic package reports exactly the contents it was built with.
     package = util.create_package(
         file=data01, path=data01.__file__, contents=['A', 'B', 'C'])
     self.assertEqual({'A', 'B', 'C'}, set(resources.contents(package)))
Exemplo n.º 24
0
 def test_contents_does_not_keep_open(self):
     # Merely obtaining the contents listing must not hold the zip file
     # open: unlinking the archive afterwards should succeed.
     listing = resources.contents('ziptestdata')
     self.zip_path.unlink()
     del listing
Exemplo n.º 25
0
            cmd = ' '.join(cmd) + ' ' + parameters
        else:
            cmd = ' '.join(cmd + parameters)
    else:
        if isinstance(parameters, string_types):
            parameters = shlex.split(parameters)
        cmd += parameters
    p = Popen(cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE)
    stdout, stderr = p.communicate(input=stdin)
    if not (p.returncode == 0 or ignore_return_code):
        raise OSError(bytes_to_str(stderr))
    return stdout.decode('utf8', 'replace'), stderr.decode('utf8', 'replace')


# Convert and execute every notebook shipped in the `notebooks` package,
# skipping subdirectories and anything that is not an .ipynb file.
notebook_names = [
    entry for entry in importlib_resources.contents('notebooks')
    if not pkg_resources.resource_isdir('notebooks', entry)
    and entry.endswith(".ipynb")
]
for entry in notebook_names:
    banner = "*****************************************************************"
    print(banner)
    print("Converting and running {}".format(entry))
    print(banner)

    with path('notebooks', entry) as file_in:
        out, err = run_python(
            " -m jupyter nbconvert --execute --to notebook --inplace " +
            str(file_in))
        sys.stderr.write(err)
        sys.stderr.flush()
        sys.stdout.write(out)
        sys.stdout.flush()
Exemplo n.º 26
0
 def test_submodule_contents_by_name(self):
     self.assertEqual(
         set(resources.contents('ziptestdata.subdirectory')),
         {'__init__.py', 'binary.file'},
     )
Exemplo n.º 27
0
 def test_submodule_contents(self):
     submodule = import_module('ziptestdata.subdirectory')
     self.assertEqual(set(resources.contents(submodule)),
                      {'__init__.py', 'binary.file'})
Exemplo n.º 28
0
def main(myst_file, header_file):
    """
    Build a web page for the myst markdown file MYST_FILE, using the headers in
    headers.json

    usage: build_page quiz3.md [headers.json]

    if headers.json is missing and doesn't exist, a default file "headers.json"
    will be written and used.  If headers.json does exist, it must be specified
    on the command line.

    headers.json is a json file with a dictionary with
    these keys (margin sizes in inches)

    \b
    {
        "page_title": "Day 03 quiz",
        "left_header": "Jan. 19, 2021",
        "center_header": "Day 03 quiz",
        "page_margin_top": 1,
        "page_margin_bottom": 1,
        "page_margin_left": 1,
        "page_margin_right": 1
    }


    html output is written to the file _myst_file/myst_file.html

    To force a page break insert:

    <div class="page-break"></div>

    """
    myst_file = Path(myst_file).resolve()
    header_list = list(header_file)
    if len(header_list) == 0:
        #
        # header_file may be missing so handle the case where
        # nargs=0 and create default headers.json.
        #
        the_headers = "headers.json"
        if Path(the_headers).is_file():
            raise ValueError(
                f"{the_headers} exists but wasn't specified, won't overwrite")
        with ir.open_text('paged_html_theme.templates',
                          'headers.json',
                          encoding='utf-8') as header_in:
            header_contents = header_in.read()
        with open(the_headers, 'w', encoding='utf-8') as header_out:
            header_out.write(header_contents)
            print(
                f"wrote default header file \n{header_contents} to \n{str(the_headers)}\n"
            )
    elif len(header_list) == 1:
        the_headers = Path(header_list[0]).resolve()
    else:
        #
        # nargs is neither 0 nor 1: give up.  Raising here (the original
        # only printed) prevents a NameError further down from the
        # unbound `the_headers`.
        #
        raise ValueError(f"need a single header file, found {header_list}")

    stem_name = myst_file.stem
    root_dir = myst_file.parent
    output_dir = root_dir / f'_{stem_name}_build'
    # `header_json` renamed from `input`, which shadowed the builtin.
    with open(the_headers, 'r', encoding='utf-8') as header_json:
        header_dict = json.load(header_json)
    # (removed an unused `contents = list(ir.contents(...))` listing)
    with ir.open_text('paged_html_theme.templates',
                      'conf_py.j2',
                      encoding='utf-8') as the_conf:
        conf_j2 = jinja2.Template(the_conf.read())
    if not Path(myst_file).is_file():
        raise ValueError(f"could not find {myst_file}")
    arglist = [
        'sphinx-build', '-v', '-b html',
        str(root_dir),
        str(output_dir),
        str(myst_file)
    ]
    argstring = ' '.join(arglist)
    header_dict['stem_name'] = stem_name
    conf_file = conf_j2.render(**header_dict)
    with cd(root_dir):
        with open('conf.py', 'w', encoding='utf-8') as conf_out:
            conf_out.write(conf_file)
        print(f"running the command \n{argstring}\n")
        # NOTE(review): shell=True is kept because argstring embeds
        # '-b html' as a single joined token; the command string is built
        # locally, not from untrusted input.
        result = subprocess.run(argstring, capture_output=True, shell=True)
    if result.stdout:
        print(f"stdout message: {result.stdout.decode('utf-8')}")
        out_file = output_dir / f"{stem_name}.html"
        if out_file.is_file():
            print(f"full path to output: {out_file}")
        else:
            raise ValueError(f"can't find output file {out_file}")
    if result.stderr:
        print(f"stderror message: {result.stderr.decode('utf-8')}")