def create_config(self):
    """Create the initial configuration file from the packaged presets.

    Loads the bundled ``presets.json`` resource, merges it into
    ``self.data``, ensures the config file's parent directory exists,
    and saves the configuration.
    """
    from pkg_resources import resource_string as resource_bytes
    logging.debug(f"create_config: {self.config_file}")
    presets_json = resource_bytes(
        'foundryWorldTools', 'presets.json').decode('utf-8')
    self.data.update(json.loads(presets_json))
    # parents=True handles a missing grandparent directory and
    # exist_ok=True removes the check-then-create race the original
    # `if not ...exists(): ...mkdir()` pattern had.
    self.config_file.parent.mkdir(parents=True, exist_ok=True)
    self.save()
def pil_from_svg_file(self, package, resource):
    """Render a packaged SVG resource to a PIL image, nPixCell square."""
    raw_svg = resource_bytes(package, resource)
    png_data = svg2png(bytestring=raw_svg,
                       output_height=self.nPixCell,
                       output_width=self.nPixCell)
    with io.BytesIO(png_data) as stream:
        image = Image.open(stream)
        # Force the pixel data to be read while the stream is open.
        image.load()
        return image
def _reset_configs(self):
    """Remove every .ini in ini_path and write a fresh defaults file."""
    for name in os.listdir(self.ini_path):
        if name.endswith('.ini'):
            os.remove(os.path.join(self.ini_path, name))
    template = resource_bytes(
        'systemimage.tests.data', '01.ini').decode('utf-8')
    rendered = template.format(tmpdir=self.ini_tmpdir,
                               vardir=self.ini_vardir,
                               logfile=self.ini_logfile,
                               loglevel=self.loglevel)
    defaults_path = os.path.join(self.ini_path, '00_defaults.ini')
    with open(defaults_path, 'w', encoding='utf-8') as fp:
        print(rendered, file=fp)
def ensure_current_suffix_list():
    """Return the path to an up-to-date cached copy of the suffix list.

    Resolution order: a fresh cached copy; a newly downloaded copy; a
    stale cached copy (when the download fails); and finally the copy
    shipped with the package.  The returned path always exists.
    """
    # Read and parse the organizational domain suffix list.  First look in the
    # cached directory to see if we already have a valid copy of it.
    cached_copy_path = os.path.join(config.VAR_DIR, LOCAL_FILE_NAME)
    lifetime = as_timedelta(config.dmarc.cache_lifetime)
    download = False
    try:
        mtime = os.stat(cached_copy_path).st_mtime
    except FileNotFoundError:
        vlog.info('No cached copy of the public suffix list found')
        download = True
        cache_found = False
    else:
        cache_found = True
        # Is the cached copy out-of-date?  Note that when we write a new
        # cache version we explicitly set its mtime to the time in the
        # future when the cache will expire.
        if mtime < now().timestamp():
            download = True
            vlog.info('Cached copy of public suffix list is out of date')
    if download:
        try:
            content = get(config.dmarc.org_domain_data_url)
        except (URLError, HTTPError) as error:
            elog.error('Unable to retrieve public suffix list from %s: %s',
                       config.dmarc.org_domain_data_url,
                       getattr(error, 'reason', str(error)))
            if cache_found:
                # content=None below skips the cache rewrite, so the
                # stale file is returned as-is.
                vlog.info('Using out of date public suffix list')
                content = None
            else:
                # We couldn't access the URL and didn't even have an out of
                # date suffix list cached.  Use the shipped version.
                content = resource_bytes('mailman.rules.data',
                                         LOCAL_FILE_NAME)
        if content is not None:
            # Content is either a string or UTF-8 encoded bytes.
            if isinstance(content, bytes):
                content = content.decode('utf-8')
            # Write the cache atomically: write to a sibling .new file,
            # then rename over the cached location.
            new_path = cached_copy_path + '.new'
            with open(new_path, 'w', encoding='utf-8') as fp:
                fp.write(content)
            # Set the expiry time to the future (see the mtime check
            # above, which compares against "now").
            mtime = (now() + lifetime).timestamp()
            os.utime(new_path, (mtime, mtime))
            # Flip the new file into the cached location.  This does not
            # modify the mtime.
            os.rename(new_path, cached_copy_path)
    return cached_copy_path
def load_from_resource(self, resource_file):
    """
    Load curve data from resource_file in the package.

    On success, sets self.rules, self.base_length, self.start_point and
    self.recursion_depth from the JSON document.  Malformed or missing
    data is reported to stdout and leaves the attributes untouched.
    """
    try:
        # BUG FIX: the resource load used to happen *before* the try
        # block, which made the FileNotFoundError handler below
        # unreachable for a missing resource file.
        resource_data = resource_bytes('pyfractal', resource_file)
        curve_data = json.loads(resource_data)
        self.rules = curve_data["rules"]
        self.base_length = curve_data["base_length"]
        self.start_point = curve_data["start_point"]
        self.recursion_depth = curve_data["recursion_depth"]
    except (json.JSONDecodeError, KeyError):
        print("Malformed JSON data")
    except FileNotFoundError:
        print("Curve resource file not found/available")
def load_external(path):
    """Load the configuration file named by path.

    :param path: A string naming the location of the external configuration
        file.  This is either an absolute file system path or a special
        ``python:`` path.  When path begins with ``python:``, the rest of
        the value must name a ``.cfg`` file located within Python's import
        path, however the trailing ``.cfg`` suffix is implied (don't
        provide it here).
    :return: The contents of the configuration file.
    :rtype: str
    """
    prefix = 'python:'
    # Dispatch on whether the path names a package resource or a file.
    if path.startswith(prefix):
        dotted = path[len(prefix):]
        package, _unused, resource = dotted.rpartition('.')
        return resource_bytes(package, resource + '.cfg').decode('utf-8')
    with open(path, 'r', encoding='utf-8') as fp:
        return fp.read()
def _wrapper(self, function, ini_files, *args, **kws):
    """Run `function` inside a fully built temporary system-image config.

    Copies each ini file into a temporary config.d directory in
    sequential order (interpolating temporary tmp/var directories),
    patches the global configuration and device detection, then calls
    `function`, injecting config_d/config keyword arguments when its
    signature asks for them.  Returns whatever `function` returns.
    """
    start = 0
    # It would be preferable to simply add a device='nexus7' argument, but
    # that causes 'decorator() takes 1 positional argument but 2 were
    # given'.  Pop (not get) the key so it is not forwarded to the
    # wrapped function in the **kws pass-through below.
    device = kws.pop('device', 'nexus7')
    with ExitStack() as resources:
        # Create the config.d directory and copy all the source ini files to
        # this directory in sequential order, interpolating in the temporary
        # tmp and var directories.
        config_d = resources.enter_context(temporary_directory())
        temp_tmpdir = resources.enter_context(temporary_directory())
        temp_vardir = resources.enter_context(temporary_directory())
        for ini_file in ini_files:
            dst = os.path.join(config_d, '{:02d}_override.ini'.format(start))
            start += 1
            template = resource_bytes(
                'systemimage.tests.data', ini_file).decode('utf-8')
            with atomic(dst) as fp:
                print(template.format(tmpdir=temp_tmpdir,
                                      vardir=temp_vardir), file=fp)
        # Patch the global configuration object so that it can be used
        # directly, which is good enough in most cases.  Also patch the bit
        # of code that detects the device name.
        config = Configuration(config_d)
        resources.enter_context(patch('systemimage.config._config', config))
        resources.enter_context(
            patch('systemimage.device.check_output', return_value=device))
        # Make sure the cache_partition and data_partition exist.
        makedirs(config.updater.cache_partition)
        makedirs(config.updater.data_partition)
        # The method under test is allowed to specify some additional
        # keyword arguments, in order to pass some variables in from the
        # wrapper.
        signature = inspect.signature(function)
        if 'config_d' in signature.parameters:
            kws['config_d'] = config_d
        if 'config' in signature.parameters:
            kws['config'] = config
        # BUG FIX: the call previously omitted **kws, which silently
        # dropped the caller's keyword arguments and made the
        # config_d/config injections above dead code.
        return function(self, *args, **kws)
def initialize_1(config_path=None):
    """First initialization step.

    * Zope component architecture
    * The configuration system
    * Run-time directories

    :param config_path: The path to the configuration file.
    :type config_path: string
    """
    zcml = resource_bytes('mailman.config', 'configure.zcml')
    xmlconfig.string(zcml.decode('utf-8'))
    # By default, set the umask so that only owner and group can read and
    # write our files.  Specifically we must have g+rw and we probably want
    # o-rwx although I think in most cases it doesn't hurt if other can read
    # or write the files.
    os.umask(0o007)
    # Initialize configuration event subscribers.  This must be done before
    # setting up the configuration system.
    from mailman.app.events import initialize as initialize_events
    initialize_events()
    # config_path will be set if the command line argument -C is given.  That
    # case overrides all others.  When not given on the command line, the
    # configuration file is searched for in the file system.
    if config_path is None:
        config_path = search_for_configuration_file()
    elif config_path is INHIBIT_CONFIG_FILE:
        # INHIBIT_CONFIG_FILE is a sentinel compared by identity.
        # For the test suite, force this back to not using a config file.
        config_path = None
    mailman.config.config.load(config_path)
    # Use this environment variable to define an extra configuration file for
    # testing.  This is used by the tox.ini to run the full test suite under
    # PostgreSQL.
    extra_cfg_path = os.environ.get('MAILMAN_EXTRA_TESTING_CFG')
    if extra_cfg_path is not None:
        with open(extra_cfg_path, 'r', encoding='utf-8') as fp:
            extra_cfg = fp.read()
        mailman.config.config.push('extra testing config', extra_cfg)
def pil_from_png_file(self, package, resource):
    """Load a packaged PNG resource into a fully-read PIL image."""
    png_bytes = resource_bytes(package, resource)
    with io.BytesIO(png_bytes) as stream:
        image = Image.open(stream)
        # Force the pixel data to be read while the stream is open.
        image.load()
        return image
def setUp(cls):
    """Set up the test configuration layer.

    Builds a temporary var_dir, crafts and pushes a testing config
    (also written to disk for subprocess -C use), runs the remaining
    initialization steps, and redirects per-subsystem logging so tests
    can inspect the output.
    """
    # Set up the basic configuration stuff.  Turn off path creation until
    # we've pushed the testing config.
    config.create_paths = False
    initialize.initialize_1(INHIBIT_CONFIG_FILE)
    assert cls.var_dir is None, 'Layer already set up'
    # Calculate a temporary VAR_DIR directory so that run-time artifacts
    # of the tests won't tread on the installation's data.  This also
    # makes it easier to clean up after the tests are done, and insures
    # isolation of test suite runs.
    cls.var_dir = tempfile.mkdtemp()
    # We need a test configuration both for the foreground process and any
    # child processes that get spawned.  lazr.config would allow us to do
    # it all in a string that gets pushed, and we'll do that for the
    # foreground, but because we may be spawning processes (such as
    # runners) we'll need a file that we can specify to the with the -C
    # option.  Craft the full test configuration string here, push it, and
    # also write it out to a temp file for -C.
    #
    # Create a dummy postfix.cfg file so that the test suite doesn't try
    # to run the actual postmap command, which may not exist anyway.
    postfix_cfg = os.path.join(cls.var_dir, 'postfix.cfg')
    with open(postfix_cfg, 'w') as fp:
        print(dedent("""
        [postfix]
        postmap_command: true
        """), file=fp)
    test_config = dedent("""
    [mailman]
    layout: testing
    [paths.testing]
    var_dir: {0}
    [devmode]
    testing: yes
    [mta]
    configuration: {1}
    """.format(cls.var_dir, postfix_cfg))
    # Read the testing config and push it.
    more = resource_bytes('mailman.testing', 'testing.cfg')
    test_config += more.decode('utf-8')
    config.create_paths = True
    config.push('test config', test_config)
    # Initialize everything else.
    initialize.initialize_2(testing=True)
    initialize.initialize_3()
    # When stderr debugging is enabled, subprocess root loggers should
    # also be more verbose.
    if cls.stderr:
        test_config += dedent("""
        [logging.root]
        level: debug
        """)
    # Enable log message propagation and reset the log paths so that the
    # doctests can check the output.
    for logger_config in config.logger_configs:
        sub_name = logger_config.name.split('.')[-1]
        if sub_name == 'root':
            continue
        logger_name = 'mailman.' + sub_name
        log = logging.getLogger(logger_name)
        log.propagate = cls.stderr
        # Reopen the file to a new path that tests can get at.  Instead of
        # using the configuration file path though, use a path that's
        # specific to the logger so that tests can find expected output
        # more easily.
        path = os.path.join(config.LOG_DIR, sub_name)
        get_handler(sub_name).reopen(path)
        log.setLevel(logging.DEBUG)
        # If stderr debugging is enabled, make sure subprocesses are also
        # more verbose.
        if cls.stderr:
            test_config += expand(dedent("""
            [logging.$name]
            propagate: yes
            level: debug
            """), dict(name=sub_name, path=path))
    # The root logger will already have a handler, but it's not the right
    # handler.  Remove that and set our own.
    if cls.stderr:
        console = logging.StreamHandler(sys.stderr)
        formatter = logging.Formatter(config.logging.root.format,
                                      config.logging.root.datefmt)
        console.setFormatter(formatter)
        root = logging.getLogger()
        del root.handlers[:]
        root.addHandler(console)
    # Write the configuration file for subprocesses and set up the config
    # object to pass that properly on the -C option.
    config_file = os.path.join(cls.var_dir, 'test.cfg')
    with open(config_file, 'w') as fp:
        fp.write(test_config)
        print(file=fp)
    config.filename = config_file
from systemimage.config import config from systemimage.dbus import Loop from systemimage.helpers import makedirs from systemimage.logging import initialize from systemimage.main import DEFAULT_CONFIG_D # --testing is only enabled when the systemimage.testing package is # available. This will be the case for the upstream source package, and when # the systemimage-dev binary package is installed in Ubuntu. try: from systemimage.testing.dbus import instrument, get_service except ImportError: # pragma: no cover instrument = None get_service = None __version__ = resource_bytes('systemimage', 'version.txt').decode('utf-8').strip() def main(): # If enabled, start code coverage collection as early as possible. # Parse arguments. parser = argparse.ArgumentParser( prog='system-image-dbus', description='Ubuntu System Image Upgrader DBus service') parser.add_argument('--version', action='version', version='system-image-dbus {}'.format(__version__)) parser.add_argument('-C', '--config', default=DEFAULT_CONFIG_D, action='store',
"""Allows the package to be run with `python3 -m ubuntu_image`.""" import sys import logging import argparse from pkg_resources import resource_string as resource_bytes from ubuntu_image.i18n import _ _logger = logging.getLogger("ubuntu-image") __version__ = resource_bytes('ubuntu_image', 'version.txt').decode('utf-8') PROGRAM = 'ubuntu-image' def parseargs(argv=None): parser = argparse.ArgumentParser( prog=PROGRAM, description=_('Generate a bootable disk image.'), ) parser.add_argument('--version', action='version', version='{} {}'.format(PROGRAM, __version__)) parser.add_argument('-d', '--debug', default=False, action='store_true', help=_('Enable debugging output')) args = parser.parse_args(argv) if args.debug: logging.basicConfig(level=logging.DEBUG) return args
def get_index(filename):
    """Return an Index built from a JSON resource in the test data."""
    raw = resource_bytes('systemimage.tests.data', filename)
    return Index.from_json(raw.decode('utf-8'))
import os import sys import logging import argparse from contextlib import suppress from pickle import dump, load from pkg_resources import resource_string as resource_bytes from ubuntu_image.builder import ModelAssertionBuilder from ubuntu_image.i18n import _ _logger = logging.getLogger('ubuntu-image') try: __version__ = resource_bytes('ubuntu_image', 'version.txt').decode('utf-8') except FileNotFoundError: # pragma: nocover # Probably, setup.py hasn't been run yet to generate the version.txt. __version__ = 'dev' PROGRAM = 'ubuntu-image' def parseargs(argv=None): parser = argparse.ArgumentParser( prog=PROGRAM, description=_('Generate a bootable disk image.'), ) parser.add_argument('--version', action='version', version='{} {}'.format(PROGRAM, __version__)) parser.add_argument('-d', '--debug', default=False, action='store_true',
def get_channels(filename):
    """Return a Channels object built from a JSON resource in the test data."""
    raw = resource_bytes('systemimage.tests.data', filename)
    return Channels.from_json(raw.decode('utf-8'))