def __init__(self, project_fs=None, breakpoint=False, strict=False, test_build=False, develop=False):
    """Initialize the archive's state containers and default flags.

    :param project_fs: filesystem object for the project root (may be None)
    :param breakpoint: enable interactive breakpoints during execution
    :param strict: enable strict-mode checks
    :param test_build: build in test mode
    :param develop: enable develop mode
    """
    self.project_fs = project_fs
    self.strict = strict
    self.test_build = test_build
    self.develop = develop
    # Element registry and library/application bookkeeping.
    self.registry = ElementRegistry()
    self.libs = {}
    self.apps = OrderedDict()
    self.apps_by_lib = defaultdict(list)
    # Per-application settings, keyed by app name.
    self.app_settings = defaultdict(SettingsContainer)
    self.app_system_settings = defaultdict(SettingsContainer)
    self.cfg = None
    self.settings = SettingsContainer()
    # Layered filesystems: templates and data are MultiFS so additional
    # locations can be stacked on later with priorities.
    self.templates_fs = MultiFS()
    self.data_fs = MultiFS()
    self.filesystems = FSContainer({'templates': self.templates_fs, 'data': self.data_fs})
    self.filters = FilterContainer(self)
    self.template_engines = {}
    self.database_engines = {}
    self.caches = {}
    self.mail_servers = {}
    self.default_mail_server = None
    self.default_db_engine = None
    # Runtime flags; most are overwritten later from config (init_settings).
    self.debug = False
    self.struct = False
    self.auto_reload = False
    self.known_namespaces = set()
    self.sites = Sites()
    self.breakpoint = breakpoint
    self.suppress_breakpoints = False
    self.data_tags = defaultdict(list)
    # Nested mapping: lib -> tag -> list of elements.
    self.data_tags_by_lib = defaultdict(lambda: defaultdict(list))
    self.preflight = False
    self.log_signals = False
    self.debug_echo = False
    self.debug_memory = False
    self.lib_paths = None
    self._lib_database = None
    # Console/logging configuration (may be replaced from [console] config).
    self.log_logger = None
    self.log_color = True
    self.log_width = None
    self.media_urls = None
    self.media_app = None
    self.failed_documents = []
    self.enum = {}
    self.enum_by_lib = {}
    self.signals = Signals()
    self._moyarc = None
    self.console = self.create_console()
def test_priority(self):
    """Test priority order is working"""

    def make_multi_fs(priorities):
        # Build a MultiFS over three MemoryFS members m1..m3, each holding
        # a file "name" whose bytes identify the member.  A priority of
        # None means "do not pass the priority keyword" so the MultiFS
        # default is exercised exactly as in the original test.
        multi_fs = MultiFS(auto_close=False)
        for index, priority in enumerate(priorities, start=1):
            member = MemoryFS()
            label = "m{}".format(index)
            member.writebytes("name", label.encode("ascii"))
            if priority is None:
                multi_fs.add_fs(label, member)
            else:
                multi_fs.add_fs(label, member, priority=priority)
        return multi_fs

    # Equal (default) priorities: the filesystem added last wins.
    self.assertEqual(make_multi_fs([None, None, None]).readbytes("name"), b"m3")
    # A single explicitly higher priority beats later default-priority members.
    self.assertEqual(make_multi_fs([None, 10, None]).readbytes("name"), b"m2")
    # Among equal explicit priorities, the one added last wins.
    self.assertEqual(make_multi_fs([None, 10, 10]).readbytes("name"), b"m3")
    # The highest priority wins regardless of insertion order.
    self.assertEqual(make_multi_fs([11, 10, 10]).readbytes("name"), b"m1")
def test_priority(self):
    """Test priority order is working"""

    def make_multi_fs(priorities):
        # Build a MultiFS over three MemoryFS members m1..m3, each holding
        # a file "name" whose bytes identify the member.  A priority of
        # None means "do not pass the priority keyword" so the addfs
        # default is exercised exactly as in the original test.
        multi_fs = MultiFS(auto_close=False)
        for index, priority in enumerate(priorities, start=1):
            member = MemoryFS()
            label = "m%d" % index
            member.setcontents("name", b(label))
            if priority is None:
                multi_fs.addfs(label, member)
            else:
                multi_fs.addfs(label, member, priority=priority)
        return multi_fs

    # assert_ is a deprecated alias; assertEqual also gives better failure
    # messages than comparing with ==.
    # Equal (default) priorities: the filesystem added last wins.
    self.assertEqual(make_multi_fs([None, None, None]).getcontents("name"), b("m3"))
    # A single explicitly higher priority beats later default-priority members.
    self.assertEqual(make_multi_fs([None, 10, None]).getcontents("name"), b("m2"))
    # Among equal explicit priorities, the one added last wins.
    self.assertEqual(make_multi_fs([None, 10, 10]).getcontents("name"), b("m3"))
    # The highest priority wins regardless of insertion order.
    self.assertEqual(make_multi_fs([11, 10, 10]).getcontents("name"), b("m1"))
def test_opener(self):
    """Test use of FS URLs."""
    combined = MultiFS()
    # Something that is neither an FS object nor an FS URL must be rejected.
    with self.assertRaises(TypeError):
        combined.add_fs(u'foo', 5)
    # FS URL strings are opened automatically.
    for label, url in ((u'f1', u'mem://'), (u'f2', u'temp://')):
        combined.add_fs(label, url)
    self.assertIsInstance(combined.get_fs(u'f1'), MemoryFS)
def test_opener(self):
    """Test use of FS URLs."""
    combined = MultiFS()
    # Something that is neither an FS object nor an FS URL must be rejected.
    with self.assertRaises(TypeError):
        combined.add_fs("foo", 5)
    # FS URL strings are opened automatically.
    for label, url in (("f1", "mem://"), ("f2", "temp://")):
        combined.add_fs(label, url)
    self.assertIsInstance(combined.get_fs("f1"), MemoryFS)
def test_listdir_duplicates(self):
    """A file present in several member filesystems is listed only once."""
    combined = MultiFS()
    for label in ('m1', 'm2'):
        member = MemoryFS()
        member.touch('foo')
        combined.add_fs(label, member)
    self.assertEqual(combined.listdir(u'/'), ['foo'])
def test_listdir_duplicates(self):
    """A file present in several member filesystems is listed only once."""
    combined = MultiFS()
    for label in ("m1", "m2"):
        member = MemoryFS()
        member.touch("foo")
        combined.add_fs(label, member)
    self.assertEqual(combined.listdir("/"), ["foo"])
def test_auto_close(self):
    """Test MultiFS auto close is working"""
    multi_fs = MultiFS()
    m1 = MemoryFS()
    m2 = MemoryFS()
    multi_fs.addfs('m1', m1)
    multi_fs.addfs('m2', m2)
    # assert_ is a deprecated unittest alias; the explicit assertions also
    # read better and give clearer failure messages.
    self.assertFalse(m1.closed)
    self.assertFalse(m2.closed)
    multi_fs.close()
    # With the default auto_close=True, closing the MultiFS closes members.
    self.assertTrue(m1.closed)
    self.assertTrue(m2.closed)
def test_no_auto_close(self):
    """Test MultiFS auto close can be disabled"""
    multi_fs = MultiFS(auto_close=False)
    m1 = MemoryFS()
    m2 = MemoryFS()
    multi_fs.addfs('m1', m1)
    multi_fs.addfs('m2', m2)
    # assert_ is a deprecated unittest alias; use explicit assertions.
    self.assertFalse(m1.closed)
    self.assertFalse(m2.closed)
    multi_fs.close()
    # auto_close=False: closing the MultiFS must leave its members open.
    self.assertFalse(m1.closed)
    self.assertFalse(m2.closed)
def test_auto_close(self):
    """Test MultiFS auto close is working"""
    members = [MemoryFS(), MemoryFS()]
    multi_fs = MultiFS()
    multi_fs.add_fs("m1", members[0])
    multi_fs.add_fs("m2", members[1])
    for member in members:
        self.assertFalse(member.isclosed())
    multi_fs.close()
    # With the default auto_close=True, closing the MultiFS closes members.
    for member in members:
        self.assertTrue(member.isclosed())
def test_multiple_fs_with_use_syspath(self, ctx):
    # Build an in-memory filesystem (registered with the test context via
    # "<<" — presumably for automatic cleanup; confirm against the ctx
    # fixture) plus a zip archive on disk.
    testfs = ctx << fs.open_fs('mem://')
    self.build_fs(testfs, ctx)
    self.build_zipfs()
    multi_fs = MultiFS()
    multi_fs.add_fs('memory', testfs)
    multi_fs.add_fs('zip', fs.open_fs("zip://test.zip"))
    # Even with use_syspath=True, a template living in a zip member (which
    # has no system path) must be reported by its fs path.
    env = self.build_env(multi_fs, use_syspath=True)
    source, path, _ = env.loader.get_source(None, "template_in_zip.j2")
    self.assertEqual(path, "template_in_zip.j2")
    # Clean up the zip file created by build_zipfs().
    os.unlink("test.zip")
def test_no_auto_close(self):
    """Test MultiFS auto close can be disabled"""
    multi_fs = MultiFS(auto_close=False)
    self.assertEqual(repr(multi_fs), "MultiFS(auto_close=False)")
    members = [MemoryFS(), MemoryFS()]
    multi_fs.add_fs("m1", members[0])
    multi_fs.add_fs("m2", members[1])
    for member in members:
        self.assertFalse(member.isclosed())
    multi_fs.close()
    # auto_close=False: closing the MultiFS must leave its members open.
    for member in members:
        self.assertFalse(member.isclosed())
def _find_extensions(paths):
    '''Iterate the paths, in order, finding extensions and adding them to
    the return dict.

    Returns a dict mapping each extension kind ('check', 'configure',
    'write') to a dict of {basename: absolute system path}.
    '''
    extension_kinds = ['check', 'configure', 'write']
    efs = MultiFS()
    # BUG FIX: the original used map() purely for its side effect, which is
    # a no-op on Python 3 (map is lazy and the iterator was never consumed).
    # An explicit loop is correct on both Python 2 and 3.
    for path in paths:
        efs.addfs(path, OSFS(path))

    def get_extensions(kind):
        # Find every '*.<kind>' file anywhere under the combined filesystem
        # and map its basename (without extension) to its system path.
        return {os.path.splitext(x)[0]: efs.getsyspath(x)
                for x in efs.walkfiles('.', '*.%s' % kind)}

    return {e: get_extensions(e) for e in extension_kinds}
def get_scenario_fs():
    """Create filesystem combining the server (if connected) with blob storage,
    prioritizing the server if connected.

    :return: (*fs.base.FS*) -- filesystem instance
    """
    scenario_data = get_blob_fs("scenariodata")
    mfs = MultiFS()
    try:
        ssh_fs = get_ssh_fs(server_setup.DATA_ROOT_DIR)
        mfs.add_fs("ssh_fs", ssh_fs, write=True, priority=2)
    # A bare "except:" would also swallow KeyboardInterrupt/SystemExit;
    # catching Exception keeps the best-effort fallback without that risk.
    except Exception:  # noqa
        print("Could not connect to ssh server")
    # Blob storage is always available as the lower-priority fallback.
    mfs.add_fs("scenario_fs", scenario_data, priority=1)
    remotes = ",".join([f[0] for f in mfs.iterate_fs()])
    print(f"Initialized remote filesystem with {remotes}")
    return mfs
def get_fs(cls, registry, fs_name, fs_name_params, fs_path, writeable, create_dir):
    """Open a MultiFS described by an ini file.

    The path may carry a '#section' fragment selecting which config
    section to read (default 'fs'); each key/value pair in that section
    maps a member name to an FS URL.

    Returns a (MultiFS, path) tuple as required by the opener protocol.
    """
    from fs.multifs import MultiFS
    from configparser import ConfigParser
    cfg = ConfigParser()
    if '#' in fs_path:
        path, section = fs_path.split('#', 1)
    else:
        path = fs_path
        section = 'fs'
    # readfp() is deprecated (removed in Python 3.12); read_file() is the
    # supported replacement and accepts the same file-like object.
    cfg.read_file(registry.open(path))
    multi_fs = MultiFS()
    for name, fs_url in cfg.items(section):
        multi_fs.addfs(name, registry.opendir(fs_url, create_dir=create_dir))
    return multi_fs, ''
def test_multiple_fs(self, ctx):
    # Build an in-memory filesystem (registered with the test context via
    # "<<" — presumably for automatic cleanup; confirm against the ctx
    # fixture) plus a zip archive on disk.
    testfs = ctx << fs.open_fs('mem://')
    self.build_fs(testfs, ctx)
    self.build_zipfs()
    multi_fs = MultiFS()
    multi_fs.add_fs('memory', testfs)
    multi_fs.add_fs('zip', fs.open_fs("zip://test.zip"))
    env = self.build_env(multi_fs)
    # Templates are found in either member of the MultiFS.
    template = env.get_template("dir/nested.j2")
    self.assertEqual(template.render(), "<html>this is a nested template !</html>")
    template = env.get_template("template_in_zip.j2")
    self.assertEqual(template.render(), "<html>this template is in a zip</html>")
    # A template in neither member raises the usual Jinja2 error.
    self.assertRaises(jinja2.TemplateNotFound, env.get_template, "other.j2")
    # The loader reports the template's fs path.
    source, path, _ = env.loader.get_source(None, "template_in_zip.j2")
    self.assertEqual(path, "template_in_zip.j2")
def get_multi_fs(root):
    """Create filesystem combining the server (if connected) with profile and
    scenario containers in blob storage. The priority is in descending order,
    so the server will be used first if possible

    :param str root: root directory on server
    :return: (*fs.base.FS*) -- filesystem instance
    """
    scenario_data = get_blob_fs("scenariodata")
    profiles = get_blob_fs("profiles")
    mfs = MultiFS()
    try:
        ssh_fs = get_ssh_fs(root)
        mfs.add_fs("ssh_fs", ssh_fs, write=True, priority=3)
    # A bare "except:" would also swallow KeyboardInterrupt/SystemExit;
    # catching Exception keeps the best-effort fallback without that risk.
    except Exception:  # noqa
        print("Could not connect to ssh server")
    # Blob-storage containers are always available as fallbacks.
    mfs.add_fs("profile_fs", profiles, priority=2)
    mfs.add_fs("scenario_fs", scenario_data, priority=1)
    remotes = ",".join([f[0] for f in mfs.iterate_fs()])
    print(f"Initialized remote filesystem with {remotes}")
    return mfs
def _get_fs(self):
    """Layer the blob-storage profile container under the writable
    (higher-priority) local filesystem."""
    layered = MultiFS()
    layered.add_fs("profile_fs", get_blob_fs("profiles"), priority=2)
    layered.add_fs("local_fs", self.local_fs, write=True, priority=3)
    return layered
def _get_fs(self, fs_url):
    """Open *fs_url* and wrap it in a MultiFS as the writable member."""
    wrapper = MultiFS()
    remote = fs.open_fs(fs_url)
    wrapper.add_fs("remotefs", remote, write=True, priority=3)
    return wrapper
def get_multi_fs(directories):
    """Open every directory as a filesystem and combine them into one MultiFS,
    keyed by the directory path itself."""
    combined = MultiFS()
    for path in directories:
        combined.add_fs(path, fs.open_fs(path))
    return combined
def build(
    fs,
    settings_path="settings.ini",
    rebuild=False,
    archive=None,
    strict=False,
    master_settings=None,
    test_build=False,
    develop=False,
):
    """Build a project"""
    # Accept an FS object, an FS URL, or a plain directory path.
    if isinstance(fs, string_types):
        if "://" in fs:
            fs = open_fs(fs)
        else:
            fs = OSFS(fs)
    if isinstance(settings_path, string_types):
        settings_path = [settings_path]
    try:
        syspath = fs.getsyspath("/")
    except NoSysPath:
        syspath = None
    cwd = os.getcwd()
    # Run the build from the project directory when it has a real system
    # path; the original cwd is restored in the finally block.
    if syspath is not None:
        os.chdir(syspath)
    try:
        log.debug("reading settings from {}".format(
            textual_list(settings_path)))
        cfg = SettingsContainer.read(fs, settings_path, master=master_settings)
        if "customize" in cfg:
            customize_location = cfg.get("customize", "location")
            if customize_location:
                settings_path = cfg.get("customize", "settings", "settings.ini")
                startup_log.info("customizing '%s'", customize_location)
                customize_fs = open_fs(cfg.get("customize", "location"))
                # Re-read settings with the customization overlaid on the
                # project's own settings.
                cfg = SettingsContainer.read(customize_fs, settings_path, master=cfg)
                # Stack the customization fs over the project fs; writes go
                # to the customization layer.
                overlay_fs = MultiFS()
                overlay_fs.add_fs("project", fs)
                overlay_fs.add_fs("custom", customize_fs, write=True)
                fs = overlay_fs
                try:
                    syspath = fs.getsyspath("/")
                except NoSysPath:
                    pass
                else:
                    os.chdir(syspath)
        if archive is None:
            archive = Archive(fs, strict=strict, test_build=test_build, develop=develop)
        context = Context()
        archive.cfg = cfg
        # Expose the archive's state on the context root for the build.
        root = context.root
        root["libs"] = archive.libs
        root["apps"] = archive.apps
        root["fs"] = FSWrapper(fs)
        root["settings"] = SettingsContainer.from_dict(archive.cfg["settings"])
        startup_path = archive.cfg.get("project", "startup")
        docs_location = archive.cfg.get("project", "location")
        archive.init_settings()
        root["console"] = archive.console
        root["debug"] = archive.debug
        root["_rebuild"] = rebuild
        # Parse the startup document and build the archive from it.
        parser = Parser(archive, fs.opendir(docs_location), startup_path)
        doc = parser.parse()
        if doc is None:
            raise errors.StartupFailedError(
                'unable to parse "{}"'.format(startup_path))
        archive.build(doc, fs=fs)
        return fs, archive, context, doc
    finally:
        # Always restore the working directory and reclaim build garbage.
        os.chdir(cwd)
        gc.collect()
def init_settings(self, cfg=None):
    """Apply project configuration to the archive.

    Reads the top-level [project]/[console] options, then walks every
    remaining config section, dispatching on the "what[:name]" section
    header to configure settings, filesystems, caches, templates,
    databases, media, mail servers, sites and themes.

    :param cfg: settings container to read from (defaults to ``self.cfg``)
    """
    cfg = cfg or self.cfg
    # Core [project] options.
    self.secret = cfg.get("project", "secret", "")
    self.preflight = cfg.get_bool("project", "preflight", False)
    self.debug = cfg.get_bool("project", "debug")
    # strict/develop may already be set on the archive; config can only
    # turn them on, never off.
    self.strict = self.strict or cfg.get_bool("project", "strict")
    self.develop = self.develop or cfg.get_bool("project", "develop")
    self.log_signals = cfg.get_bool("project", "log_signals")
    self.debug_echo = cfg.get_bool("project", "debug_echo")
    self.debug_memory = cfg.get_bool("project", "debug_memory")
    self.lib_paths = cfg.get_list("project", "paths", "./local\n./external")
    # Copy before appending so the configured list isn't mutated.
    self.lib_paths = self.lib_paths[:] + [MOYA_LIBS_PATH]
    if "console" in cfg:
        self.log_logger = cfg.get("console", "logger", None)
        self.log_color = cfg.get_bool("console", "color", True)
        self.log_width = cfg.get_int("console", "width", None)
        self.console = self.create_console()
    self.sites.set_defaults(cfg["site"])
    # Fall back to simple dict-backed caches when none are configured.
    if "templates" not in self.caches:
        self.caches["templates"] = Cache.create(
            "templates", SettingsSectionContainer({"type": "dict"})
        )
    if "runtime" not in self.caches:
        self.caches["runtime"] = Cache.create(
            "runtime", SettingsSectionContainer({"type": "dict"})
        )
    # Section kinds that must be written as [kind:name].
    require_name = ["app", "smtp", "db"]
    self.auto_reload = cfg.get_bool("autoreload", "enabled")
    if self.strict:
        startup_log.debug("strict mode is enabled")
    if self.develop:
        startup_log.debug("develop mode is enabled")
    for section_name, section in iteritems(cfg):
        section = SectionWrapper(section_name, section)
        # Section headers look like "what" or "what:name".
        if ":" in section_name:
            what, name = section_name.split(":", 1)
        else:
            what = section_name
            name = None
        if what in require_name and not name:
            raise errors.StartupFailedError(
                "name required in section, [{section}:?]".format(section=what)
            )
        # Sections handled elsewhere (or empty headers) are skipped here.
        if what in ("project", "debug", "autoreload", "console", "customize", ""):
            continue
        if what == "settings":
            # Unnamed [settings] is project-wide; [settings:name] is
            # per-application.
            if name is None:
                self.settings.update(
                    (k, SettingContainer(v)) for k, v in iteritems(section)
                )
            else:
                self.app_settings[name].update(
                    (k, SettingContainer(v)) for k, v in iteritems(section)
                )
        elif what == "application":
            self.app_system_settings[name].update(section)
        elif what == "lib":
            # Only apply settings for libraries that were actually loaded.
            if self.has_library(name):
                lib = self.get_library(name)
                lib.settings.update(
                    (k, SettingContainer(v)) for k, v in iteritems(section)
                )
        elif what == "fs":
            location = section.get("location")
            if not location:
                raise errors.StartupFailedError(
                    "a value for 'location' is required in [{}]".format(
                        section_name
                    )
                )
            create = section.get_bool("create", False)
            self.add_filesystem(name, location, create=create)
        elif what == "data":
            location = section.get("location")
            data_fs = self.open_fs(location)
            self.data_fs.add_fs(
                "archive", data_fs, priority=section.get_int("priority", 0)
            )
        elif what == "cache":
            self.init_cache(name, section)
        elif what == "templates":
            location = section["location"]
            try:
                priority = int(section["priority"])
            except (IndexError, ValueError):
                priority = 0
            self.init_templates(name, location, priority)
        elif what == "db":
            from .db import add_engine
            add_engine(self, name, section)
        elif what == "media":
            priority = section.get_int("priority", 1)
            location = section["location"]
            static_media_fs = self.open_fs(location)
            # Media is a MultiFS so further locations can be layered later.
            media_fs = MultiFS()
            media_fs.add_fs("static", static_media_fs, priority=priority)
            self.add_filesystem("media", media_fs)
            self.media_urls = section.get_list("url")
            self.media_app = section.get("app", "media")
        elif what == "smtp":
            host = section["host"]
            port = section.get_int("port", 25)
            timeout = section.get_int("timeout", None)
            username = section.get("username", None)
            password = section.get("password", None)
            default = section.get_bool("default", False)
            sender = section.get("sender", None)
            server = MailServer(
                host,
                name=name,
                port=port,
                default=default,
                timeout=timeout,
                username=username,
                password=password,
                sender=sender,
            )
            self.mail_servers[name] = server
            # The first server becomes the default unless one is explicitly
            # marked default.
            if self.default_mail_server is None or default:
                self.default_mail_server = name
            if default:
                startup_log.debug("%r (default) created", server)
            else:
                startup_log.debug("%r created", server)
        elif what == "site":
            if name:
                self.sites.add_from_section(name, section)
        elif what == "themes":
            location = section["location"]
            theme_fs = self.open_fs(location)
            self.add_filesystem("themes", theme_fs)
            # startup_log.debug("added theme filesystem '%s'", location)
        else:
            startup_log.warn("unknown settings section, [%s]", section_name)
    # The built-in moya template engine is always initialized.
    self.init_template_engine("moya", {})
def open_file(
        authority, cache, update, version_check, hasher, read_path,
        write_path=None, cache_on_write=False, mode='r', *args, **kwargs):
    '''
    Context manager for reading/writing an archive and uploading on changes

    Parameters
    ----------
    authority : object
        :py:mod:`pyFilesystem` filesystem object to use as the
        authoritative, up-to-date source for the archive

    cache : object
        :py:mod:`pyFilesystem` filesystem object to use as the cache.
        Default ``None``.

    use_cache : bool

    update, service_path, version_check, \\*\\*kwargs
    '''
    if write_path is None:
        write_path = read_path
    # Pick the cached copy or the authority, depending on version freshness.
    with _choose_read_fs(
            authority, cache, read_path, version_check, hasher) as read_fs:
        write_mode = ('w' in mode) or ('a' in mode) or ('+' in mode)
        if write_mode:
            # Modes that also read existing content ('a', 'r+') need the
            # write fs pre-populated with the current data.
            readwrite_mode = (
                ('a' in mode) or (
                    ('r' in mode) and (
                        '+' in mode)))
            with _prepare_write_fs(
                    read_fs, cache, read_path, readwrite_mode) as write_fs:
                # MultiFS routes reads through read_fs and writes to write_fs.
                wrapper = MultiFS()
                wrapper.addfs('reader', read_fs)
                wrapper.setwritefs(write_fs)
                with wrapper.open(read_path, mode, *args, **kwargs) as f:
                    yield f
                # Nothing was written: skip the upload entirely.
                info = write_fs.getinfokeys(read_path, 'size')
                if 'size' in info:
                    if info['size'] == 0:
                        return
                with write_fs.open(read_path, 'rb') as f:
                    checksum = hasher(f)
                # Only upload when the content actually changed.
                if not version_check(checksum):
                    if (
                        cache_on_write or (
                            cache and (
                                fs.path.abspath(read_path) ==
                                fs.path.abspath(write_path)) and
                            cache.fs.isfile(read_path)
                        )
                    ):
                        # Refresh the cache first, then copy from the cache
                        # to the authority.
                        _makedirs(cache.fs, fs.path.dirname(write_path))
                        fs.utils.copyfile(
                            write_fs, read_path, cache.fs, write_path)
                        _makedirs(authority.fs, fs.path.dirname(write_path))
                        fs.utils.copyfile(
                            cache.fs, write_path, authority.fs, write_path)
                    else:
                        # No cache involved: copy straight to the authority.
                        _makedirs(authority.fs, fs.path.dirname(write_path))
                        fs.utils.copyfile(
                            write_fs, read_path, authority.fs, write_path)
                    # Record the new version metadata.
                    update(**checksum)
        else:
            # Pure read: no upload bookkeeping needed.
            with read_fs.open(read_path, mode, *args, **kwargs) as f:
                yield f
def init_settings(self, cfg=None):
    """Apply project configuration to the archive.

    Reads the top-level [project]/[console] options, then walks every
    remaining config section, dispatching on the "what[:name]" section
    header to configure settings, filesystems, caches, templates,
    databases, media, mail servers and sites.

    :param cfg: settings container to read from (defaults to ``self.cfg``)
    """
    cfg = cfg or self.cfg
    # Core [project] options.
    self.secret = cfg.get('project', 'secret', '')
    self.preflight = cfg.get_bool('project', 'preflight', False)
    self.debug = cfg.get_bool('project', 'debug')
    self.develop = cfg.get_bool('project', 'develop')
    self.log_signals = cfg.get_bool('project', 'log_signals')
    self.debug_echo = cfg.get_bool('project', 'debug_echo')
    if 'console' in cfg:
        self.log_logger = cfg.get('console', 'logger', None)
        self.log_color = cfg.get_bool('console', 'color', True)
        self.log_width = cfg.get_int('console', 'width', None)
        self.console = self.create_console()
    self.sites.set_defaults(cfg['site'])
    # Fall back to simple dict-backed caches when none are configured.
    if 'templates' not in self.caches:
        self.caches['templates'] = Cache.create('templates', SettingsSectionContainer({'type': 'dict'}))
    if 'runtime' not in self.caches:
        self.caches['runtime'] = Cache.create('runtime', SettingsSectionContainer({'type': 'dict'}))
    # Section kinds that must be written as [kind:name].
    require_name = ['app', 'smtp', 'db']
    self.auto_reload = cfg.get_bool('autoreload', 'enabled')
    for section_name, section in iteritems(cfg):
        section = SectionWrapper(section_name, section)
        # Section headers look like "what" or "what:name".
        if ':' in section_name:
            what, name = section_name.split(':', 1)
        else:
            what = section_name
            name = None
        if what in require_name and not name:
            raise errors.StartupFailedError('Name/text required in project settings [{section}:]'.format(section=what))
        # Sections handled elsewhere (or empty headers) are skipped here.
        if what in ('project', 'debug', 'autoreload', 'console', ''):
            continue
        if what == "settings":
            # Unnamed [settings] is project-wide; [settings:name] is
            # per-application.
            if name is None:
                self.settings.update((k, SettingContainer(v)) for k, v in iteritems(section))
            else:
                self.app_settings[name].update((k, SettingContainer(v)) for k, v in iteritems(section))
        elif what == 'application':
            self.app_system_settings[name].update(section)
        elif what == "lib":
            # Only apply settings for libraries that were actually loaded.
            if self.has_library(name):
                lib = self.get_library(name)
                lib.settings.update((k, SettingContainer(v)) for k, v in iteritems(section))
        elif what == "fs":
            location = section.get("location")
            self.add_filesystem(name, location)
        elif what == "data":
            location = section.get("location")
            data_fs = self.open_fs(location)
            self.data_fs.addfs('archive', data_fs, priority=section.get_int('priority', 0))
        elif what == "cache":
            self.init_cache(name, section)
        elif what == "templates":
            location = section["location"]
            try:
                priority = int(section["priority"])
            except (IndexError, ValueError):
                priority = 0
            self.init_templates(name, location, priority)
        elif what == "db":
            from .db import add_engine
            add_engine(self, name, section)
        elif what == 'media':
            priority = section.get_int('priority', 1)
            location = section["location"]
            static_media_fs = self.open_fs(location)
            # Media is a MultiFS so further locations can be layered later.
            media_fs = MultiFS()
            media_fs.addfs("static", static_media_fs, priority=priority)
            self.add_filesystem('media', media_fs)
            self.media_urls = section.get_list('url')
            self.media_app = section.get('app', 'media')
        elif what == "smtp":
            host = section["host"]
            port = section.get_int('port', 25)
            timeout = section.get_int('timeout', None)
            username = section.get('username', None)
            password = section.get("password", None)
            default = section.get_bool('default', False)
            sender = section.get('sender', None)
            server = MailServer(host, name=name, port=port, default=default, timeout=timeout, username=username, password=password, sender=sender)
            self.mail_servers[name] = server
            # The first server becomes the default unless one is explicitly
            # marked default.
            if self.default_mail_server is None or default:
                self.default_mail_server = name
            if default:
                startup_log.debug('%r (default) created', server)
            else:
                startup_log.debug('%r created', server)
        elif what == "site":
            if name:
                self.sites.add_from_section(name, section)
        else:
            startup_log.warn("unknown settings section: [%s]", section_name)
    # The built-in moya template engine is always initialized.
    self.init_template_engine('moya', {})
def make_fs(self):
    """Return a MultiFS backed by a single writable in-memory filesystem."""
    backing = MemoryFS()
    combined = MultiFS()
    combined.add_fs("mem", backing, write=True)
    return combined
def test_no_writable(self):
    """Writing to a MultiFS with no designated write fs must fail."""
    empty_multi = MultiFS()
    with self.assertRaises(errors.ResourceReadOnly):
        empty_multi.setbytes('foo', b'bar')
def test_no_writable(self):
    """Writing to a MultiFS with no designated write fs must fail."""
    empty_multi = MultiFS()
    with self.assertRaises(errors.ResourceReadOnly):
        empty_multi.writebytes("foo", b"bar")
def setUp(self):
    """Create a MultiFS with a single writable in-memory member."""
    combined = MultiFS()
    backing = MemoryFS()
    combined.add_fs("mem", backing, write=True)
    # Keep both handles so tests can inspect the backing fs directly.
    self.fs = combined
    self.mem_fs = backing