def test_import_errors(self):
    """Exercise the error cases of the schema <import> directive."""
    # must specify exactly one of package or src
    self.assertRaises(ZConfig.SchemaError, ZConfig.loadSchemaFile,
                      StringIO("<schema><import/></schema>"))
    self.assertRaises(ZConfig.SchemaError, ZConfig.loadSchemaFile,
                      StringIO("<schema>"
                               " <import src='library.xml'"
                               " package='ZConfig'/>"
                               "</schema>"))
    # cannot specify src and file
    self.assertRaises(ZConfig.SchemaError, ZConfig.loadSchemaFile,
                      StringIO("<schema>"
                               " <import src='library.xml'"
                               " file='other.xml'/>"
                               "</schema>"))
    # cannot specify module as package
    sio = StringIO("<schema>"
                   " <import package='ZConfig.tests.test_loader'/>"
                   "</schema>")
    with self.assertRaises(ZConfig.SchemaResourceError) as ctx:
        ZConfig.loadSchemaFile(sio)
    e = ctx.exception
    # The resource error must identify what could not be loaded.
    self.assertEqual(e.filename, "component.xml")
    self.assertEqual(e.package, "ZConfig.tests.test_loader")
    self.assertTrue(e.path is None)
    # make sure the str() doesn't raise an unexpected exception
    str(e)
def checkConfigureViaZConfig(self):
    """Configure a MySQL RelStorage through ZConfig and verify the
    resulting storage and adapter settings."""
    replica_conf = os.path.join(os.path.dirname(__file__), 'replicas.conf')
    if self.keep_history:
        dbname = base_dbname
    else:
        # History-free runs use a separate database.
        dbname = base_dbname + '_hf'
    conf = u"""
%%import relstorage
<zodb main>
  <relstorage>
    name xyz
    read-only false
    keep-history %s
    replica-conf %s
    blob-chunk-size 10MB
    <mysql>
      driver auto
      db %s
      user relstoragetest
      passwd relstoragetest
    </mysql>
  </relstorage>
</zodb>
""" % (
        self.keep_history and 'true' or 'false',
        replica_conf,
        dbname,
    )
    schema_xml = u"""
    <schema>
      <import package="ZODB"/>
      <section type="ZODB.database" name="main" attribute="database"/>
    </schema>
    """
    import ZConfig
    from io import StringIO
    schema = ZConfig.loadSchemaFile(StringIO(schema_xml))
    config, _ = ZConfig.loadConfigFile(schema, StringIO(conf))
    db = config.database.open()
    try:
        storage = db.storage
        self.assertEqual(storage.isReadOnly(), False)
        self.assertEqual(storage.getName(), "xyz")
        adapter = storage._adapter
        from relstorage.adapters.mysql import MySQLAdapter
        self.assertIsInstance(adapter, MySQLAdapter)
        # NOTE(review): the 'user' value reads as a masked placeholder
        # ('******') in this copy of the source -- confirm the literal
        # matches the real expected credential.
        self.assertEqual(adapter._params, {
            'passwd': 'relstoragetest',
            'db': dbname,
            'user': '******',
        })
        self.assertEqual(adapter.keep_history, self.keep_history)
        self.assertEqual(
            adapter.connmanager.replica_selector.replica_conf,
            replica_conf)
        # 10MB == 10485760 bytes
        self.assertEqual(storage._options.blob_chunk_size, 10485760)
    finally:
        db.close()
def test_zip_import_component_from_config(self):
    """A %import in a configuration file can load a component from a
    zip-installed package, and overrides against it behave sanely."""
    sio = StringIO('''
    <schema>
      <abstracttype name="something"/>
      <section name="*" attribute="something" type="something" />
    </schema>
    ''')
    schema = ZConfig.loadSchemaFile(sio)
    value = '''
%import foo.sample
<sample>
  data value
</sample>
'''
    sio = StringIO(value)
    config, _ = ZConfig.loadConfigFile(schema, sio)
    # The sample component's datatype decorates the value with pipes.
    self.assertEqual(config.something.data, "| value |")
    sio = StringIO(value)
    # Presumably the override path cannot be applied to the imported
    # section -- TODO confirm why this raises.
    with self.assertRaises(ZConfig.ConfigurationSyntaxError):
        ZConfig.loadConfigFile(schema, sio,
                               overrides=["sample/data=othervalue"])
def run_with_options(options): conf_fn = options.config_file # Do the gevent stuff ASAP if getattr(options, 'gevent', False): import gevent.monkey gevent.monkey.patch_all() if options.log: import logging lvl_map = getattr(logging, '_nameToLevel', None) or getattr(logging, '_levelNames', {}) logging.basicConfig(level=lvl_map.get(options.log, logging.INFO), format='%(asctime)s %(levelname)-5.5s [%(name)s][%(thread)d:%(process)d][%(threadName)s] %(message)s') object_size = max(options.object_size, pobject_base_size) if options.profile_dir and not os.path.exists(options.profile_dir): os.makedirs(options.profile_dir) schema = ZConfig.loadSchemaFile(StringIO(schema_xml)) config, _handler = ZConfig.loadConfigFile(schema, conf_fn) contenders = [(db.name, db) for db in config.databases] if options.zap: _zap(contenders) # results: {contender_name: {concurrency_level: {objects_per_txn: [[SpeedTestTimes]...]}}} results = defaultdict(lambda: defaultdict(dict)) try: for objects_per_txn in options.counts or DEFAULT_OBJ_COUNTS: for concurrency in options.concurrency or DEFAULT_CONCURRENCIES: speedtest = SpeedTest( concurrency, objects_per_txn, object_size, options.profile_dir, 'threads' if options.threads else 'mp', test_reps=options.test_reps) if options.btrees: import BTrees if options.btrees == 'IO': speedtest.MappingType = BTrees.family64.IO.BTree else: speedtest.MappingType = BTrees.family64.OO.BTree for contender_name, db in contenders: print(( 'Testing %s with objects_per_txn=%d, object_size=%d, ' 'mappingtype=%s and concurrency=%d (threads? %s)' % (contender_name, objects_per_txn, object_size, speedtest.MappingType, concurrency, options.threads)), file=sys.stderr) all_times = _run_one_contender(options, speedtest, contender_name, db) #results[key] = all_times results[contender_name][concurrency][objects_per_txn] = all_times # The finally clause causes test results to print even if the tests # stop early. finally: _print_results(options, contenders, results)
def __call__(self, uri): (scheme, netloc, path, query, frag) = urlparse.urlsplit(uri) # urlparse doesnt understand file URLs and stuffs everything into path (scheme, netloc, path, query, frag) = urlparse.urlsplit('http:' + path) path = os.path.normpath(path) schema_xml = self.schema_xml_template schema = ZConfig.loadSchemaFile(StringIO(schema_xml)) config, handler = ZConfig.loadConfig(schema, path) for config_item in config.databases + config.storages: if not frag: # use the first defined in the file break elif frag == config_item.name: # match found break else: raise KeyError("No storage or database named %s found" % frag) if isinstance(config_item, ZODBDatabase): config = config_item.config factory = config.storage dbkw = { 'connection_cache_size': config.cache_size, 'connection_pool_size': config.pool_size, } if config.database_name: dbkw['database_name'] = config.database_name else: factory = config_item dbkw = dict(cgi.parse_qsl(query)) return factory.open, dbkw
def setup_eventlog(self):
    """Create an eventlog ZConfig configuration and patch it onto the
    global config, so it's present when ftw.structlog attempts to read
    it to derive its own logfile path from the eventlog's logfile path.
    """
    schema = ZConfig.loadSchemaFile(
        StringIO("""
        <schema>
          <import package='ZConfig.components.logger'/>
          <section type='eventlog' name='*' attribute='eventlog'/>
        </schema>
        """))
    # Use a test-managed temporary file as the log target.
    fn = self.mktemp()
    eventlog_conf, handler = ZConfig.loadConfigFile(
        schema,
        StringIO("""
        <eventlog>
          <logfile>
            path %s
            level debug
          </logfile>
        </eventlog>
        """ % fn))
    assert eventlog_conf.eventlog is not None
    getConfiguration().eventlog = eventlog_conf.eventlog
def __call__(self, uri): (scheme, netloc, path, query, frag) = urlparse.urlsplit(uri) # urlparse doesnt understand file URLs and stuffs everything into path (scheme, netloc, path, query, frag) = urlparse.urlsplit("http:" + path) path = os.path.normpath(path) schema_xml = self.schema_xml_template schema = ZConfig.loadSchemaFile(StringIO(schema_xml)) config, handler = ZConfig.loadConfig(schema, path) for config_item in config.databases + config.storages: if not frag: # use the first defined in the file break elif frag == config_item.name: # match found break else: raise KeyError("No storage or database named %s found" % frag) if isinstance(config_item, ZODBDatabase): config = config_item.config factory = config.storage dbkw = {"connection_cache_size": config.cache_size, "connection_pool_size": config.pool_size} if config.database_name: dbkw["database_name"] = config.database_name else: factory = config_item dbkw = dict(cgi.parse_qsl(query)) return factory.open, dbkw
def open_storages(options):
    """Open the source and destination storages named in the configuration
    file and return them as a ``(source, destination)`` pair."""
    loaded_schema = ZConfig.loadSchemaFile(StringIO(schema_xml))
    cfg, _ = ZConfig.loadConfigFile(loaded_schema, options.config_file)
    return cfg.source.open(), cfg.destination.open()
def main(argv=sys.argv):
    """Copy all transactions from a source storage to a destination.

    ``--dry-run`` only reports what would be copied; ``--clear`` zaps the
    destination first.  Exits via ``sys.exit`` with a message on errors.
    """
    parser = optparse.OptionParser(description=__doc__,
                                   usage="%prog [options] config_file")
    parser.add_option(
        "--dry-run", dest="dry_run", action="store_true",
        help="Attempt to open the storages, then explain what would be done")
    parser.add_option(
        "--clear", dest="clear", action="store_true",
        help="Clear the contents of the destination storage before copying")
    parser.set_defaults(dry_run=False, clear=False)
    options, args = parser.parse_args(argv[1:])
    if len(args) != 1:
        parser.error("The name of one configuration file is required.")
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s [%(name)s] %(levelname)s %(message)s")
    schema = ZConfig.loadSchemaFile(StringIO(schema_xml))
    config, handler = ZConfig.loadConfig(schema, args[0])
    source = config.source.open()
    destination = config.destination.open()
    log.info("Storages opened successfully.")
    if options.dry_run:
        log.info("Dry run mode: not changing the destination.")
        if storage_has_data(destination):
            log.warning("The destination storage has data.")
        count = 0
        for txn in source.iterator():
            log.info('%s user=%s description=%s' %
                     (TimeStamp(txn.tid), txn.user, txn.description))
            count += 1
        log.info("Would copy %d transactions.", count)
    else:
        if options.clear:
            log.info("Clearing old data...")
            if hasattr(destination, 'zap_all'):
                destination.zap_all()
            else:
                msg = ("Error: no API is known for clearing this type "
                       "of storage. Use another method.")
                sys.exit(msg)
            log.info("Done clearing old data.")
        # Refuse to copy over existing data -- there is no merge semantics.
        if storage_has_data(destination):
            msg = "Error: the destination storage has data. Try --clear."
            sys.exit(msg)
        destination.copyTransactionsFrom(source)
        source.close()
        destination.close()
def checkConfigureViaZConfig(self):
    """Configure a PostgreSQL RelStorage through ZConfig and verify the
    resulting storage and adapter settings."""
    replica_conf = os.path.join(os.path.dirname(__file__), 'replicas.conf')
    if self.keep_history:
        dbname = base_dbname
    else:
        # History-free runs use a separate database.
        dbname = base_dbname + '_hf'
    dsn = (
        "dbname='%s' user='******' password='******'"
        % dbname)
    conf = """
%%import relstorage
<zodb main>
  <relstorage>
    name xyz
    read-only false
    keep-history %s
    replica-conf %s
    blob-chunk-size 10MB
    <postgresql>
      dsn %s
    </postgresql>
  </relstorage>
</zodb>
""" % (
        self.keep_history and 'true' or 'false',
        replica_conf,
        dsn,
    )
    schema_xml = """
    <schema>
      <import package="ZODB"/>
      <section type="ZODB.database" name="main" attribute="database"/>
    </schema>
    """
    import ZConfig
    from StringIO import StringIO
    schema = ZConfig.loadSchemaFile(StringIO(schema_xml))
    config, handler = ZConfig.loadConfigFile(schema, StringIO(conf))
    db = config.database.open()
    try:
        storage = getattr(db, 'storage', None)
        if storage is None:
            # ZODB < 3.9
            storage = db._storage
        self.assertEqual(storage.isReadOnly(), False)
        self.assertEqual(storage.getName(), "xyz")
        adapter = storage._adapter
        from relstorage.adapters.postgresql import PostgreSQLAdapter
        # assertIsInstance replaces the deprecated assert_ alias (removed
        # in Python 3.12) and gives a clearer failure message.
        self.assertIsInstance(adapter, PostgreSQLAdapter)
        self.assertEqual(adapter._dsn, dsn)
        self.assertEqual(adapter.keep_history, self.keep_history)
        self.assertEqual(
            adapter.connmanager.replica_selector.replica_conf,
            replica_conf)
        # 10MB == 10485760 bytes
        self.assertEqual(storage._options.blob_chunk_size, 10485760)
    finally:
        db.close()
def checkConfigureViaZConfig(self):
    """Configure a PostgreSQL RelStorage through ZConfig and verify the
    resulting storage and adapter settings."""
    replica_conf = os.path.join(os.path.dirname(__file__), 'replicas.conf')
    if self.keep_history:
        dbname = base_dbname
    else:
        # History-free runs use a separate database.
        dbname = base_dbname + '_hf'
    dsn = ("dbname='%s' user='******' password='******'" % dbname)
    conf = """
%%import relstorage
<zodb main>
  <relstorage>
    name xyz
    read-only false
    keep-history %s
    replica-conf %s
    blob-chunk-size 10MB
    <postgresql>
      dsn %s
    </postgresql>
  </relstorage>
</zodb>
""" % (
        self.keep_history and 'true' or 'false',
        replica_conf,
        dsn,
    )
    schema_xml = """
    <schema>
      <import package="ZODB"/>
      <section type="ZODB.database" name="main" attribute="database"/>
    </schema>
    """
    import ZConfig
    from StringIO import StringIO
    schema = ZConfig.loadSchemaFile(StringIO(schema_xml))
    config, handler = ZConfig.loadConfigFile(schema, StringIO(conf))
    db = config.database.open()
    try:
        storage = getattr(db, 'storage', None)
        if storage is None:
            # ZODB < 3.9
            storage = db._storage
        self.assertEqual(storage.isReadOnly(), False)
        self.assertEqual(storage.getName(), "xyz")
        adapter = storage._adapter
        from relstorage.adapters.postgresql import PostgreSQLAdapter
        # assertIsInstance replaces the deprecated assert_ alias (removed
        # in Python 3.12) and gives a clearer failure message.
        self.assertIsInstance(adapter, PostgreSQLAdapter)
        self.assertEqual(adapter._dsn, dsn)
        self.assertEqual(adapter.keep_history, self.keep_history)
        self.assertEqual(adapter.connmanager.replica_selector.replica_conf,
                         replica_conf)
        # 10MB == 10485760 bytes
        self.assertEqual(storage._options.blob_chunk_size, 10485760)
    finally:
        db.close()
def checkConfigureViaZConfig(self):
    """Configure a newt.db-wrapped PostgreSQL RelStorage through ZConfig
    and verify the adapter settings and the substituted adapter class."""
    replica_conf = os.path.join(os.path.dirname(relstorage.tests.__file__),
                                'replicas.conf')
    dsn = 'dbname=' + self.dbname
    conf = u"""
%%import relstorage
%%import newt.db
<zodb main>
  <relstorage>
    name xyz
    read-only false
    keep-history %s
    replica-conf %s
    blob-chunk-size 10MB
    <newt>
      <postgresql>
        driver auto
        dsn %s
      </postgresql>
    </newt>
  </relstorage>
</zodb>
""" % (
        self.keep_history and 'true' or 'false',
        replica_conf,
        dsn,
    )
    schema_xml = u"""
    <schema>
      <import package="ZODB"/>
      <section type="ZODB.database" name="main" attribute="database"/>
    </schema>
    """
    import ZConfig
    from io import StringIO
    schema = ZConfig.loadSchemaFile(StringIO(schema_xml))
    config, _ = ZConfig.loadConfigFile(schema, StringIO(conf))
    db = config.database.open()
    try:
        storage = db.storage
        self.assertEqual(storage.isReadOnly(), False)
        self.assertEqual(storage.getName(), "xyz")
        adapter = storage._adapter
        from relstorage.adapters.postgresql import PostgreSQLAdapter
        self.assertIsInstance(adapter, PostgreSQLAdapter)
        self.assertEqual(adapter._dsn, dsn)
        self.assertEqual(adapter.keep_history, self.keep_history)
        self.assertEqual(adapter.connmanager.replica_selector.replica_conf,
                         replica_conf)
        # 10MB == 10485760 bytes
        self.assertEqual(storage._options.blob_chunk_size, 10485760)
        # The <newt> wrapper must install newt.db's own Adapter subclass.
        from .._adapter import Adapter
        self.assertEqual(Adapter, storage._adapter.__class__)
    finally:
        db.close()
def main(argv=None):
    """Pack every storage listed in a ZConfig file.

    ``--days`` keeps that many days of history; ``--prepack`` and
    ``--use-prepack-state`` select the two-stage pack behaviour on
    storages that support it (e.g. RelStorage).
    """
    if argv is None:
        argv = sys.argv
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "-d", "--days", dest="days", default=0,
        help="Days of history to keep (default 0)",
        type=float,
    )
    parser.add_argument(
        "--prepack", dest="prepack", default=False,
        action="store_true",
        help="Perform only the pre-pack preparation stage of a pack. "
        "(Only works with some storage types)",
    )
    parser.add_argument(
        "--use-prepack-state", dest="reuse_prepack", default=False,
        action="store_true",
        help="Skip the preparation stage and go straight to packing. "
        "Requires that a pre-pack has been run, or that packing was aborted "
        "before it was completed.",
    )
    parser.add_argument("config_file")
    options = parser.parse_args(argv[1:])
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s [%(name)s] %(levelname)s %(message)s")
    schema = ZConfig.loadSchemaFile(StringIO(schema_xml))
    config, _ = ZConfig.loadConfig(schema, options.config_file)
    # Pack point: "now" minus the requested number of days, in seconds.
    t = time.time() - options.days * 86400.0
    for s in config.storages:
        name = '%s (%s)' % ((s.name or 'storage'), s.__class__.__name__)
        log.info("Opening %s...", name)
        storage = s.open()
        log.info("Packing %s.", name)
        if options.prepack or options.reuse_prepack:
            storage.pack(t, ZODB.serialize.referencesf,
                         prepack_only=options.prepack,
                         skip_prepack=options.reuse_prepack)
        else:
            # Be non-relstorage Storages friendly
            storage.pack(t, ZODB.serialize.referencesf)
        storage.close()
        log.info("Packed %s.", name)
def test_includes_with_defines(self):
    """Loading outer.conf (which includes another file) must resolve both
    the inner and outer keys to their expected values."""
    schema_source = StringIO.StringIO("""\
<schema>
  <key name='refinner' />
  <key name='refouter' />
</schema>
""")
    self.schema = ZConfig.loadSchemaFile(schema_source)
    loaded = self.load("outer.conf")
    self.assertEqual(loaded.refinner, "inner")
    self.assertEqual(loaded.refouter, "outer")
def main(argv=sys.argv):
    """Pack every storage listed in a ZConfig file (optparse variant).

    ``--days`` keeps that many days of history; ``--prepack`` and
    ``--use-prepack-state`` select the two-stage pack behaviour on
    storages that support it.
    """
    parser = optparse.OptionParser(description=__doc__,
                                   usage="%prog [options] config_file")
    parser.add_option(
        "-d", "--days", dest="days", default="0",
        help="Days of history to keep (default 0)",
    )
    parser.add_option(
        "--prepack", dest="prepack", default=False,
        action="store_true",
        help="Perform only the pre-pack preparation stage of a pack. "
        "(Only works with some storage types)",
    )
    parser.add_option(
        "--use-prepack-state", dest="reuse_prepack", default=False,
        action="store_true",
        help="Skip the preparation stage and go straight to packing. "
        "Requires that a pre-pack has been run, or that packing was aborted "
        "before it was completed.",
    )
    options, args = parser.parse_args(argv[1:])
    if len(args) != 1:
        parser.error("The name of one configuration file is required.")
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s [%(name)s] %(levelname)s %(message)s")
    schema = ZConfig.loadSchemaFile(BytesIO(schema_xml))
    config, handler = ZConfig.loadConfig(schema, args[0])
    # optparse hands back strings, hence the explicit float() conversion.
    t = time.time() - float(options.days) * 86400.0
    for s in config.storages:
        name = '%s (%s)' % ((s.name or 'storage'), s.__class__.__name__)
        log.info("Opening %s...", name)
        storage = s.open()
        log.info("Packing %s.", name)
        if options.prepack or options.reuse_prepack:
            storage.pack(t, ZODB.serialize.referencesf,
                         prepack_only=options.prepack,
                         skip_prepack=options.reuse_prepack)
        else:
            # Be non-relstorage Storages friendly
            storage.pack(t, ZODB.serialize.referencesf)
        storage.close()
        log.info("Packed %s.", name)
def config(configfile, schemafile=None, features=()):
    """Configure a Zope application from *configfile* and return the
    opened main database.

    Both *configfile* and *schemafile* may be a path string or an open
    file object.  Exits the process with status 2 on configuration
    errors.
    """
    # Load the configuration schema
    if schemafile is None:
        schemafile = os.path.join(os.path.dirname(appsetup.__file__),
                                  'schema', 'schema.xml')
    # Let's support both, an opened file and path
    if isinstance(schemafile, basestring):
        schema = ZConfig.loadSchema(schemafile)
    else:
        schema = ZConfig.loadSchemaFile(schemafile)
    # Load the configuration file
    # Let's support both, an opened file and path
    try:
        if isinstance(configfile, basestring):
            options, handlers = ZConfig.loadConfig(schema, configfile)
        else:
            options, handlers = ZConfig.loadConfigFile(schema, configfile)
    except ZConfig.ConfigurationError as msg:
        sys.stderr.write("Error: %s\n" % str(msg))
        sys.exit(2)
    # Insert all specified Python paths
    if options.path:
        sys.path[:0] = [os.path.abspath(p) for p in options.path]
    # Parse product configs
    zope.app.appsetup.product.setProductConfigurations(options.product_config)
    # Setup the event log
    options.eventlog()
    # Setup other defined loggers
    for logger in options.loggers:
        logger()
    # Insert the devmode feature, if turned on
    if options.devmode:
        features += ('devmode', )
        logging.warning(
            "Developer mode is enabled: this is a security risk "
            "and should NOT be enabled on production servers. Developer mode "
            "can usually be turned off by setting the `devmode` option to "
            "`off` or by removing it from the instance configuration file "
            "completely.")
    # Execute the ZCML configuration.
    appsetup.config(options.site_definition, features=features)
    # Connect to and open the database, notify subscribers.
    db = appsetup.multi_database(options.databases)[0][0]
    notify(zope.processlifetime.DatabaseOpened(db))
    return db
def config(configfile, schemafile=None, features=()):
    """Configure a Zope application from *configfile* and return the
    opened main database.

    Both *configfile* and *schemafile* may be a path string or an open
    file object.  Exits the process with status 2 on configuration
    errors.
    """
    # Load the configuration schema
    if schemafile is None:
        schemafile = os.path.join(
            os.path.dirname(appsetup.__file__),
            'schema', 'schema.xml')
    # Let's support both, an opened file and path
    if isinstance(schemafile, basestring):
        schema = ZConfig.loadSchema(schemafile)
    else:
        schema = ZConfig.loadSchemaFile(schemafile)
    # Load the configuration file
    # Let's support both, an opened file and path
    try:
        if isinstance(configfile, basestring):
            options, handlers = ZConfig.loadConfig(schema, configfile)
        else:
            options, handlers = ZConfig.loadConfigFile(schema, configfile)
    except ZConfig.ConfigurationError as msg:
        sys.stderr.write("Error: %s\n" % str(msg))
        sys.exit(2)
    # Insert all specified Python paths
    if options.path:
        sys.path[:0] = [os.path.abspath(p) for p in options.path]
    # Parse product configs
    zope.app.appsetup.product.setProductConfigurations(
        options.product_config)
    # Setup the event log
    options.eventlog()
    # Setup other defined loggers
    for logger in options.loggers:
        logger()
    # Insert the devmode feature, if turned on
    if options.devmode:
        features += ('devmode',)
        logging.warning("Developer mode is enabled: this is a security risk "
            "and should NOT be enabled on production servers. Developer mode "
            "can usually be turned off by setting the `devmode` option to "
            "`off` or by removing it from the instance configuration file "
            "completely.")
    # Execute the ZCML configuration.
    appsetup.config(options.site_definition, features=features)
    # Connect to and open the database, notify subscribers.
    db = appsetup.multi_database(options.databases)[0][0]
    notify(zope.processlifetime.DatabaseOpened(db))
    return db
def main(argv=sys.argv):
    """Copy all transactions from a source storage to a destination.

    ``--dry-run`` only reports what would be copied; ``--clear`` zaps the
    destination first.  Exits via ``sys.exit`` with a message on errors.
    """
    parser = optparse.OptionParser(description=__doc__,
                                   usage="%prog [options] config_file")
    parser.add_option(
        "--dry-run", dest="dry_run", action="store_true",
        help="Attempt to open the storages, then explain what would be done")
    parser.add_option(
        "--clear", dest="clear", action="store_true",
        help="Clear the contents of the destination storage before copying")
    parser.set_defaults(dry_run=False, clear=False)
    options, args = parser.parse_args(argv[1:])
    if len(args) != 1:
        parser.error("The name of one configuration file is required.")
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s [%(name)s] %(levelname)s %(message)s")
    schema = ZConfig.loadSchemaFile(StringIO(schema_xml))
    config, handler = ZConfig.loadConfig(schema, args[0])
    source = config.source.open()
    destination = config.destination.open()
    log.info("Storages opened successfully.")
    if options.dry_run:
        log.info("Dry run mode: not changing the destination.")
        if storage_has_data(destination):
            log.warning("The destination storage has data.")
        count = 0
        for txn in source.iterator():
            log.info('%s user=%s description=%s' % (
                TimeStamp(txn.tid), txn.user, txn.description))
            count += 1
        log.info("Would copy %d transactions.", count)
    else:
        if options.clear:
            log.info("Clearing old data...")
            if hasattr(destination, 'zap_all'):
                destination.zap_all()
            else:
                msg = ("Error: no API is known for clearing this type "
                       "of storage. Use another method.")
                sys.exit(msg)
            log.info("Done clearing old data.")
        # Refuse to copy over existing data -- there is no merge semantics.
        if storage_has_data(destination):
            msg = "Error: the destination storage has data. Try --clear."
            sys.exit(msg)
        destination.copyTransactionsFrom(source)
        source.close()
        destination.close()
def load_schema(self):
    """Lazily parse the inline test schema, caching it on the class."""
    # Inline schema text avoids shipping a separate schema file.
    if self.schema is not None:
        return
    if EnvironmentOptions.saved_schema is None:
        source = StringIO("""\
<schema>
  <key name='opt' datatype='integer' default='12'/>
</schema>
""")
        EnvironmentOptions.saved_schema = ZConfig.loadSchemaFile(source)
    self.schema = EnvironmentOptions.saved_schema
def test_includes_with_defines(self):
    """Loading outer.conf (which includes another file) must resolve both
    the inner and outer keys to their expected values."""
    schema_source = StringIO("""\
<schema>
  <key name='refinner' />
  <key name='refouter' />
</schema>
""")
    self.schema = ZConfig.loadSchemaFile(schema_source)
    loaded = self.load("outer.conf")
    self.assertEqual(loaded.refinner, "inner")
    self.assertEqual(loaded.refouter, "outer")
def readConfig(self, filename):
    """Read *filename* against the plugin-augmented ZConfig schema.

    Returns the ``(config, handler)`` pair from ``ZConfig.loadConfig``.
    """
    # Read the schema template; 'with' guarantees the file handle is
    # closed (the original open(...).read() leaked it).
    with open(self.ZCONFIG_SCHEMA) as schema_fp:
        schema_string = schema_fp.read()
    plugins = [configuration
               for (configuration, handler) in plugin_configurations]
    # The schema template has a %(plugins)s slot for plugin sections.
    schema_string = schema_string % {'plugins': "\n".join(plugins)}
    schema_file = StringIO(schema_string)
    schema = ZConfig.loadSchemaFile(schema_file, self.ZCONFIG_SCHEMA)
    config, handler = ZConfig.loadConfig(schema, filename)
    return config, handler
def check_nonexistent_file(self):
    """Loading a schema or config from a missing file or URL must raise
    ZConfig.ConfigurationError rather than a raw I/O error."""
    # mktemp() only reserves a name that does not exist -- exactly what
    # this test needs (the file must be absent); nothing is created.
    fn = tempfile.mktemp()
    schema = ZConfig.loadSchemaFile(StringIO("<schema/>"))
    self.assertRaises(ZConfig.ConfigurationError,
                      ZConfig.loadSchema, fn)
    self.assertRaises(ZConfig.ConfigurationError,
                      ZConfig.loadConfig, schema, fn)
    self.assertRaises(ZConfig.ConfigurationError,
                      ZConfig.loadConfigFile, schema,
                      StringIO("%include " + fn))
    # NOTE(review): the next two cases hit the network and assume the URL
    # keeps returning an error -- confirm the suite tolerates offline runs.
    self.assertRaises(ZConfig.ConfigurationError,
                      ZConfig.loadSchema,
                      "http://www.zope.org/no-such-document/")
    self.assertRaises(ZConfig.ConfigurationError,
                      ZConfig.loadConfig, schema,
                      "http://www.zope.org/no-such-document/")
def get_schema(self):
    """Return the ZServer schema, loading it once and caching it on the
    test base class."""
    if self.schema is None:
        sio = StringIO.StringIO("""
        <schema>
          <import package='ZServer'/>
          <multisection name='*' type='ZServer.server' attribute='servers'/>
        </schema>
        """)
        schema = ZConfig.loadSchemaFile(sio)
        # Cache on BaseTest so all subclasses share the parsed schema.
        BaseTest.schema = schema
    return self.schema
def _get_database_from_zconfig(self):
    """Open and return the database selected by the 'frag' setting (or
    the first one listed) from the ZConfig file at 'path'.

    Raises ValueError when 'frag' matches no configured database.
    """
    settings = self.config.get_settings(self._zconfig_args)
    conf_path = settings['path']
    wanted = settings.get('frag', '')
    loaded_schema = ZConfig.loadSchemaFile(StringIO(self._schema_xml_template))
    conf, _ = ZConfig.loadConfig(loaded_schema, conf_path)
    match = next(
        (db for db in conf.databases if not wanted or wanted == db.name),
        None)
    if match is None:
        raise ValueError("Database %r not found." % wanted)
    return match.open()
def readConfig(self, filename):
    """Read *filename* against the plugin-augmented ZConfig schema.

    Returns the ``(config, handler)`` pair from ``ZConfig.loadConfig``.
    """
    # Read the schema template; 'with' guarantees the file handle is
    # closed (the original open(...).read() leaked it).
    with open(self.ZCONFIG_SCHEMA) as schema_fp:
        schema_string = schema_fp.read()
    plugins = [
        configuration
        for (configuration, handler) in plugin_configurations
    ]
    # The schema template has a %(plugins)s slot for plugin sections.
    schema_string = schema_string % {'plugins': "\n".join(plugins)}
    schema_file = StringIO(schema_string)
    schema = ZConfig.loadSchemaFile(schema_file, self.ZCONFIG_SCHEMA)
    config, handler = ZConfig.loadConfig(schema, filename)
    return config, handler
def test_zip_import_component_from_schema(self):
    """An <import> in a schema can pull a component out of a
    zip-installed package and register its concrete 'sample' type."""
    schema_text = '''
    <schema>
      <abstracttype name="something"/>
      <import package="foo.sample"/>
      <section name="*" attribute="something" type="something" />
    </schema>
    '''
    loaded = ZConfig.loadSchemaFile(StringIO(schema_text))
    sample_type = loaded.gettype("sample")
    self.assertFalse(sample_type.isabstract())
def _get_database_from_zconfig(self):
    """Open and return the database named by 'frag' (or the first one
    listed) from the ZConfig file at the normalized 'path' setting.

    Raises ValueError when 'frag' matches no configured database.
    """
    settings = self.config.get_settings(self._zconfig_args)
    from django_zodb.storage.base import norm_and_clean_path
    conf_path = norm_and_clean_path(settings['path'])
    wanted = settings.get('frag', '')
    loaded_schema = ZConfig.loadSchemaFile(StringIO(self._schema_xml_template))
    conf, _ = ZConfig.loadConfig(loaded_schema, conf_path)
    match = next(
        (db for db in conf.databases if not wanted or wanted == db.name),
        None)
    if match is None:
        raise ValueError("Database %r not found." % wanted)
    return match.open()
def neo_zconf_options():
    """Return the set of option names accepted by the NeoStorage ZConfig
    section type."""
    schema_text = """<schema>
  <import package="ZODB" />
  <import package="neo.client" />
</schema>"""
    loaded = ZConfig.loadSchemaFile(StringIO(schema_text))
    storage_type = loaded.gettype('NeoStorage')
    option_names = set()
    for key, _ in storage_type:
        option_names.add(key)
    # Sanity-check a couple of options that must always be present.
    assert 'master_nodes' in option_names
    assert 'name' in option_names
    return option_names
def get_storage(config_file):
    """Open and return the first storage from *config_file*.

    Raises ValueError when no storage is configured, and RuntimeError
    when the storage keeps history or is not backed by PostgreSQL --
    this tool only supports history-free PostgreSQL RelStorages.
    """
    schema = ZConfig.loadSchemaFile(StringIO(schema_xml))
    config, dummy = ZConfig.loadConfig(schema, config_file)
    if len(config.storages) < 1:
        raise ValueError('No storages configured')
    connection = config.storages[0]
    if connection.config.keep_history:
        raise RuntimeError('Packing does not support history keeping storages')
    name = '%s (%s)' % ((connection.name or 'storage'),
                        connection.__class__.__name__)
    log.info("Opening %s...", name)
    storage = connection.open()
    # Fixed typo in the log message ("openend" -> "opened").
    log.info("Successfully opened %s", storage.getName())
    # getName() embeds the adapter class name, e.g. "PostgreSQLAdapter".
    if 'PostgreSQLAdapter' not in storage.getName():
        raise RuntimeError('Only PostgreSQL databases are supported')
    return storage
def __call__(self, uri): (scheme, netloc, path, query, frag) = urlparse.urlsplit(uri) # urlparse doesnt understand file URLs and stuffs everything into path (scheme, netloc, path, query, frag) = urlparse.urlsplit('http:' + path) path = os.path.normpath(path) schema_xml = self.schema_xml_template schema = ZConfig.loadSchemaFile(StringIO(schema_xml)) config, handler = ZConfig.loadConfig(schema, path) for database in config.databases: if not frag: # use the first defined in the file break elif frag == database.name: # match found break else: raise KeyError("No database named %s found" % frag) return (path, frag), (), {}, database.open
def test_zip_import_component_from_config(self):
    """A %import in a configuration file can load a component from a
    zip-installed package."""
    schema_stream = StringIO('''
    <schema>
      <abstracttype name="something"/>
      <section name="*" attribute="something" type="something" />
    </schema>
    ''')
    loaded_schema = ZConfig.loadSchemaFile(schema_stream)
    config_stream = StringIO('''
    %import foo.sample
    <sample>
      data value
    </sample>
    ''')
    conf, _ = ZConfig.loadConfigFile(loaded_schema, config_stream)
    # The sample component's datatype decorates the value with pipes.
    self.assertEqual(conf.something.data, "| value |")
def __call__(self, uri):
    """Resolve a ZConfig-file URI to the resolver tuple
    ``((path, frag), (), {}, database.open)``.

    The URI path names a ZConfig file; the fragment (if any) selects a
    database by name, otherwise the first one wins.  Raises KeyError
    when the fragment matches nothing.
    """
    (scheme, netloc, path, query, frag) = urlsplit(uri)
    if _BROKEN_URLSPLIT: #pragma NO COVER
        # urlsplit used not to allow fragments in non-standard schemes,
        # stuffed everything into 'path'
        (scheme, netloc, path, query, frag
         ) = urlsplit('http:' + path)
    path = os.path.normpath(path)
    schema_xml = self.schema_xml_template
    schema = ZConfig.loadSchemaFile(io.BytesIO(schema_xml))
    config, handler = ZConfig.loadConfig(schema, path)
    for database in config.databases:
        if not frag:
            # use the first defined in the file
            break
        elif frag == database.name:
            # match found
            break
    else:
        raise KeyError("No database named %s found" % frag)
    return (path, frag), (), {}, database.open
def config(configfile, schemafile=None, features=()):
    """Load the application configuration schema and configuration file.

    Both *configfile* and *schemafile* may be a path string or an open
    file object.  Exits the process with status 2 on configuration
    errors.
    """
    # Load the configuration schema
    if schemafile is None:
        schemafile = os.path.join(
            os.path.dirname(appsetup.__file__),
            'schema', 'schema.xml')
    # Let's support both, an opened file and path
    if isinstance(schemafile, basestring):
        schema = ZConfig.loadSchema(schemafile)
    else:
        schema = ZConfig.loadSchemaFile(schemafile)
    # Load the configuration file
    # Let's support both, an opened file and path
    try:
        if isinstance(configfile, basestring):
            options, handlers = ZConfig.loadConfig(schema, configfile)
        else:
            options, handlers = ZConfig.loadConfigFile(schema, configfile)
    except ZConfig.ConfigurationError as msg:
        # 'except E as msg' works on Python 2.6+ and is required on
        # Python 3; the old 'except E, msg' comma form is Python-2-only.
        sys.stderr.write("Error: %s\n" % str(msg))
        sys.exit(2)
def setup_eventlog(self):
    """Install a minimal eventlog configuration onto the global Zope
    configuration, logging to a file in a fresh temp directory at
    DEBUG level."""
    schema = ZConfig.loadSchemaFile(
        StringIO("""
        <schema>
          <import package='ZConfig.components.logger'/>
          <section type='eventlog' name='*' attribute='eventlog'/>
        </schema>
        """))
    self.tempdir = tempfile.mkdtemp()
    # Pre-create the log file so its path exists before the logger opens it.
    f = open(os.path.join(self.tempdir, 'instance0.log'), 'w')
    f.close()
    eventlog_conf = ZConfig.loadConfigFile(
        schema,
        StringIO("""
        <eventlog>
          <logfile>
            path {}
            level debug
          </logfile>
        </eventlog>
        """.format(f.name)))[0]
    assert eventlog_conf.eventlog is not None
    getConfiguration().eventlog = eventlog_conf.eventlog
def get_schema(self):
    """Return the parsed schema, loading it lazily and caching it on the
    concrete test class."""
    if self._schema is not None:
        return self._schema
    self.__class__._schema = ZConfig.loadSchemaFile(
        StringIO.StringIO(self._schematext))
    return self._schema
def main(argv=sys.argv):
    """Copy all transactions from a source storage to a destination.

    ``--dry-run`` only reports what would be copied, ``--clear`` zaps the
    destination first, and ``--incremental`` resumes a partial copy from
    the destination's last transaction id.
    """
    parser = optparse.OptionParser(description=__doc__,
                                   usage="%prog [options] config_file")
    parser.add_option(
        "--dry-run", dest="dry_run", action="store_true",
        help="Attempt to open the storages, then explain what would be done")
    parser.add_option(
        "--clear", dest="clear", action="store_true",
        help="Clear the contents of the destination storage before copying")
    parser.add_option(
        "--incremental", dest="incremental", action="store_true",
        help="Assume the destination contains a partial copy of the source "
             "and resume copying from the last transaction. WARNING: no "
             "effort is made to verify that the destination holds the same "
             "transaction data before this point! Use at your own risk. "
             "Currently only supports RelStorage destinations.")
    parser.set_defaults(dry_run=False, clear=False)
    options, args = parser.parse_args(argv[1:])
    if len(args) != 1:
        parser.error("The name of one configuration file is required.")
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s [%(name)s] %(levelname)s %(message)s")
    schema = ZConfig.loadSchemaFile(StringIO(schema_xml))
    config, handler = ZConfig.loadConfig(schema, args[0])
    source = config.source.open()
    destination = config.destination.open()
    # ``start`` stays None unless --incremental finds data to resume from.
    start = None
    log.info("Storages opened successfully.")
    if options.incremental:
        if not storage_has_data(destination):
            log.warning(
                "Destination empty, start conversion from the beginning.")
        else:
            # Walk to the destination's last transaction and resume just
            # past it.
            iterator = destination.iterator()
            for trans in iterator:
                last = trans.tid
            start = p64(u64(last) + 1)
            source = IteratorWithDefaultStart(source, start)
            log.info("Resuming ZODB copy from %s", u64(start))
    if options.dry_run:
        log.info("Dry run mode: not changing the destination.")
        if storage_has_data(destination):
            log.warning("The destination storage has data.")
        count = 0
        for txn in source.iterator():
            log.info('%s user=%s description=%s' %
                     (TimeStamp(txn.tid), txn.user, txn.description))
            count += 1
        log.info("Would copy %d transactions.", count)
    else:
        if options.clear:
            log.info("Clearing old data...")
            if hasattr(destination, 'zap_all'):
                destination.zap_all()
            else:
                msg = ("Error: no API is known for clearing this type "
                       "of storage. Use another method.")
                sys.exit(msg)
            log.info("Done clearing old data.")
        # With --incremental (start set) existing destination data is
        # expected; otherwise refuse to copy over it.
        if start is None and storage_has_data(destination):
            msg = "Error: the destination storage has data. Try --clear."
            sys.exit(msg)
        destination.copyTransactionsFrom(source)
        source.close()
        destination.close()
def checkConfigureViaZConfig(self): # pylint:disable=too-many-locals import tempfile dsn = os.environ.get('ORACLE_TEST_DSN', 'XE') fd, replica_conf = tempfile.mkstemp() os.write(fd, dsn.encode("ascii")) os.close(fd) try: if self.keep_history: dbname = base_dbname else: dbname = base_dbname + '_hf' conf = u""" %%import relstorage <zodb main> <relstorage> name xyz read-only false keep-history %s replica-conf %s blob-chunk-size 10MB <oracle> user %s password relstoragetest dsn %s </oracle> </relstorage> </zodb> """ % ( self.keep_history and 'true' or 'false', replica_conf, dbname, dsn, ) schema_xml = u""" <schema> <import package="ZODB"/> <section type="ZODB.database" name="main" attribute="database"/> </schema> """ import ZConfig from io import StringIO schema = ZConfig.loadSchemaFile(StringIO(schema_xml)) config, _handler = ZConfig.loadConfigFile(schema, StringIO(conf)) db = config.database.open() try: storage = db.storage self.assertEqual(storage.isReadOnly(), False) self.assertEqual(storage.getName(), "xyz") adapter = storage._adapter from relstorage.adapters.oracle import OracleAdapter self.assertIsInstance(adapter, OracleAdapter) self.assertEqual(adapter._user, dbname) self.assertEqual(adapter._password, 'relstoragetest') self.assertEqual(adapter._dsn, dsn) self.assertEqual(adapter._twophase, False) self.assertEqual(adapter.keep_history, self.keep_history) self.assertEqual( adapter.connmanager.replica_selector.replica_conf, replica_conf) self.assertEqual(storage._options.blob_chunk_size, 10485760) finally: db.close() finally: os.remove(replica_conf)
def main(argv=sys.argv):
    """Command-line entry point: copy every transaction from the configured
    source storage to the configured destination storage."""
    parser = optparse.OptionParser(description=__doc__,
                                   usage="%prog [options] config_file")
    parser.add_option(
        "--dry-run", dest="dry_run", action="store_true",
        help="Attempt to open the storages, then explain what would be done")
    parser.add_option(
        "--clear", dest="clear", action="store_true",
        help="Clear the contents of the destination storage before copying")
    parser.add_option(
        "--incremental", dest="incremental", action="store_true",
        help="Assume the destination contains a partial copy of the source "
             "and resume copying from the last transaction. WARNING: no "
             "effort is made to verify that the destination holds the same "
             "transaction data before this point! Use at your own risk. "
             "Currently only supports RelStorage destinations.")
    parser.set_defaults(dry_run=False, clear=False)
    opts, positional = parser.parse_args(argv[1:])
    if len(positional) != 1:
        parser.error("The name of one configuration file is required.")

    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s [%(name)s] %(levelname)s %(message)s")

    # Parse the user's config file against the schema embedded in this module.
    schema = ZConfig.loadSchemaFile(StringIO(schema_xml))
    config, _unused_handler = ZConfig.loadConfig(schema, positional[0])
    source = config.source.open()
    destination = config.destination.open()
    resume_tid = None
    log.info("Storages opened successfully.")

    if opts.incremental:
        if not storage_has_data(destination):
            log.warning(
                "Destination empty, start conversion from the beginning.")
        else:
            # Walk the destination to find its newest committed tid, then
            # resume copying from the transaction immediately after it.
            last_seen = None
            for record in destination.iterator():
                last_seen = record.tid
            resume_tid = p64(u64(last_seen)+1)
            source = IteratorWithDefaultStart(source, resume_tid)
            log.info("Resuming ZODB copy from %s", u64(resume_tid))

    if opts.dry_run:
        # Only report; never write to the destination.
        log.info("Dry run mode: not changing the destination.")
        if storage_has_data(destination):
            log.warning("The destination storage has data.")
        copied = 0
        for txn in source.iterator():
            log.info('%s user=%s description=%s' % (
                TimeStamp(txn.tid), txn.user, txn.description))
            copied += 1
        log.info("Would copy %d transactions.", copied)
    else:
        if opts.clear:
            log.info("Clearing old data...")
            if hasattr(destination, 'zap_all'):
                destination.zap_all()
            else:
                msg = ("Error: no API is known for clearing this type "
                       "of storage. Use another method.")
                sys.exit(msg)
            log.info("Done clearing old data.")
        # Refuse to copy into a non-empty destination unless resuming.
        if resume_tid is None and storage_has_data(destination):
            sys.exit("Error: the destination storage has data. Try --clear.")
        destination.copyTransactionsFrom(source)
        source.close()
        destination.close()
def checkConfigureViaZConfig(self): replica_conf = os.path.join(os.path.dirname(__file__), 'replicas.conf') if self.keep_history: dbname = base_dbname else: dbname = base_dbname + '_hf' conf = u""" %%import relstorage <zodb main> <relstorage> name xyz read-only false keep-history %s replica-conf %s blob-chunk-size 10MB cache-local-dir-read-count 12 cache-local-dir-write-max-size 10MB <mysql> driver auto db %s user relstoragetest passwd relstoragetest </mysql> </relstorage> </zodb> """ % ( self.keep_history and 'true' or 'false', replica_conf, dbname, ) schema_xml = u""" <schema> <import package="ZODB"/> <section type="ZODB.database" name="main" attribute="database"/> </schema> """ import ZConfig from io import StringIO schema = ZConfig.loadSchemaFile(StringIO(schema_xml)) config, _ = ZConfig.loadConfigFile(schema, StringIO(conf)) db = config.database.open() try: storage = db.storage self.assertEqual(storage.isReadOnly(), False) self.assertEqual(storage.getName(), "xyz") adapter = storage._adapter from relstorage.adapters.mysql import MySQLAdapter self.assertIsInstance(adapter, MySQLAdapter) self.assertEqual( adapter._params, { 'passwd': 'relstoragetest', 'db': dbname, 'user': '******', }) self.assertEqual(adapter.keep_history, self.keep_history) self.assertEqual(adapter.connmanager.replica_selector.replica_conf, replica_conf) self.assertEqual(storage._options.blob_chunk_size, 10485760) finally: db.close()
def load_schema_text(self, text, url=None): sio = StringIO.StringIO(text) self.schema = ZConfig.loadSchemaFile(sio, url) return self.schema
def checkConfigureViaZConfig(self): import tempfile dsn = os.environ.get('ORACLE_TEST_DSN', 'XE') fd, replica_conf = tempfile.mkstemp() os.write(fd, dsn) os.close(fd) try: if self.keep_history: dbname = base_dbname else: dbname = base_dbname + '_hf' conf = """ %%import relstorage <zodb main> <relstorage> name xyz read-only false keep-history %s replica-conf %s blob-chunk-size 10MB <oracle> user %s password relstoragetest dsn %s </oracle> </relstorage> </zodb> """ % ( self.keep_history and 'true' or 'false', replica_conf, dbname, dsn, ) schema_xml = """ <schema> <import package="ZODB"/> <section type="ZODB.database" name="main" attribute="database"/> </schema> """ import ZConfig from StringIO import StringIO schema = ZConfig.loadSchemaFile(StringIO(schema_xml)) config, handler = ZConfig.loadConfigFile(schema, StringIO(conf)) db = config.database.open() try: storage = getattr(db, 'storage', None) if storage is None: # ZODB < 3.9 storage = db._storage self.assertEqual(storage.isReadOnly(), False) self.assertEqual(storage.getName(), "xyz") adapter = storage._adapter from relstorage.adapters.oracle import OracleAdapter self.assert_(isinstance(adapter, OracleAdapter)) self.assertEqual(adapter._user, dbname) self.assertEqual(adapter._password, 'relstoragetest') self.assertEqual(adapter._dsn, dsn) self.assertEqual(adapter._twophase, False) self.assertEqual(adapter.keep_history, self.keep_history) self.assertEqual( adapter.connmanager.replica_selector.replica_conf, replica_conf) self.assertEqual(storage._options.blob_chunk_size, 10485760) finally: db.close() finally: os.remove(replica_conf)
def run_with_options(options):
    """Run the speed-test matrix described by *options*: every configured
    database contender at every (objects_per_txn, concurrency) combination,
    printing results even if the run is interrupted."""
    conf_fn = options.config_file
    # Do the gevent stuff ASAP
    if getattr(options, 'gevent', False):
        # Because of what we import up top, this must have
        # already been done, to be sure that it's effective
        import gevent.monkey
        if not gevent.monkey.is_module_patched('threading'):
            raise AssertionError("gevent monkey-patching should have happened")
    if options.log:
        import logging
        # _nameToLevel is the Python 3 spelling, _levelNames the Python 2 one.
        lvl_map = getattr(logging, '_nameToLevel', None) or getattr(logging, '_levelNames', {})
        logging.basicConfig(level=lvl_map.get(options.log, logging.INFO),
                            format='%(asctime)s %(levelname)-5.5s [%(name)s][%(thread)d:%(process)d][%(threadName)s] %(message)s')
    # Never benchmark with objects smaller than the fixed pickle overhead.
    object_size = max(options.object_size, pobject_base_size)
    if options.profile_dir and not os.path.exists(options.profile_dir):
        os.makedirs(options.profile_dir)
    schema = ZConfig.loadSchemaFile(StringIO(schema_xml))
    config, _handler = ZConfig.loadConfigFile(schema, conf_fn)
    # Each configured database is a named "contender" in the benchmark.
    contenders = [(db.name, db) for db in config.databases]
    if options.zap:
        _zap(contenders)
    # results: {contender_name: {concurrency_level: {objects_per_txn: [[SpeedTestTimes]...]}}}
    results = defaultdict(lambda: defaultdict(dict))
    try:
        for objects_per_txn in options.counts or DEFAULT_OBJ_COUNTS:
            for concurrency in options.concurrency or DEFAULT_CONCURRENCIES:
                speedtest = SpeedTest(
                    concurrency, objects_per_txn, object_size,
                    options.profile_dir,
                    mp_strategy=(options.threads or 'mp'),
                    test_reps=options.test_reps,
                    use_blobs=options.use_blobs)
                speedtest.min_object_count = options.min_object_count
                if options.btrees:
                    import BTrees
                    if options.btrees == 'IO':
                        speedtest.MappingType = BTrees.family64.IO.BTree
                    else:
                        speedtest.MappingType = BTrees.family64.OO.BTree
                for contender_name, db in contenders:
                    print((
                        'Testing %s with objects_per_txn=%d, object_size=%d, '
                        'mappingtype=%s, objecttype=%s, min_objects=%d and concurrency=%d (threads? %s)'
                        % (contender_name, objects_per_txn, object_size,
                           speedtest.MappingType, speedtest.ObjectType,
                           options.min_object_count, concurrency,
                           options.threads)), file=sys.stderr)
                    all_times = _run_one_contender(options, speedtest, contender_name, db)
                    #results[key] = all_times
                    results[contender_name][concurrency][objects_per_txn] = all_times
    # The finally clause causes test results to print even if the tests
    # stop early.
    finally:
        _print_results(options, contenders, results)
from collections import deque from pickle import Unpickler as UnpicklerBase from Products.ZenRelations.RelationshipBase import RelationshipBase from Products.ZenRelations.ToManyContRelationship import ToManyContRelationship from Products.ZenUtils.AutoGCObjectReader import gc_cache_every from Products.ZenUtils.GlobalConfig import getGlobalConfiguration from Products.ZenUtils.ZenScriptBase import ZenScriptBase from relstorage.zodbpack import schema_xml from time import localtime, strftime from ZenToolboxUtils import inline_print from ZODB.DB import DB from ZODB.POSException import POSKeyError from ZODB.transact import transact from ZODB.utils import u64 schema = ZConfig.loadSchemaFile(cStringIO.StringIO(schema_xml)) class Analyzer(UnpicklerBase): """ Able to analyze an object's pickle to try to figure out the name/class of the problem oid. """ def __init__(self, pickle, problem_oid): UnpicklerBase.__init__(self, cStringIO.StringIO(pickle)) self.problem_oid = problem_oid self._marker = object() self.klass = None def persistent_load(self, pickle_id): if isinstance(pickle_id, tuple): oid, klass = pickle_id if oid == self.problem_oid: self.klass = klass
def main(argv=sys.argv):
    """Script entry point: copy every transaction from the configured
    source storage into the configured destination storage. Incremental
    resumption relies on the destination's lastTransaction() API."""
    parser = optparse.OptionParser(description=__doc__,
                                   usage="%prog [options] config_file")
    parser.add_option(
        "--dry-run", dest="dry_run", action="store_true",
        help="Attempt to open the storages, then explain what would be done")
    parser.add_option(
        "--clear", dest="clear", action="store_true",
        help="Clear the contents of the destination storage before copying")
    parser.add_option(
        "--incremental", dest="incremental", action="store_true",
        help="Assume the destination contains a partial copy of the source "
             "and resume copying from the last transaction. WARNING: no "
             "effort is made to verify that the destination holds the same "
             "transaction data before this point! Use at your own risk. "
             "Currently only supports RelStorage destinations.")
    parser.set_defaults(dry_run=False, clear=False)
    options, args = parser.parse_args(argv[1:])
    if len(args) != 1:
        parser.error("The name of one configuration file is required.")
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s [%(name)s] %(levelname)s %(message)s")
    # schema_xml (module-level) describes the <source>/<destination>
    # sections the user's config file must provide.
    schema = ZConfig.loadSchemaFile(StringIO(schema_xml))
    config, handler = ZConfig.loadConfig(schema, args[0])
    source = config.source.open()
    destination = config.destination.open()
    log.info("Storages opened successfully.")
    if options.incremental:
        # Resuming needs the destination's last committed tid.
        if not hasattr(destination, 'lastTransaction'):
            msg = ("Error: no API is known for determining the last committed "
                   "transaction of the destination storage. Aborting "
                   "conversion.")
            sys.exit(msg)
        if not storage_has_data(destination):
            log.warning(
                "Destination empty, start conversion from the beginning.")
        else:
            # This requires that the storage produce a valid (not z64)
            # value before anything is loaded with it.
            last_tid = destination.lastTransaction()
            if isinstance(last_tid, bytes):
                # This *should* be a byte string.
                last_tid = u64(last_tid)
            next_tid = p64(last_tid + 1)
            # Compensate for the RelStorage bug(?) and get a reusable
            # iterator that starts where we want it to. There's no harm in
            # wrapping it for other sources like FileStorage too.
            source = _DefaultStartStorageIteration(source, next_tid)
            log.info("Resuming ZODB copy from %s", readable_tid_repr(next_tid))
    if options.dry_run:
        # Report what would be copied without touching the destination.
        log.info("Dry run mode: not changing the destination.")
        if storage_has_data(destination):
            log.warning("The destination storage has data.")
        count = 0
        for txn in source.iterator():
            log.info('%s user=%s description=%s' % (TimeStamp(txn.tid),
                     txn.user, txn.description))
            count += 1
        log.info("Would copy %d transactions.", count)
    else:
        if options.clear:
            log.info("Clearing old data...")
            if hasattr(destination, 'zap_all'):
                destination.zap_all()
            else:
                msg = ("Error: no API is known for clearing this type "
                       "of storage. Use another method.")
                sys.exit(msg)
            log.info("Done clearing old data.")
        # Refuse to copy into a non-empty destination unless resuming.
        if storage_has_data(destination) and not options.incremental:
            msg = "Error: the destination storage has data. Try --clear."
            sys.exit(msg)
        destination.copyTransactionsFrom(source)
        source.close()
        destination.close()
def main(argv=None):
    """Script entry point: copy every transaction from the configured
    source storage into the configured destination storage.

    The single positional argument names a config file defining a
    <source> and a <destination> storage. Exits via sys.exit() with an
    error message on fatal conditions; always closes both storages.
    """
    if argv is None:
        argv = sys.argv
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument(
        "--dry-run", dest="dry_run", action="store_true", default=False,
        help="Attempt to open both storages, then explain what would be done.")
    parser.add_argument(
        "--clear", dest="clear", action="store_true", default=False,
        help="Clear the contents of the destination storage before copying. Only works if the destination is a RelStorage."
        " WARNING: use this only if you are certain the destination has no useful data.")
    parser.add_argument(
        "--incremental", dest="incremental", action="store_true",
        help="Assume the destination contains a partial copy of the source "
             "and resume copying from the last transaction. WARNING: no "
             "effort is made to verify that the destination holds the same "
             "transaction data before this point! Use at your own risk. ")
    parser.add_argument("config_file")
    options = parser.parse_args(argv[1:])
    logging.basicConfig(
        level=logging.INFO,
        format="%(asctime)s [%(name)s] %(levelname)s %(message)s")
    schema = ZConfig.loadSchemaFile(StringIO(schema_xml))
    config, _ = ZConfig.loadConfig(schema, options.config_file)
    source = config.source.open()
    destination = config.destination.open()

    def cleanup_and_exit(exit_msg=None):
        # Close both storages, then optionally abort with a message.
        source.close()
        destination.close()
        if exit_msg:
            # BUG FIX: previously called sys.exit(msg), which only worked by
            # accidentally closing over a local of main(); use the parameter.
            sys.exit(exit_msg)

    log.info("Storages opened successfully.")
    if options.incremental:
        # Explicit check instead of `assert`: asserts are stripped under -O,
        # and this is input validation, not an internal invariant.
        if not hasattr(destination, 'lastTransaction'):
            cleanup_and_exit(
                "Error: no API is known for determining the last committed "
                "transaction of the destination storage. Aborting "
                "conversion.")
        if not storage_has_data(destination):
            log.warning("Destination empty, start conversion from the beginning.")
        else:
            # This requires that the storage produce a valid (not z64) value
            # before anything is loaded with it.
            last_tid = destination.lastTransaction()
            if isinstance(last_tid, bytes):
                # This *should* be a byte string.
                last_tid = u64(last_tid)
            next_tid = p64(last_tid + 1)
            # Compensate for the RelStorage bug(?) and get a reusable
            # iterator that starts where we want it to. There's no harm in
            # wrapping it for other sources like FileStorage too.
            source = _DefaultStartStorageIteration(source, next_tid)
            log.info("Resuming ZODB copy from %s", readable_tid_repr(next_tid))

    if options.dry_run:
        # Report what would be copied without touching the destination.
        log.info("Dry run mode: not changing the destination.")
        if storage_has_data(destination):
            log.warning("The destination storage has data.")
        count = 0
        for txn in source.iterator():
            log.info('%s user=%s description=%s',
                     TimeStamp(txn.tid), txn.user, txn.description)
            count += 1
        log.info("Would copy %d transactions.", count)
        cleanup_and_exit()
    else:
        if options.clear:
            log.info("Clearing old data...")
            if hasattr(destination, 'zap_all'):
                destination.zap_all()
            else:
                msg = ("Error: no API is known for clearing this type "
                       "of storage. Use another method.")
                cleanup_and_exit(msg)
            log.info("Done clearing old data.")
        # Refuse to copy into a non-empty destination unless resuming.
        if storage_has_data(destination) and not options.incremental:
            msg = "Error: the destination storage has data. Try --clear."
            cleanup_and_exit(msg)
        destination.copyTransactionsFrom(source)
        cleanup_and_exit()