def create_storage(name, blob_dir, shared_blob_dir=shared_blob_dir, keep_history=keep_history, **kw):
    """Build a zapped RelStorage for *name*, skipping the test if the driver is unavailable."""
    if not driver_available:
        raise unittest.SkipTest(str(driver_available))
    assert 'driver' not in kw
    kw['driver'] = driver_available.driver_name
    db_name = self.db_names[name]
    if not keep_history:
        # History-free databases use a distinct suffix.
        db_name += '_hf'
    opts = Options(
        keep_history=keep_history,
        shared_blob_dir=shared_blob_dir,
        blob_dir=os.path.abspath(blob_dir),
        **kw
    )
    maker = self.use_adapter()
    maker.driver_name = driver_available.driver_name
    adapter = maker.make_adapter(opts, db_name)
    # Surfaced in test tracebacks for easier debugging.
    __traceback_info__ = adapter, opts
    storage = RelStorage(adapter, name=name, options=opts)
    storage.zap_all()
    return storage
def make_storage(self, zap=True, **kw):
    """Create a RelStorage configured for this test, optionally zapping it first."""
    from . import util
    from relstorage.storage import RelStorage

    # Fall back to the shared cache servers unless the caller configured caching.
    wants_shared = ('cache_servers' not in kw
                    and 'cache_module_name' not in kw
                    and kw.get('share_local_cache', True))
    if wants_shared and util.CACHE_SERVERS and util.CACHE_MODULE_NAME:
        kw['cache_servers'] = util.CACHE_SERVERS
        kw['cache_module_name'] = util.CACHE_MODULE_NAME
    kw.setdefault('cache_prefix', type(self).__name__ + self._testMethodName)
    # Always use a persistent cache to help discover errors in it.
    # Tests run in a temporary directory that gets cleaned up, so the CWD
    # is appropriate — but make it absolute in case we change directories.
    kw.setdefault('cache_local_dir', os.path.abspath('.'))
    # Cut the lock timeout way down so we get better feedback.
    kw.setdefault('commit_lock_timeout', self.DEFAULT_COMMIT_LOCK_TIMEOUT)

    assert self.driver_name
    opts = Options(keep_history=self.keep_history, driver=self.driver_name, **kw)
    storage = RelStorage(self.make_adapter(opts), options=opts)
    if zap:
        storage.zap_all(slow=self.zap_slow)
    return self._wrap_storage(storage)
def RelStorageConfigurationFactory(key, dbconfig):
    """Create a ``Database`` named *key* backed by a RelStorage.

    *dbconfig* supplies ``type`` (only ``'postgres'`` is handled), ``dsn``,
    ``options`` (keyword args for ``Options``) and an optional
    ``configuration`` dict passed through to ``RequestAwareDB``.

    Fixes vs. the previous revision: the error message typo ("dabase"),
    the bare ``except: pass`` that swallowed everything including
    KeyboardInterrupt, and a NameError in ``finally`` when ``db.open()``
    itself raised (``conn``/``rootobj`` were unbound).
    """
    if not RELSTORAGE:
        raise Exception("You must install the relstorage package before you can use "
                        "it as a database adapter.")
    config = dbconfig.get('configuration', {})
    options = Options(**dbconfig['options'])
    if dbconfig['type'] == 'postgres':
        from relstorage.adapters.postgresql import PostgreSQLAdapter
        dsn = "dbname={dbname} user={username} host={host} password={password} port={port}".format(**dbconfig['dsn'])  # noqa
        adapter = PostgreSQLAdapter(dsn=dsn, options=options)
    # Open once to stamp the root object with IDatabase, then reopen fresh.
    rs = RelStorage(adapter=adapter, options=options)
    db = DB(rs)
    conn = None
    rootobj = None
    try:
        conn = db.open()
        rootobj = conn.root()
        if not IDatabase.providedBy(rootobj):
            alsoProvides(rootobj, IDatabase)
            transaction.commit()
    except Exception:
        # Best effort only — marking the root is not critical, but don't
        # leave a joined transaction dangling.
        transaction.abort()
    finally:
        rootobj = None
        if conn is not None:
            conn.close()
        db.close()
    rs = RelStorage(adapter=adapter, options=options)
    db = RequestAwareDB(rs, **config)
    return Database(key, db)
def make_storage(self, zap=True, **kw):
    """Return a RelStorage built from this test's adapter; zap it unless told not to."""
    from relstorage.options import Options
    from relstorage.storage import RelStorage

    opts = Options(keep_history=self.keep_history, **kw)
    storage = RelStorage(self.make_adapter(opts), options=opts)
    # Force tiny batches so batching code paths get exercised.
    storage._batcher_row_limit = 1
    if zap:
        storage.zap_all()
    return storage
def make_storage(self, zap=True, **kw):
    """Create the storage under test, wiring in shared cache servers when available."""
    if (util.CACHE_SERVERS
            and util.CACHE_MODULE_NAME
            and 'cache_servers' not in kw
            and 'cache_module_name' not in kw
            and kw.get('share_local_cache', True)):
        kw['cache_servers'] = util.CACHE_SERVERS
        kw['cache_module_name'] = util.CACHE_MODULE_NAME
    # Key the cache to this exact test to avoid cross-test pollution.
    kw['cache_prefix'] = type(self).__name__ + self._testMethodName
    opts = Options(keep_history=self.keep_history, **kw)
    storage = RelStorage(self.make_adapter(opts), options=opts)
    # Tiny batches exercise the batcher logic.
    storage._batcher_row_limit = 1
    if zap:
        storage.zap_all()
    return storage
def makeOne(self, adapter=None, **kw):
    """Return a RelStorage wired to a mock adapter, avoiding any database connection."""
    from relstorage.storage import RelStorage
    backing = adapter if adapter is not None else MockAdapter()
    return RelStorage(backing, create=False, cache_prefix='Mock', **kw)
def getRelstorageConnection(host='localhost', port=3306, user='******',
                            passwd=None, db='zodb', socket=None,
                            keep_history=False):
    """Open and return a ZODB ``DB`` backed by a MySQL RelStorage."""
    from relstorage.storage import RelStorage
    from relstorage.adapters.mysql import MySQLAdapter
    from relstorage.options import Options
    from ZODB import DB

    conn_params = {
        'host': host,
        'port': port,
        'user': user,
        'passwd': passwd,
        'db': db,
    }
    if socket:
        conn_params['unix_socket'] = socket
    history_kw = {'keep_history': keep_history}
    adapter = MySQLAdapter(options=Options(**history_kw), **conn_params)
    storage = RelStorage(adapter, **history_kw)
    return DB(storage, 0)
def open(self):
    """Create and return the configured RelStorage.

    Fix: the previous revision nulled out ``config.adapter.config.driver``
    after hoisting it. That mutates the shared ZConfig object, so a second
    ``open()`` on the same section would hoist ``None`` and lose the driver
    setting. Hoist without mutating instead.
    """
    config = self.config
    # Hoist the driver setting to the section we really want it.
    config.driver = config.adapter.config.driver
    # But don't remove it or otherwise mutate the adapter's config object;
    # that would prevent us from being correctly opened again.
    options = Options.copy_valid_options(config)
    adapter = config.adapter.create(options)
    return RelStorage(adapter, name=config.name, options=options)
def create_storage(name, blob_dir, shared_blob_dir=shared_blob_dir, keep_history=keep_history, **kw):
    """Create and zap an Oracle-backed RelStorage for the test database *name*."""
    from relstorage.storage import RelStorage
    from relstorage.adapters.oracle import OracleAdapter

    db_name = db_names[name]
    if not keep_history:
        # History-free variants live in a separate schema.
        db_name += "_hf"
    opts = Options(
        keep_history=keep_history,
        shared_blob_dir=shared_blob_dir,
        blob_dir=os.path.abspath(blob_dir),
        **kw
    )
    adapter = OracleAdapter(user=db_name, password="******", dsn=dsn, options=opts)
    storage = RelStorage(adapter, name=name, options=opts)
    storage.zap_all()
    return storage
def make_storage(self, zap=True, **kw):
    """Build the storage under test; zap it (slowly) unless *zap* is false."""
    if (util.CACHE_SERVERS
            and util.CACHE_MODULE_NAME
            and "cache_servers" not in kw
            and "cache_module_name" not in kw
            and kw.get("share_local_cache", True)):
        kw["cache_servers"] = util.CACHE_SERVERS
        kw["cache_module_name"] = util.CACHE_MODULE_NAME
    kw["cache_prefix"] = type(self).__name__ + self._testMethodName
    opts = Options(keep_history=self.keep_history, **kw)
    storage = RelStorage(self.make_adapter(opts), options=opts)
    storage._batcher_row_limit = 1
    if zap:
        # XXX: Some ZODB tests, possibly check4ExtStorageThread and
        # check7StorageThreads don't close storages when done with them?
        # This leads to connections remaining open with locks on PyPy, so on
        # PostgreSQL we can't TRUNCATE tables and have to go the slow route.
        storage.zap_all(slow=True)
    return self._wrap_storage(storage)
def open(self):
    """Create the RelStorage, copying every recognized option from the config section."""
    config = self.config
    options = Options()
    # Mirror any non-None config attribute whose name matches an Options field.
    for field in list(vars(options)):
        configured = getattr(config, field, None)
        if configured is not None:
            setattr(options, field, configured)
    adapter = config.adapter.create(options)
    return RelStorage(adapter, name=config.name, options=options)
def open(self):
    """Create and return the configured RelStorage."""
    cfg = self.config
    # Hoist the driver setting to the section we really want it on,
    # without removing it from (or otherwise mutating) the adapter's
    # config object — that would prevent a correct second open().
    cfg.driver = cfg.adapter.config.driver
    opts = Options.copy_valid_options(cfg)
    return RelStorage(cfg.adapter.create(opts), name=cfg.name, options=opts)
def make_storage(self, zap=True, **kw):
    """Create the storage under test, defaulting to the shared cache servers."""
    caller_configured = ('cache_servers' in kw
                         or 'cache_module_name' in kw
                         or not kw.get('share_local_cache', True))
    if not caller_configured and util.CACHE_SERVERS and util.CACHE_MODULE_NAME:
        kw['cache_servers'] = util.CACHE_SERVERS
        kw['cache_module_name'] = util.CACHE_MODULE_NAME
    kw['cache_prefix'] = type(self).__name__ + self._testMethodName
    opts = Options(keep_history=self.keep_history, **kw)
    storage = RelStorage(self.make_adapter(opts), options=opts)
    storage._batcher_row_limit = 1
    if zap:
        # XXX: Some ZODB tests, possibly check4ExtStorageThread and
        # check7StorageThreads don't close storages when done with them?
        # This leads to connections remaining open with locks on PyPy, so on
        # PostgreSQL we can't TRUNCATE tables and have to go the slow route.
        storage.zap_all(slow=True)
    return self._wrap_storage(storage)
def create_storage(name, blob_dir, shared_blob_dir=shared_blob_dir, keep_history=keep_history, **kw):
    """Create and (slowly) zap a PostgreSQL-backed RelStorage for test *name*."""
    from relstorage.storage import RelStorage
    from relstorage.adapters.postgresql import PostgreSQLAdapter

    db_name = db_names[name]
    if not keep_history:
        db_name += '_hf'
    dsn = ('dbname=%s user=relstoragetest '
           'password=relstoragetest' % db_name)
    opts = Options(
        keep_history=keep_history,
        shared_blob_dir=shared_blob_dir,
        blob_dir=os.path.abspath(blob_dir),
        **kw
    )
    storage = RelStorage(PostgreSQLAdapter(dsn=dsn, options=opts),
                         name=name, options=opts)
    storage.zap_all(slow=True)
    return storage
def create_storage(name, blob_dir, shared_blob_dir=shared_blob_dir, keep_history=keep_history, **kw):
    """Create and zap a PostgreSQL-backed RelStorage for test *name*."""
    from relstorage.storage import RelStorage
    from relstorage.adapters.postgresql import PostgreSQLAdapter

    db_name = db_names[name]
    if not keep_history:
        db_name += '_hf'
    dsn = ('dbname=%s user=relstoragetest '
           'password=relstoragetest' % db_name)
    opts = Options(
        keep_history=keep_history,
        shared_blob_dir=shared_blob_dir,
        blob_dir=os.path.abspath(blob_dir),
        **kw
    )
    adapter = PostgreSQLAdapter(dsn=dsn, options=opts)
    storage = RelStorage(adapter, name=name, options=opts)
    storage.zap_all()
    return storage
def create_storage(name, blob_dir, shared_blob_dir=shared_blob_dir, keep_history=keep_history, **kw):
    """Create and zap an Oracle-backed RelStorage for test *name*."""
    from relstorage.storage import RelStorage
    from relstorage.adapters.oracle import OracleAdapter

    db_name = db_names[name]
    if not keep_history:
        db_name += '_hf'
    opts = Options(
        keep_history=keep_history,
        shared_blob_dir=shared_blob_dir,
        blob_dir=os.path.abspath(blob_dir),
        **kw
    )
    adapter = OracleAdapter(user=db_name, password='******', dsn=dsn, options=opts)
    storage = RelStorage(adapter, name=name, options=opts)
    storage.zap_all()
    return storage
def create_storage(name, blob_dir, shared_blob_dir=shared_blob_dir, keep_history=keep_history, **kw):
    """Build a zapped RelStorage for *name*; skip the test when the driver is missing."""
    if not is_available:
        raise unittest.SkipTest("Driver %s is not installed" % (driver_name,))
    assert driver_name not in kw
    kw['driver'] = driver_name
    db_name = self.db_names[name]
    if not keep_history:
        db_name += '_hf'
    opts = Options(
        keep_history=keep_history,
        shared_blob_dir=shared_blob_dir,
        blob_dir=os.path.abspath(blob_dir),
        **kw
    )
    maker = self.use_adapter()
    storage = RelStorage(maker.make_adapter(opts, db_name), name=name, options=opts)
    storage.zap_all(slow=True)
    return storage
def create_storage(name, blob_dir, shared_blob_dir=shared_blob_dir, keep_history=keep_history, **kw):
    """Create and zap a MySQL-backed RelStorage for test *name*."""
    from relstorage.storage import RelStorage
    from relstorage.adapters.mysql import MySQLAdapter

    db_name = db_names[name]
    if not keep_history:
        db_name += '_hf'
    opts = Options(
        keep_history=keep_history,
        shared_blob_dir=shared_blob_dir,
        blob_dir=os.path.abspath(blob_dir),
        **kw
    )
    adapter = MySQLAdapter(
        options=opts,
        db=db_name,
        user='******',
        passwd='relstoragetest',
    )
    storage = RelStorage(adapter, name=name, options=opts)
    storage.zap_all()
    return storage
def make_storage(self, zap=True, **kw):
    """Create the storage for this test, filling in cache and timeout defaults."""
    from . import util
    from relstorage.storage import RelStorage

    caching_unset = ('cache_servers' not in kw
                     and 'cache_module_name' not in kw
                     and kw.get('share_local_cache', True))
    if caching_unset and util.CACHE_SERVERS and util.CACHE_MODULE_NAME:
        kw['cache_servers'] = util.CACHE_SERVERS
        kw['cache_module_name'] = util.CACHE_MODULE_NAME
    kw.setdefault('cache_prefix', type(self).__name__ + self._testMethodName)
    # Always use a persistent cache — it helps discover errors in the
    # persistent cache code. The tests run in a temporary directory that is
    # cleaned up, so CWD is fine; abspath guards against directory changes.
    kw.setdefault('cache_local_dir', os.path.abspath('.'))
    # Cut the commit lock timeout way down so we get better feedback.
    kw.setdefault('commit_lock_timeout', self.DEFAULT_COMMIT_LOCK_TIMEOUT)

    assert self.driver_name
    opts = Options(keep_history=self.keep_history, driver=self.driver_name, **kw)
    storage = RelStorage(self.make_adapter(opts), options=opts)
    if zap:
        # XXX: Some ZODB tests, possibly check4ExtStorageThread
        # and check7StorageThreads don't close storages when done
        # with them? This leads to connections remaining open with
        # locks on PyPy, so on PostgreSQL we can't TRUNCATE tables
        # and have to go the slow route.
        #
        # As of 2019-06-20 with PyPy 7.1.1, I'm no longer able to replicate
        # a problem like that locally, so we go back to the fast way.
        storage.zap_all()
    return self._wrap_storage(storage)
def open(self):
    """Create and return the configured RelStorage."""
    cfg = self.config
    # Hoist the driver setting to the section we really want it on,
    # without removing it from (or otherwise mutating) the adapter's
    # config object — that would prevent a correct second open().
    cfg.driver = cfg.adapter.config.driver
    opts = Options.copy_valid_options(cfg)
    # The adapter factories may modify the global options (or raise an
    # exception) if something at the top level is specifically not allowed
    # based on their configuration.
    opts.adapter = cfg.adapter
    adapter = cfg.adapter.create(opts)
    return RelStorage(adapter, name=cfg.name, options=opts)
def main():
    """Fill a PostgreSQL-backed database, then pack it and report the elapsed time."""
    logging.basicConfig(
        stream=sys.stderr,
        level=logging.DEBUG,
        format='%(asctime)s [%(name)s] %(levelname)s %(message)s')
    log.info("Opening")
    storage = RelStorage(PostgreSQLAdapter())
    db = DB(storage)
    log.info("Filling")
    fill_db(db)
    log.info("Packing")
    started = time.time()
    db.pack()
    log.info("Packed in %0.3f seconds", time.time() - started)
def __init__(self, db_uri):
    """Connect to the MySQL database named by *db_uri* and ensure root BTrees exist."""
    parsed = urlparse(db_uri)
    self.mysql = MySQLAdapter(
        host=parsed.hostname,
        port=parsed.port,
        user=parsed.username,
        passwd=parsed.password,
        db=parsed.path[1:],  # strip the leading '/' from the URI path
        options=Options(keep_history=False),
    )
    self.storage = RelStorage(adapter=self.mysql)
    self.db = ZODB.DB(self.storage)
    # Make sure the top-level containers exist before anyone uses them.
    with self.db.transaction() as c:
        root = c.root()
        if "nodes" not in root:
            c.root.nodes = BTrees.OOBTree.BTree()
        if "classes" not in root:
            c.root.classes = BTrees.OOBTree.BTree()
def testRelstorage():
    # Scratch/smoke test (Python 2): opens a MySQL-backed RelStorage and
    # reads the 'data' mapping from the root. Much of the blob code below
    # is commented out.
    import ZODB, transaction
    from ZODB import FileStorage, DB
    from relstorage.adapters.mysql import MySQLAdapter
    from relstorage.storage import RelStorage
    from MySQLdb import OperationalError
    # Connection parameters (credentials masked).
    server = 'peat.ucd.ie'
    username = '******'
    password = '******'
    project = 'test'
    port = 8080  # NOTE(review): 8080 is unusual for MySQL (default 3306) — confirm.
    adapter = MySQLAdapter(host=server, user=username, passwd=password,
                           db=project, port=port)
    # Blobs are stored locally in 'tempblob' rather than shared.
    storage = RelStorage(adapter, shared_blob_dir=False, blob_dir='tempblob')
    db = DB(storage)
    connection = db.open()
    print storage
    # NOTE(review): db.open() is called twice; the first connection is
    # never closed.
    connection = db.open()
    dbroot = connection.root()
    # Assumes the root already has a 'data' mapping — raises KeyError otherwise.
    data = dbroot['data']
    print data

    def addfile(fname):
        # Read *fname* into a new ZODB Blob and return it.
        # NOTE(review): neither the blob handle nor the input file is closed
        # on error, and `Blob` is not imported in this function's scope.
        myblob = Blob()
        b = myblob.open('w')
        o = open(fname)
        data = o.read()
        b.write(data)
        print b.name
        b.close()
        return myblob
    # Dead code kept from earlier experiments:
    '''f='gogh.chambre-arles.jpg'
    b=addfile(f)
    data['aaa'] = FileRecord(name=f,blob=b)'''
    #t = transaction.get()
    #t.commit()
    return
class RelStorageTestBase(StorageTestBase.StorageTestBase):
    """Base class for RelStorage tests: subclasses supply the adapter."""

    def make_adapter(self):
        # abstract method — subclasses must return a configured adapter.
        raise NotImplementedError

    def open(self, **kwargs):
        """Create the storage under test and stash it on ``self._storage``."""
        from relstorage.storage import RelStorage
        adapter = self.make_adapter()
        self._storage = RelStorage(adapter, **kwargs)
        # Force tiny batches so the batching code paths are exercised.
        self._storage._batcher_row_limit = 1

    def setUp(self):
        # Open with create=1 and clear all existing data before each test.
        self.open(create=1)
        self._storage.zap_all()

    def tearDown(self):
        # Abort any in-flight transaction, then close and clean up the storage.
        transaction.abort()
        self._storage.close()
        self._storage.cleanup()
def pack():
    # Pack every client site database (Python 2 maintenance script).
    # Sites whose size has grown less than 15% past their packed size are
    # skipped unless specific ids are passed on the command line.
    import ZODB
    from relstorage.options import Options
    from relstorage.storage import RelStorage
    from ploud.relstorage import local, PostgreSQLAdapter
    ploud_config.initializeConfig()
    conn = ploud_config.PLOUD_POOL.getconn()
    cursor = conn.cursor()
    clients_conn = ploud_config.CLIENTS_POOL.getconn()
    clients_cursor = clients_conn.cursor()
    # History-free storage with a scratch blob dir used only during packing.
    options = Options(
        keep_history = False,
        blob_dir = '/tmp/ploud_pack_blobs',
        shared_blob_dir = False)
    dsn = "dbname=%(database)s user=%(user)s password=%(password)s "\
        "host=%(host)s "%ploud_config.CLIENTS_DSN
    # Best-effort cleanup of a stale blob dir from a previous run.
    try:
        shutil.rmtree('/tmp/ploud_pack_blobs')
    except:
        pass
    ids = []
    # NOTE(review): `(sys.argv) > 1` compares a list to an int — under
    # Python 2 this is always True, so this likely meant
    # `len(sys.argv) > 1`. Confirm intent before changing.
    if (sys.argv) > 1:
        for id in sys.argv[1:]:
            # Ignore non-numeric arguments silently.
            try:
                ids.append(int(id))
            except:
                pass
    if ids:
        # Explicit ids on the command line force packing regardless of size.
        force = 1
        cursor.execute("SELECT id,site_name,packed,packed_size,size FROM sites WHERE id in (%s) ORDER BY id"%(str(ids)[1:-1]))
    else:
        force = 0
        cursor.execute("SELECT id,site_name,packed,packed_size,size FROM sites ORDER BY id")
    for row in cursor.fetchall():
        uid, name, packed, packed_size, size = row
        # Table-name prefix for this site's schema.
        local.prefix = 'ploud%s_'%uid
        # pack if db size is more than 115% of packed db size
        if not force and packed_size and (size/(packed_size/100.0) < 115):
            print "Skiping '%s' %s"%(name, uid)
            continue
        # Clear pack bookkeeping tables before each site's pack.
        clients_cursor.execute("DELETE FROM object_ref")
        clients_cursor.execute("DELETE FROM object_refs_added")
        clients_cursor.execute("DELETE FROM pack_object")
        clients_cursor.execute("COMMIT")
        print "Packing '%s' %s:"%(name, uid),
        t1 = datetime.now()
        pgadapter = PostgreSQLAdapter(
            ploud_config.CLIENTS_POOL, ploud_config.CLIENTS_POOL,
            dsn, options=options)
        storage = RelStorage(pgadapter, options=options)
        db = ZODB.DB(storage, database_name='main',
                     cache_size=15000, cache_byte_size=10485760)
        db.pack()
        db.close()
        storage.release()
        # Drop references promptly to release connections/memory per site.
        del storage
        del db
        del pgadapter
        psize = dbsize.dbsize(clients_cursor, uid)
        print "size: %0.2fmb was %0.2fmb %s"%(
            psize/(1024*1024.0), size/(1024*1024.0),
            str(datetime.now()-t1)[:-5])
        # Record the new packed size (reconstructed across an input-line
        # break here — verify the original statement).
        cursor.execute("UPDATE sites SET packed=%s, packed_size=%s, size=%s WHERE id=%s", (datetime.now(), psize, psize, uid))
        cursor.execute('commit')
    # Final best-effort scratch-dir cleanup, then return pool connections.
    try:
        shutil.rmtree('/tmp/ploud_pack_blobs')
    except:
        pass
    clients_cursor.close()
    ploud_config.CLIENTS_POOL.putconn(clients_conn)
    cursor.close()
    ploud_config.PLOUD_POOL.putconn(conn)
def open(self, **kwargs):
    """Create the storage under test and stash it on ``self._storage``."""
    from relstorage.storage import RelStorage
    backing_adapter = self.make_adapter()
    self._storage = RelStorage(backing_adapter, **kwargs)
    # Tiny batches exercise the batcher code paths.
    self._storage._batcher_row_limit = 1
# Demo script: bump a counter in a MySQL-backed RelStorage, then pack the DB.
import logging
import sys
# Verbose logging to stdout so RelStorage internals are visible.
format = '%(asctime)s [%(name)s] %(levelname)s %(message)s'
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG, format=format)
import transaction
from relstorage.storage import RelStorage
from relstorage.options import Options
from relstorage.adapters.mysql import MySQLAdapter
from ZODB.DB import DB
options = Options()
# Connects to the local MySQL database 'shane' with default credentials.
adapter = MySQLAdapter(db='shane', options=options)
storage = RelStorage(adapter, options=options)
db = DB(storage)
conn = db.open()
root = conn.root()
# Increment a persistent counter (created on first run).
root['x'] = root.get('x', 0) + 1
transaction.commit()
conn.close()
# Pack to exercise the pack code path after the write.
db.pack()
'password=relstoragetest', options=Options(keep_history=keep_history), ) elif use == 'oracle': from relstorage.adapters.oracle import OracleAdapter dsn = os.environ.get('ORACLE_TEST_DSN', 'XE') a = OracleAdapter( user='******', password='******', dsn=dsn, options=Options(keep_history=keep_history), ) else: raise AssertionError("which database?") s = RelStorage(a) d = DB(s) c = d.open() print 'size:' print d.getSize() if 1: print 'initializing...' container = PersistentMapping() c.root()['container'] = container container_size = 10000 for i in range(container_size): container[i] = PersistentMapping() transaction.commit()
def factory():
    """Build the storage; wrap it in a DemoStorage when demostorage is requested."""
    base = RelStorage(adapter=adapter_factory(options), options=options)
    return DemoStorage(base=base) if demostorage else base
def factory():
    """Build the storage; wrap it in a DemoStorage when demostorage is requested."""
    adapter = adapter_factory(options)
    storage = RelStorage(adapter=adapter, options=options)
    if demostorage:
        return DemoStorage(base=storage)
    return storage