            # NOTE(review): this chunk begins mid-way through the
            # PostgreSQL pack-lock acquisition (the advisory-lock branch);
            # the enclosing "def" and the "if" that this "else" pairs with
            # are outside this view.
            if not locked:
                raise StorageError('A pack or undo operation is in progress')
        else:
            # b/w compat
            try:
                cursor.execute("LOCK pack_lock IN EXCLUSIVE MODE NOWAIT")
            except self.lock_exceptions:  # psycopg2.DatabaseError:
                raise StorageError('A pack or undo operation is in progress')

    def release_pack_lock(self, cursor):
        """Release the pack lock."""
        if self._pg_has_advisory_locks(cursor):
            cursor.execute("SELECT pg_advisory_unlock(1)")
        # else no action needed since the lock will be released at txn commit

PostgreSQLLocker = implementer(ILocker)(PostgreSQLLocker)


class MySQLLocker(Locker):
    implements(ILocker)

    @metricmethod
    def hold_commit_lock(self, cursor, ensure_current=False, nowait=False):
        """Acquire the database-wide MySQL commit lock via GET_LOCK()."""
        # "cond and a or b" ternary idiom: zero timeout when nowait is
        # requested, otherwise the configured commit lock timeout.
        timeout = not nowait and self.commit_lock_timeout or 0
        stmt = "SELECT GET_LOCK(CONCAT(DATABASE(), '.commit'), %s)"
        cursor.execute(stmt, (timeout,))
        # GET_LOCK returns 1 on success and 0 on timeout.
        locked = cursor.fetchone()[0]
        if nowait and locked in (0, 1):
            return bool(locked)
        if not locked:
            raise StorageError("Unable to acquire commit lock")
                # NOTE(review): chunk begins inside run_script's
                # line-accumulation loop; a statement was just executed at
                # its terminating ";" and the buffer is reset here.
                lines = []
            else:
                lines.append(line)
        if lines:
            # Run any trailing statement not terminated by a semicolon.
            stmt = '\n'.join(lines)
            self.run_script_stmt(cursor, stmt, params)

    def run_many(self, cursor, stmt, items):
        """Execute a statement repeatedly.  Items should be a list of tuples.

        stmt should use '%s' parameter format.
        """
        cursor.executemany(stmt, items)

ScriptRunner = implementer(IScriptRunner)(ScriptRunner)


class OracleScriptRunner(ScriptRunner):

    # Oracle spellings for the generic script variables: booleans are
    # stored as 'Y'/'N' and bind parameters use ":name" style.
    # NOTE(review): the dict is truncated at the end of this chunk and
    # continues beyond this view.
    script_vars = {
        'TRUE':         "'Y'",
        'FALSE':        "'N'",
        'TRUNCATE':     'TRUNCATE TABLE',
        'oid':          ':oid',
        'tid':          ':tid',
        'pack_tid':     ':pack_tid',
        'undo_tid':     ':undo_tid',
        'self_tid':     ':self_tid',
        'min_tid':      ':min_tid',
        'max_tid':      ':max_tid',
        # NOTE(review): chunk begins mid-way through an Oracle blob-upload
        # method; insert_stmt/select_stmt, oid, tid, use_tid, maxsize and
        # the open file f are defined above this view.
        try:
            chunk_num = 0
            while True:
                blob = None
                params = dict(oid=oid, chunk_num=chunk_num)
                if use_tid:
                    params['tid'] = tid
                # Insert an empty LOB row, then select it back to obtain a
                # writable LOB locator.
                cursor.execute(insert_stmt, params)
                cursor.execute(select_stmt, params)
                blob, = cursor.fetchone()
                blob.open()
                # Write in multiples of the LOB's native chunk size, close
                # to the configured blob_chunk_size.
                write_chunk_size = int(max(round(
                    1.0 * self.blob_chunk_size / blob.getchunksize()), 1)
                    * blob.getchunksize())
                offset = 1  # Oracle still uses 1-based indexing.
                for _i in xrange(int(maxsize / write_chunk_size)):
                    write_chunk = f.read(write_chunk_size)
                    if not blob.write(write_chunk, offset):
                        # EOF.
                        return
                    offset += len(write_chunk)
                if blob is not None and blob.isopen():
                    blob.close()
                chunk_num += 1
        finally:
            # Always release the source file and any LOB left open (the
            # early "return" on EOF also lands here).
            f.close()
            if blob is not None and blob.isopen():
                blob.close()

ObjectMover = implementer(IObjectMover)(ObjectMover)
                # NOTE(review): chunk begins inside run_script's
                # line-accumulation loop; the enclosing "def"/"for" are
                # outside this view.
                lines = []
            else:
                lines.append(line)
        if lines:
            # Flush any trailing statement without a terminating ";".
            stmt = "\n".join(lines)
            self.run_script_stmt(cursor, stmt, params)

    def run_many(self, cursor, stmt, items):
        """Execute a statement repeatedly.  Items should be a list of tuples.

        stmt should use '%s' parameter format.
        """
        cursor.executemany(stmt, items)

ScriptRunner = implementer(IScriptRunner)(ScriptRunner)


class OracleScriptRunner(ScriptRunner):

    # Oracle spellings for generic script variables ('Y'/'N' booleans,
    # ":name" bind parameters).  Truncated: the dict continues beyond
    # this view.
    script_vars = {
        "TRUE": "'Y'",
        "FALSE": "'N'",
        "TRUNCATE": "TRUNCATE TABLE",
        "oid": ":oid",
        "tid": ":tid",
        "pack_tid": ":pack_tid",
        "undo_tid": ":undo_tid",
        "self_tid": ":self_tid",
        "min_tid": ":min_tid",
        "max_tid": ":max_tid",
                "The database connection is stale: new_polled_tid=%d, "
                "prev_polled_tid=%d." % (new_polled_tid, prev_polled_tid))
            # NOTE(review): the two lines above are the tail of an error
            # raised by the poll method that precedes this chunk.

    def list_changes(self, cursor, after_tid, last_tid):
        """Return the (oid, tid) values changed in a range of transactions.

        The returned iterable must include the latest changes in the range
        after_tid < tid <= last_tid.
        """
        if self.keep_history:
            # History-preserving: current_object maps each oid to its
            # current tid.
            stmt = """
            SELECT zoid, tid
            FROM current_object
            WHERE tid > %(min_tid)s
                AND tid <= %(max_tid)s
            """
        else:
            stmt = """
            SELECT zoid, tid
            FROM object_state
            WHERE tid > %(min_tid)s
                AND tid <= %(max_tid)s
            """
        params = {'min_tid': after_tid, 'max_tid': last_tid}
        # Interpolate the database-specific variable spellings, then
        # intern the statement since polling reuses it constantly.
        stmt = intern(stmt % self.runner.script_vars)
        cursor.execute(stmt, params)
        return cursor.fetchall()

Poller = implementer(IPoller)(Poller)
            # NOTE(review): chunk begins mid-way through the replica
            # selector's iteration method; the branch condition above
            # (apparently a config-modified reset) is outside this view.
            self._skip_index = None
            self._iterating = True
        elif not self._iterating:
            # Start iterating.
            self._skip_index = self._current_index
            i = 0
            if i == self._skip_index:
                # Don't re-try the replica that just failed.
                i = 1
            if i >= len(self._replicas):
                # There are no more replicas to try.
                self._select(0)
                return None
            self._select(i)
            self._iterating = True
        else:
            # Continue iterating.
            i = self._current_index + 1
            if i == self._skip_index:
                i += 1
            if i >= len(self._replicas):
                # There are no more replicas to try.
                self._select(0)
                return None
            self._select(i)
        return self._current_replica

ReplicaSelector = implementer(IReplicaSelector)(ReplicaSelector)
    def new_instance(self):
        """Create a new adapter sharing this one's DSN and options."""
        return PostgreSQLAdapter(dsn=self._dsn, options=self.options)

    def __str__(self):
        """Describe the adapter: class, history mode, sanitized DSN."""
        parts = [self.__class__.__name__]
        if self.keep_history:
            parts.append('history preserving')
        else:
            parts.append('history free')
        dsnparts = self._dsn.split()
        # Strip the password component out of the displayed DSN.
        s = ' '.join(p for p in dsnparts if not p.startswith('password'))
        parts.append('dsn=%r' % s)
        return ", ".join(parts)

PostgreSQLAdapter = implementer(IRelStorageAdapter)(PostgreSQLAdapter)


class Psycopg2Connection(psycopg2.extensions.connection):
    # The replica attribute holds the name of the replica this
    # connection is bound to.
    __slots__ = ('replica', )


class Psycopg2ConnectionManager(AbstractConnectionManager):
    # NOTE(review): this class is truncated at the end of the chunk.

    isolation_read_committed = (
        psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
    isolation_serializable = (psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE)

    disconnected_exceptions = disconnected_exceptions
            # NOTE(review): chunk begins mid-way through the replica
            # selector's iteration method; the branch condition above is
            # outside this view.  Selecting replica 0 restarts iteration.
            self._select(0)
            self._skip_index = None
            self._iterating = True
        elif not self._iterating:
            # Start iterating.
            self._skip_index = self._current_index
            i = 0
            if i == self._skip_index:
                # Skip the replica that just failed.
                i = 1
            if i >= len(self._replicas):
                # There are no more replicas to try.
                self._select(0)
                return None
            self._select(i)
            self._iterating = True
        else:
            # Continue iterating.
            i = self._current_index + 1
            if i == self._skip_index:
                i += 1
            if i >= len(self._replicas):
                # There are no more replicas to try.
                self._select(0)
                return None
            self._select(i)
        return self._current_replica

ReplicaSelector = implementer(IReplicaSelector)(ReplicaSelector)
                        packed=False):
        """Add a transaction."""
        if self.keep_history:
            # Text parameters arrive base64-encoded (encode_bytes_param)
            # and are decoded server-side into bytea columns.
            stmt = """
            INSERT INTO transaction
                (tid, packed, username, description, extension)
            VALUES (%s, %s,
                decode(%s, 'base64'), decode(%s, 'base64'),
                decode(%s, 'base64'))
            """
            cursor.execute(stmt, (tid, packed,
                encode_bytes_param(username, True),
                encode_bytes_param(description, True),
                encode_bytes_param(extension, True)))

PostgreSQLTransactionControl = implementer(ITransactionControl)(PostgreSQLTransactionControl)


class MySQLTransactionControl(TransactionControl):
    implements(ITransactionControl)

    def __init__(self, keep_history, Binary):
        # keep_history: True for history-preserving schemas.
        # Binary: presumably the DB-API Binary constructor from the
        # driver module — TODO confirm against the caller.
        self.keep_history = keep_history
        self.Binary = Binary

    def get_tid(self, cursor):
        """Returns the most recent tid."""
        if self.keep_history:
            stmt = """
            SELECT tid FROM transaction
            SELECT CASE WHEN %s > nextval('zoid_seq')
                THEN setval('zoid_seq', %s)
                ELSE 0
            END
            """, (n, n))
        # NOTE(review): the statement above is the tail of set_min_oid;
        # the chunk begins inside its SQL string literal.

    @metricmethod
    def new_oids(self, cursor):
        """Return a sequence of new, unused OIDs."""
        stmt = "SELECT NEXTVAL('zoid_seq')"
        cursor.execute(stmt)
        n = cursor.fetchone()[0]
        # Each sequence increment reserves a batch of 16 OIDs.
        return range(n * 16 - 15, n * 16 + 1)

PostgreSQLOIDAllocator = implementer(IOIDAllocator)(PostgreSQLOIDAllocator)


class MySQLOIDAllocator(object):
    implements(IOIDAllocator)

    def set_min_oid(self, cursor, oid):
        """Ensure the next OID is at least the given OID."""
        # Round up to the next 16-OID batch boundary.
        n = (oid + 15) // 16
        cursor.execute("REPLACE INTO new_oid VALUES(%s)", (n, ))

    @metricmethod
    def new_oids(self, cursor):
        """Return a sequence of new, unused OIDs."""
        stmt = "INSERT INTO new_oid VALUES ()"
        cursor.execute(stmt)
        # NOTE(review): this method is truncated here; it presumably
        # continues (reading the generated row id) beyond this view.
    def new_instance(self):
        """Create a sibling adapter sharing this one's DSN and options."""
        return PostgreSQLAdapter(dsn=self._dsn, options=self.options)

    def __str__(self):
        """Describe the adapter: class name, history mode, sanitized DSN."""
        parts = [self.__class__.__name__]
        if self.keep_history:
            parts.append('history preserving')
        else:
            parts.append('history free')
        dsnparts = self._dsn.split()
        # Drop any password component before displaying the DSN.
        s = ' '.join(p for p in dsnparts if not p.startswith('password'))
        parts.append('dsn=%r' % s)
        return ", ".join(parts)

PostgreSQLAdapter = implementer(IRelStorageAdapter)(PostgreSQLAdapter)


class Psycopg2Connection(psycopg2.extensions.connection):
    # The replica attribute holds the name of the replica this
    # connection is bound to.
    __slots__ = ('replica',)


class Psycopg2ConnectionManager(AbstractConnectionManager):
    # NOTE(review): this class is truncated at the end of the chunk.

    isolation_read_committed = (
        psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
    isolation_serializable = (
        psycopg2.extensions.ISOLATION_LEVEL_SERIALIZABLE)
            CREATE TRIGGER blob_chunk_delete
            BEFORE DELETE ON blob_chunk
            FOR EACH ROW
            EXECUTE PROCEDURE blob_chunk_delete_trigger()
            """
        cursor.execute(stmt)
        # NOTE(review): the statement above is the tail of a trigger
        # installer; the chunk begins inside its SQL string literal.

    def drop_all(self):
        """Drop the schema, making sure blob large objects go first."""
        def callback(conn, cursor):
            if 'blob_chunk' in self.list_tables(cursor):
                # Trigger deletion of blob OIDs.
                cursor.execute("DELETE FROM blob_chunk")
        self.connmanager.open_and_call(callback)
        super(PostgreSQLSchemaInstaller, self).drop_all()

PostgreSQLSchemaInstaller = implementer(ISchemaInstaller)(PostgreSQLSchemaInstaller)


class MySQLSchemaInstaller(AbstractSchemaInstaller):
    implements(ISchemaInstaller)

    database_type = 'mysql'

    def get_database_name(self, cursor):
        """Return the name of the currently selected database."""
        cursor.execute("SELECT DATABASE()")
        for (name,) in cursor:
            return name

    def list_tables(self, cursor):
        """Return the names of all tables in the current database."""
        cursor.execute("SHOW TABLES")
        return [name for (name,) in cursor.fetchall()]
                # NOTE(review): chunk begins inside the batched-deletion
                # loop of the pack method; the loop header and stmt are
                # outside this view.  Deletions are committed in batches
                # so the commit lock is released between them.
                self.runner.run_script_stmt(cursor, stmt)
                deleted = cursor.rowcount
                conn.commit()
                self.locker.release_commit_lock(cursor)
                if deleted < 1000:
                    # Last set of deletions complete
                    break

            # perform cleanup that does not require the commit lock
            log.debug("pack: clearing temporary pack state")
            for _table in ('pack_object', 'pack_state', 'pack_state_tid'):
                stmt = '%(TRUNCATE)s ' + _table
                self.runner.run_script_stmt(cursor, stmt)

HistoryPreservingPackUndo = implementer(IPackUndo)(HistoryPreservingPackUndo)


class MySQLHistoryPreservingPackUndo(HistoryPreservingPackUndo):

    # Work around a MySQL performance bug by avoiding an expensive subquery.
    # See: http://mail.zope.org/pipermail/zodb-dev/2008-May/011880.html
    #      http://bugs.mysql.com/bug.php?id=28257
    _script_create_temp_pack_visit = """
        CREATE TEMPORARY TABLE temp_pack_visit (
            zoid BIGINT UNSIGNED NOT NULL,
            keep_tid BIGINT UNSIGNED NOT NULL
        );
        CREATE UNIQUE INDEX temp_pack_visit_zoid ON temp_pack_visit (zoid);
        CREATE INDEX temp_pack_keep_tid ON temp_pack_visit (keep_tid);
        """
raise StorageError('A pack or undo operation is in progress') else: # b/w compat try: cursor.execute("LOCK pack_lock IN EXCLUSIVE MODE NOWAIT") except self.lock_exceptions: # psycopg2.DatabaseError: raise StorageError('A pack or undo operation is in progress') def release_pack_lock(self, cursor): """Release the pack lock.""" if self._pg_has_advisory_locks(cursor): cursor.execute("SELECT pg_advisory_unlock(1)") # else no action needed since the lock will be released at txn commit PostgreSQLLocker = implementer(ILocker)(PostgreSQLLocker) class MySQLLocker(Locker): implements(ILocker) @metricmethod def hold_commit_lock(self, cursor, ensure_current=False, nowait=False): timeout = not nowait and self.commit_lock_timeout or 0 stmt = "SELECT GET_LOCK(CONCAT(DATABASE(), '.commit'), %s)" cursor.execute(stmt, (timeout, )) locked = cursor.fetchone()[0] if nowait and locked in (0, 1): return bool(locked) if not locked: raise StorageError("Unable to acquire commit lock")
            # NOTE(review): chunk begins inside the "try:" of an Oracle
            # blob-upload method; insert_stmt/select_stmt, oid, tid,
            # use_tid, maxsize and the open file f are defined above
            # this view.
            while True:
                blob = None
                params = dict(oid=oid, chunk_num=chunk_num)
                if use_tid:
                    params['tid'] = tid
                # Insert an empty LOB row, then re-select it to get a
                # writable LOB locator.
                cursor.execute(insert_stmt, params)
                cursor.execute(select_stmt, params)
                blob, = cursor.fetchone()
                blob.open()
                # Write in multiples of the LOB's native chunk size,
                # close to the configured blob_chunk_size.
                write_chunk_size = int(
                    max(
                        round(1.0 * self.blob_chunk_size
                              / blob.getchunksize()), 1)
                    * blob.getchunksize())
                offset = 1  # Oracle still uses 1-based indexing.
                for _i in xrange(int(maxsize / write_chunk_size)):
                    write_chunk = f.read(write_chunk_size)
                    if not blob.write(write_chunk, offset):
                        # EOF.
                        return
                    offset += len(write_chunk)
                if blob is not None and blob.isopen():
                    blob.close()
                chunk_num += 1
        finally:
            # Always release the source file and any open LOB; the early
            # "return" on EOF also passes through here.
            f.close()
            if blob is not None and blob.isopen():
                blob.close()

ObjectMover = implementer(IObjectMover)(ObjectMover)
            SELECT CASE WHEN %s > nextval('zoid_seq')
                THEN setval('zoid_seq', %s)
                ELSE 0
            END
            """, (n, n))
        # NOTE(review): the statement above is the tail of set_min_oid;
        # this chunk begins inside its SQL string literal.

    @metricmethod
    def new_oids(self, cursor):
        """Return a sequence of new, unused OIDs."""
        stmt = "SELECT NEXTVAL('zoid_seq')"
        cursor.execute(stmt)
        n = cursor.fetchone()[0]
        # Each sequence value reserves a 16-OID batch.
        return range(n * 16 - 15, n * 16 + 1)

PostgreSQLOIDAllocator = implementer(IOIDAllocator)(PostgreSQLOIDAllocator)


class MySQLOIDAllocator(object):
    implements(IOIDAllocator)

    def set_min_oid(self, cursor, oid):
        """Ensure the next OID is at least the given OID."""
        # Round up to the next 16-OID batch boundary.
        n = (oid + 15) // 16
        cursor.execute("REPLACE INTO new_oid VALUES(%s)", (n,))

    @metricmethod
    def new_oids(self, cursor):
        """Return a sequence of new, unused OIDs."""
        stmt = "INSERT INTO new_oid VALUES ()"
        cursor.execute(stmt)
        # NOTE(review): method truncated here; presumably continues by
        # reading the generated row id beyond this view.
    def open_for_store(self):
        """Open and initialize a connection for storing objects.

        Returns (conn, cursor).
        """
        conn, cursor = self.open()
        try:
            if self.on_store_opened is not None:
                self.on_store_opened(cursor, restart=False)
            return conn, cursor
        except:
            # Don't leak the freshly opened connection if the
            # on_store_opened hook fails; re-raise the original error.
            self.close(conn, cursor)
            raise

    def restart_store(self, conn, cursor):
        """Reuse a store connection."""
        self.check_replica(conn, cursor)
        conn.rollback()
        if self.on_store_opened is not None:
            self.on_store_opened(cursor, restart=True)

    def open_for_pre_pack(self):
        """Open a connection to be used for the pre-pack phase.

        Returns (conn, cursor).
        """
        return self.open()

AbstractConnectionManager = implementer(IConnectionManager)(
    AbstractConnectionManager)
        # NOTE(review): chunk begins mid-way through an object-history
        # iterator method; the "if" branch paired with this "else" is
        # outside this view.
        else:
            stmt = """
            SELECT tid, username, description, extension, state_size
            """
        stmt += """
        FROM transaction
            JOIN object_state USING (tid)
        WHERE zoid = %(oid)s
            AND packed = %(FALSE)s
        ORDER BY tid DESC
        """
        self.runner.run_script_stmt(cursor, stmt, {'oid': oid})
        return self._transaction_iterator(cursor)

HistoryPreservingDatabaseIterator = implementer(IDatabaseIterator)(HistoryPreservingDatabaseIterator)


class HistoryFreeDatabaseIterator(DatabaseIterator):
    implements(IDatabaseIterator)

    def iter_transactions(self, cursor):
        """Iterate over the transaction log, newest first.

        Skips packed transactions.
        Yields (tid, username, description, extension) for each transaction.
        """
        # A history-free storage retains no transaction log, so there is
        # nothing to yield.
        return []

    def iter_transactions_range(self, cursor, start=None, stop=None):
        """Iterate over the transactions in the given range, oldest first.
        # NOTE(review): chunk begins mid-way through an object-history
        # iterator method; the matching "if" branch is outside this view.
        else:
            stmt = """
            SELECT tid, username, description, extension, state_size
            """
        stmt += """
        FROM transaction
            JOIN object_state USING (tid)
        WHERE zoid = %(oid)s
            AND packed = %(FALSE)s
        ORDER BY tid DESC
        """
        self.runner.run_script_stmt(cursor, stmt, {'oid': oid})
        return self._transaction_iterator(cursor)

HistoryPreservingDatabaseIterator = implementer(IDatabaseIterator)(
    HistoryPreservingDatabaseIterator)


class HistoryFreeDatabaseIterator(DatabaseIterator):
    implements(IDatabaseIterator)

    def iter_transactions(self, cursor):
        """Iterate over the transaction log, newest first.

        Skips packed transactions.
        Yields (tid, username, description, extension) for each transaction.
        """
        # History-free storages keep no transaction log: empty result.
        return []

    def iter_transactions_range(self, cursor, start=None, stop=None):
        """Iterate over the transactions in the given range, oldest first.
def open_for_store(self): """Open and initialize a connection for storing objects. Returns (conn, cursor). """ conn, cursor = self.open() try: if self.on_store_opened is not None: self.on_store_opened(cursor, restart=False) return conn, cursor except: self.close(conn, cursor) raise def restart_store(self, conn, cursor): """Reuse a store connection.""" self.check_replica(conn, cursor) conn.rollback() if self.on_store_opened is not None: self.on_store_opened(cursor, restart=True) def open_for_pre_pack(self): """Open a connection to be used for the pre-pack phase. Returns (conn, cursor). """ return self.open() AbstractConnectionManager = implementer(IConnectionManager)(AbstractConnectionManager)