def check_compatibility(self, cursor, tables):
    if self.keep_history:
        if 'transaction' not in tables and 'current_object' not in tables:
            raise StorageError(
                "Schema mismatch: a history-preserving adapter "
                "cannot connect to a history-free database. "
                "If you need to convert, use the zodbconvert utility.")
    else:
        if 'transaction' in tables and 'current_object' in tables:
            raise StorageError(
                "Schema mismatch: a history-free adapter "
                "cannot connect to a history-preserving database. "
                "If you need to convert, use the zodbconvert utility.")
def hold_pack_lock(self, cursor):
    """Try to acquire the pack lock.

    Raise an exception if packing or undo is already in progress.
    """
    if self._pg_has_advisory_locks(cursor):
        cursor.execute("SELECT pg_try_advisory_lock(1)")
        locked = cursor.fetchone()[0]
        if not locked:
            raise StorageError('A pack or undo operation is in progress')
    else:
        # b/w compat
        try:
            cursor.execute("LOCK pack_lock IN EXCLUSIVE MODE NOWAIT")
        except self.lock_exceptions:  # psycopg2.DatabaseError:
            raise StorageError('A pack or undo operation is in progress')
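# A minimal sketch of the matching release step, assuming the advisory-lock
# scheme above (the method name mirrors hold_pack_lock but is illustrative,
# not taken from the source). PostgreSQL advisory locks are session-scoped,
# so they survive COMMIT and ROLLBACK and must be released explicitly; the
# b/w-compat table lock ends with the transaction on its own.
def release_pack_lock(self, cursor):
    if self._pg_has_advisory_locks(cursor):
        cursor.execute("SELECT pg_advisory_unlock(1)")
    # else: the table lock is released when the transaction ends.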
def hold_commit_lock(self, cursor, ensure_current=False, nowait=False):
    try:
        if ensure_current:
            # Hold commit_lock to prevent concurrent commits
            # (for as short a time as possible).
            # Lock transaction and current_object in share mode to ensure
            # conflict detection has the most current data.
            if self.keep_history:
                stmt = """
                LOCK TABLE commit_lock IN EXCLUSIVE MODE%s;
                LOCK TABLE transaction IN SHARE MODE;
                LOCK TABLE current_object IN SHARE MODE
                """ % (nowait and ' NOWAIT' or '',)
            else:
                stmt = """
                LOCK TABLE commit_lock IN EXCLUSIVE MODE%s;
                LOCK TABLE object_state IN SHARE MODE
                """ % (nowait and ' NOWAIT' or '',)
            cursor.execute(stmt)
        else:
            cursor.execute("LOCK TABLE commit_lock IN EXCLUSIVE MODE%s" %
                           (nowait and ' NOWAIT' or '',))
    except self.lock_exceptions:
        if nowait:
            return False
        raise StorageError('Acquiring a commit lock failed')
    return True
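# A minimal sketch of the matching release step for the table-based locking
# above (the method name and placement are illustrative): EXCLUSIVE MODE
# table locks are scoped to the enclosing transaction, so PostgreSQL drops
# them automatically at COMMIT or ROLLBACK and nothing needs to be done.
def release_commit_lock(self, cursor):
    pass  # The table locks acquired above end with the transaction.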
def hold_commit_lock(self, cursor, ensure_current=False, nowait=False):
    # Hold commit_lock to prevent concurrent commits
    # (for as short a time as possible).
    timeout = not nowait and self.commit_lock_timeout or 0
    status = cursor.callfunc(
        "DBMS_LOCK.REQUEST",
        self.inputsize_NUMBER, (
            self.commit_lock_id,
            6,  # exclusive (X_MODE)
            timeout,
            True,
        ))
    if status != 0:
        if nowait and status == 1:
            return False  # Lock failed due to a timeout
        if status >= 1 and status <= 5:
            msg = ('', 'timeout', 'deadlock', 'parameter error',
                   'lock already owned', 'illegal handle')[int(status)]
        else:
            msg = str(status)
        raise StorageError("Unable to acquire commit lock (%s)" % msg)

    # Alternative:
    #cursor.execute("LOCK TABLE commit_lock IN EXCLUSIVE MODE")

    if ensure_current:
        if self.keep_history:
            # Lock transaction and current_object in share mode to ensure
            # conflict detection has the most current data.
            cursor.execute("LOCK TABLE transaction IN SHARE MODE")
            cursor.execute("LOCK TABLE current_object IN SHARE MODE")
        else:
            cursor.execute("LOCK TABLE object_state IN SHARE MODE")
    return True
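# A minimal sketch of the matching release call, assuming the DBMS_LOCK
# approach above (the method name is illustrative). DBMS_LOCK.RELEASE
# returns 0 on success, 3 for a parameter error, 4 if the session does not
# own the lock, and 5 for an illegal handle, mirroring the REQUEST codes.
def release_commit_lock(self, cursor):
    status = cursor.callfunc(
        "DBMS_LOCK.RELEASE", self.inputsize_NUMBER, (self.commit_lock_id,))
    if status != 0:
        raise StorageError("Unable to release commit lock (%s)" % status)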
def check_compatibility(self, cursor, tables):
    if self.keep_history:
        if 'transaction' not in tables and 'current_object' not in tables:
            raise StorageError(
                "Schema mismatch: a history-preserving adapter "
                "cannot connect to a history-free database. "
                "If you need to convert, use the zodbconvert utility.")
    else:
        if 'transaction' in tables and 'current_object' in tables:
            raise StorageError(
                "Schema mismatch: a history-free adapter "
                "cannot connect to a history-preserving database. "
                "If you need to convert, use the zodbconvert utility.")
    if 'blob_chunk' not in tables:
        raise StorageError(
            "Schema mismatch; please create the blob_chunk tables. "
            "See migration instructions for RelStorage 1.5.")
def check_compatibility(self, cursor, tables):
    super(MySQLSchemaInstaller, self).check_compatibility(cursor, tables)
    tables_that_are_not_innodb = self.__list_tables_not_innodb(cursor)
    if tables_that_are_not_innodb:
        raise StorageError(
            "All RelStorage tables should be InnoDB; MyISAM is no longer "
            "supported. These tables are not using InnoDB: %r" % (
                tables_that_are_not_innodb,))
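# A minimal sketch of the private helper referenced above, assuming it
# queries information_schema (the real implementation may instead filter
# to the known RelStorage table names):
def __list_tables_not_innodb(self, cursor):
    cursor.execute(
        "SELECT table_name FROM information_schema.tables"
        " WHERE table_schema = DATABASE() AND engine <> 'InnoDB'")
    return [row[0] for row in cursor.fetchall()]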
def hold_commit_lock(self, cursor, ensure_current=False, nowait=False):
    timeout = not nowait and self.commit_lock_timeout or 0
    stmt = "SELECT GET_LOCK(CONCAT(DATABASE(), '.commit'), %s)"
    cursor.execute(stmt, (timeout,))
    locked = cursor.fetchone()[0]
    if nowait and locked in (0, 1):
        return bool(locked)
    if not locked:
        raise StorageError("Unable to acquire commit lock")
    return True
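# A minimal sketch of the matching release call, assuming the GET_LOCK
# scheme above (the method name is illustrative). GET_LOCK returns 1 on
# success, 0 on timeout, and NULL on error, which is why the nowait path
# above tests `locked in (0, 1)`. These locks belong to the connection,
# not the transaction, so they must be released explicitly.
def release_commit_lock(self, cursor):
    stmt = "SELECT RELEASE_LOCK(CONCAT(DATABASE(), '.commit'))"
    cursor.execute(stmt)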
def _check_permissions(self, data, oid=None):
    if not (data.endswith(self.user_id)
            or oid == self.user_id
            or oid == z64):
        raise StorageError(
            "Attempt to access encrypted data of others at <%s> by <%s>" % (
                u64(oid), u64(self.user_id)))
def hold_pack_lock(self, cursor):
    """Try to acquire the pack lock.

    Raise an exception if packing or undo is already in progress.
    """
    stmt = "SELECT GET_LOCK(CONCAT(DATABASE(), '.pack'), 0)"
    cursor.execute(stmt)
    res = cursor.fetchone()[0]
    if not res:
        raise StorageError('A pack or undo operation is in progress')
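# A minimal sketch of the matching release, assuming the scheme above
# (hypothetical method name). Note that before MySQL 5.7.5 a connection
# could hold only one GET_LOCK lock at a time, so acquiring the commit
# lock on the same connection would silently drop the pack lock.
def release_pack_lock(self, cursor):
    stmt = "SELECT RELEASE_LOCK(CONCAT(DATABASE(), '.pack'))"
    cursor.execute(stmt)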
def check_compatibility(self, cursor, tables):  # pylint:disable=unused-argument
    tables = self._normalize_schema_object_names(tables)
    if self.keep_history:
        if 'transaction' not in tables and 'current_object' not in tables:
            raise StorageError(
                "Schema mismatch: a history-preserving adapter "
                "cannot connect to a history-free database. "
                "If you need to convert, use the zodbconvert utility.")
    else:
        if 'transaction' in tables and 'current_object' in tables:
            raise StorageError(
                "Schema mismatch: a history-free adapter "
                "cannot connect to a history-preserving database. "
                "If you need to convert, use the zodbconvert utility.")
    if 'blob_chunk' not in tables:
        raise StorageError(
            "Schema mismatch; please create the blob_chunk tables. "
            "See migration instructions for RelStorage 1.5. "
            "All tables: %s" % (tables,))
def hold_pack_lock(self, cursor):
    """Try to acquire the pack lock.

    Raise an exception if packing or undo is already in progress.
    """
    stmt = """
    LOCK TABLE pack_lock IN EXCLUSIVE MODE NOWAIT
    """
    try:
        cursor.execute(stmt)
    except self.lock_exceptions:  # cx_Oracle.DatabaseError:
        raise StorageError('A pack or undo operation is in progress')
def check_compatibility(self, cursor, tables):
    super(MySQLSchemaInstaller, self).check_compatibility(cursor, tables)
    stmt = "SHOW TABLE STATUS LIKE 'object_state'"
    cursor.execute(stmt)
    for row in cursor:
        for col_index, col in enumerate(cursor.description):
            if col[0].lower() == 'engine':
                engine = row[col_index]
                if engine.lower() != 'innodb':
                    raise StorageError(
                        "The object_state table must use the InnoDB "
                        "engine, but it is using the %s engine." % engine)
def check_serials(self):
    """Verifies that all cached objects are in sync with the data.

    This is useful for finding gateways that generate inconsistent hashes.
    """
    for oid, ob in self._cache.items():
        if ob._p_changed is not None:
            self.before_load()
            p, serial = self._storage.load(oid, self._version)
            if serial != self._get_serial(ob):
                raise StorageError(
                    "Inconsistent serial for oid %s" % repr(oid))
def register(self, obj):
    o = self.real_jar[self.real_oid]
    if o._p_changed is None:
        # The application held on to this UPO even after its
        # container was ghosted.  The container needs to be
        # reactivated, but reactivation would create a new UPO in
        # place of the UPO held by this jar.  The application
        # would continue to refer to this old UPO.  Don't let the
        # application continue to change this abandoned object,
        # since all changes will be lost.
        raise StorageError(
            'Tried to change an unmanaged persistent object '
            'when the containing persistent object is a ghost')
    o._p_changed = 1
def commit(self, obj, transaction):
    if obj is self:
        self._may_begin(transaction)
        # We registered ourself.  Execute a commit action, if any.
        # XXX Where is the _Connection_onCommitActions ever set?
        # if self._Connection__onCommitActions is not None:
        #     method_name, args, kw = \
        #         self._Connection__onCommitActions.pop(0)
        #     apply(getattr(self, method_name), (transaction,) + args, kw)
        return
    oid = obj._p_oid
    assert oid != 'unmanaged', repr(obj)
    #invalid = self._invalidated.get
    invalid = self._invalidated.__contains__

    modified = getattr(self, '_modified', None)
    if modified is None:
        modified = self._invalidating

    if oid is None or obj._p_jar is not self:
        # new object
        oid = self.new_oid()
        obj._p_jar = self
        obj._p_oid = oid
        self._creating.append(oid)
    elif obj._p_changed:
        if ((invalid(oid) and not hasattr(obj, '_p_resolveConflict'))
                or invalid(None)):
            raise ConflictError(object=obj)
        modified.append(oid)
    else:
        # Nothing to do
        return

    self._may_begin(transaction)

    stack = [obj]

    # A cStringIO buffer; getvalue(1) below returns the buffer contents
    # up to the current stream position.
    file = StringIO()
    seek = file.seek
    pickler = Pickler(file, 1)
    # SDH: external references are computed in a different way.
    # pickler.persistent_id = new_persistent_id(self, stack.append)
    dbstore = self._storage.store
    file = file.getvalue
    cache = self._cache
    get = cache.get
    dump = pickler.dump
    clear_memo = pickler.clear_memo

    version = self._version

    while stack:
        obj = stack[-1]
        del stack[-1]
        oid = obj._p_oid
        assert oid != 'unmanaged', repr(obj)
        serial = self._get_serial(obj)
        if serial == HASH0:
            # new object
            self._creating[oid] = True
        else:
            #XXX We should never get here
            # SDH: Actually it looks like we should, but only
            # for the first object on the stack.
            if ((invalid(oid) and not hasattr(obj, '_p_resolveConflict'))
                    or invalid(None)):
                raise ConflictError(object=obj)
            modified.append(oid)

        # SDH: hook in the serializer.
        # state = obj.__getstate__()
        osio = self._get_osio()
        event, classification, state = osio.serialize(oid, obj)
        ext_refs = event.external
        if ext_refs:
            for (ext_oid, ext_ref) in ext_refs:
                assert ext_oid
                assert ext_ref is not None
                if self._cache.get(ext_oid, None) is not ext_ref:
                    # New object or a bad reference
                    if ext_ref._p_jar is not None:
                        if ext_ref._p_jar is not self:
                            raise InvalidObjectReference(
                                "Can't refer from %s in %s to %s in %s" % (
                                    repr(obj), repr(self), repr(ext_ref),
                                    repr(ext_ref._p_jar)))
                    else:
                        ext_ref._p_jar = self
                    if ext_ref._p_oid:
                        if ext_ref._p_oid != ext_oid:
                            raise StorageError('Conflicting OIDs')
                    else:
                        ext_ref._p_oid = ext_oid
                    stack.append(ext_ref)

        if event.upos:
            self._handle_unmanaged(obj, event.upos)

        seek(0)
        clear_memo()
        dump(classification)
        dump(state)
        p = file(1)
        s = dbstore(oid, serial, p, version, transaction)
        self._store_count = self._store_count + 1

        # Put the object in the cache before handling the
        # response, just in case the response contains the
        # serial number for a newly created object
        try:
            cache[oid] = obj
        except:
            if aq_base(obj) is not obj:
                # Yuck, someone tried to store a wrapper.  Try to
                # cache it unwrapped.
                cache[oid] = aq_base(obj)
            else:
                raise

        self._handle_serial(s, oid)
def __getstate__(self):
    from ZODB.POSException import StorageError
    raise StorageError(
        "Instance of AntiPersistent class %s "
        "cannot be stored." % self.__class__.__name__)
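# A minimal usage sketch, assuming AntiPersistent is a mix-in whose
# __getstate__ is defined above (the subclass name here is made up):
# pickling any instance fails loudly because pickle consults __getstate__,
# which keeps non-persistable helpers such as locks or database cursors
# out of the storage.
import pickle
from ZODB.POSException import StorageError

class _ConnectionHolder(AntiPersistent):
    pass

try:
    pickle.dumps(_ConnectionHolder())
except StorageError as e:
    print(e)  # Instance of AntiPersistent class _ConnectionHolder cannot be stored.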
def protocolError(self, conn, message):
    raise StorageError("protocol error: %s" % message)