async def lock(self, obj):
    assert not obj.__new_marker__  # should be modifying an object
    if obj.__locked__:
        # we've already locked this...
        return
    obj.__locked__ = True
    key = self._get_key(obj)
    try:
        if await asyncio.wait_for(self._wait_for_lock(key, prev_exist=False),
                                  timeout=self._etcd_acquire_timeout):
            # have lock; however, need to refresh object so we don't get
            # tid conflicts
            await self._transaction.refresh(obj)
    except asyncio.TimeoutError:
        self.status = Status.ABORTED
        await self._storage.abort(self._transaction)
        await self._transaction._cache.close(invalidate=False)
        self._transaction.tpc_cleanup()
        await self._transaction._manager._close_txn(self._transaction)
        raise ConflictError('Could not lock object for writing')
    if obj._p_oid not in self._transaction.modified:
        # need to add it when locking...
        self._transaction.modified[obj._p_oid] = obj
async def tpc_vote(self):
    """Verify that a data manager can commit the transaction."""
    ok = await self._strategy.tpc_vote()
    if ok is False:
        await self._manager.abort(request=self.request, txn=self)
        await self._cache.close(invalidate=False)
        raise ConflictError(self)
async def test_trns_retries_with_app(dummy_guillotina, dummy_request):
    with mock.patch('aiohttp.web.Application._handle') as handle_mock:  # noqa
        f = asyncio.Future()
        f.set_result(None)
        handle_mock.return_value = f
        handle_mock.side_effect = ConflictError()
        resp = await dummy_guillotina._handle(dummy_request)
        assert resp.status_code == 409
async def get_one_row(self, smt, *args):
    # Helper function to provide easy adaptation to cockroach
    try:
        result = await smt.fetch(*args)
    except asyncpg.exceptions.SerializationError as ex:
        if 'restart transaction' in ex.args[0]:
            raise ConflictError(ex.args[0])
        raise  # propagate unexpected serialization errors
    return result[0] if len(result) > 0 else None
async def store(self, oid, old_serial, writer, obj, txn):
    assert oid is not None
    pickled = writer.serialize()  # This calls __getstate__ of obj
    if len(pickled) >= self._large_record_size:
        logger.warning(f"Large object {obj.__class__}: {len(pickled)}")
    part = writer.part
    if part is None:
        part = 0
    statement_sql = self._sql.get("CR_NAIVE_UPSERT", self._objects_table_name)
    update = False
    if not obj.__new_marker__ and obj.__serial__ is not None:
        # we should be confident this is an object update
        statement_sql = self._sql.get("CR_UPDATE", self._objects_table_name)
        update = True
    conn = await txn.get_connection()
    async with txn._lock:
        try:
            result = await conn.fetch(
                statement_sql,
                oid,               # The OID of the object
                txn._tid,          # Our TID
                len(pickled),      # Len of the object
                part,              # Partition indicator
                writer.resource,   # Is a resource ?
                writer.of,         # It belongs to a main
                old_serial,        # Old serial
                writer.parent_id,  # Parent OID
                writer.id,         # Traversal ID
                writer.type,       # Guillotina type
                pickled,           # Pickle state
            )
        except asyncpg.exceptions.UniqueViolationError as ex:
            if "duplicate key value (parent_id,id)" in ex.detail:
                raise ConflictIdOnContainer(ex)
            raise
        except asyncpg.exceptions._base.InterfaceError as ex:
            if "another operation is in progress" in ex.args[0]:
                raise ConflictError(
                    "asyncpg error, another operation in progress.",
                    oid, txn, old_serial, writer)
            raise
    if update and len(result) != 1:
        # raise tid conflict error
        raise TIDConflictError(
            "Mismatch of tid of object being updated. This is likely "
            "caused by a cache invalidation race condition and should "
            "be an edge case. This should resolve on request retry.",
            oid, txn, old_serial, writer)
    await txn._cache.store_object(obj, pickled)
async def test_trns_retries_with_app(container_requester):
    async with container_requester as requester:
        with mock.patch("aiohttp.web.Application._handle") as handle_mock:  # noqa
            f = asyncio.Future()
            f.set_result(None)
            handle_mock.return_value = f
            handle_mock.side_effect = ConflictError()
            response, status = await requester("GET", "/db/guillotina/@types")
            assert status == 409
async def test_trns_retries_with_app(container_requester):
    async with container_requester as requester:
        with mock.patch("guillotina.traversal.MatchInfo.handler") as handle_mock:  # noqa
            f = asyncio.Future()
            f.set_result(None)
            handle_mock.return_value = f
            handle_mock.side_effect = ConflictError()
            _, status = await requester("GET", "/db/guillotina/@types")
            assert status == 409
async def get_one_row(self, smt, *args):
    # Helper function to provide easy adaptation to cockroach
    try:
        result = await smt.fetch(*args)
    except asyncpg.exceptions.SerializationError as ex:
        if ex.sqlstate == '40001':
            # these are not handled with the ROLLBACK TO SAVEPOINT COCKROACH_RESTART
            # logic unfortunately; however, it does give us a chance to handle
            # it like a restart with higher priority
            raise ConflictError(ex.args[0])
        raise  # propagate unexpected serialization errors
    return result[0] if len(result) > 0 else None
async def commit(self, transaction):
    if transaction._db_txn is not None:
        async with transaction._lock:
            try:
                await transaction._db_txn.commit()
            except asyncpg.exceptions.SerializationError as ex:
                if 'restart transaction' in ex.args[0]:
                    raise ConflictError(ex.args[0])
                raise  # do not swallow other commit failures
    elif self._transaction_strategy not in ('none', 'tidonly'):
        logger.warning('Do not have db transaction to commit')
    return transaction._tid
async def restart_connection(self):
    log.error('Connection potentially lost to pg, restarting')
    await self._pool.close()
    self._pool.terminate()
    # re-bind, throw conflict error so the request is restarted...
    self._pool = await asyncpg.create_pool(
        dsn=self._dsn,
        max_size=self._pool_size,
        min_size=2,
        loop=self._pool._loop,
        **self._connection_options)
    # shared read connection on all transactions
    self._read_conn = await self.open()
    await self.initialize_tid_statements()
    self._connection_initialized_on = time.time()
    raise ConflictError('Restarting connection to postgresql')
async def store(self, oid, old_serial, writer, obj, txn):
    assert oid is not None
    p = writer.serialize()  # This calls __getstate__ of obj
    if len(p) >= self._large_record_size:
        logger.warning(f"Large object {obj.__class__}: {len(p)}")
    part = writer.part
    if part is None:
        part = 0
    update = False
    statement_sql = NAIVE_UPSERT
    if not obj.__new_marker__ and obj._p_serial is not None:
        # we should be confident this is an object update
        statement_sql = UPDATE
        update = True
    async with txn._lock:
        smt = await txn._db_conn.prepare(statement_sql)
        try:
            result = await smt.fetch(
                oid,               # The OID of the object
                txn._tid,          # Our TID
                len(p),            # Len of the object
                part,              # Partition indicator
                writer.resource,   # Is a resource ?
                writer.of,         # It belongs to a main
                old_serial,        # Old serial
                writer.parent_id,  # Parent OID
                writer.id,         # Traversal ID
                writer.type,       # Guillotina type
                p                  # Pickle state
            )
        except asyncpg.exceptions._base.InterfaceError as ex:
            if 'another operation is in progress' in ex.args[0]:
                conflict_summary = self.get_conflict_summary(oid, txn, old_serial, writer)
                raise ConflictError(
                    f'asyncpg error, another operation in progress.\n{conflict_summary}')
            raise
    if update and len(result) != 1:
        # raise tid conflict error
        conflict_summary = self.get_conflict_summary(oid, txn, old_serial, writer)
        raise TIDConflictError(
            f'Mismatch of tid of object being updated. This is likely '
            f'caused by a cache invalidation race condition and should '
            f'be an edge case. This should resolve on request retry.\n'
            f'{conflict_summary}')
async def get_one_row(self, txn, sql, *args, prepare=False):
    # Helper function to provide easy adaptation to cockroach
    conn = await txn.get_connection()
    try:
        if prepare:
            # latest version of asyncpg has prepare bypassing statement cache
            smt = await conn.prepare(sql)
            result = await smt.fetch(*args)
        else:
            result = await conn.fetch(sql, *args)
    except asyncpg.exceptions.SerializationError as ex:
        if ex.sqlstate == '40001':
            # these are not handled with the ROLLBACK TO SAVEPOINT COCKROACH_RESTART
            # logic unfortunately; however, it does give us a chance to handle
            # it like a restart with higher priority
            raise ConflictError(ex.args[0])
        raise  # propagate unexpected serialization errors
    return result[0] if len(result) > 0 else None
async def restart_connection(self, timeout=0.1):
    log.error('Connection potentially lost to pg, restarting')
    try:
        await asyncio.wait_for(self._pool.close(), timeout)
    except asyncio.TimeoutError:
        pass
    self._pool.terminate()
    # re-bind, throw conflict error so the request is restarted...
    self._pool = await asyncpg.create_pool(
        dsn=self._dsn,
        max_size=self._pool_size,
        min_size=2,
        loop=self._pool._loop,
        connection_class=app_settings['pg_connection_class'],
        **self._connection_options)
    # shared read connection on all transactions
    self._read_conn = await self.open()
    await self.initialize_tid_statements()
    self._connection_initialized_on = time.time()
    raise ConflictError('Restarting connection to postgresql')
async def restart_connection(self, timeout=0.1):
    log.error('Connection potentially lost to pg, restarting')
    await self._connection_manager.restart()
    await self.initialize_tid_statements()
    self._connection_initialized_on = time.time()
    raise ConflictError('Restarting connection to postgresql')
async def store(self, oid, old_serial, writer, obj, txn):
    assert oid is not None
    pickled = writer.serialize()  # This calls __getstate__ of obj
    if len(pickled) >= self._large_record_size:
        log.info(f"Large object {obj.__class__}: {len(pickled)}")
    if self._store_json:
        json_dict = await writer.get_json()
        json = orjson.dumps(json_dict).decode("utf-8")
    else:
        json = None
    part = writer.part
    if part is None:
        part = 0
    update = False
    statement_sql = self._sql.get("NAIVE_UPSERT", self._objects_table_name)
    if not obj.__new_marker__ and obj.__serial__ is not None:
        # we should be confident this is an object update
        statement_sql = self._sql.get("UPDATE", self._objects_table_name)
        update = True
    conn = await txn.get_connection()
    async with txn._lock:
        try:
            result = await conn.fetch(
                statement_sql,
                oid,               # The OID of the object
                txn._tid,          # Our TID
                len(pickled),      # Len of the object
                part,              # Partition indicator
                writer.resource,   # Is a resource ?
                writer.of,         # It belongs to a main
                old_serial,        # Old serial
                writer.parent_id,  # Parent OID
                writer.id,         # Traversal ID
                writer.type,       # Guillotina type
                json,              # JSON catalog
                pickled,           # Pickle state
            )
        except asyncpg.exceptions.UniqueViolationError as ex:
            if "Key (parent_id, id)" in ex.detail or "Key (of, id)" in ex.detail:
                raise ConflictIdOnContainer(ex)
            raise
        except asyncpg.exceptions.ForeignKeyViolationError:
            txn.deleted[obj.__uuid__] = obj
            raise TIDConflictError(
                "Bad value inserting into database that could be caused "
                "by a bad cache value. This should resolve on request retry.",
                oid, txn, old_serial, writer)
        except asyncpg.exceptions._base.InterfaceError as ex:
            if "another operation is in progress" in ex.args[0]:
                raise ConflictError(
                    "asyncpg error, another operation in progress.",
                    oid, txn, old_serial, writer)
            raise
        except asyncpg.exceptions.DeadlockDetectedError:
            raise ConflictError("Deadlock detected.", oid, txn, old_serial, writer)
    if len(result) != 1 or result[0]["count"] != 1:
        if update:
            # raise tid conflict error
            raise TIDConflictError(
                "Mismatch of tid of object being updated. This is likely "
                "caused by a cache invalidation race condition and should "
                "be an edge case. This should resolve on request retry.",
                oid, txn, old_serial, writer)
        else:
            log.error(
                "Incorrect response count from database update. "
                "This should not happen. tid: {}".format(txn._tid))
    await txn._cache.store_object(obj, pickled)
async def tpc_vote(self):
    """Verify that a data manager can commit the transaction."""
    ok = await self._strategy.tpc_vote()
    if ok is False:
        raise ConflictError(self)
async def store(self, oid, old_serial, writer, obj, txn):
    assert oid is not None
    pickled = writer.serialize()  # This calls __getstate__ of obj
    if len(pickled) >= self._large_record_size:
        log.warning(f"Large object {obj.__class__}: {len(pickled)}")
    json_dict = await writer.get_json()
    json = ujson.dumps(json_dict)
    part = writer.part
    if part is None:
        part = 0
    update = False
    statement_sql = NAIVE_UPSERT
    if not obj.__new_marker__ and obj._p_serial is not None:
        # we should be confident this is an object update
        statement_sql = UPDATE
        update = True
    async with txn._lock:
        smt = await txn._db_conn.prepare(statement_sql)
        try:
            result = await smt.fetch(
                oid,               # The OID of the object
                txn._tid,          # Our TID
                len(pickled),      # Len of the object
                part,              # Partition indicator
                writer.resource,   # Is a resource ?
                writer.of,         # It belongs to a main
                old_serial,        # Old serial
                writer.parent_id,  # Parent OID
                writer.id,         # Traversal ID
                writer.type,       # Guillotina type
                json,              # JSON catalog
                pickled            # Pickle state
            )
        except asyncpg.exceptions.ForeignKeyViolationError:
            txn.deleted[obj._p_oid] = obj
            raise TIDConflictError(
                'Bad value inserting into database that could be caused '
                'by a bad cache value. This should resolve on request retry.',
                oid, txn, old_serial, writer)
        except asyncpg.exceptions._base.InterfaceError as ex:
            if 'another operation is in progress' in ex.args[0]:
                raise ConflictError(
                    'asyncpg error, another operation in progress.',
                    oid, txn, old_serial, writer)
            raise
        except asyncpg.exceptions.DeadlockDetectedError:
            raise ConflictError('Deadlock detected.', oid, txn, old_serial, writer)
    if len(result) != 1 or result[0]['count'] != 1:
        if update:
            # raise tid conflict error
            raise TIDConflictError(
                'Mismatch of tid of object being updated. This is likely '
                'caused by a cache invalidation race condition and should '
                'be an edge case. This should resolve on request retry.',
                oid, txn, old_serial, writer)
        else:
            log.error('Incorrect response count from database update. '
                      'This should not happen. tid: {}'.format(txn._tid))
    await txn._cache.store_object(obj, pickled)
async def restart_connection(self, timeout=0.1):
    log.error("Connection potentially lost to pg, restarting")
    await self._connection_manager.restart()
    self._connection_initialized_on = time.time()
    raise ConflictError("Restarting connection to postgresql")
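The common thread in all of these snippets is that ConflictError is never handled where it is raised: the storage layer throws it so the surrounding machinery can abort the transaction and replay the whole request, and the tests above assert exactly that, expecting HTTP 409 once retries are exhausted. Below is a minimal sketch of such a retry loop, assuming ConflictError is importable from guillotina.exceptions; the handle_request coroutine, max_retries bound, and backoff values are illustrative assumptions, not part of the snippets above.

import asyncio

from guillotina.exceptions import ConflictError


async def run_with_retries(handle_request, max_retries=3):
    # Replay the whole request whenever the storage layer signals a
    # conflict (serialization failure, TID mismatch, restarted
    # connection); each attempt re-runs the full transaction.
    # handle_request and max_retries are hypothetical names for
    # illustration only.
    for attempt in range(max_retries):
        try:
            return await handle_request()
        except ConflictError:
            if attempt == max_retries - 1:
                raise  # out of retries; surfaces to the client as a 409
            await asyncio.sleep(0.05 * (attempt + 1))  # brief backoff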