def _transform_snapshot( raw_snapshot: str, storage: SQLiteStorage, cache: BlockHashCache, ) -> str: """Upgrades a single snapshot by adding the blockhash to it and to any pending transactions""" snapshot = json.loads(raw_snapshot) block_number = int(snapshot['block_number']) snapshot['block_hash'] = cache.get(block_number) pending_transactions = snapshot['pending_transactions'] new_pending_transactions = [] for transaction_data in pending_transactions: if 'raiden.transfer.events.ContractSend' not in transaction_data['_type']: raise InvalidDBData( "Error during v18 -> v19 upgrade. Chain state's pending transactions " "should only contain ContractSend transactions", ) # For each pending transaction find the corresponding DB event record. event_record = storage.get_latest_event_by_data_field( filters=transaction_data, ) if not event_record.data: raise InvalidDBData( 'Error during v18 -> v19 upgrade. Could not find a database event ' 'table entry for a pending transaction.', ) event_record_data = json.loads(event_record.data) transaction_data['triggered_by_block_hash'] = event_record_data['triggered_by_block_hash'] new_pending_transactions.append(transaction_data) snapshot['pending_transactions'] = new_pending_transactions return json.dumps(snapshot)
def __init__(self, database_path: DatabasePath) -> None:
    """Open (or create) the database at ``database_path``.

    Registers the ULID adapter/converter, applies the connection pragmas,
    and runs the table-creation script. Raises InvalidDBData when the
    existing database file is found to be corrupt.
    """
    # Teach sqlite3 to store ULID values and to convert columns declared
    # as "ULID" back on read (converters require PARSE_DECLTYPES below).
    sqlite3.register_adapter(ULID, adapt_ulid_identifier)
    sqlite3.register_converter("ULID", convert_ulid_identifier)
    conn = sqlite3.connect(database_path, detect_types=sqlite3.PARSE_DECLTYPES)
    conn.text_factory = str
    conn.execute("PRAGMA foreign_keys=ON")

    # Skip the acquire/release cycle for the exclusive write lock.
    # References:
    # https://sqlite.org/atomiccommit.html#_exclusive_access_mode
    # https://sqlite.org/pragma.html#pragma_locking_mode
    conn.execute("PRAGMA locking_mode=EXCLUSIVE")

    # Keep the journal around and skip inode updates.
    # References:
    # https://sqlite.org/atomiccommit.html#_persistent_rollback_journals
    # https://sqlite.org/pragma.html#pragma_journal_mode
    try:
        conn.execute("PRAGMA journal_mode=PERSIST")
    except sqlite3.DatabaseError:
        # A corrupt DB file surfaces here; bail out rather than proceeding.
        raise InvalidDBData(
            f"Existing DB {database_path} was found to be corrupt at Raiden startup. "
            f"Manual user intervention required. Bailing.")

    with conn:
        conn.executescript(DB_SCRIPT_CREATE_TABLES)

    self.conn = conn
    # Tracks whether an explicit transaction is currently open on this
    # connection.
    self.in_transaction = False
    # Dict[Type[ID], ULIDMonotonicFactory[ID]] is not supported yet.
    # Reference: https://github.com/python/mypy/issues/4928
    self._ulid_factories: Dict = dict()
def __init__(self, database_path, serializer) -> None:
    """Open (or create) the database at ``database_path``.

    Creates any missing tables, then runs ``self._run_updates()``.
    ``serializer`` is kept for later (de)serialization of stored payloads.
    Raises InvalidDBData when the existing database file is corrupt.
    """
    conn = sqlite3.connect(database_path)
    conn.text_factory = str
    conn.execute('PRAGMA foreign_keys=ON')
    self.conn = conn
    with conn:
        try:
            conn.executescript(DB_SCRIPT_CREATE_TABLES)
        except sqlite3.DatabaseError:
            raise InvalidDBData(
                'Existing DB {} was found to be corrupt at Raiden startup. '
                'Manual user intervention required. Bailing ...'.format(
                    database_path),
            )

    self._run_updates()

    # When writing to a table where the primary key is the identifier and we want
    # to return said identifier we use cursor.lastrowid, which uses sqlite's last_insert_rowid
    # https://github.com/python/cpython/blob/2.7/Modules/_sqlite/cursor.c#L727-L732
    #
    # According to the documentation (http://www.sqlite.org/c3ref/last_insert_rowid.html)
    # if a different thread tries to use the same connection to write into the table
    # while we query the last_insert_rowid, the result is unpredictable. For that reason
    # we have this write lock here.
    #
    # TODO (If possible):
    # Improve on this and find a better way to protect against this potential race
    # condition.
    self.write_lock = threading.Lock()
    self.serializer = serializer
def get_latest_event_by_data_field(
        self,
        filters: Dict[str, Any],
) -> EventRecord:
    """ Return the latest event whose JSON ``data`` matches every
    field/value pair in ``filters``.

    Each filter entry is matched with ``json_extract(data, '$.<field>')``.
    Returns a zeroed EventRecord (``data=None``) when no row matches.
    Raises InvalidDBData when the table contents cannot be read.
    """
    cursor = self.conn.cursor()

    where_clauses = []
    args = []
    for field, value in filters.items():
        where_clauses.append('json_extract(data, ?)=?')
        args.append(f'$.{field}')
        args.append(value)

    # BUG FIX: the previous implicit string concatenation produced
    # "...=?ORDER BY..." with no whitespace separating the last WHERE
    # clause from ORDER BY. Build the statement with explicit spacing,
    # mirroring get_latest_state_change_by_data_field.
    where = ' AND '.join(where_clauses)
    sql = (
        f'SELECT identifier, source_statechange_id, data '
        f'FROM state_events '
        f'WHERE {where} '
        f'ORDER BY identifier DESC '
        f'LIMIT 1'
    )
    cursor.execute(sql, args)

    # Default result used when no row matches the filters.
    result = EventRecord(
        event_identifier=0,
        state_change_identifier=0,
        data=None,
    )
    try:
        row = cursor.fetchone()
        if row:
            result = EventRecord(
                event_identifier=row[0],
                state_change_identifier=row[1],
                data=row[2],
            )
    except AttributeError:
        raise InvalidDBData(
            'Your local database is corrupt. Bailing ...',
        )
    return result
def __init__(self, database_path) -> None:
    """Open (or create) the database at ``database_path``.

    Applies the connection pragmas, runs the table-creation script, and
    finishes with ``self.update_version()``. Raises InvalidDBData when the
    existing database file is found to be corrupt.
    """
    conn = sqlite3.connect(database_path, detect_types=sqlite3.PARSE_DECLTYPES)
    conn.text_factory = str
    conn.execute('PRAGMA foreign_keys=ON')

    # Skip the acquire/release cycle for the exclusive write lock.
    # References:
    # https://sqlite.org/atomiccommit.html#_exclusive_access_mode
    # https://sqlite.org/pragma.html#pragma_locking_mode
    conn.execute('PRAGMA locking_mode=EXCLUSIVE')

    # Keep the journal around and skip inode updates.
    # References:
    # https://sqlite.org/atomiccommit.html#_persistent_rollback_journals
    # https://sqlite.org/pragma.html#pragma_journal_mode
    try:
        conn.execute('PRAGMA journal_mode=PERSIST')
    except sqlite3.DatabaseError:
        # A corrupt DB file surfaces here; bail out rather than proceeding.
        raise InvalidDBData(
            f'Existing DB {database_path} was found to be corrupt at Raiden startup. '
            f'Manual user intervention required. Bailing.',
        )

    with conn:
        conn.executescript(DB_SCRIPT_CREATE_TABLES)

    # When writing to a table where the primary key is the identifier and we want
    # to return said identifier we use cursor.lastrowid, which uses sqlite's last_insert_rowid
    # https://github.com/python/cpython/blob/2.7/Modules/_sqlite/cursor.c#L727-L732
    #
    # According to the documentation (http://www.sqlite.org/c3ref/last_insert_rowid.html)
    # if a different thread tries to use the same connection to write into the table
    # while we query the last_insert_rowid, the result is unpredictable. For that reason
    # we have this write lock here.
    #
    # TODO (If possible):
    # Improve on this and find a better way to protect against this potential race
    # condition.
    self.conn = conn
    self.write_lock = threading.Lock()
    # Tracks whether an explicit transaction is currently open on this
    # connection.
    self.in_transaction = False
    self.update_version()
def get_latest_state_change_by_data_field(
        self,
        filters: Dict[str, str],
) -> StateChangeRecord:
    """ Return the newest state change whose JSON ``data`` matches every
    field/value pair in ``filters``.

    Each filter entry is matched via ``json_extract(data, '$.<field>')``.
    Returns a zeroed StateChangeRecord (``data=None``) when nothing
    matches; raises InvalidDBData if the table contents cannot be read.
    """
    cursor = self.conn.cursor()

    clauses = []
    params = []
    for field_name, expected in filters.items():
        clauses.append('json_extract(data, ?)=?')
        params.append(f'$.{field_name}')
        params.append(expected)

    cursor.execute(
        'SELECT identifier, data '
        'FROM state_changes '
        f"WHERE {' AND '.join(clauses)} "
        'ORDER BY identifier DESC LIMIT 1',
        params,
    )

    record = StateChangeRecord(state_change_identifier=0, data=None)
    try:
        row = cursor.fetchone()
        if row is not None:
            record = StateChangeRecord(
                state_change_identifier=row[0],
                data=row[1],
            )
    except AttributeError:
        raise InvalidDBData(
            'Your local database is corrupt. Bailing ...',
        )
    return record
def get_statechanges_by_identifier(self, from_identifier, to_identifier):
    """Return the deserialized state changes with identifiers in the range
    ``[from_identifier, to_identifier]``.

    ``from_identifier`` may be an int or ``'latest'`` (resolve to the
    highest stored identifier; ``to_identifier`` must then be None).
    ``to_identifier`` may be an int, ``'latest'``, or None — the latter
    two mean "everything from ``from_identifier`` onward".
    Raises ValueError on invalid arguments and InvalidDBData when the
    table contents cannot be read.
    """
    if not (from_identifier == 'latest' or isinstance(from_identifier, int)):
        raise ValueError("from_identifier must be an integer or 'latest'")

    # BUG FIX: None was previously rejected here, which made the
    # from_identifier == 'latest' path (whose assert requires
    # to_identifier is None) unreachable.
    if not (
            to_identifier is None or
            to_identifier == 'latest' or
            isinstance(to_identifier, int)
    ):
        raise ValueError("to_identifier must be an integer or 'latest'")

    cursor = self.conn.cursor()

    if from_identifier == 'latest':
        assert to_identifier is None

        cursor.execute(
            'SELECT identifier FROM state_changes ORDER BY identifier DESC LIMIT 1',
        )
        # BUG FIX: fetchone() returns a row tuple (or None for an empty
        # table), not the bare identifier.
        row = cursor.fetchone()
        from_identifier = row[0] if row is not None else 0

    if to_identifier == 'latest' or to_identifier is None:
        # Open-ended upper bound: everything from from_identifier onward.
        cursor.execute(
            'SELECT data FROM state_changes WHERE identifier >= ?',
            (from_identifier, ),
        )
    else:
        cursor.execute(
            'SELECT data FROM state_changes WHERE identifier '
            'BETWEEN ? AND ?', (from_identifier, to_identifier),
        )

    try:
        result = [
            self.serializer.deserialize(entry[0])
            for entry in cursor.fetchall()
        ]
    except AttributeError:
        raise InvalidDBData(
            'Your local database is corrupt. Bailing ...',
        )

    return result