def test_regression_delete_should_not_commit_the_upgrade_transaction(tmp_path, monkeypatch):
    FORMAT = os.path.join(tmp_path, "v{}_log.db")

    def failure(storage, **kwargs):  # pylint: disable=unused-argument
        storage.delete_state_changes([1, 2])

    # Create the db to be upgraded
    with monkeypatch.context() as m:
        m.setattr("raiden.storage.sqlite.RAIDEN_DB_VERSION", 1)
        storage = SQLiteStorage(FORMAT.format(1))
        storage.update_version()
        del storage

    with pytest.raises(ValueError):
        # This should not fail with 'OperationalError'
        with monkeypatch.context() as m:
            m.setattr("raiden.storage.sqlite.RAIDEN_DB_VERSION", 2)
            upgrade_list = [UpgradeRecord(from_version=1, function=failure)]
            m.setattr("raiden.utils.upgrades.UPGRADES_LIST", upgrade_list)

            manager = UpgradeManager(FORMAT.format(2))
            manager.run()

    storage = SQLiteStorage(FORMAT.format(2))
    assert storage.get_version() == 1, "The upgrade must have failed"

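# For reference: `UpgradeRecord` pairs a source database version with the
# migration callable that upgrades away from it. A minimal sketch, assuming
# the real definition in raiden.utils.upgrades is an equivalent named tuple;
# this is illustrative, not the actual implementation.
from typing import Callable, NamedTuple


class UpgradeRecord(NamedTuple):
    from_version: int
    function: Callable
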
def test_upgrade_v17_to_v18(tmp_path):
    old_db_filename = tmp_path / Path('v17_log.db')
    with patch('raiden.utils.upgrades.latest_db_file') as latest_db_file:
        latest_db_file.return_value = str(old_db_filename)
        storage = setup_storage(str(old_db_filename))
        with patch('raiden.storage.sqlite.RAIDEN_DB_VERSION', new=17):
            storage.update_version()
        storage.conn.close()

    web3, _ = create_fake_web3_for_block_hash(number_of_blocks=100)
    db_path = tmp_path / Path('v18_log.db')
    manager = UpgradeManager(db_filename=str(db_path), web3=web3)
    with patch(
        'raiden.utils.upgrades.UPGRADES_LIST',
        new=[UpgradeRecord(from_version=17, function=upgrade_v17_to_v18)],
    ):
        manager.run()

    storage = SQLiteStorage(str(db_path))
    _, snapshot = storage.get_latest_state_snapshot()
    snapshot_data = json.loads(snapshot)

    secrethash = list(snapshot_data['payment_mapping']['secrethashes_to_task'].keys())[0]
    mediator_task = snapshot_data['payment_mapping']['secrethashes_to_task'][secrethash]
    assert mediator_task['mediator_state']['waiting_transfer'] is not None
    assert mediator_task['mediator_state']['routes']

def test_upgrade_manager_restores_backup(tmp_path):
    db_path = tmp_path / Path('v17_log.db')
    upgrade_manager = UpgradeManager(db_filename=db_path)

    old_db_filename = tmp_path / Path('v16_log.db')
    storage = setup_storage(old_db_filename)
    with patch('raiden.storage.sqlite.RAIDEN_DB_VERSION', new=16):
        storage.update_version()

    with patch('raiden.utils.upgrades.older_db_file') as older_db_file:
        older_db_file.return_value = str(old_db_filename)
        upgrade_manager.run()

    # Once restored, the state changes written above should be
    # in the restored database
    storage = SerializedSQLiteStorage(str(db_path), JSONSerializer())
    state_change_record = storage.get_latest_state_change_by_data_field(
        {'_type': 'raiden.transfer.state_change.ActionInitChain'},
    )
    assert state_change_record.data is not None
    assert not old_db_filename.exists()
    assert Path(str(old_db_filename).replace('_log.db', '_log.backup')).exists()

def maybe_upgrade_db(self) -> None:
    manager = UpgradeManager(
        db_filename=self.database_path,
        raiden=self,
        web3=self.chain.client.web3,
    )
    manager.run()

def upgrade_db(current_version: int, new_version: int):
    log.debug(f'Upgrading database from v{current_version} to v{new_version}')

    # Prevent unique constraint error in DB when recording raiden "runs"
    gevent.sleep(1)
    manager = UpgradeManager(
        db_filename=database_path,
        current_version=current_version,
        new_version=new_version,
    )
    try:
        manager.run()
    except (RaidenDBUpgradeError, InvalidDBData) as e:
        manager.restore_backup()
        log.error(f'Failed to upgrade database: {str(e)}')

def test_sequential_version_numbers(tmp_path):
    """ Test that the version received by each migration function is
    sequentially incremented according to the version returned by the
    previous migration.

    Sequence of events:
    - The first migration runs and returns v16 as the version it upgraded
      the database to.
    - The next migration should receive v16 as its old_version, as returned
      by the previous migration.
    - The above goes on for subsequent migrations.
    """
    db_path = tmp_path / Path('v19_log.db')
    upgrade_manager = UpgradeManager(db_filename=db_path)

    old_db_filename = tmp_path / Path('v16_log.db')

    upgrade_functions = [Mock(), Mock(), Mock()]
    upgrade_functions[0].return_value = 17
    upgrade_functions[1].return_value = 18
    upgrade_functions[2].return_value = 19

    with patch('raiden.storage.sqlite.RAIDEN_DB_VERSION', new=16):
        storage = setup_storage(old_db_filename)
        storage.update_version()

    with ExitStack() as stack:
        stack.enter_context(patch(
            'raiden.utils.upgrades.UPGRADES_LIST',
            new=upgrade_functions,
        ))
        stack.enter_context(patch(
            'raiden.utils.upgrades.RAIDEN_DB_VERSION',
            new=19,
        ))
        older_db_file = stack.enter_context(
            patch('raiden.utils.upgrades.older_db_file'),
        )
        older_db_file.return_value = str(old_db_filename)

        upgrade_manager.run()

    upgrade_functions[0].assert_called_once_with(ANY, 16, 19)
    upgrade_functions[1].assert_called_once_with(ANY, 17, 19)
    upgrade_functions[2].assert_called_once_with(ANY, 18, 19)

    assert get_db_version(str(db_path)) == 19

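# A minimal sketch of the `get_db_version` helper asserted on above, assuming
# the version is kept in the `settings` table that SQLiteStorage maintains;
# the real helper lives in raiden.utils.upgrades and may differ.
import sqlite3


def get_db_version(db_filename: str) -> int:
    conn = sqlite3.connect(db_filename)
    try:
        cursor = conn.execute("SELECT value FROM settings WHERE name=?", ("version",))
        row = cursor.fetchone()
        return int(row[0]) if row else 0
    finally:
        conn.close()
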
def test_upgrade_v21_to_v22(tmp_path):
    address = to_canonical_address("0x87A749D9b9c0c91AC009AeeBd74313D1a736A24C")

    with patch("raiden.utils.upgrades.latest_db_file") as latest_db_file:
        old_db_filename = tmp_path / Path(f"v{SOURCE_VERSION}_log.db")
        latest_db_file.return_value = str(old_db_filename)
        storage = setup_storage(str(old_db_filename))
        with patch("raiden.storage.sqlite.RAIDEN_DB_VERSION", new=SOURCE_VERSION):
            storage.update_version()
        storage.conn.close()

    raiden_service_mock = MockRaidenService()
    raiden_service_mock.address = address

    db_path = tmp_path / Path(f"v{TARGET_VERSION}_log.db")
    manager = UpgradeManager(db_filename=str(db_path), raiden=raiden_service_mock)
    with patch(
        "raiden.utils.upgrades.UPGRADES_LIST",
        new=[UpgradeRecord(from_version=SOURCE_VERSION, function=upgrade_v21_to_v22)],
    ):
        manager.run()

    storage = SQLiteStorage(str(db_path))

    for batch in storage.batch_query_event_records(batch_size=500):
        for event in batch:
            walk_dicts(event, constraint_has_canonical_identifier_or_values_removed)
    for batch in storage.batch_query_state_changes(batch_size=500):
        for state_change in batch:
            walk_dicts(state_change, constraint_has_canonical_identifier_or_values_removed)
    for snapshot in storage.get_snapshots():
        walk_dicts(snapshot, constraint_has_canonical_identifier_or_values_removed)

    assert os.path.isfile(str(db_path))
    assert os.path.isfile(str(old_db_filename))
    os.unlink(str(db_path))
    os.unlink(str(old_db_filename))
    assert not os.path.exists(str(db_path))
    assert not os.path.exists(str(old_db_filename))

def test_upgrade_v16_to_v17(tmp_path):
    db_path = tmp_path / Path('test.db')

    old_db_filename = tmp_path / Path('v16_log.db')
    with patch('raiden.utils.upgrades.older_db_file') as older_db_file:
        older_db_file.return_value = str(old_db_filename)
        storage = setup_storage(str(old_db_filename))
        with patch('raiden.storage.sqlite.RAIDEN_DB_VERSION', new=16):
            storage.update_version()

        manager = UpgradeManager(db_filename=str(db_path))
        manager.run()

    storage = SerializedSQLiteStorage(str(db_path), JSONSerializer())
    snapshot = storage.get_latest_state_snapshot()
    assert snapshot is not None

def test_upgrade_v17_to_v18(tmp_path):
    db_path = tmp_path / Path('test.db')

    old_db_filename = tmp_path / Path('v17_log.db')
    with patch('raiden.utils.upgrades.older_db_file') as older_db_file:
        older_db_file.return_value = str(old_db_filename)
        storage = setup_storage(str(old_db_filename))
        with patch('raiden.storage.sqlite.RAIDEN_DB_VERSION', new=17):
            storage.update_version()

        manager = UpgradeManager(db_filename=str(db_path))
        manager.run()

    storage = SerializedSQLiteStorage(str(db_path), JSONSerializer())
    _, snapshot = storage.get_latest_state_snapshot()

    secrethash = list(snapshot.payment_mapping.secrethashes_to_task.keys())[0]
    mediator_task = snapshot.payment_mapping.secrethashes_to_task[secrethash]
    assert mediator_task.mediator_state.waiting_transfer is not None
    assert mediator_task.mediator_state.routes

def test_upgrade_v20_to_v21(tmp_path):
    old_db_filename = tmp_path / Path("v20_log.db")
    with patch("raiden.utils.upgrades.latest_db_file") as latest_db_file:
        latest_db_file.return_value = str(old_db_filename)
        storage = setup_storage(str(old_db_filename))
        with patch("raiden.storage.sqlite.RAIDEN_DB_VERSION", new=20):
            storage.update_version()
        storage.conn.close()

    db_path = tmp_path / Path("v21_log.db")
    manager = UpgradeManager(db_filename=str(db_path))
    with patch(
        "raiden.utils.upgrades.UPGRADES_LIST",
        new=[UpgradeRecord(from_version=20, function=upgrade_v20_to_v21)],
    ):
        manager.run()

    storage = SQLiteStorage(str(db_path))
    assert_state_changes_are_transformed(storage)
    assert_snapshots_are_transformed(storage)

def test_upgrade_v16_to_v17(tmp_path):
    old_db_filename = tmp_path / Path('v16_log.db')
    with patch('raiden.utils.upgrades.latest_db_file') as latest_db_file:
        latest_db_file.return_value = str(old_db_filename)
        with patch('raiden.storage.sqlite.RAIDEN_DB_VERSION', new=16):
            storage = setup_storage(str(old_db_filename))
            storage.update_version()
        storage.conn.close()

    db_path = tmp_path / Path('v17_log.db')
    web3, _ = create_fake_web3_for_block_hash(number_of_blocks=100)
    manager = UpgradeManager(db_filename=str(db_path), web3=web3)
    with patch(
        'raiden.utils.upgrades.UPGRADES_LIST',
        new=[UpgradeRecord(from_version=16, function=upgrade_v16_to_v17)],
    ):
        manager.run()

    storage = SQLiteStorage(str(db_path))
    snapshot = storage.get_latest_state_snapshot()
    assert snapshot is not None

def test_upgrade_manager_transaction_rollback(tmp_path, monkeypatch):
    FORMAT = os.path.join(tmp_path, "v{}_log.db")

    def failure(**kwargs):  # pylint: disable=unused-argument
        raise RuntimeError()

    # Create the db to be upgraded
    with monkeypatch.context() as m:
        m.setattr("raiden.storage.sqlite.RAIDEN_DB_VERSION", 1)
        storage = SQLiteStorage(Path(FORMAT.format(1)))
        storage.update_version()
        del storage

    # This should not fail with 'OperationalError'
    with pytest.raises(RuntimeError):
        with monkeypatch.context() as m:
            m.setattr("raiden.storage.sqlite.RAIDEN_DB_VERSION", 2)
            upgrade_list = [UpgradeRecord(from_version=1, function=failure)]
            m.setattr("raiden.utils.upgrades.UPGRADES_LIST", upgrade_list)

            manager = UpgradeManager(Path(FORMAT.format(2)))
            manager.run()

    storage = SQLiteStorage(Path(FORMAT.format(2)))
    assert storage.get_version() == 1, "The upgrade must have failed"

def test_upgrade_v17_to_v18(tmp_path):
    db_path = tmp_path / Path('test.db')

    old_db_filename = tmp_path / Path('v17_log.db')
    with patch('raiden.utils.upgrades.older_db_file') as older_db_file:
        older_db_file.return_value = str(old_db_filename)
        storage = setup_storage(str(old_db_filename))
        with patch('raiden.storage.sqlite.RAIDEN_DB_VERSION', new=17):
            storage.update_version()
        storage.conn.close()

        manager = UpgradeManager(db_filename=str(db_path))
        manager.run()

    storage = SQLiteStorage(str(db_path))
    _, snapshot = storage.get_latest_state_snapshot()
    snapshot_data = json.loads(snapshot)

    secrethash = list(snapshot_data['payment_mapping']['secrethashes_to_task'].keys())[0]
    mediator_task = snapshot_data['payment_mapping']['secrethashes_to_task'][secrethash]
    assert mediator_task['mediator_state']['waiting_transfer'] is not None
    assert mediator_task['mediator_state']['routes']

def maybe_upgrade_db(self):
    manager = UpgradeManager(db_filename=self.database_path)
    manager.run()

def test_upgrade_v19_to_v20(tmp_path):
    old_db_filename = tmp_path / Path("v19_log.db")
    with patch("raiden.utils.upgrades.latest_db_file") as latest_db_file:
        latest_db_file.return_value = str(old_db_filename)
        storage = setup_storage(str(old_db_filename))
        with patch("raiden.storage.sqlite.RAIDEN_DB_VERSION", new=19):
            storage.update_version()
        storage.conn.close()

    raiden_service_mock = MockRaidenService()
    our_onchain_locksroot = make_32bytes()
    partner_onchain_locksroot = make_32bytes()

    details = Mock()
    details.our_details.address = make_address()
    details.our_details.locksroot = our_onchain_locksroot
    details.partner_details.address = make_address()
    details.partner_details.locksroot = partner_onchain_locksroot

    payment_channel = Mock()
    payment_channel.token_network.detail_participants.return_value = details

    payment_channel_func = Mock()
    payment_channel_func.return_value = payment_channel
    raiden_service_mock.chain.payment_channel = payment_channel_func

    db_path = tmp_path / Path("v20_log.db")
    manager = UpgradeManager(db_filename=str(db_path), raiden=raiden_service_mock)
    with patch(
        "raiden.utils.upgrades.UPGRADES_LIST",
        new=[UpgradeRecord(from_version=19, function=upgrade_v19_to_v20)],
    ):
        manager.run()

    storage = SQLiteStorage(str(db_path))

    batch_query = storage.batch_query_state_changes(
        batch_size=500,
        filters=[("_type", "raiden.transfer.state_change.ContractReceiveChannelNew")],
    )
    for state_changes_batch in batch_query:
        for state_change_record in state_changes_batch:
            data = json.loads(state_change_record.data)
            assert "onchain_locksroot" in data["channel_state"]["our_state"]
            assert "onchain_locksroot" in data["channel_state"]["partner_state"]

    batch_query = storage.batch_query_state_changes(
        batch_size=500,
        filters=[("_type", "raiden.transfer.state_change.ContractReceiveChannelSettled")],
    )
    for state_changes_batch in batch_query:
        for state_change_record in state_changes_batch:
            data = json.loads(state_change_record.data)
            assert data["our_onchain_locksroot"] == serialize_bytes(our_onchain_locksroot)
            assert data["partner_onchain_locksroot"] == serialize_bytes(partner_onchain_locksroot)

    batch_query = storage.batch_query_event_records(
        batch_size=500,
        filters=[("_type", "events.ContractSendChannelBatchUnlock")],
    )
    for events_batch in batch_query:
        for event_record in events_batch:
            data = json.loads(event_record.data)
            assert "partner" in data

    _, snapshot = storage.get_latest_state_snapshot()
    assert snapshot is not None

    snapshot = json.loads(snapshot)
    for payment_network in snapshot["identifiers_to_paymentnetworks"].values():
        for token_network in payment_network["tokennetworks"]:
            for channel in token_network["channelidentifiers_to_channels"].values():
                channel_our_locksroot = channel["our_state"]["onchain_locksroot"]
                channel_partner_locksroot = channel["partner_state"]["onchain_locksroot"]
                assert channel_our_locksroot == serialize_bytes(our_onchain_locksroot)
                assert channel_partner_locksroot == serialize_bytes(partner_onchain_locksroot)

def test_upgrade_v18_to_v19(tmp_path):
    old_db_filename = tmp_path / Path("v18_log.db")
    with patch("raiden.utils.upgrades.latest_db_file") as latest_db_file:
        latest_db_file.return_value = str(old_db_filename)
        storage = setup_storage(str(old_db_filename))
        with patch("raiden.storage.sqlite.RAIDEN_DB_VERSION", new=18):
            storage.update_version()
        storage.conn.close()

    web3, block_to_blockhash = create_fake_web3_for_block_hash(number_of_blocks=100)
    db_path = tmp_path / Path("v19_log.db")
    manager = UpgradeManager(db_filename=str(db_path), web3=web3)
    with patch(
        "raiden.utils.upgrades.UPGRADES_LIST",
        new=[UpgradeRecord(from_version=18, function=upgrade_v18_to_v19)],
    ):
        manager.run()

    storage = SQLiteStorage(str(db_path))

    # Check that all the relevant state changes now have the blockhash attribute
    batch_query = storage.batch_query_state_changes(
        batch_size=500,
        filters=[
            ("_type", "raiden.transfer.state_change.ContractReceive%"),
            ("_type", "raiden.transfer.state_change.ActionInitChain"),
        ],
    )
    for state_changes_batch in batch_query:
        for state_change_record in state_changes_batch:
            data = json.loads(state_change_record.data)
            affected_state_change = (
                "raiden.transfer.state_change.ContractReceive" in data["_type"] or
                "raiden.transfer.state_change.ActionInitChain" in data["_type"]
            )
            assert affected_state_change, "filtering did not work correctly"
            assert "block_hash" in data
            block_number = int(data["block_number"])
            assert block_to_blockhash[block_number].hex() == data["block_hash"]

    # Check that all the relevant events now have the triggered_by_blockhash attribute
    event_records = []
    batch_query = storage.batch_query_event_records(
        batch_size=500,
        filters=[("_type", "%events.ContractSend%")],
    )
    for events_batch in batch_query:
        event_records.extend(events_batch)

    assert len(event_records)
    for event_record in event_records:
        data = json.loads(event_record.data)
        assert "events.ContractSend" in data["_type"]
        assert "triggered_by_block_hash" in data

    # Finally check that the snapshot is updated, that it contains a blockhash,
    # and that all pending transactions in the list also contain one
    _, snapshot = storage.get_latest_state_snapshot()
    snapshot_data = json.loads(snapshot)
    assert "block_hash" in snapshot_data
    assert len(snapshot_data["pending_transactions"]) == 2
    for transaction_data in snapshot_data["pending_transactions"]:
        assert "triggered_by_block_hash" in transaction_data

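# The upgrade tests above rely on a fake web3 fixture that is not part of this
# excerpt. A minimal sketch, assuming the migrations only ever call
# `web3.eth.getBlock(block_number).hash`; the actual helper in the test suite
# may stub more of the web3 API.
from unittest.mock import Mock


def create_fake_web3_for_block_hash(number_of_blocks: int):
    # Deterministic fake 32-byte block hashes, keyed by block number, so that
    # tests can compare `block_to_blockhash[number].hex()` against the
    # migrated records.
    block_to_blockhash = {
        number: number.to_bytes(32, "big") for number in range(number_of_blocks)
    }

    def get_block(block_identifier):
        block = Mock()
        block.hash = block_to_blockhash[block_identifier]
        return block

    web3 = Mock()
    web3.eth.getBlock.side_effect = get_block
    return web3, block_to_blockhash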