Example #1
def test_upgrade_executes_necessary_migration_functions(tmp_path, monkeypatch):
    old_db_filename = tmp_path / Path("v18_log.db")

    with patch("raiden.storage.sqlite.RAIDEN_DB_VERSION",
               new=18), SQLiteStorage(old_db_filename) as storage:
        storage.update_version()

    db_path = tmp_path / Path("v20_log.db")

    upgrade_functions = []
    for i in range(16, 20):
        mock = Mock()
        mock.return_value = i + 1
        upgrade_functions.append(UpgradeRecord(from_version=i, function=mock))

    with monkeypatch.context() as m:
        m.setattr(raiden.utils.upgrades, "UPGRADES_LIST", upgrade_functions)
        m.setattr(raiden.utils.upgrades, "RAIDEN_DB_VERSION", 19)

        UpgradeManager(db_filename=db_path).run()

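    # The database starts at v18, so the v16 and v17 migrations are skipped
    # and each remaining migration runs exactly once.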
    assert upgrade_functions[0].function.call_count == 0
    assert upgrade_functions[1].function.call_count == 0
    assert upgrade_functions[2].function.call_count == 1
    assert upgrade_functions[3].function.call_count == 1
Example #2
def test_upgrade_v17_to_v18(tmp_path):
    old_db_filename = tmp_path / Path('v17_log.db')
    with patch('raiden.utils.upgrades.latest_db_file') as latest_db_file:
        latest_db_file.return_value = str(old_db_filename)
        storage = setup_storage(str(old_db_filename))
        with patch('raiden.storage.sqlite.RAIDEN_DB_VERSION', new=17):
            storage.update_version()
        storage.conn.close()

    web3, _ = create_fake_web3_for_block_hash(number_of_blocks=100)
    db_path = tmp_path / Path('v18_log.db')
    manager = UpgradeManager(db_filename=str(db_path), web3=web3)

    with patch(
            'raiden.utils.upgrades.UPGRADES_LIST',
            new=[UpgradeRecord(from_version=17, function=upgrade_v17_to_v18)],
    ):
        manager.run()

    storage = SQLiteStorage(str(db_path))
    _, snapshot = storage.get_latest_state_snapshot()

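    # The v17 -> v18 migration should leave the mediator task with a
    # populated waiting_transfer and a non-empty list of routes.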
    snapshot_data = json.loads(snapshot)
    tasks = snapshot_data['payment_mapping']['secrethashes_to_task']
    secrethash = list(tasks.keys())[0]
    mediator_task = tasks[secrethash]
    assert mediator_task['mediator_state']['waiting_transfer'] is not None
    assert mediator_task['mediator_state']['routes']
Example #3
def test_regression_delete_should_not_commit_the_upgrade_transaction(
        tmp_path, monkeypatch):
    FORMAT = os.path.join(tmp_path, "v{}_log.db")

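    # A migration stub whose only action is a DELETE; the regression under
    # test is that deleting rows must not commit the upgrade transaction.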
    def failure(storage, **kwargs):  # pylint: disable=unused-argument
        storage.delete_state_changes([1, 2])

    # Create the db to be upgraded
    with monkeypatch.context() as m:
        m.setattr("raiden.storage.sqlite.RAIDEN_DB_VERSION", 1)
        storage = SQLiteStorage(FORMAT.format(1))
        storage.update_version()
        del storage

    with pytest.raises(ValueError):
        # This should not fail with 'OperationalError'
        with monkeypatch.context() as m:
            m.setattr("raiden.storage.sqlite.RAIDEN_DB_VERSION", 2)
            upgrade_list = [UpgradeRecord(from_version=1, function=failure)]
            m.setattr("raiden.utils.upgrades.UPGRADES_LIST", upgrade_list)
            manager = UpgradeManager(FORMAT.format(2))
            manager.run()

    storage = SQLiteStorage(FORMAT.format(2))
    assert storage.get_version() == 1, "The upgrade must have failed"
Example #4
def test_upgrade_manager_restores_backup(tmp_path, monkeypatch):
    db_path = tmp_path / Path("v17_log.db")

    old_db_filename = tmp_path / Path("v16_log.db")

    storage = setup_storage(old_db_filename)

    with patch("raiden.storage.sqlite.RAIDEN_DB_VERSION", new=16):
        storage.update_version()
        storage.conn.close()

    upgrade_functions = [UpgradeRecord(from_version=16, function=Mock())]

    upgrade_functions[0].function.return_value = 17

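    # Only a v16 -> v17 migration is registered while the target version is
    # 19, so the upgrade cannot complete and the backup should be restored.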
    web3, _ = create_fake_web3_for_block_hash(number_of_blocks=1)
    with monkeypatch.context() as m:
        m.setattr(raiden.utils.upgrades, "UPGRADES_LIST", upgrade_functions)
        m.setattr(raiden.utils.upgrades, "RAIDEN_DB_VERSION", 19)
        UpgradeManager(db_filename=db_path, web3=web3).run()

    # Once restored, the state changes written by setup_storage above
    # should be present in the restored database
    storage = SQLiteStorage(str(db_path))
    state_change_record = storage.get_latest_state_change_by_data_field(
        {"_type": "raiden.transfer.state_change.ActionInitChain"}
    )
    assert state_change_record.data is not None
Example #5
def test_upgrade_v21_to_v22(tmp_path):
    address = to_canonical_address(
        "0x87A749D9b9c0c91AC009AeeBd74313D1a736A24C")
    with patch("raiden.utils.upgrades.latest_db_file") as latest_db_file:
        old_db_filename = tmp_path / Path(f"v{SOURCE_VERSION}_log.db")
        latest_db_file.return_value = str(old_db_filename)
        storage = setup_storage(str(old_db_filename))
        with patch("raiden.storage.sqlite.RAIDEN_DB_VERSION",
                   new=SOURCE_VERSION):
            storage.update_version()
        storage.conn.close()

        raiden_service_mock = MockRaidenService()
        raiden_service_mock.address = address

        db_path = tmp_path / Path(f"v{TARGET_VERSION}_log.db")
        manager = UpgradeManager(db_filename=str(db_path),
                                 raiden=raiden_service_mock)
        with patch(
                "raiden.utils.upgrades.UPGRADES_LIST",
                new=[
                    UpgradeRecord(from_version=SOURCE_VERSION,
                                  function=upgrade_v21_to_v22)
                ],
        ):
            manager.run()

        storage = SQLiteStorage(str(db_path))
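        # Every event, state change and snapshot in the upgraded database must
        # either carry a canonical identifier or have the legacy values removed.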
        for batch in storage.batch_query_event_records(batch_size=500):
            for event in batch:
                walk_dicts(
                    event,
                    constraint_has_canonical_identifier_or_values_removed)
        for batch in storage.batch_query_state_changes(batch_size=500):
            for state_change in batch:
                walk_dicts(
                    state_change,
                    constraint_has_canonical_identifier_or_values_removed)
        for snapshot in storage.get_snapshots():
            walk_dicts(snapshot,
                       constraint_has_canonical_identifier_or_values_removed)

        assert os.path.isfile(str(db_path))
        assert os.path.isfile(str(old_db_filename))
        os.unlink(str(db_path))
        os.unlink(str(old_db_filename))
        assert not os.path.exists(str(db_path))
        assert not os.path.exists(str(old_db_filename))
Example #6
def test_sequential_version_numbers(tmp_path, monkeypatch):
    """ Test that the version received by each migration
    function is sequentially incremented according to the
    version returned by the previous migration.
    Sequence of events:
    - The first migration runs and returns v16 as the
      version it upgraded the database to.
    - The next migration should receive the old_version
      as v16 returned previously.
    - the above goes on for subsequent migrations.
    """
    db_path = tmp_path / Path("v19_log.db")

    old_db_filename = tmp_path / Path("v16_log.db")

    upgrade_functions = []
    for i in range(16, 19):
        mock = Mock()
        mock.return_value = i + 1
        upgrade_functions.append(UpgradeRecord(from_version=i, function=mock))

    with patch("raiden.storage.sqlite.RAIDEN_DB_VERSION", new=16):
        storage = setup_storage(old_db_filename)
        storage.update_version()
        storage.conn.close()

    with monkeypatch.context() as m:

        def latest_db_file(paths):  # pylint: disable=unused-argument
            return old_db_filename

        m.setattr(raiden.utils.upgrades, "UPGRADES_LIST", upgrade_functions)
        m.setattr(raiden.utils.upgrades, "RAIDEN_DB_VERSION", 19)
        m.setattr(raiden.utils.upgrades, "latest_db_file", latest_db_file)

        UpgradeManager(db_filename=db_path).run()

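        # Each mock returned from_version + 1, so every migration should have
        # been invoked with the version produced by its predecessor.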
        upgrade_functions[0].function.assert_called_once_with(
            old_version=16, current_version=19, storage=ANY
        )
        upgrade_functions[1].function.assert_called_once_with(
            old_version=17, current_version=19, storage=ANY
        )
        upgrade_functions[2].function.assert_called_once_with(
            old_version=18, current_version=19, storage=ANY
        )

        assert get_db_version(db_path) == 19
Example #7
def test_upgrade_manager_restores_backup(tmp_path, monkeypatch):
    db_path = tmp_path / Path("v17_log.db")

    old_db_filename = tmp_path / Path("v16_log.db")

    with patch("raiden.storage.sqlite.RAIDEN_DB_VERSION",
               new=16), SQLiteStorage(str(old_db_filename)) as storage:
        state_change = ActionInitChain(
            chain_id=1,
            our_address=factories.make_address(),
            block_number=1,
            block_hash=factories.make_block_hash(),
            pseudo_random_generator=random.Random(),
        )
        action_init_chain_data = JSONSerializer.serialize(state_change)
        storage.write_state_changes(state_changes=[action_init_chain_data])
        storage.update_version()

    upgrade_functions = [UpgradeRecord(from_version=16, function=Mock())]

    upgrade_functions[0].function.return_value = 17

    web3, _ = create_fake_web3_for_block_hash(number_of_blocks=1)
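    # The target version is 19 but only a v16 -> v17 migration is available,
    # so the upgrade cannot finish and the manager should restore the backup.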
    with monkeypatch.context() as m:
        m.setattr(raiden.utils.upgrades, "UPGRADES_LIST", upgrade_functions)
        m.setattr(raiden.utils.upgrades, "RAIDEN_DB_VERSION", 19)
        UpgradeManager(db_filename=db_path, web3=web3).run()

    # Once restored, the state changes written above should be
    # in the restored database
    with SQLiteStorage(str(db_path)) as storage:
        state_change_record = storage.get_latest_state_change_by_data_field(
            FilteredDBQuery(
                filters=[
                    {"_type": "raiden.transfer.state_change.ActionInitChain"}
                ],
                main_operator=Operator.NONE,
                inner_operator=Operator.NONE,
            )
        )
        assert state_change_record.data is not None
Example #8
def test_upgrade_v20_to_v21(tmp_path):
    old_db_filename = tmp_path / Path("v20_log.db")
    with patch("raiden.utils.upgrades.latest_db_file") as latest_db_file:
        latest_db_file.return_value = str(old_db_filename)
        storage = setup_storage(str(old_db_filename))
        with patch("raiden.storage.sqlite.RAIDEN_DB_VERSION", new=20):
            storage.update_version()
        storage.conn.close()

    db_path = tmp_path / Path("v21_log.db")
    manager = UpgradeManager(db_filename=str(db_path))
    with patch(
            "raiden.utils.upgrades.UPGRADES_LIST",
            new=[UpgradeRecord(from_version=20, function=upgrade_v20_to_v21)],
    ):
        manager.run()

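    # Reopen the upgraded database and verify that both state changes and
    # snapshots were rewritten by the v20 -> v21 migration.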
    storage = SQLiteStorage(str(db_path))

    assert_state_changes_are_transformed(storage)
    assert_snapshots_are_transformed(storage)
Example #9
def test_upgrade_v16_to_v17(tmp_path):
    old_db_filename = tmp_path / Path('v16_log.db')
    with patch('raiden.utils.upgrades.latest_db_file') as latest_db_file:
        latest_db_file.return_value = str(old_db_filename)

        with patch('raiden.storage.sqlite.RAIDEN_DB_VERSION', new=16):
            storage = setup_storage(str(old_db_filename))
            storage.update_version()

        storage.conn.close()

    db_path = tmp_path / Path('v17_log.db')
    web3, _ = create_fake_web3_for_block_hash(number_of_blocks=100)
    manager = UpgradeManager(db_filename=str(db_path), web3=web3)
    with patch(
            'raiden.utils.upgrades.UPGRADES_LIST',
            new=[UpgradeRecord(from_version=16, function=upgrade_v16_to_v17)],
    ):
        manager.run()

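    # A state snapshot must still be retrievable after the v16 -> v17
    # migration has run.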
    storage = SQLiteStorage(str(db_path))
    snapshot = storage.get_latest_state_snapshot()
    assert snapshot is not None
Example #10
def test_upgrade_executes_necessary_migration_functions(tmp_path, monkeypatch):
    old_db_filename = tmp_path / Path('v18_log.db')

    storage = setup_storage(old_db_filename)
    with patch('raiden.storage.sqlite.RAIDEN_DB_VERSION', new=18):
        storage.update_version()
        storage.conn.close()

    db_path = tmp_path / Path('v20_log.db')

    upgrade_functions = []
    for i in range(16, 20):
        mock = Mock()
        mock.return_value = i + 1
        upgrade_functions.append(UpgradeRecord(
            from_version=i,
            function=mock,
        ))

    with monkeypatch.context() as m:
        m.setattr(
            raiden.utils.upgrades,
            'UPGRADES_LIST',
            upgrade_functions,
        )
        m.setattr(
            raiden.utils.upgrades,
            'RAIDEN_DB_VERSION',
            19,
        )

        UpgradeManager(db_filename=db_path).run()

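    # As in Example #1: with a v18 database the v16 and v17 migrations are
    # skipped while the later ones each run once.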
    assert upgrade_functions[0].function.call_count == 0
    assert upgrade_functions[1].function.call_count == 0
    assert upgrade_functions[2].function.call_count == 1
    assert upgrade_functions[3].function.call_count == 1
Example #11
def test_upgrade_manager_transaction_rollback(tmp_path, monkeypatch):
    FORMAT = os.path.join(tmp_path, "v{}_log.db")

    def failure(**kwargs):  # pylint: disable=unused-argument
        raise RuntimeError()

    # Create the db to be upgraded
    with monkeypatch.context() as m:
        m.setattr("raiden.storage.sqlite.RAIDEN_DB_VERSION", 1)
        storage = SQLiteStorage(Path(FORMAT.format(1)))
        storage.update_version()
        del storage

    # This should not fail with 'OperationalError'
    with pytest.raises(RuntimeError):
        with monkeypatch.context() as m:
            m.setattr("raiden.storage.sqlite.RAIDEN_DB_VERSION", 2)
            upgrade_list = [UpgradeRecord(from_version=1, function=failure)]
            m.setattr("raiden.utils.upgrades.UPGRADES_LIST", upgrade_list)
            manager = UpgradeManager(Path(FORMAT.format(2)))
            manager.run()

    storage = SQLiteStorage(Path(FORMAT.format(2)))
    assert storage.get_version() == 1, "The upgrade must have failed"
Example #12
def test_upgrade_v19_to_v20(tmp_path):
    old_db_filename = tmp_path / Path("v19_log.db")
    with patch("raiden.utils.upgrades.latest_db_file") as latest_db_file:
        latest_db_file.return_value = str(old_db_filename)
        storage = setup_storage(str(old_db_filename))
        with patch("raiden.storage.sqlite.RAIDEN_DB_VERSION", new=19):
            storage.update_version()
        storage.conn.close()

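    # upgrade_v19_to_v20 queries on-chain locksroots through the raiden
    # service, so stub out the payment channel participant details.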
    raiden_service_mock = MockRaidenService()

    our_onchain_locksroot = make_32bytes()
    partner_onchain_locksroot = make_32bytes()

    details = Mock()
    details.our_details.address = make_address()
    details.our_details.locksroot = our_onchain_locksroot
    details.partner_details.address = make_address()
    details.partner_details.locksroot = partner_onchain_locksroot

    payment_channel = Mock()
    payment_channel.token_network.detail_participants.return_value = details

    payment_channel_func = Mock()
    payment_channel_func.return_value = payment_channel

    raiden_service_mock.chain.payment_channel = payment_channel_func

    db_path = tmp_path / Path("v20_log.db")
    manager = UpgradeManager(db_filename=str(db_path), raiden=raiden_service_mock)
    with patch(
        "raiden.utils.upgrades.UPGRADES_LIST",
        new=[UpgradeRecord(from_version=19, function=upgrade_v19_to_v20)],
    ):
        manager.run()

    storage = SQLiteStorage(str(db_path))

    batch_query = storage.batch_query_state_changes(
        batch_size=500,
        filters=[("_type", "raiden.transfer.state_change.ContractReceiveChannelNew")],
    )
    for state_changes_batch in batch_query:
        for state_change_record in state_changes_batch:
            data = json.loads(state_change_record.data)
            assert "onchain_locksroot" in data["channel_state"]["our_state"]
            assert "onchain_locksroot" in data["channel_state"]["partner_state"]

    batch_query = storage.batch_query_state_changes(
        batch_size=500,
        filters=[("_type", "raiden.transfer.state_change.ContractReceiveChannelSettled")],
    )
    for state_changes_batch in batch_query:
        for state_change_record in state_changes_batch:
            data = json.loads(state_change_record.data)
            assert data["our_onchain_locksroot"] == serialize_bytes(our_onchain_locksroot)
            assert data["partner_onchain_locksroot"] == serialize_bytes(partner_onchain_locksroot)

    batch_query = storage.batch_query_event_records(
        batch_size=500, filters=[("_type", "events.ContractSendChannelBatchUnlock")]
    )
    for events_batch in batch_query:
        for event_record in events_batch:
            data = json.loads(event_record.data)
            assert "partner" in data

    _, snapshot = storage.get_latest_state_snapshot()
    assert snapshot is not None

    snapshot = json.loads(snapshot)

    for payment_network in snapshot["identifiers_to_paymentnetworks"].values():
        for token_network in payment_network["tokennetworks"]:
            for channel in token_network["channelidentifiers_to_channels"].values():
                channel_our_locksroot = channel["our_state"]["onchain_locksroot"]
                channel_partner_locksroot = channel["partner_state"]["onchain_locksroot"]
                assert channel_our_locksroot == serialize_bytes(our_onchain_locksroot)
                assert channel_partner_locksroot == serialize_bytes(partner_onchain_locksroot)
Example #13
def test_upgrade_v18_to_v19(tmp_path):
    old_db_filename = tmp_path / Path("v18_log.db")
    with patch("raiden.utils.upgrades.latest_db_file") as latest_db_file:
        latest_db_file.return_value = str(old_db_filename)
        storage = setup_storage(str(old_db_filename))
        with patch("raiden.storage.sqlite.RAIDEN_DB_VERSION", new=18):
            storage.update_version()
        storage.conn.close()

    web3, block_to_blockhash = create_fake_web3_for_block_hash(
        number_of_blocks=100)
    db_path = tmp_path / Path("v19_log.db")
    manager = UpgradeManager(db_filename=str(db_path), web3=web3)
    with patch(
            "raiden.utils.upgrades.UPGRADES_LIST",
            new=[UpgradeRecord(from_version=18, function=upgrade_v18_to_v19)],
    ):
        manager.run()

    storage = SQLiteStorage(str(db_path))
    # Check that all the relevant state changes now have the blockhash attribute
    batch_query = storage.batch_query_state_changes(
        batch_size=500,
        filters=[
            ("_type", "raiden.transfer.state_change.ContractReceive%"),
            ("_type", "raiden.transfer.state_change.ActionInitChain"),
        ],
    )
    for state_changes_batch in batch_query:
        for state_change_record in state_changes_batch:
            data = json.loads(state_change_record.data)
            affected_state_change = (
                "raiden.transfer.state_change.ContractReceive" in data["_type"]
                or "raiden.transfer.state_change.ActionInitChain" in data["_type"]
            )
            assert affected_state_change, "filtering did not work correctly"
            assert "block_hash" in data
            block_number = int(data["block_number"])
            assert block_to_blockhash[block_number].hex() == data["block_hash"]

    # Check that all the relevant events now have the triggered_by_blockhash attribute
    event_records = []
    batch_query = storage.batch_query_event_records(
        batch_size=500, filters=[("_type", "%events.ContractSend%")])

    for events_batch in batch_query:
        event_records.extend(events_batch)

    assert len(event_records)
    for event_record in event_records:
        data = json.loads(event_record.data)
        assert "events.ContractSend" in data["_type"]
        assert "triggered_by_block_hash" in data

    # Finally check that the snapshot is updated and that it contains a blockhash and that all
    # pending transactions in the list also contain one
    _, snapshot = storage.get_latest_state_snapshot()
    snapshot_data = json.loads(snapshot)
    assert "block_hash" in snapshot_data
    assert len(snapshot_data["pending_transactions"]) == 2
    for transaction_data in snapshot_data["pending_transactions"]:
        assert "triggered_by_block_hash" in transaction_data