def test_waiting_transactions(ms_database: Database):
    """Adding and removing waiting transactions is reflected by the getter."""
    waiting = ms_database.get_waiting_transactions
    assert waiting() == []

    ms_database.add_waiting_transaction(TransactionHash(b"A"))
    assert waiting() == [b"A"]

    ms_database.add_waiting_transaction(TransactionHash(b"B"))
    assert waiting() == [b"A", b"B"]

    ms_database.remove_waiting_transaction(TransactionHash(b"A"))
    assert waiting() == [b"B"]
def test_save_and_load_monitor_request(ms_database: Database):
    """A stored monitor request is returned unchanged by the getter."""
    original = create_signed_monitor_request()
    ms_database.upsert_monitor_request(original)

    loaded = ms_database.get_monitor_request(
        token_network_address=original.token_network_address,
        channel_id=original.channel_identifier,
        non_closing_signer=original.non_closing_signer,
    )
    assert loaded == original
def test_purge_old_monitor_requests(
    ms_database: Database,
    build_request_monitoring,
    request_collector,
    monitoring_service: MonitoringService,
):
    """Only MRs that are both old and still waiting for a channel get purged."""
    # Monitor requests for three different channels
    monitor_requests = [build_request_monitoring(channel_id=i) for i in (1, 2, 3)]
    for monitor_request in monitor_requests:
        request_collector.on_monitor_request(monitor_request)

    # Make channel 1 known in the db, so its MR no longer waits for a channel
    token_network_address = monitor_requests[0].balance_proof.token_network_address
    ms_database.conn.execute(
        "INSERT INTO token_network VALUES (?, ?)",
        [
            to_checksum_address(token_network_address),
            DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
        ],
    )
    ms_database.upsert_channel(
        Channel(
            identifier=ChannelID(1),
            token_network_address=token_network_address,
            participant1=Address(b"1" * 20),
            participant2=Address(b"2" * 20),
        )
    )

    # Backdate the MR for channel 3 by 16 minutes; channel 2's MR stays recent
    backdated = (datetime.utcnow() - timedelta(minutes=16)).timestamp()
    ms_database.conn.execute(
        "UPDATE monitor_request SET saved_at = ? WHERE channel_identifier = ?",
        [backdated, hex256(3)],
    )

    monitoring_service._purge_old_monitor_requests()  # pylint: disable=protected-access

    remaining = ms_database.conn.execute(
        """
        SELECT channel_identifier, waiting_for_channel
        FROM monitor_request
        ORDER BY channel_identifier
        """
    ).fetchall()
    # MR 3 purged; MR 1 has its channel; MR 2 is recent and keeps waiting
    assert [tuple(row) for row in remaining] == [(1, False), (2, True)]
def test_saveing_multiple_channel(ms_database: Database):
    # NOTE(review): "saveing" typo kept — renaming would change the test id.
    """Channels in different token networks are stored and counted separately."""
    tn_address2 = make_token_network_address()
    for network_address in (DEFAULT_TOKEN_NETWORK_ADDRESS, tn_address2):
        ms_database.conn.execute(
            "INSERT INTO token_network (address, settle_timeout) VALUES (?, ?)",
            [
                to_checksum_address(network_address),
                DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
            ],
        )

    channel1 = create_channel()
    channel2 = create_channel()
    channel2.token_network_address = tn_address2

    # Upsert one channel at a time and verify round-trip plus running count
    for expected_count, channel in enumerate([channel1, channel2], start=1):
        ms_database.upsert_channel(channel)
        loaded = ms_database.get_channel(
            token_network_address=channel.token_network_address,
            channel_id=channel.identifier,
        )
        assert loaded == channel
        assert ms_database.channel_count() == expected_count
def __init__(  # pylint: disable=too-many-arguments
    self,
    web3: Web3,
    private_key: PrivateKey,
    db_filename: str,
    contracts: Dict[str, Contract],
    sync_start_block: BlockNumber,
    required_confirmations: BlockTimeout,
    poll_interval: float,
    min_reward: int = 0,
    get_timestamp_now: Callable = get_posix_utc_time_now,
):
    """Wire up web3, contracts, the database and the shared Context.

    Args:
        web3: connection to an Ethereum node
        private_key: account key used to sign and send transactions
        db_filename: database file path (":memory:" in tests)
        contracts: deployed contract instances, keyed by contract name
        sync_start_block: first block to sync events from
        required_confirmations: confirmation blocks before events are trusted
        poll_interval: seconds between main-loop iterations
        min_reward: minimum reward for which monitoring is performed
        get_timestamp_now: clock function, injectable for tests
    """
    self.web3 = web3
    self.chain_id = ChainID(web3.eth.chain_id)
    self.private_key = private_key
    self.address = private_key_to_address(private_key)
    self.poll_interval = poll_interval
    self.service_registry = contracts[CONTRACT_SERVICE_REGISTRY]
    self.token_network_registry = contracts[CONTRACT_TOKEN_NETWORK_REGISTRY]
    self.get_timestamp_now = get_timestamp_now
    self.try_scheduled_events_after = get_timestamp_now()

    # Sign transactions locally before they are sent to the node
    web3.middleware_onion.add(construct_sign_and_send_raw_middleware(private_key))

    monitoring_contract = contracts[CONTRACT_MONITORING_SERVICE]
    user_deposit_contract = contracts[CONTRACT_USER_DEPOSIT]

    self.database = Database(
        filename=db_filename,
        chain_id=self.chain_id,
        registry_address=to_canonical_address(self.token_network_registry.address),
        receiver=self.address,
        msc_address=MonitoringServiceAddress(
            to_canonical_address(monitoring_contract.address)
        ),
        sync_start_block=sync_start_block,
    )
    # Load persisted state before building the Context that consumes it
    ms_state = self.database.load_state()

    self.context = Context(
        ms_state=ms_state,
        database=self.database,
        web3=self.web3,
        monitoring_service_contract=monitoring_contract,
        user_deposit_contract=user_deposit_contract,
        min_reward=min_reward,
        required_confirmations=required_confirmations,
    )
def test_save_and_load_channel(ms_database: Database):
    """Channels round-trip through the database, with or without update status."""
    ms_database.conn.execute(
        "INSERT INTO token_network (address) VALUES (?)",
        [to_checksum_address(DEFAULT_TOKEN_NETWORK_ADDRESS)],
    )

    status_variants = [
        None,
        OnChainUpdateStatus(
            update_sender_address=Address(bytes([1] * 20)),
            nonce=random.randint(0, UINT256_MAX),
        ),
    ]
    for status in status_variants:
        saved = create_channel(status)
        ms_database.upsert_channel(saved)
        loaded = ms_database.get_channel(
            token_network_address=saved.token_network_address,
            channel_id=saved.identifier,
        )
        assert loaded == saved
def ms_database():
    """Fresh in-memory Database with fixed hex-string dummy addresses."""
    return Database(
        receiver=Address("0x" + "4" * 40),
        registry_address=Address("0x" + "3" * 40),
        msc_address=Address("0x" + "2" * 40),
        chain_id=1,
        filename=":memory:",
    )
def ms_database():
    """Fresh in-memory Database with constant dummy byte addresses."""

    def addr(fill: int) -> Address:
        # 20-byte address consisting of a single repeated byte value
        return Address(bytes([fill] * 20))

    return Database(
        filename=":memory:",
        chain_id=ChainID(1),
        msc_address=addr(2),
        registry_address=addr(3),
        receiver=addr(4),
    )
def ms_database():
    """Fresh in-memory Database using hex-string dummy addresses."""
    return Database(
        filename=':memory:',
        chain_id=1,
        receiver='0x' + '4' * 40,
        registry_address='0x' + '3' * 40,
        msc_address='0x' + '2' * 40,
    )
def ms_database() -> Database:
    """In-memory Database on chain 61 using the shared test MSC address."""
    registry = Address(bytes([3] * 20))
    receiver = Address(bytes([4] * 20))
    return Database(
        filename=":memory:",
        chain_id=ChainID(61),
        msc_address=TEST_MSC_ADDRESS,
        registry_address=registry,
        receiver=receiver,
    )
def __init__(  # pylint: disable=too-many-arguments
    self,
    web3: Web3,
    private_key: str,
    db_filename: str,
    contracts: Dict[str, Contract],
    sync_start_block: BlockNumber = BlockNumber(0),
    required_confirmations: int = DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS,
    poll_interval: float = 1,
    min_reward: int = 0,
):
    """Wire up web3, contracts, the database and the shared Context.

    Args:
        web3: connection to an Ethereum node
        private_key: account key used to sign and send transactions
        db_filename: database file path (":memory:" in tests)
        contracts: deployed contract instances, keyed by contract name
        sync_start_block: first block to sync events from
        required_confirmations: confirmation blocks before events are trusted
        poll_interval: seconds between main-loop iterations
        min_reward: minimum reward for which monitoring is performed
    """
    self.web3 = web3
    self.private_key = private_key
    self.address = private_key_to_address(private_key)
    self.required_confirmations = required_confirmations
    self.poll_interval = poll_interval
    self.service_registry = contracts[CONTRACT_SERVICE_REGISTRY]

    # Sign transactions locally before they are sent to the node
    web3.middleware_stack.add(construct_sign_and_send_raw_middleware(private_key))

    monitoring_contract = contracts[CONTRACT_MONITORING_SERVICE]
    user_deposit_contract = contracts[CONTRACT_USER_DEPOSIT]

    chain_id = ChainID(int(web3.net.version))
    self.database = Database(
        filename=db_filename,
        chain_id=chain_id,
        registry_address=contracts[CONTRACT_TOKEN_NETWORK_REGISTRY].address,
        receiver=self.address,
        msc_address=monitoring_contract.address,
        sync_start_block=sync_start_block,
    )
    # Load persisted state before building the Context that consumes it
    ms_state = self.database.load_state()

    self.context = Context(
        ms_state=ms_state,
        db=self.database,
        web3=self.web3,
        monitoring_service_contract=monitoring_contract,
        user_deposit_contract=user_deposit_contract,
        min_reward=min_reward,
        required_confirmations=required_confirmations,
    )
def __init__(
    self,
    web3: Web3,
    private_key: str,
    db_filename: str,
    contracts: Dict[str, Contract],
    sync_start_block: BlockNumber = BlockNumber(0),
    required_confirmations: int = DEFAULT_REQUIRED_CONFIRMATIONS,
    poll_interval: float = 1,
    min_reward: int = 0,
):
    """Wire up web3, contracts, the database and the shared Context.

    Args:
        web3: connection to an Ethereum node
        private_key: account key used to sign and send transactions
        db_filename: database file path (":memory:" in tests)
        contracts: deployed contract instances, keyed by contract name
        sync_start_block: first block to sync events from
        required_confirmations: confirmation blocks before events are trusted
        poll_interval: seconds between main-loop iterations
        min_reward: minimum reward for which monitoring is performed
    """
    self.web3 = web3
    self.private_key = private_key
    self.address = private_key_to_address(private_key)
    self.required_confirmations = required_confirmations
    self.poll_interval = poll_interval
    # Block at which the gas reserve was last checked (see the main loop)
    self.last_gas_check_block = 0

    # Sign transactions locally before they are sent to the node
    web3.middleware_stack.add(construct_sign_and_send_raw_middleware(private_key))

    monitoring_contract = contracts[CONTRACT_MONITORING_SERVICE]
    user_deposit_contract = contracts[CONTRACT_USER_DEPOSIT]

    chain_id = int(web3.net.version)
    self.database = Database(
        filename=db_filename,
        chain_id=chain_id,
        registry_address=contracts[CONTRACT_TOKEN_NETWORK_REGISTRY].address,
        receiver=self.address,
        msc_address=monitoring_contract.address,
        sync_start_block=sync_start_block,
    )
    # Load persisted state before building the Context that consumes it
    ms_state = self.database.load_state()

    self.context = Context(
        ms_state=ms_state,
        db=self.database,
        w3=self.web3,
        contract_manager=CONTRACT_MANAGER,
        last_known_block=0,
        monitoring_service_contract=monitoring_contract,
        user_deposit_contract=user_deposit_contract,
        min_reward=min_reward,
    )
def context(ms_database: Database):
    """Context wired up with mocks and the in-memory database fixture."""
    state = ms_database.load_state()
    return Context(
        ms_state=state,
        database=ms_database,
        web3=Web3Mock(),
        monitoring_service_contract=Mock(),
        user_deposit_contract=Mock(),
        min_reward=1,
        required_confirmations=1,
    )
def context(ms_database: Database):
    """Context wired up with mocks and the in-memory database fixture."""
    state = ms_database.load_state()
    return Context(
        ms_state=state,
        db=ms_database,
        w3=Mock(),
        last_known_block=0,
        monitoring_service_contract=Mock(),
        user_deposit_contract=Mock(),
        min_reward=1,
    )
def get_scheduled_claim_event(database: Database) -> Optional[ScheduledEvent]:
    """Return the single scheduled claim event, or None if there is none.

    Asserts that at most one ActionClaimRewardTriggeredEvent is scheduled.
    """
    claim_events = [
        scheduled
        for scheduled in database.get_scheduled_events(
            max_trigger_timestamp=999_999 * 15
        )
        if isinstance(scheduled.event, ActionClaimRewardTriggeredEvent)
    ]
    assert len(claim_events) <= 1
    return claim_events[0] if claim_events else None
class MonitoringService:  # pylint: disable=too-few-public-methods,too-many-instance-attributes
    """Long-running service: syncs blockchain events, triggers scheduled
    events, confirms pending transactions and purges stale monitor requests."""

    def __init__(  # pylint: disable=too-many-arguments
        self,
        web3: Web3,
        private_key: PrivateKey,
        db_filename: str,
        contracts: Dict[str, Contract],
        sync_start_block: BlockNumber,
        required_confirmations: BlockTimeout,
        poll_interval: float,
        min_reward: int = 0,
    ):
        """Wire up web3, contracts, the database and the shared Context.

        Args:
            web3: connection to an Ethereum node
            private_key: account key used to sign and send transactions
            db_filename: database file path (":memory:" in tests)
            contracts: deployed contract instances, keyed by contract name
            sync_start_block: first block to sync events from
            required_confirmations: confirmation blocks before events are trusted
            poll_interval: seconds between main-loop iterations
            min_reward: minimum reward for which monitoring is performed
        """
        self.web3 = web3
        self.chain_id = ChainID(web3.eth.chainId)
        self.private_key = private_key
        self.address = private_key_to_address(private_key)
        self.poll_interval = poll_interval
        self.service_registry = contracts[CONTRACT_SERVICE_REGISTRY]
        self.token_network_registry = contracts[CONTRACT_TOKEN_NETWORK_REGISTRY]

        # Sign transactions locally before they are sent to the node
        web3.middleware_onion.add(construct_sign_and_send_raw_middleware(private_key))

        monitoring_contract = contracts[CONTRACT_MONITORING_SERVICE]
        user_deposit_contract = contracts[CONTRACT_USER_DEPOSIT]

        self.database = Database(
            filename=db_filename,
            chain_id=self.chain_id,
            registry_address=to_canonical_address(self.token_network_registry.address),
            receiver=self.address,
            msc_address=MonitoringServiceAddress(
                to_canonical_address(monitoring_contract.address)
            ),
            sync_start_block=sync_start_block,
        )
        # Load persisted state before building the Context that consumes it
        ms_state = self.database.load_state()

        self.context = Context(
            ms_state=ms_state,
            database=self.database,
            web3=self.web3,
            monitoring_service_contract=monitoring_contract,
            user_deposit_contract=user_deposit_contract,
            min_reward=min_reward,
            required_confirmations=required_confirmations,
        )

    def start(self) -> None:
        """Run the main loop forever; exits the process if not registered."""
        # Refuse to run without a valid ServiceRegistry registration
        if not self.service_registry.functions.hasValidRegistration(
                self.address).call():
            log.error("No valid registration in ServiceRegistry",
                      address=self.address)
            sys.exit(1)

        last_gas_check_block = 0
        while True:
            last_confirmed_block = self.context.latest_confirmed_block

            # check gas reserve, but only every DEFAULT_GAS_CHECK_BLOCKS blocks
            do_gas_reserve_check = (
                last_confirmed_block >=
                last_gas_check_block + DEFAULT_GAS_CHECK_BLOCKS)
            if do_gas_reserve_check:
                check_gas_reserve(self.web3, self.private_key)
                last_gas_check_block = last_confirmed_block

            self._process_new_blocks(
                latest_confirmed_block=last_confirmed_block)
            self._trigger_scheduled_events()
            self._check_pending_transactions()
            self._purge_old_monitor_requests()

            gevent.sleep(self.poll_interval)

    def _process_new_blocks(self, latest_confirmed_block: BlockNumber) -> None:
        """Fetch confirmed blockchain events and feed them to the handlers."""
        token_network_addresses = self.context.database.get_token_network_addresses()

        events = get_blockchain_events_adaptive(
            web3=self.web3,
            blockchain_state=self.context.ms_state.blockchain_state,
            token_network_addresses=token_network_addresses,
            latest_confirmed_block=latest_confirmed_block,
        )
        # NOTE(review): None is treated as "nothing to process this round" —
        # presumably fetching failed/was throttled; confirm in
        # get_blockchain_events_adaptive.
        if events is None:
            return
        for event in events:
            handle_event(event, self.context)

    def _trigger_scheduled_events(self) -> None:
        """Trigger scheduled events

        Here `latest_block` is used instead of `latest_confirmed_block`,
        because triggered events only rely on block number, and not on
        certain events that might change during a chain reorg.
        """
        triggered_events = self.context.database.get_scheduled_events(
            max_trigger_block=self.context.get_latest_unconfirmed_block())
        for scheduled_event in triggered_events:
            event = scheduled_event.event
            handle_event(event, self.context)
            # Remove only after the handler ran, so events survive a crash
            self.context.database.remove_scheduled_event(scheduled_event)

    def _check_pending_transactions(self) -> None:
        """Checks if pending transaction have been mined and confirmed.

        This is done here so we don't have to block waiting for receipts in
        the state machine. In theory it's not necessary to check all pending
        transactions, but only the one with the smallest nonce, and continue
        from there when this one is mined and confirmed. However, as it is
        not expected that this list becomes to big this isn't optimized
        currently.
        """
        for tx_hash in self.context.database.get_waiting_transactions():
            try:
                receipt = self.web3.eth.getTransactionReceipt(Hash32(tx_hash))
            except TransactionNotFound:
                # Not mined yet (or dropped) — check again next iteration
                continue

            tx_block = receipt.get("blockNumber")
            if tx_block is None:
                continue

            # Keep waiting until the receipt has enough confirmations
            confirmation_block = tx_block + self.context.required_confirmations
            if self.web3.eth.blockNumber < confirmation_block:
                continue

            self.context.database.remove_waiting_transaction(tx_hash)
            if receipt["status"] == 1:
                log.info(
                    "Transaction was mined successfully",
                    transaction_hash=tx_hash,
                    receipt=receipt,
                )
            else:
                log.error(
                    "Transaction was not mined successfully",
                    transaction_hash=tx_hash,
                    receipt=receipt,
                )

    def _purge_old_monitor_requests(self) -> None:
        """Delete all old MRs for which still no channel exists.

        Also marks all MRs which have a channel as not waiting_for_channel
        to avoid checking them again, every time.
        """
        # `with conn` makes both statements one sqlite transaction
        with self.context.database.conn:
            self.context.database.conn.execute("""
                UPDATE monitor_request
                SET waiting_for_channel = 0
                WHERE waiting_for_channel
                    AND EXISTS (
                        SELECT 1
                        FROM channel
                        WHERE channel.identifier = monitor_request.channel_identifier
                          AND channel.token_network_address = monitor_request.token_network_address
                    )
            """)
            before_this_is_old = datetime.utcnow() - KEEP_MRS_WITHOUT_CHANNEL
            self.context.database.conn.execute(
                """
                DELETE FROM monitor_request
                WHERE waiting_for_channel
                  AND saved_at < ?
                """,
                [before_this_is_old],
            )
def test_scheduled_events(ms_database: Database):
    """Scheduled events can be upserted idempotently and removed."""
    # Token network row is needed to satisfy the foreign key constraint
    token_network_address = TokenNetworkAddress(bytes([1] * 20))
    ms_database.conn.execute(
        "INSERT INTO token_network (address, settle_timeout) VALUES (?, ?)",
        [
            to_checksum_address(token_network_address),
            DEFAULT_TOKEN_NETWORK_SETTLE_TIMEOUT,
        ],
    )

    def make_event(trigger_timestamp: int) -> ScheduledEvent:
        # Identical payload for all events; only the trigger time differs
        return ScheduledEvent(
            trigger_timestamp=trigger_timestamp,
            event=ActionMonitoringTriggeredEvent(
                token_network_address=token_network_address,
                channel_identifier=ChannelID(1),
                non_closing_participant=Address(bytes([1] * 20)),
            ),
        )

    def counts():
        # Number of events due at timestamps 22, 23 and 24 (x15 seconds)
        return [
            len(ms_database.get_scheduled_events(ts * 15)) for ts in (22, 23, 24)
        ]

    event1 = make_event(23 * 15)
    event2 = make_event(24 * 15)

    assert ms_database.scheduled_event_count() == 0
    ms_database.upsert_scheduled_event(event=event1)
    assert ms_database.scheduled_event_count() == 1

    ms_database.upsert_scheduled_event(event2)
    assert ms_database.scheduled_event_count() == 2
    assert counts() == [0, 1, 2]

    # Upserting an already-stored event must not create a duplicate
    ms_database.upsert_scheduled_event(event1)
    assert ms_database.scheduled_event_count() == 2
    assert counts() == [0, 1, 2]

    ms_database.remove_scheduled_event(event2)
    assert counts() == [0, 1, 1]
class MonitoringService:  # pylint: disable=too-few-public-methods,too-many-instance-attributes
    """Polls the blockchain for channel events, confirms pending transactions
    and purges stale monitor requests."""

    def __init__(  # pylint: disable=too-many-arguments
        self,
        web3: Web3,
        private_key: str,
        db_filename: str,
        contracts: Dict[str, Contract],
        sync_start_block: BlockNumber = BlockNumber(0),
        required_confirmations: int = DEFAULT_NUMBER_OF_BLOCK_CONFIRMATIONS,
        poll_interval: float = 1,
        min_reward: int = 0,
    ):
        """Wire up web3, contracts, the database and the shared Context.

        Args:
            web3: connection to an Ethereum node
            private_key: account key used to sign and send transactions
            db_filename: database file path (":memory:" in tests)
            contracts: deployed contract instances, keyed by contract name
            sync_start_block: first block to sync events from
            required_confirmations: confirmation blocks before events are trusted
            poll_interval: seconds between main-loop iterations
            min_reward: minimum reward for which monitoring is performed
        """
        self.web3 = web3
        self.private_key = private_key
        self.address = private_key_to_address(private_key)
        self.required_confirmations = required_confirmations
        self.poll_interval = poll_interval
        self.service_registry = contracts[CONTRACT_SERVICE_REGISTRY]

        # Sign transactions locally before they are sent to the node
        web3.middleware_stack.add(
            construct_sign_and_send_raw_middleware(private_key))

        monitoring_contract = contracts[CONTRACT_MONITORING_SERVICE]
        user_deposit_contract = contracts[CONTRACT_USER_DEPOSIT]

        chain_id = ChainID(int(web3.net.version))
        self.database = Database(
            filename=db_filename,
            chain_id=chain_id,
            registry_address=contracts[CONTRACT_TOKEN_NETWORK_REGISTRY].address,
            receiver=self.address,
            msc_address=monitoring_contract.address,
            sync_start_block=sync_start_block,
        )
        # Load persisted state before building the Context that consumes it
        ms_state = self.database.load_state()

        self.context = Context(
            ms_state=ms_state,
            db=self.database,
            web3=self.web3,
            monitoring_service_contract=monitoring_contract,
            user_deposit_contract=user_deposit_contract,
            min_reward=min_reward,
            required_confirmations=required_confirmations,
        )

    def start(self,
              wait_function: Callable = time.sleep,
              check_account_gas_reserve: bool = True) -> None:
        """Run the main loop.

        Args:
            wait_function: sleep function, injectable for tests
            check_account_gas_reserve: set False to skip gas-reserve checks
        """
        # Refuse to run without a valid ServiceRegistry registration
        if not self.service_registry.functions.hasValidRegistration(
                self.address).call():
            log.error("No valid registration in ServiceRegistry",
                      address=self.address)
            exit(1)

        last_gas_check_block = 0
        while True:
            last_confirmed_block = self.context.latest_confirmed_block

            # check gas reserve, but only every DEFAULT_GAS_CHECK_BLOCKS blocks
            do_gas_reserve_check = (
                check_account_gas_reserve and last_confirmed_block >=
                last_gas_check_block + DEFAULT_GAS_CHECK_BLOCKS)
            if do_gas_reserve_check:
                check_gas_reserve(self.web3, self.private_key)
                last_gas_check_block = last_confirmed_block

            max_query_interval_end_block = (
                self.context.ms_state.blockchain_state.latest_commited_block +
                MAX_FILTER_INTERVAL)
            # Limit the max number of blocks that is processed per iteration
            last_block = BlockNumber(
                min(last_confirmed_block, max_query_interval_end_block))

            self._process_new_blocks(last_block)
            self._check_pending_transactions()
            self._purge_old_monitor_requests()

            try:
                wait_function(self.poll_interval)
            except KeyboardInterrupt:
                log.info("Shutting down")
                sys.exit(0)

    def _process_new_blocks(self, last_block: BlockNumber) -> None:
        """Sync events up to ``last_block``, handle them, and fire any
        scheduled events that became due."""
        # BCL return a new state and events related to channel lifecycle
        new_chain_state, events = get_blockchain_events(
            web3=self.web3,
            contract_manager=CONTRACT_MANAGER,
            chain_state=self.context.ms_state.blockchain_state,
            to_block=last_block,
        )

        # If a new token network was found we need to write it to the DB, otherwise
        # the constraints for new channels will not be constrained. But only update
        # the network addresses here, all else is done later.
        token_networks_changed = (
            self.context.ms_state.blockchain_state.token_network_addresses !=
            new_chain_state.token_network_addresses)
        if token_networks_changed:
            self.context.ms_state.blockchain_state.token_network_addresses = (
                new_chain_state.token_network_addresses)
            self.context.db.update_blockchain_state(
                self.context.ms_state.blockchain_state)

        # Now set the updated chain state to the context, will be stored later
        self.context.ms_state.blockchain_state = new_chain_state
        for event in events:
            handle_event(event, self.context)

        # check triggered events and trigger the correct ones
        triggered_events = self.context.db.get_scheduled_events(
            max_trigger_block=last_block)
        for scheduled_event in triggered_events:
            event = scheduled_event.event
            handle_event(event, self.context)
            # Remove only after the handler ran, so events survive a crash
            self.context.db.remove_scheduled_event(scheduled_event)

    def _check_pending_transactions(self) -> None:
        """ Checks if pending transaction have been mined and confirmed.

        This is done here so we don't have to block waiting for receipts in
        the state machine. In theory it's not necessary to check all pending
        transactions, but only the one with the smallest nonce, and continue
        from there when this one is mined and confirmed. However, as it is
        not expected that this list becomes to big this isn't optimized
        currently.
        """
        for tx_hash in self.context.db.get_waiting_transactions():
            # NOTE(review): newer web3 versions raise TransactionNotFound
            # instead of returning None here — verify against the pinned
            # web3 version before upgrading.
            receipt = self.web3.eth.getTransactionReceipt(tx_hash)
            if receipt is None:
                continue

            tx_block = receipt.get("blockNumber")
            if tx_block is None:
                continue

            # Keep waiting until the receipt has enough confirmations
            confirmation_block = tx_block + self.context.required_confirmations
            if self.web3.eth.blockNumber < confirmation_block:
                continue

            self.context.db.remove_waiting_transaction(tx_hash)
            if receipt["status"] == 1:
                log.info("Transaction was mined successfully",
                         transaction_hash=tx_hash,
                         receipt=receipt)
            else:
                log.error(
                    "Transaction was not mined successfully",
                    transaction_hash=tx_hash,
                    receipt=receipt,
                )

    def _purge_old_monitor_requests(self) -> None:
        """ Delete all old MRs for which still no channel exists.

        Also marks all MRs which have a channel as not waiting_for_channel
        to avoid checking them again, every time.
        """
        # `with conn` makes both statements one sqlite transaction
        with self.context.db.conn:
            self.context.db.conn.execute("""
                UPDATE monitor_request
                SET waiting_for_channel = 0
                WHERE waiting_for_channel
                    AND EXISTS (
                        SELECT 1
                        FROM channel
                        WHERE channel.identifier = monitor_request.channel_identifier
                          AND channel.token_network_address = monitor_request.token_network_address
                    )
            """)
            before_this_is_old = datetime.utcnow() - KEEP_MRS_WITHOUT_CHANNEL
            self.context.db.conn.execute(
                """
                DELETE FROM monitor_request
                WHERE waiting_for_channel
                  AND saved_at < ?
                """,
                [before_this_is_old],
            )
class MonitoringService:  # pylint: disable=too-few-public-methods
    """Polls the blockchain for channel events and reacts to them; also
    confirms the service's own pending transactions."""

    def __init__(
        self,
        web3: Web3,
        private_key: str,
        db_filename: str,
        contracts: Dict[str, Contract],
        sync_start_block: BlockNumber = BlockNumber(0),
        required_confirmations: int = DEFAULT_REQUIRED_CONFIRMATIONS,
        poll_interval: float = 1,
        min_reward: int = 0,
    ):
        """Wire up web3, contracts, the database and the shared Context.

        Args:
            web3: connection to an Ethereum node
            private_key: account key used to sign and send transactions
            db_filename: database file path (":memory:" in tests)
            contracts: deployed contract instances, keyed by contract name
            sync_start_block: first block to sync events from
            required_confirmations: confirmation blocks before events are trusted
            poll_interval: seconds between main-loop iterations
            min_reward: minimum reward for which monitoring is performed
        """
        self.web3 = web3
        self.private_key = private_key
        self.address = private_key_to_address(private_key)
        self.required_confirmations = required_confirmations
        self.poll_interval = poll_interval
        # Block at which the gas reserve was last checked (see start())
        self.last_gas_check_block = 0

        # Sign transactions locally before they are sent to the node
        web3.middleware_stack.add(construct_sign_and_send_raw_middleware(private_key))

        monitoring_contract = contracts[CONTRACT_MONITORING_SERVICE]
        user_deposit_contract = contracts[CONTRACT_USER_DEPOSIT]

        chain_id = int(web3.net.version)
        self.database = Database(
            filename=db_filename,
            chain_id=chain_id,
            registry_address=contracts[CONTRACT_TOKEN_NETWORK_REGISTRY].address,
            receiver=self.address,
            msc_address=monitoring_contract.address,
            sync_start_block=sync_start_block,
        )
        # Load persisted state before building the Context that consumes it
        ms_state = self.database.load_state()

        self.context = Context(
            ms_state=ms_state,
            db=self.database,
            w3=self.web3,
            contract_manager=CONTRACT_MANAGER,
            last_known_block=0,
            monitoring_service_contract=monitoring_contract,
            user_deposit_contract=user_deposit_contract,
            min_reward=min_reward,
        )

    def start(
        self, wait_function: Callable = time.sleep, check_account_gas_reserve: bool = True
    ) -> None:
        """Run the main loop; Ctrl-C shuts the process down cleanly.

        Args:
            wait_function: sleep function, injectable for tests
            check_account_gas_reserve: set False to skip gas-reserve checks
        """
        while True:
            last_confirmed_block = self.web3.eth.blockNumber - self.required_confirmations

            # check gas reserve, but only every DEFAULT_GAS_CHECK_BLOCKS blocks
            do_gas_reserve_check = (
                check_account_gas_reserve
                and last_confirmed_block >= self.last_gas_check_block + DEFAULT_GAS_CHECK_BLOCKS
            )
            if do_gas_reserve_check:
                check_gas_reserve(self.web3, self.private_key)
                self.last_gas_check_block = last_confirmed_block

            max_query_interval_end_block = (
                self.context.ms_state.blockchain_state.latest_known_block + MAX_FILTER_INTERVAL
            )
            # Limit the max number of blocks that is processed per iteration
            last_block = min(last_confirmed_block, max_query_interval_end_block)

            self._process_new_blocks(last_block)

            try:
                wait_function(self.poll_interval)
            except KeyboardInterrupt:
                log.info("Shutting down")
                sys.exit(0)

    def _process_new_blocks(self, last_block: BlockNumber) -> None:
        """Sync events up to ``last_block``, handle them, fire due scheduled
        events and confirm pending transactions."""
        self.context.last_known_block = last_block

        # BCL return a new state and events related to channel lifecycle
        new_chain_state, events = get_blockchain_events(
            web3=self.web3,
            contract_manager=CONTRACT_MANAGER,
            chain_state=self.context.ms_state.blockchain_state,
            to_block=last_block,
        )

        # If a new token network was found we need to write it to the DB, otherwise
        # the constraints for new channels will not be constrained. But only update
        # the network addresses here, all else is done later.
        token_networks_changed = (
            self.context.ms_state.blockchain_state.token_network_addresses
            != new_chain_state.token_network_addresses
        )
        if token_networks_changed:
            self.context.ms_state.blockchain_state.token_network_addresses = (
                new_chain_state.token_network_addresses
            )
            self.context.db.update_blockchain_state(self.context.ms_state.blockchain_state)

        # Now set the updated chain state to the context, will be stored later
        self.context.ms_state.blockchain_state = new_chain_state
        for event in events:
            handle_event(event, self.context)

        # check triggered events and trigger the correct ones
        triggered_events = self.context.db.get_scheduled_events(max_trigger_block=last_block)
        for scheduled_event in triggered_events:
            event = scheduled_event.event
            handle_event(event, self.context)
            # Remove only after the handler ran, so events survive a crash
            self.context.db.remove_scheduled_event(scheduled_event)

        # check pending transactions
        # this is done here so we don't have to block waiting for receipts in the state machine
        for tx_hash in self.context.db.get_waiting_transactions():
            # NOTE(review): no confirmation wait here (unlike later versions) —
            # the receipt is trusted as soon as it appears.
            receipt = self.web3.eth.getTransactionReceipt(tx_hash)

            if receipt is not None:
                self.context.db.remove_waiting_transaction(tx_hash)
                if receipt["status"] == 1:
                    log.info(
                        "Transaction was mined successfully",
                        transaction_hash=tx_hash,
                        receipt=receipt,
                    )
                else:
                    log.error(
                        "Transaction was not mined successfully",
                        transaction_hash=tx_hash,
                        receipt=receipt,
                    )
def test_scheduled_events(ms_database: Database):
    """Scheduled events can be upserted idempotently and removed."""
    # Token network row is needed to satisfy the foreign key constraint
    token_network_address = TokenNetworkAddress(bytes([1] * 20))
    ms_database.conn.execute(
        "INSERT INTO token_network(address) VALUES (?)",
        [to_checksum_address(token_network_address)],
    )

    def make_event(block: int) -> ScheduledEvent:
        # Identical payload for all events; only the trigger block differs
        return ScheduledEvent(
            trigger_block_number=BlockNumber(block),
            event=ActionMonitoringTriggeredEvent(
                token_network_address=token_network_address,
                channel_identifier=ChannelID(1),
                non_closing_participant=Address(bytes([1] * 20)),
            ),
        )

    def counts():
        # Number of events due at blocks 22, 23 and 24
        return [
            len(ms_database.get_scheduled_events(BlockNumber(block)))
            for block in (22, 23, 24)
        ]

    event1 = make_event(23)
    event2 = make_event(24)

    assert ms_database.scheduled_event_count() == 0
    ms_database.upsert_scheduled_event(event=event1)
    assert ms_database.scheduled_event_count() == 1

    ms_database.upsert_scheduled_event(event2)
    assert ms_database.scheduled_event_count() == 2
    assert counts() == [0, 1, 2]

    # Upserting an already-stored event must not create a duplicate
    ms_database.upsert_scheduled_event(event1)
    assert ms_database.scheduled_event_count() == 2
    assert counts() == [0, 1, 2]

    ms_database.remove_scheduled_event(event2)
    assert counts() == [0, 1, 1]
def __init__(
    self,
    web3: Web3,
    contract_manager: ContractManager,
    private_key: str,
    registry_address: Address,
    monitor_contract_address: Address,
    user_deposit_contract_address: Address,
    db_filename: str,
    sync_start_block: BlockNumber = 0,
    required_confirmations: int = DEFAULT_REQUIRED_CONFIRMATIONS,
    poll_interval: float = 1,
    min_reward: int = 0,
):
    """Wire up web3, contract proxies, the database and the shared Context.

    Args:
        web3: connection to an Ethereum node
        contract_manager: provides contract ABIs by contract name
        private_key: account key used to sign and send transactions
        registry_address: address of the TokenNetworkRegistry contract
        monitor_contract_address: address of the MonitoringService contract
        user_deposit_contract_address: address of the UserDeposit contract
        db_filename: database file path (":memory:" in tests)
        sync_start_block: first block to sync events from
        required_confirmations: confirmation blocks before events are trusted
        poll_interval: seconds between main-loop iterations
        min_reward: minimum reward for which monitoring is performed
    """
    self.web3 = web3
    self.contract_manager = contract_manager
    self.private_key = private_key
    self.address = private_key_to_address(private_key)
    self.required_confirmations = required_confirmations
    self.poll_interval = poll_interval
    # Block at which the gas reserve was last checked
    self.last_gas_check_block = 0

    # Sign transactions locally before they are sent to the node
    web3.middleware_stack.add(
        construct_sign_and_send_raw_middleware(private_key),
    )

    # Build contract proxies from ABI + on-chain address
    monitoring_contract = self.web3.eth.contract(
        abi=self.contract_manager.get_contract_abi(
            CONTRACT_MONITORING_SERVICE,
        ),
        address=monitor_contract_address,
    )
    user_deposit_contract = self.web3.eth.contract(
        abi=self.contract_manager.get_contract_abi(
            CONTRACT_USER_DEPOSIT,
        ),
        address=user_deposit_contract_address,
    )

    chain_id = int(web3.net.version)
    self.database = Database(
        filename=db_filename,
        chain_id=chain_id,
        registry_address=registry_address,
        receiver=self.address,
        msc_address=monitor_contract_address,
    )
    # Load persisted state before building the Context that consumes it
    ms_state = self.database.load_state()

    self.bcl = BlockchainListener(
        web3=self.web3,
        contract_manager=contract_manager,
    )

    self.context = Context(
        ms_state=ms_state,
        db=self.database,
        w3=self.web3,
        contract_manager=contract_manager,
        last_known_block=0,
        monitoring_service_contract=monitoring_contract,
        user_deposit_contract=user_deposit_contract,
        min_reward=min_reward,
    )