def receive(self, data):
    """Handle one raw UDP datagram: drop oversized packets, replay the Ack
    for already-handled messages, and otherwise decode and dispatch.
    """
    # ignore large packets
    if len(data) > self.max_message_size:
        log.error('receive packet larger than maximum size', length=len(data))
        return

    msghash = sha3(data)

    # check if we handled this message already, if so repeat Ack
    if msghash in self.sent_acks:
        return self.send_ack(*self.sent_acks[msghash])

    # We ignore the sending endpoint as this can not be known w/ UDP
    msg = messages.decode(data)

    if isinstance(msg, Ack):
        # we might receive the same Ack more than once
        # an Ack's echo is the hash of the message it acknowledges; a hit in
        # number_of_tries means the retry loop is still waiting for it
        if msg.echo in self.number_of_tries:
            log.debug('ACK RECEIVED {} [echo={}]'.format(
                pex(self.raiden.address),
                pex(msg.echo)
            ))
            del self.number_of_tries[msg.echo]
        else:
            log.debug('DUPLICATED ACK RECEIVED {} [echo={}]'.format(
                pex(self.raiden.address),
                pex(msg.echo)
            ))
    else:
        # NOTE(review): assumes any non-Ack message is either a Secret or
        # carries a recovered sender address -- confirm against messages.decode
        assert isinstance(msg, Secret) or msg.sender
        self.raiden.on_message(msg, msghash)
def register_locked_transfer(self, locked_transfer):
    """Register a LockedTransfer, adding its lock to the merkle tree.

    Raises:
        ValueError: If `locked_transfer` is not a LockedTransfer, if its
            hashlock is already registered, or if the announced locksroot
            does not match the locally recomputed one.
    """
    if not isinstance(locked_transfer, LockedTransfer):
        raise ValueError('transfer must be LockedTransfer')

    lock = locked_transfer.lock
    lockhashed = sha3(lock.as_bytes)

    if self.is_known(lock.hashlock):
        raise ValueError('hashlock is already registered')

    # recompute the root including the new lock to validate the sender's
    # claimed locksroot before mutating any state
    merkletree = self.unclaimed_merkletree()
    merkletree.append(lockhashed)
    new_locksroot = merkleroot(merkletree)

    if locked_transfer.locksroot != new_locksroot:
        raise ValueError(
            'locksroot mismatch expected:{} got:{}'.format(
                pex(new_locksroot),
                pex(locked_transfer.locksroot),
            )
        )

    self.hashlock_pendinglocks[lock.hashlock] = PendingLock(lock, lockhashed)
    self.transfer = locked_transfer
    # NOTE(review): unlocked locks are discarded here; presumably the new
    # transfer's locksroot no longer contains them -- confirm with callers
    self.hashlock_unlockedlocks = dict()
def _repeat_until_ack(self, receiver_address, msg):
    """Send `msg` repeatedly until its Ack removes the hash from
    `number_of_tries`, or raise after `max_tries` attempts.
    """
    data = msg.encode()
    host_port = self.discovery.get(receiver_address)

    # msghash is removed from the `number_of_tries` once a Ack is
    # received, resend until we receive it or give up
    msghash = sha3(data)
    self.number_of_tries[msghash] = 0

    log.info('SENDING {} > {} : [{}] {}'.format(
        pex(self.raiden.address),
        pex(receiver_address),
        pex(msghash),
        msg,
    ))

    # the receive() path deletes msghash from number_of_tries when the Ack
    # arrives, which terminates this loop
    while msghash in self.number_of_tries:
        if self.number_of_tries[msghash] > self.max_tries:
            # FIXME: suspend node + recover from the failure
            raise Exception('DEACTIVATED MSG resents {} {}'.format(
                pex(receiver_address),
                msg,
            ))

        self.number_of_tries[msghash] += 1
        self.transport.send(self.raiden, host_port, data)
        gevent.sleep(self.try_interval)
def receive(self, messagedata: bytes):
    """ Handle an UDP packet. """
    # pylint: disable=unidiomatic-typecheck
    if len(messagedata) > self.UDP_MAX_MESSAGE_SIZE:
        log.error(
            'INVALID MESSAGE: Packet larger than maximum size',
            node=pex(self.raiden.address),
            message=hexlify(messagedata),
            length=len(messagedata),
        )
        return

    message = decode(messagedata)

    # exact-type dispatch (intentionally not isinstance) for the control
    # messages; everything else that decoded successfully takes the
    # generic path, and a failed decode is reported as an unknown cmdid
    control_handlers = {
        Pong: self.receive_pong,
        Ping: self.receive_ping,
        Delivered: self.receive_delivered,
    }
    handler = control_handlers.get(type(message))

    if handler is not None:
        handler(message)
    elif message is not None:
        self.receive_message(message)
    else:
        log.error(
            'INVALID MESSAGE: Unknown cmdid',
            node=pex(self.raiden.address),
            message=hexlify(messagedata),
        )
def __init__(self, expected_locksroot, got_locksroot):
    """Format the mismatch message from the expected and received roots."""
    expected_hex = pex(expected_locksroot)
    got_hex = pex(got_locksroot)
    msg = 'Locksroot mismatch. Expected {} but got {}'.format(expected_hex, got_hex)
    super(InvalidLocksRoot, self).__init__(msg)
def __init__(self, asset_address, netcontract_address, address_A, address_B, settle_timeout):
    """In-memory stand-in for a netting channel contract between two
    participants, tracking the block numbers of the lifecycle events.
    """
    log.debug(
        'creating nettingchannelcontract',
        a=pex(address_A),
        b=pex(address_B),
    )

    self.asset_address = asset_address
    self.netcontract_address = netcontract_address
    self.participants = {
        address_A: Participant(),
        address_B: Participant(),
    }

    # opened, settled, and closed default to 0 as it's the value used in
    # the contract
    self.opened = 0
    """ Block number when deposit() was first called. """

    self.settled = 0
    """ Block number when settle was sucessfully called. """

    self.closed = 0
    """ Block number when close() was first called (might be zero in testing scenarios). """

    self.closer = None
    """ The participant that called the close method. """

    self.settle_timeout = settle_timeout
    """ Number of blocks that we are required to wait before allowing settlement. """
def __repr__(self):
    """Compact representation with message id, secrethash and message hash."""
    template = '<{} [msgid:{} secrethash:{} hash:{}]>'
    return template.format(
        type(self).__name__,
        self.message_identifier,
        pex(self.secrethash),
        pex(self.hash),
    )
def __repr__(self):
    """Readable summary of the SendLockExpired event."""
    fields = (
        self.message_identifier,
        self.balance_proof,
        pex(self.secrethash),
        pex(self.recipient),
    )
    return '<SendLockExpired msgid:%s balance_proof:%s secrethash:%s recipient:%s>' % fields
def add_token(self, token_address):
    """Register `token_address` with the on-chain registry and return the
    address of the channel manager created for it.

    Raises:
        ValueError: If `token_address` is not a valid address.
        TransactionThrew: If the addToken transaction failed.
        RuntimeError: If no manager address can be resolved afterwards.
    """
    if not isaddress(token_address):
        raise ValueError('token_address must be a valid address')

    transaction_hash = estimate_and_transact(
        self.proxy,
        'addToken',
        self.startgas,
        self.gasprice,
        token_address,
    )

    self.client.poll(unhexlify(transaction_hash), timeout=self.poll_timeout)
    receipt_or_none = check_transaction_threw(self.client, transaction_hash)
    if receipt_or_none:
        raise TransactionThrew('AddToken', receipt_or_none)

    # double-check: a missing manager means the transaction silently failed
    manager_address = self.manager_address_by_token(token_address)
    if manager_address is None:
        log.error('Transaction failed and check_transaction_threw didnt detect it')
        raise RuntimeError('channelManagerByToken failed')

    if log.isEnabledFor(logging.INFO):
        log.info(
            'add_token called',
            token_address=pex(token_address),
            registry_address=pex(self.address),
            manager_address=pex(manager_address),
        )

    return manager_address
def join_channel(self, partner_address, partner_deposit):
    """Will be called, when we were selected as channel partner by another
    node. It will fund the channel with up to the partners deposit, but
    not more than remaining funds or the initial funding per channel.

    If the connection manager has no funds, this is a noop.
    """
    with self.lock:
        # match the partner's deposit, capped by our remaining budget
        joining_funds = min(
            partner_deposit,
            self._funds_remaining,
            self._initial_funding_per_partner,
        )
        # nothing to contribute, or the manager is leaving the network
        if joining_funds <= 0 or self._leaving_state:
            return

        self.api.set_total_channel_deposit(
            self.registry_address,
            self.token_address,
            partner_address,
            joining_funds,
        )

        log.debug(
            'joined a channel!',
            funds=joining_funds,
            me=pex(self.raiden.address),
            partner=pex(partner_address),
        )
def get_best_routes(self, amount, target, lock_timeout=None):
    """ Yield a two-tuple (path, channel) that can be used to mediate the
    transfer. The result is ordered from the best to worst path.
    """
    available_paths = self.channelgraph.get_shortest_paths(self.raiden.address, target)

    # XXX: consider using multiple channels for a single transfer. Useful
    # for cases were the `amount` is larger than what is available
    # individually in any of the channels.
    #
    # One possible approach is to _not_ filter these channels based on the
    # distributable amount, but to sort them based on available balance and
    # let the task use as many as required to finish the transfer.

    for path in available_paths:
        assert path[0] == self.raiden.address
        assert path[1] in self.partneraddress_channel
        assert path[-1] == target

        # only the first hop's channel matters to this node
        partner = path[1]
        channel = self.partneraddress_channel[partner]

        if not channel.isopen:
            if log.isEnabledFor(logging.INFO):
                log.info("channel %s - %s is close, ignoring", pex(path[0]), pex(path[1]))

            continue

        # we can't intermediate the transfer if we don't have enough funds
        if amount > channel.distributable:
            if log.isEnabledFor(logging.INFO):
                log.info(
                    "channel %s - %s doesnt have enough funds [%s], ignoring",
                    pex(path[0]),
                    pex(path[1]),
                    amount
                )
            continue

        if lock_timeout:
            # Our partner wont accept a lock timeout that:
            # - is larger than the settle timeout, otherwise the lock's
            #   secret could be release /after/ the channel is settled.
            # - is smaller than the reveal timeout, because that is the
            #   minimum number of blocks required by the partner to learn the
            #   secret.
            valid_timeout = channel.reveal_timeout <= lock_timeout < channel.settle_timeout

            if not valid_timeout and log.isEnabledFor(logging.INFO):
                log.info(
                    "lock_expiration is too large, channel/path cannot be used",
                    lock_timeout=lock_timeout,
                    reveal_timeout=channel.reveal_timeout,
                    settle_timeout=channel.settle_timeout,
                    nodeid=pex(path[0]),
                    partner=pex(path[1]),
                )

            # do not try the route since we know the transfer will be rejected.
            if not valid_timeout:
                continue

        yield (path, channel)
def __repr__(self):
    """Detailed, pex-abbreviated representation of the locked transfer."""
    template = (
        '<{} ['
        'chainid:{} msgid:{} paymentid:{} token_network:{} channel:{} '
        'nonce:{} transferred_amount:{} '
        'locked_amount:{} locksroot:{} hash:{} secrethash:{} expiration:{} amount:{}'
        ']>'
    )
    lock = self.lock
    return template.format(
        type(self).__name__,
        self.chain_id,
        self.message_identifier,
        self.payment_identifier,
        pex(self.token_network_address),
        pex(self.channel),
        self.nonce,
        self.transferred_amount,
        self.locked_amount,
        pex(self.locksroot),
        pex(self.hash),
        pex(lock.secrethash),
        lock.expiration,
        lock.amount,
    )
def unlock(self, our_address, unlock_proofs):
    """Submit each (merkle_proof, lock, secret) triple to the tester
    contract's unlock function, mining a block after each call.

    Raises:
        ValueError: If a lock was passed as a messages.Lock object instead
            of its `.as_bytes` encoding.
    """
    # force a list to get the length (could be a generator)
    unlock_proofs = list(unlock_proofs)
    log.info('{} locks to unlock'.format(len(unlock_proofs)), contract=pex(self.address))

    for merkle_proof, locked_encoded, secret in unlock_proofs:
        if isinstance(locked_encoded, messages.Lock):
            raise ValueError('unlock must be called with a lock encoded `.as_bytes`')

        # flatten the proof into the single blob the contract expects
        merkleproof_encoded = ''.join(merkle_proof)

        self.proxy.unlock(
            locked_encoded,
            merkleproof_encoded,
            secret,
        )
        self.tester_state.mine(number_of_blocks=1)

        lock = messages.Lock.from_bytes(locked_encoded)
        log.info(
            'unlock called',
            contract=pex(self.address),
            lock=lock,
            secret=encode_hex(secret),
        )
def close(self, our_address, first_transfer, second_transfer):
    """Close the tester netting channel with whichever signed transfers are
    available, mining a block after the call.

    Raises:
        ValueError: If neither transfer is given (the channel was unused).
    """
    if first_transfer and second_transfer:
        first_encoded = first_transfer.encode()
        second_encoded = second_transfer.encode()

        self.proxy.close(
            first_encoded,
            second_encoded,
        )
        self.tester_state.mine(number_of_blocks=1)

        log.info(
            'close called',
            contract=pex(self.address),
            first_transfer=first_transfer,
            second_transfer=second_transfer,
        )

    elif first_transfer:
        first_encoded = first_transfer.encode()

        self.proxy.closeSingleTransfer(first_encoded)
        self.tester_state.mine(number_of_blocks=1)

        log.info('close called', contract=pex(self.address), first_transfer=first_transfer)

    elif second_transfer:
        second_encoded = second_transfer.encode()

        # fixed: this branch previously used `closeSingleTransfer.transact(...)`
        # while the first_transfer branch calls the proxy method directly --
        # both single-transfer paths must invoke the contract the same way
        self.proxy.closeSingleTransfer(second_encoded)
        self.tester_state.mine(number_of_blocks=1)

        log.info('close called', contract=pex(self.address), second_transfer=second_transfer)

    else:
        # TODO: allow to close nevertheless
        raise ValueError('channel wasnt used')
def update_transfer(self, nonce, transferred_amount, locksroot, extra_hash, signature):
    """Submit the partner's transfer state via updateTransfer on the tester
    contract; a falsy signature makes this a no-op.
    """
    self._check_exists()
    if signature:
        log.info(
            'update_transfer called',
            contract=pex(self.proxy.address),
            nonce=nonce,
            transferred_amount=transferred_amount,
            locksroot=locksroot,
            extra_hash=extra_hash,
            signature=signature,
        )

        self.proxy.updateTransfer(
            nonce,
            transferred_amount,
            locksroot,
            extra_hash,
            signature,
        )
        self.tester_chain.mine(number_of_blocks=1)

        log.info(
            'update_transfer sucessfull',
            contract=pex(self.address),
            nonce=nonce,
            transferred_amount=transferred_amount,
            locksroot=locksroot,
            extra_hash=extra_hash,
            signature=signature,
        )
def __repr__(self):
    """Readable summary of the received secret request."""
    fields = (
        self.payment_identifier,
        self.amount,
        pex(self.secrethash),
        pex(self.sender),
    )
    return '<ReceiveSecretRequest paymentid:%s amount:%s secrethash:%s sender:%s>' % fields
def __init__(self, asset_address, netcontract_address, address_A, address_B):
    """In-memory stand-in for a netting channel contract between two
    participants; lifecycle block numbers start as None (not yet happened).
    """
    log.debug(
        'creating nettingchannelcontract',
        a=pex(address_A),
        b=pex(address_B),
    )

    self.asset_address = asset_address
    self.netcontract_address = netcontract_address
    self.participants = {
        address_A: Participant(),
        address_B: Participant(),
    }

    self.opened = None
    """ Block number when deposit() was first called. """

    self.settled = None
    """ Block number when settle was sucessfully called. """

    self.closed = None
    """ Block number when close() was first called (might be zero in testing scenarios). """

    self.closer = None
    """ The participant that called the close method. """
def _register_secret_batch(self, secrets):
    """Send a registerSecretBatch transaction, wait for it to be mined and
    return the transaction hash; raises TransactionThrew on failure.
    """
    transaction_hash = self.proxy.transact(
        'registerSecretBatch',
        secrets,
    )
    self.client.poll(unhexlify(transaction_hash))

    details = {
        'node': pex(self.node_address),
        'contract': pex(self.address),
        'secrets': secrets,
    }

    receipt_or_none = check_transaction_threw(self.client, transaction_hash)
    if receipt_or_none:
        log.critical('registerSecretBatch failed', **details)
        raise TransactionThrew('registerSecretBatch', receipt_or_none)

    log.info('registerSecretBatch successful', **details)
    return transaction_hash
def register_endpoint(self, node_address, endpoint):
    """Publish `endpoint` for this node via the discovery contract.

    Raises:
        ValueError: If `node_address` is not this client's address.
        TransactionThrew: If the registerEndpoint transaction failed.
    """
    if node_address != self.client.address:
        raise ValueError("node_address doesnt match this node's address")

    details = {
        'node': pex(self.node_address),
        'node_address': pex(node_address),
        'endpoint': endpoint,
    }
    log.debug('registerEndpoint called', **details)

    transaction_hash = self.proxy.transact(
        'registerEndpoint',
        safe_gas_limit(GAS_REQUIRED_FOR_ENDPOINT_REGISTER),
        endpoint,
    )
    self.client.poll(transaction_hash)

    receipt_or_none = check_transaction_threw(self.client, transaction_hash)
    if receipt_or_none:
        log.critical('registerEndpoint failed', **details)
        raise TransactionThrew('Register Endpoint', receipt_or_none)

    log.debug('registerEndpoint successful', **details)
def unlock(self, our_address, unlock_proofs):
    """Submit each (merkle_proof, lock, secret) triple to the contract's
    unlock function, polling for each transaction to be mined.

    Raises:
        ValueError: If a lock was passed as a messages.Lock object instead
            of its `.as_bytes` encoding.
    """
    # force a list to get the length (could be a generator)
    unlock_proofs = list(unlock_proofs)
    log.info(
        '%s locks to unlock',
        len(unlock_proofs),
        contract=pex(self.address),
    )

    for merkle_proof, locked_encoded, secret in unlock_proofs:
        if isinstance(locked_encoded, messages.Lock):
            raise ValueError('unlock must be called with a lock encoded `.as_bytes`')

        # flatten the proof into the single blob the contract expects
        merkleproof_encoded = ''.join(merkle_proof)

        transaction_hash = self.proxy.unlock.transact(
            locked_encoded,
            merkleproof_encoded,
            secret,
            startgas=self.startgas,
            gasprice=self.gasprice,
        )
        # NOTE: `.decode('hex')` is a Python-2 idiom; other proxies in this
        # file use unhexlify for the same conversion
        self.client.poll(transaction_hash.decode('hex'), timeout=self.poll_timeout)

        # TODO: check if the ChannelSecretRevealed event was emitted and if
        # it wasn't raise an error

        # if log.getEffectiveLevel() >= logging.INFO:  # only decode the lock if need to
        lock = messages.Lock.from_bytes(locked_encoded)
        log.info(
            'unlock called',
            contract=pex(self.address),
            lock=lock,
            secret=encode_hex(secret),
        )
def register_transfer(self, block_number, transfer):
    """ Register a signed transfer, updating the channel's state accordingly. """
    sender = transfer.sender

    # a transfer we sent: our balance decreases, the partner's increases
    if sender == self.our_state.address:
        self.register_transfer_from_to(
            block_number,
            transfer,
            from_state=self.our_state,
            to_state=self.partner_state,
        )
        self.sent_transfers.append(transfer)
        return

    # a transfer from the partner: the mirror of the case above
    if sender == self.partner_state.address:
        self.register_transfer_from_to(
            block_number,
            transfer,
            from_state=self.partner_state,
            to_state=self.our_state,
        )
        self.received_transfers.append(transfer)
        return

    # neither participant signed this transfer
    if log.isEnabledFor(logging.WARN):
        log.warn(
            'Received a transfer from party that is not a part of the channel',
            node=self.our_state.address,
            from_=pex(transfer.sender),
            channel=pex(transfer.channel)
        )

    raise UnknownAddress(transfer)
def transfer(self, to_address, amount):
    """Send `amount` tokens to `to_address` via the token contract.

    Raises:
        TransactionThrew: If the transfer transaction failed.
    """
    details = {
        'node': pex(self.node_address),
        'contract': pex(self.address),
        'to_address': pex(to_address),
        'amount': amount,
    }
    log.debug('transfer called', **details)

    transaction_hash = self.proxy.transact(
        'transfer',
        safe_gas_limit(GAS_LIMIT_FOR_TOKEN_CONTRACT_CALL),
        to_checksum_address(to_address),
        amount,
    )
    self.client.poll(transaction_hash)

    receipt_or_none = check_transaction_threw(self.client, transaction_hash)
    if receipt_or_none:
        log.critical('transfer failed', **details)
        raise TransactionThrew('Transfer', receipt_or_none)

    # TODO: check Transfer event (issue: #2598)
    log.info('transfer successful', **details)
def __repr__(self):
    """Compact representation with hashlock, amount and message hash."""
    template = '<{} [hashlock:{} amount:{} hash:{}]>'
    return template.format(
        type(self).__name__,
        pex(self.hashlock),
        self.amount,
        pex(self.hash),
    )
def _receive_delivered(self, delivered: Delivered):
    """Process a Delivered message: feed the state machine, then resolve the
    matching pending async result if one is still registered.
    """
    # FIXME: The signature doesn't seem to be verified - check in UDPTransport as well
    self._raiden_service.handle_state_change(
        ReceiveDelivered(delivered.delivered_message_identifier),
    )

    message_id = delivered.delivered_message_identifier
    async_result = self._messageids_to_asyncresult.pop(message_id, None)

    if async_result is None:
        # no sender is waiting on this message id (already resolved or unknown)
        self.log.debug(
            'DELIVERED MESSAGE UNKNOWN',
            node=pex(self._raiden_service.address),
            message_identifier=message_id,
        )
        return

    async_result.set(True)
    self.log.debug(
        'DELIVERED MESSAGE RECEIVED',
        node=pex(self._raiden_service.address),
        receiver=pex(delivered.sender),
        message_identifier=message_id,
    )
def _run(self):  # pylint: disable=method-hidden
    """Target-side task for a mediated transfer: request the secret from the
    initiator, validate the response and register it with the asset manager.
    """
    transfer = self.originating_transfer
    assetmanager = self.transfermanager.assetmanager
    raiden = assetmanager.raiden

    transfer_details = '{} -> {} hash:{}'.format(
        pex(transfer.target),
        pex(transfer.initiator),
        pex(transfer.hash),
    )
    log.debug('END MEDIATED TRANSFER {}'.format(transfer_details))

    secret_request = SecretRequest(transfer.lock.hashlock)
    raiden.sign(secret_request)
    raiden.send(transfer.initiator, secret_request)

    self.event = AsyncResult()
    response = self.event.wait(raiden.config['msg_timeout'])

    if response is None:
        log.error('SECRETREQUEST TIMED OUT!')
        # fixed: used `transfer.hashlock`, but the hashlock lives on the
        # lock -- every other use in this task is `transfer.lock.hashlock`
        self.transfermanager.on_hashlock_result(transfer.lock.hashlock, False)
        return

    if not isinstance(response, Secret):
        raise Exception('Invalid message received.')

    if sha3(response.secret) != transfer.lock.hashlock:
        raise Exception('Invalid secret received.')

    # update all channels and propagate the secret
    assetmanager.register_secret(response.secret)
    self.transfermanager.on_hashlock_result(transfer.lock.hashlock, True)
def send_ping(self, receiver_address):
    """Sign and send a Ping to `receiver_address`, returning an AsyncResult
    that the Ack handler resolves.

    Raises:
        ValueError: If `receiver_address` is not a valid address.
    """
    if not isaddress(receiver_address):
        raise ValueError('Invalid address {}'.format(pex(receiver_address)))

    # per-receiver monotonically increasing nonce
    nonce = self._ping_nonces[receiver_address]
    self._ping_nonces[receiver_address] += 1

    message = Ping(nonce)
    self.raiden.sign(message)

    if log.isEnabledFor(logging.INFO):
        log.info(
            'SENDING PING %s > %s',
            pex(self.raiden.address),
            pex(receiver_address)
        )

    message_data = message.encode()
    # the Ack echoes this hash, which keys the pending async result
    echohash = sha3(message_data + receiver_address)

    async_result = AsyncResult()
    if echohash not in self.echohash_asyncresult:
        self.echohash_asyncresult[echohash] = WaitAck(async_result, receiver_address)

    # Just like ACK, a PING message is sent directly. No need for queuing
    self.transport.send(
        self.raiden,
        self.discovery.get(receiver_address),
        message_data
    )

    return async_result
def __repr__(self):
    """Summary of the swap task: node address and the two token addresses."""
    template = '<{} {} from_token:{} to_token:{}>'
    return template.format(
        type(self).__name__,
        pex(self.raiden.address),
        pex(self.from_mediated_transfer.token),
        pex(self.tokenswap.to_token),
    )
def close(self, nonce, transferred_amount, locksroot, extra_hash, signature):
    """Close the tester channel with the partner's signed transfer state."""
    # this transaction may fail if there is a race to close the channel
    self._check_exists()

    log.info(
        'closing channel',
        contract=pex(self.proxy.address),
        nonce=nonce,
        transferred_amount=transferred_amount,
        locksroot=locksroot,
        extra_hash=extra_hash,
        signature=signature,
    )
    self.proxy.close(
        nonce,
        transferred_amount,
        locksroot,
        extra_hash,
        signature,
    )
    self.tester_chain.mine(number_of_blocks=1)

    log.info(
        'close sucessfull',
        contract=pex(self.proxy.address),
        nonce=nonce,
        transferred_amount=transferred_amount,
        locksroot=pex(locksroot),
        extra_hash=pex(extra_hash),
        signature=pex(signature),
    )
def __repr__(self):
    """Summary of the swap: node address and both token addresses."""
    swap = self.tokenswap
    template = '<{} {} from_token:{} to_token:{}>'
    return template.format(
        type(self).__name__,
        pex(self.raiden.address),
        pex(swap.from_token),
        pex(swap.to_token),
    )
def __repr__(self):
    """Compact representation with sender, hashlock, asset and hash."""
    template = '<{} [sender:{} hashlock:{} asset:{} hash:{}]>'
    return template.format(
        type(self).__name__,
        pex(self.sender),
        pex(self.hashlock),
        pex(self.asset),
        pex(self.hash),
    )
def deploy_solidity_contract(
        self,  # pylint: disable=too-many-locals
        contract_name: str,
        all_contracts: Dict[str, ABI],
        libraries: Dict[str, Address] = None,
        constructor_parameters: Tuple[Any] = None,
        contract_path: str = None,
):
    """ Deploy a solidity contract.

    Args:
        contract_name: The name of the contract to compile.
        all_contracts: The json dictionary containing the result of compiling a file.
        libraries: A list of libraries to use in deployment.
        constructor_parameters: A tuple of arguments to pass to the constructor.
        contract_path: If we are dealing with solc >= v0.4.9 then the path
            to the contract is a required argument to extract the contract data
            from the `all_contracts` dict.
    """
    # copy the mutable inputs: both are modified in place below
    if libraries:
        libraries = dict(libraries)
    else:
        libraries = dict()

    constructor_parameters = constructor_parameters or list()
    all_contracts = copy.deepcopy(all_contracts)

    # solc >= 0.4.9 keys contracts as '<basename>:<name>'
    if contract_name in all_contracts:
        contract_key = contract_name

    elif contract_path is not None:
        contract_key = os.path.basename(contract_path) + ':' + contract_name

        if contract_key not in all_contracts:
            raise ValueError('Unknown contract {}'.format(contract_name))
    else:
        raise ValueError(
            'Unknown contract {} and no contract_path given'.format(contract_name),
        )

    contract = all_contracts[contract_key]
    contract_interface = contract['abi']
    symbols = solidity_unresolved_symbols(contract['bin'])

    if symbols:
        # the bytecode references libraries; deploy them first and link
        available_symbols = list(map(solidity_library_symbol, all_contracts.keys()))

        unknown_symbols = set(symbols) - set(available_symbols)
        if unknown_symbols:
            msg = 'Cannot deploy contract, known symbols {}, unresolved symbols {}.'.format(
                available_symbols,
                unknown_symbols,
            )
            raise Exception(msg)

        dependencies = deploy_dependencies_symbols(all_contracts)
        deployment_order = dependencies_order_of_build(contract_key, dependencies)

        deployment_order.pop()  # remove `contract_name` from the list
        log.debug(
            'Deploying dependencies: {}'.format(str(deployment_order)),
            node=pex(self.address),
        )

        for deploy_contract in deployment_order:
            dependency_contract = all_contracts[deploy_contract]

            hex_bytecode = solidity_resolve_symbols(dependency_contract['bin'], libraries)
            bytecode = decode_hex(hex_bytecode)

            dependency_contract['bin'] = bytecode

            # leave headroom under the block gas limit for the deployment
            gas_limit = self.web3.eth.getBlock('latest')['gasLimit'] * 8 // 10
            transaction_hash = self.send_transaction(
                to=Address(b''),
                startgas=gas_limit,
                data=bytecode,
            )

            self.poll(transaction_hash)
            receipt = self.get_transaction_receipt(transaction_hash)
            contract_address = receipt['contractAddress']
            # remove the hexadecimal prefix 0x from the address
            contract_address = remove_0x_prefix(contract_address)

            libraries[deploy_contract] = contract_address

            deployed_code = self.web3.eth.getCode(to_checksum_address(contract_address))

            if not deployed_code:
                raise RuntimeError('Contract address has no code, check gas usage.')

        hex_bytecode = solidity_resolve_symbols(contract['bin'], libraries)
        bytecode = decode_hex(hex_bytecode)

        contract['bin'] = bytecode

    if isinstance(contract['bin'], str):
        contract['bin'] = decode_hex(contract['bin'])

    if not constructor_parameters:
        constructor_parameters = ()

    contract = self.web3.eth.contract(abi=contract['abi'], bytecode=contract['bin'])
    contract_transaction = contract.constructor(*constructor_parameters).buildTransaction()
    transaction_hash = self.send_transaction(
        to=Address(b''),
        data=contract_transaction['data'],
        startgas=self._gas_estimate_correction(contract_transaction['gas']),
    )

    self.poll(transaction_hash)
    receipt = self.get_transaction_receipt(transaction_hash)
    contract_address = receipt['contractAddress']

    deployed_code = self.web3.eth.getCode(to_checksum_address(contract_address))

    if not deployed_code:
        raise RuntimeError(
            'Deployment of {} failed. Contract address has no code, check gas usage.'.format(
                contract_name,
            ),
        )

    return self.new_contract_proxy(contract_interface, contract_address), receipt
def new_netting_channel(
        self,
        partner: typing.Address,
        settle_timeout: int,
) -> typing.ChannelID:
    """ Creates a new channel in the TokenNetwork contract.

    Args:
        partner: The peer to open the channel with.
        settle_timeout: The settle timout to use for this channel.

    Returns:
        The address of the new netting channel.
    """
    if not is_binary_address(partner):
        raise InvalidAddress('Expected binary address format for channel partner')

    invalid_timeout = (
        settle_timeout < self.settlement_timeout_min() or
        settle_timeout > self.settlement_timeout_max()
    )
    if invalid_timeout:
        raise InvalidSettleTimeout('settle_timeout must be in range [{}, {}], is {}'.format(
            self.settlement_timeout_min(),
            self.settlement_timeout_max(),
            settle_timeout,
        ))

    if self.node_address == partner:
        raise SamePeerAddress('The other peer must not have the same address as the client.')

    # Prevent concurrent attempts to open a channel with the same token and
    # partner address.
    if partner not in self.open_channel_transactions:
        new_open_channel_transaction = RaidenAsyncResult()
        self.open_channel_transactions[partner] = new_open_channel_transaction

        try:
            transaction_hash = self._new_netting_channel(partner, settle_timeout)
        except Exception as e:
            # propagate the failure to the waiting greenlets, then re-raise
            new_open_channel_transaction.set_exception(e)
            raise
        else:
            new_open_channel_transaction.set(transaction_hash)
        finally:
            self.open_channel_transactions.pop(partner, None)
    else:
        # All other concurrent threads should block on the result of opening this channel
        self.open_channel_transactions[partner].get()

    channel_created = self.channel_exists(self.node_address, partner)
    if channel_created is False:
        log.error(
            'creating new channel failed',
            peer1=pex(self.node_address),
            peer2=pex(partner),
        )
        raise RuntimeError('creating new channel failed')

    channel_identifier = self.detail_channel(self.node_address, partner)['channel_identifier']

    log.info(
        'new_netting_channel called',
        peer1=pex(self.node_address),
        peer2=pex(partner),
        channel_identifier=encode_hex(channel_identifier),
    )

    return channel_identifier
def set_total_deposit(
        self,
        channel_identifier: typing.ChannelID,
        total_deposit: typing.TokenAmount,
        partner: typing.Address,
):
    """ Set total token deposit in the channel to total_deposit.

    Raises:
        ChannelBusyError: If the channel is busy with another operation
        RuntimeError: If the token address is empty.
    """
    if not isinstance(total_deposit, int):
        raise ValueError('total_deposit needs to be an integral number.')

    self._check_for_outdated_channel(
        self.node_address,
        partner,
        channel_identifier,
    )

    token_address = self.token_address()
    token = Token(self.client, token_address)

    with self.channel_operations_lock[partner], self.deposit_lock:
        # setTotalDeposit requires a monotonically increasing value. This
        # is used to handle concurrent actions:
        #
        #  - The deposits will be done in order, i.e. the monotonic
        #  property is preserved by the caller
        #  - The race of two deposits will be resolved with the larger
        #  deposit winning
        #  - Retries wont have effect
        #
        # This check is serialized with the channel_operations_lock to avoid
        # sending invalid transactions on-chain (decreasing total deposit).
        #
        current_deposit = self.detail_participant(self.node_address, partner)['deposit']
        amount_to_deposit = total_deposit - current_deposit

        if total_deposit < current_deposit:
            raise DepositMismatch(
                f'Current deposit ({current_deposit}) is already larger '
                f'than the requested total deposit amount ({total_deposit})',
            )
        if amount_to_deposit <= 0:
            raise ValueError(f'deposit {amount_to_deposit} must be greater than 0.')

        # A node may be setting up multiple channels for the same token
        # concurrently. Because each deposit changes the user balance this
        # check must be serialized with the operation locks.
        #
        # This check is merely informational, used to avoid sending
        # transactions which are known to fail.
        #
        # It is serialized with the deposit_lock to avoid sending invalid
        # transactions on-chain (account without balance). The lock
        # channel_operations_lock is not sufficient, as it allows two
        # concurrent deposits for different channels.
        #
        current_balance = token.balance_of(self.node_address)
        if current_balance < amount_to_deposit:
            raise ValueError(
                f'deposit {amount_to_deposit} can not be larger than the '
                f'available balance {current_balance}, '
                f'for token at address {pex(token_address)}',
            )

        # If there are channels being set up concurrenlty either the
        # allowance must be accumulated *or* the calls to `approve` and
        # `setTotalDeposit` must be serialized. This is necessary otherwise
        # the deposit will fail.
        #
        # Calls to approve and setTotalDeposit are serialized with the
        # deposit_lock to avoid transaction failure, because with two
        # concurrent deposits, we may have the transactions executed in the
        # following order
        #
        # - approve
        # - approve
        # - setTotalDeposit
        # - setTotalDeposit
        #
        # in which case  the second `approve` will overwrite the first,
        # and the first `setTotalDeposit` will consume the allowance,
        #  making the second deposit fail.
        token.approve(self.address, amount_to_deposit)

        log_details = {
            'token_network': pex(self.address),
            'node': pex(self.node_address),
            'partner': pex(partner),
            'total_deposit': total_deposit,
            'amount_to_deposit': amount_to_deposit,
            'id': id(self),
        }
        log.info('deposit called', **log_details)

        transaction_hash = self.proxy.transact(
            'setTotalDeposit',
            self.node_address,
            total_deposit,
            partner,
        )
        self.client.poll(transaction_hash)

        receipt_or_none = check_transaction_threw(self.client, transaction_hash)

        if receipt_or_none:
            # diagnose the most likely cause before raising
            if token.allowance(self.node_address, self.address) < amount_to_deposit:
                log_msg = (
                    'deposit failed. The allowance is insufficient, check concurrent deposits '
                    'for the same token network but different proxies.'
                )
            elif token.balance_of(self.node_address) < amount_to_deposit:
                log_msg = 'deposit failed. The address doesnt have funds'
            else:
                log_msg = 'deposit failed'

            log.critical(log_msg, **log_details)

            channel_opened = self.channel_is_opened(self.node_address, partner)
            if channel_opened is False:
                raise ChannelIncorrectStateError(
                    'Channel is not in an opened state. A deposit cannot be made',
                )
            raise TransactionThrew('Deposit', receipt_or_none)

        log.info('deposit successful', **log_details)
def start(self):
    """Start the alarm task with a fresh stop event."""
    log.debug('Alarm task started', node=pex(self.chain.node_address))
    # a new AsyncResult so a previously-stopped task can be restarted
    self._stop_event = AsyncResult()
    super().start()
def settle(
        self,
        channel_identifier: typing.ChannelID,
        transferred_amount: int,
        locked_amount: int,
        locksroot: typing.Locksroot,
        partner: typing.Address,
        partner_transferred_amount: int,
        partner_locked_amount: int,
        partner_locksroot: typing.Locksroot,
):
    """ Settle the channel.

    Raises:
        ChannelBusyError: If the channel is busy with another operation
    """
    log_details = {
        'token_network': pex(self.address),
        'node': pex(self.node_address),
        'partner': pex(partner),
        'transferred_amount': transferred_amount,
        'locked_amount': locked_amount,
        'locksroot': encode_hex(locksroot),
        'partner_transferred_amount': partner_transferred_amount,
        'partner_locked_amount': partner_locked_amount,
        'partner_locksroot': encode_hex(partner_locksroot),
    }
    log.info('settle called', **log_details)

    self._check_for_outdated_channel(
        self.node_address,
        partner,
        channel_identifier,
    )

    with self.channel_operations_lock[partner]:
        if self._verify_settle_state(
                transferred_amount,
                locked_amount,
                locksroot,
                partner,
                partner_transferred_amount,
                partner_locked_amount,
                partner_locksroot,
        ) is False:
            raise ChannelIncorrectStateError('local state can not be used to call settle')

        our_maximum = transferred_amount + locked_amount
        partner_maximum = partner_transferred_amount + partner_locked_amount

        # The second participant transferred + locked amount must be higher
        our_bp_is_larger = our_maximum > partner_maximum

        if our_bp_is_larger:
            transaction_hash = self.proxy.transact(
                'settleChannel',
                partner,
                partner_transferred_amount,
                partner_locked_amount,
                partner_locksroot,
                self.node_address,
                transferred_amount,
                locked_amount,
                locksroot,
            )
        else:
            transaction_hash = self.proxy.transact(
                'settleChannel',
                self.node_address,
                transferred_amount,
                locked_amount,
                locksroot,
                partner,
                partner_transferred_amount,
                partner_locked_amount,
                partner_locksroot,
            )

        self.client.poll(transaction_hash)
        receipt_or_none = check_transaction_threw(self.client, transaction_hash)

        if receipt_or_none:
            # distinguish "already settled" from "not yet closed" from
            # a generic failure before raising
            channel_exists = self.channel_exists(self.node_address, partner)

            if not channel_exists:
                log.info('settle failed, channel already settled', **log_details)
                raise ChannelIncorrectStateError(
                    'Channel already settled or non-existent',
                )

            channel_closed = self.channel_is_closed(self.node_address, partner)
            if channel_closed is False:
                log.info('settle failed, channel is not closed', **log_details)
                raise ChannelIncorrectStateError(
                    'Channel is not in a closed state. It cannot be settled',
                )

            log.info('settle failed', **log_details)
            raise TransactionThrew('Settle', receipt_or_none)

        log.info('settle successful', **log_details)
def __repr__(self): return '<PaymentNetworkState id:{}>'.format(pex(self.address))
def test_transfer(raiden_network):
    """End-to-end direct transfer between two directly connected nodes.

    Sends a single DirectTransfer from app0 to app1 and verifies:
    - both channel ends agree on the new balances,
    - exactly one DirectTransfer and one Ack were exchanged on the wire,
    - the per-node message logs record the expected sent/received messages.
    """
    app0, app1 = raiden_network  # pylint: disable=unbalanced-tuple-unpacking

    messages = setup_messages_cb()
    mlogger = MessageLogger()

    a0_address = pex(app0.raiden.address)
    a1_address = pex(app1.raiden.address)

    # fix: dict views are not indexable on Python 3, materialize them first
    asset_manager0 = list(app0.raiden.managers_by_asset_address.values())[0]
    asset_manager1 = list(app1.raiden.managers_by_asset_address.values())[0]

    channel0 = asset_manager0.partneraddress_channel[app1.raiden.address]
    channel1 = asset_manager1.partneraddress_channel[app0.raiden.address]

    balance0 = channel0.balance
    balance1 = channel1.balance

    assert asset_manager0.asset_address == asset_manager1.asset_address
    assert app1.raiden.address in asset_manager0.partneraddress_channel

    amount = 10
    app0.raiden.api.transfer(
        asset_manager0.asset_address,
        amount,
        target=app1.raiden.address,
    )
    # transfers are asynchronous, give the nodes time to settle the exchange
    gevent.sleep(1)

    assert_synched_channels(
        channel0, balance0 - amount, [],
        channel1, balance1 + amount, [],
    )

    assert len(messages) == 2  # DirectTransfer, Ack

    directtransfer_message = decode(messages[0])
    assert isinstance(directtransfer_message, DirectTransfer)
    assert directtransfer_message.transfered_amount == amount

    ack_message = decode(messages[1])
    assert isinstance(ack_message, Ack)
    assert ack_message.echo == directtransfer_message.hash

    # app0 sent the transfer and received its Ack
    a0_messages = mlogger.get_node_messages(a0_address)
    assert len(a0_messages) == 2
    assert isinstance(a0_messages[0], DirectTransfer)
    assert isinstance(a0_messages[1], Ack)

    a0_sent_messages = mlogger.get_node_messages(a0_address, only='sent')
    assert len(a0_sent_messages) == 1
    assert isinstance(a0_sent_messages[0], DirectTransfer)

    a0_recv_messages = mlogger.get_node_messages(a0_address, only='recv')
    assert len(a0_recv_messages) == 1
    assert isinstance(a0_recv_messages[0], Ack)

    # app1 received the transfer and sent the Ack
    a1_messages = mlogger.get_node_messages(a1_address)
    assert len(a1_messages) == 2
    assert isinstance(a1_messages[0], Ack)
    assert isinstance(a1_messages[1], DirectTransfer)

    a1_sent_messages = mlogger.get_node_messages(a1_address, only='sent')
    assert len(a1_sent_messages) == 1
    assert isinstance(a1_sent_messages[0], Ack)

    a1_recv_messages = mlogger.get_node_messages(a1_address, only='recv')
    assert len(a1_recv_messages) == 1
    assert isinstance(a1_recv_messages[0], DirectTransfer)
def __repr__(self): return '<ReceiveSecretReveal secrethash:{} sender:{}>'.format( pex(self.secrethash), pex(self.sender), )
def __repr__(self): return '<TokenNetworkState id:{} token:{}>'.format( pex(self.address), pex(self.token_address), )
def on_transfer(self, transfer):
    """ This handles the echo logic, as described in
    https://github.com/raiden-network/raiden/issues/651:

        - for transfers with an amount that satisfies `amount % 3 == 0`, it
          sends a transfer with an amount of `amount - 1` back to the initiator
        - for transfers with a "lucky number" amount `amount == 7` it does not
          send anything back immediately -- after having received "lucky number
          transfers" from 7 different addresses it sends a transfer with
          `amount = 49` to one randomly chosen one (from the 7 lucky addresses)
        - consecutive entries to the lucky lottery will receive the current
          pool size as the `echo_amount`
        - for all other transfers it sends a transfer with the same `amount`
          back to the initiator

    Args:
        transfer (dict): received transfer data; this code reads the
            'amount', 'initiator' and 'identifier' keys.
    """
    echo_amount = 0
    if transfer['amount'] % 3 == 0:
        # "minus one" rule: echo back amount - 1
        log.debug(
            'minus one transfer received',
            initiator=pex(transfer['initiator']),
            amount=transfer['amount'],
            identifier=transfer['identifier'],
        )
        echo_amount = transfer['amount'] - 1
    elif transfer['amount'] == 7:
        # lucky-number lottery rule
        log.debug(
            'lucky number transfer received',
            initiator=pex(transfer['initiator']),
            amount=transfer['amount'],
            identifier=transfer['identifier'],
            poolsize=self.lottery_pool.qsize(),
        )

        # obtain a local copy of the pool
        pool = self.lottery_pool.copy()
        tickets = [pool.get() for _ in range(pool.qsize())]
        assert pool.empty()
        del pool

        if any(ticket['initiator'] == transfer['initiator'] for ticket in tickets):
            # the initiator already has a ticket in the pool
            assert transfer not in tickets
            log.debug(
                'duplicate lottery entry',
                initiator=pex(transfer['initiator']),
                identifier=transfer['identifier'],
                poolsize=len(tickets),
            )
            # signal the poolsize to the participant
            echo_amount = len(tickets)

        # payout
        elif len(tickets) == 6:
            # this entry is the 7th distinct address: draw a winner
            log.info('payout!')
            # reset the pool
            assert self.lottery_pool.qsize() == 6
            self.lottery_pool = Queue()
            # add new participant
            tickets.append(transfer)
            # choose the winner
            transfer = random.choice(tickets)
            echo_amount = 49
        else:
            # not enough participants yet, keep the ticket pooled
            self.lottery_pool.put(transfer)
    else:
        # default rule: echo the same amount back
        log.debug(
            'echo transfer received',
            initiator=pex(transfer['initiator']),
            amount=transfer['amount'],
            identifier=transfer['identifier'],
        )
        echo_amount = transfer['amount']

    if echo_amount:
        # the echo identifier is derived from the original identifier so the
        # initiator can correlate the echo with its own transfer
        log.debug(
            'sending echo transfer',
            target=pex(transfer['initiator']),
            amount=echo_amount,
            orig_identifier=transfer['identifier'],
            echo_identifier=transfer['identifier'] + echo_amount,
            token_address=pex(self.token_address),
            num_handled_transfers=self.num_handled_transfers + 1,
        )

        self.api.transfer_and_wait(
            self.token_address,
            echo_amount,
            transfer['initiator'],
            identifier=transfer['identifier'] + echo_amount,
        )

    self.num_handled_transfers += 1
def __init__(
        self,
        web3: Web3,
        privkey: bytes,
        gas_price_strategy: Callable = rpc_gas_price_strategy,
        gas_estimate_correction: Callable = lambda gas: gas,
        block_num_confirmations: int = 0,
        uses_infura=False,
):
    """ Initialize the JSON-RPC client.

    Validates the private key, detects the Ethereum client implementation
    from its reported version string, and discovers the next available
    transaction nonce using a client-specific strategy.

    Args:
        web3: Connected Web3 instance.
        privkey: 32-byte private key of this node's account.
        gas_price_strategy: Strategy callable monkey-patched into web3.
        gas_estimate_correction: Callable applied to gas estimates.
        block_num_confirmations: Number of confirmations to require;
            must not be negative (zero is accepted despite the wording of
            the error message).
        uses_infura: If True, the nonce is queried via the 'pending'
            transaction count instead of the client-specific discovery.

    Raises:
        ValueError: On an invalid private key or negative confirmations.
        EthNodeCommunicationError: If the node cannot be reached.
        EthNodeInterfaceError: If the client implementation is unsupported.
    """
    if privkey is None or len(privkey) != 32:
        raise ValueError('Invalid private key')

    if block_num_confirmations < 0:
        raise ValueError('Number of confirmations has to be positive', )

    monkey_patch_web3(web3, gas_price_strategy)

    try:
        version = web3.version.node
    except ConnectTimeout:
        raise EthNodeCommunicationError('couldnt reach the ethereum node')

    _, eth_node = is_supported_client(version)

    address = privatekey_to_address(privkey)
    address_checksumed = to_checksum_address(address)

    if uses_infura:
        warnings.warn(
            'Infura does not provide an API to '
            'recover the latest used nonce. This may cause the Raiden node '
            'to error on restarts.\n'
            'The error will manifest while there is a pending transaction '
            'from a previous execution in the Ethereum\'s client pool. When '
            'Raiden restarts the same transaction with the same nonce will '
            'be retried and *rejected*, because the nonce is already used.',
        )
        # The first valid nonce is 0, therefore the count is already the next
        # available nonce
        available_nonce = web3.eth.getTransactionCount(address_checksumed, 'pending')
    elif eth_node == constants.EthClient.PARITY:
        parity_assert_rpc_interfaces(web3)
        available_nonce = parity_discover_next_available_nonce(
            web3,
            address_checksumed,
        )
    elif eth_node == constants.EthClient.GETH:
        geth_assert_rpc_interfaces(web3)
        available_nonce = geth_discover_next_available_nonce(
            web3,
            address_checksumed,
        )
    else:
        raise EthNodeInterfaceError(f'Unsupported Ethereum client {version}')

    self.eth_node = eth_node
    self.privkey = privkey
    self.address = address
    self.web3 = web3
    self.default_block_num_confirmations = block_num_confirmations

    # Nonce bookkeeping is serialized with a semaphore so concurrent
    # transactions don't reuse the same nonce.
    self._available_nonce = available_nonce
    self._nonce_lock = Semaphore()
    self._gas_estimate_correction = gas_estimate_correction

    log.debug(
        'JSONRPCClient created',
        node=pex(self.address),
        available_nonce=available_nonce,
        client=version,
    )
def register_channel(self, netting_channel, reveal_timeout):
    """ Register a new channel.

    Installs the blockchain event filters (new balance, secret revealed,
    closed, settled), builds the in-memory Channel object from the on-chain
    details, registers it in the lookup maps and the channel graph, and only
    then starts the listener tasks.

    Args:
        netting_channel (network.rpc.client.NettingChannel): The netting channel proxy.
        reveal_timeout (int): Minimum number of blocks required by this node
            to see a secret.

    Raises:
        ValueError: If raiden.address is not one of the participants in the
            netting channel.
    """
    translator = ContractTranslator(NETTING_CHANNEL_ABI)

    # race condition:
    # - if the filter is installed after a deposit is made it could be
    #   missed, to avoid that we first install the filter, then request the
    #   state from the node and then poll the filter.
    # - with the above strategy the same deposit could be handled twice,
    #   once from the status received from the netting contract and once
    #   from the event, to avoid problems the balance instead of the
    #   deposit is used.
    task_name = 'ChannelNewBalance {}'.format(pex(netting_channel.address))
    newbalance = netting_channel.channelnewbalance_filter()
    newbalance_listener = LogListenerTask(
        task_name,
        newbalance,
        self.raiden.on_event,
        translator,
    )

    task_name = 'ChannelSecretRevelead {}'.format(pex(netting_channel.address))
    secretrevealed = netting_channel.channelsecretrevealed_filter()
    secretrevealed_listener = LogListenerTask(
        task_name,
        secretrevealed,
        self.raiden.on_event,
        translator,
    )

    task_name = 'ChannelClosed {}'.format(pex(netting_channel.address))
    close = netting_channel.channelclosed_filter()
    close_listener = LogListenerTask(
        task_name,
        close,
        self.raiden.on_event,
        translator,
    )

    task_name = 'ChannelSettled {}'.format(pex(netting_channel.address))
    settled = netting_channel.channelsettled_filter()
    settled_listener = LogListenerTask(
        task_name,
        settled,
        self.raiden.on_event,
        translator,
    )

    # query the current on-chain state only after the filters exist
    channel_details = netting_channel.detail(self.raiden.address)
    our_state = ChannelEndState(
        channel_details['our_address'],
        channel_details['our_balance'],
    )
    partner_state = ChannelEndState(
        channel_details['partner_address'],
        channel_details['partner_balance'],
    )

    external_state = ChannelExternalState(
        self.raiden.alarm.register_callback,
        self.register_channel_for_hashlock,
        self.raiden.chain.block_number,
        netting_channel,
    )

    channel = Channel(
        our_state,
        partner_state,
        external_state,
        self.asset_address,
        reveal_timeout,
        channel_details['settle_timeout'],
    )

    # channels are reachable both by partner address and contract address
    self.partneraddress_channel[partner_state.address] = channel
    self.address_channel[netting_channel.address] = channel

    self.channelgraph.add_path(
        channel_details['our_address'],
        channel_details['partner_address'],
    )

    # start polling only after the channel is fully registered
    newbalance_listener.start()
    secretrevealed_listener.start()
    close_listener.start()
    settled_listener.start()

    self.raiden.event_listeners.append(newbalance_listener)
    self.raiden.event_listeners.append(secretrevealed_listener)
    self.raiden.event_listeners.append(close_listener)
    self.raiden.event_listeners.append(settled_listener)
def __repr__(self): return '<RouteState hop:{node} channel:{channel}>'.format( node=pex(self.node_address), channel=pex(self.channel_identifier), )
def __repr__(self): return '<HashTimeLockState amount:{} expiration:{} secrethash:{}>'.format( self.amount, self.expiration, pex(self.secrethash), )
def start(self):
    """ Start the node synchronously. Raises directly if anything went wrong on startup.

    Startup sequence (order matters, see inline comments):
    database lock -> UDP endpoint registration (async) -> DB upgrade ->
    WAL restore (or initial state creation) -> blockchain filter install ->
    alarm first run -> queue initialization -> transport start ->
    alarm start -> healthcheck.
    """
    if not self.stop_event.ready():
        raise RuntimeError(f'{self!r} already started')
    self.stop_event.clear()

    if self.database_dir is not None:
        self.db_lock.acquire(timeout=0)
        assert self.db_lock.is_locked

    # start the registration early to speed up the start
    if self.config['transport_type'] == 'udp':
        endpoint_registration_greenlet = gevent.spawn(
            self.discovery.register,
            self.address,
            self.config['transport']['udp']['external_ip'],
            self.config['transport']['udp']['external_port'],
        )

    self.maybe_upgrade_db()

    storage = sqlite.SerializedSQLiteStorage(
        database_path=self.database_path,
        serializer=serialize.JSONSerializer(),
    )
    storage.log_run()
    self.wal = wal.restore_to_state_change(
        transition_function=node.state_transition,
        storage=storage,
        state_change_identifier='latest',
    )

    if self.wal.state_manager.current_state is None:
        log.debug(
            'No recoverable state available, created inital state',
            node=pex(self.address),
        )
        # On first run Raiden needs to fetch all events for the payment
        # network, to reconstruct all token network graphs and find opened
        # channels
        last_log_block_number = self.query_start_block

        state_change = ActionInitChain(
            random.Random(),
            last_log_block_number,
            self.chain.node_address,
            self.chain.network_id,
        )
        self.handle_state_change(state_change)

        payment_network = PaymentNetworkState(
            self.default_registry.address,
            [],  # empty list of token network states as it's the node's startup
        )
        state_change = ContractReceiveNewPaymentNetwork(
            constants.EMPTY_HASH,
            payment_network,
            last_log_block_number,
        )
        self.handle_state_change(state_change)
    else:
        # The `Block` state change is dispatched only after all the events
        # for that given block have been processed, filters can be safely
        # installed starting from this position without losing events.
        last_log_block_number = views.block_number(self.wal.state_manager.current_state)
        log.debug(
            'Restored state from WAL',
            last_restored_block=last_log_block_number,
            node=pex(self.address),
        )

        # Guard against starting with a database produced for a different
        # registry contract.
        known_networks = views.get_payment_network_identifiers(views.state_from_raiden(self))
        if known_networks and self.default_registry.address not in known_networks:
            configured_registry = pex(self.default_registry.address)
            known_registries = lpex(known_networks)
            raise RuntimeError(
                f'Token network address mismatch.\n'
                f'Raiden is configured to use the smart contract '
                f'{configured_registry}, which conflicts with the current known '
                f'smart contracts {known_registries}',
            )

    # Restore the current snapshot group
    state_change_qty = self.wal.storage.count_state_changes()
    self.snapshot_group = state_change_qty // SNAPSHOT_STATE_CHANGES_COUNT

    # Install the filters using the correct from_block value, otherwise
    # blockchain logs can be lost.
    self.install_all_blockchain_filters(
        self.default_registry,
        self.default_secret_registry,
        last_log_block_number,
    )

    # Complete the first_run of the alarm task and synchronize with the
    # blockchain since the last run.
    #
    # Notes about setup order:
    # - The filters must be polled after the node state has been primed,
    #   otherwise the state changes won't have effect.
    # - The alarm must complete its first run before the transport is started,
    #   to reject messages for closed/settled channels.
    self.alarm.register_callback(self._callback_new_block)
    with self.dispatch_events_lock:
        self.alarm.first_run(last_log_block_number)

    chain_state = views.state_from_raiden(self)
    self._initialize_transactions_queues(chain_state)
    self._initialize_whitelists(chain_state)

    # send messages in queue before starting transport,
    # this is necessary to avoid a race where, if the transport is started
    # before the messages are queued, actions triggered by it can cause new
    # messages to be enqueued before these older ones
    self._initialize_messages_queues(chain_state)

    # The transport must not ever be started before the alarm task's
    # `first_run()` has been, because it's this method which synchronizes the
    # node with the blockchain, including the channel's state (if the channel
    # is closed on-chain new messages must be rejected, which will not be the
    # case if the node is not synchronized)
    self.transport.start(
        raiden_service=self,
        message_handler=self.message_handler,
        prev_auth_data=chain_state.last_transport_authdata,
    )

    # First run has been called above!
    self.alarm.start()

    # exceptions on these subtasks should crash the app and bubble up
    self.alarm.link_exception(self.on_error)
    self.transport.link_exception(self.on_error)

    # Health check needs the transport layer
    self.start_neighbours_healthcheck(chain_state)

    if self.config['transport_type'] == 'udp':
        endpoint_registration_greenlet.get()  # re-raise if exception occurred

    log.debug('Raiden Service started', node=pex(self.address))
    super().start()
def __repr__(self): return '<ActionChangeNodeNetworkState node:{} state:{}>'.format( pex(self.node_address), self.network_state, )
def healthcheck(
        transport: 'UDPTransport',
        recipient: Address,
        stop_event: Event,
        event_healthy: Event,
        event_unhealthy: Event,
        nat_keepalive_retries: int,
        nat_keepalive_timeout: int,
        nat_invitation_timeout: int,
        ping_nonce: Dict[str, int],
):
    """ Sends a periodical Ping to `recipient` to check its health.

    Runs until `stop_event` is set. Maintains `event_healthy` /
    `event_unhealthy` as mutually exclusive flags other tasks wait on, and
    mirrors the detected state into the transport via
    `set_node_network_state`.
    """
    # pylint: disable=too-many-branches

    log.debug(
        'starting healthcheck for',
        node=pex(transport.address),
        to=pex(recipient),
    )

    # The state of the node is unknown, the events are set to allow the tasks
    # to do work.
    last_state = NODE_NETWORK_UNKNOWN
    transport.set_node_network_state(
        recipient,
        last_state,
    )

    # Always call `clear` before `set`, since only `set` does context-switches
    # it's easier to reason about tasks that are waiting on both events.

    # Wait for the end-point registration or for the node to quit
    try:
        transport.get_host_port(recipient)
    except UnknownAddress:
        log.debug(
            'waiting for endpoint registration',
            node=pex(transport.address),
            to=pex(recipient),
        )

        event_healthy.clear()
        event_unhealthy.set()

        # Poll the discovery with exponential backoff until the endpoint
        # becomes known or we are told to stop.
        backoff = udp_utils.timeout_exponential_backoff(
            nat_keepalive_retries,
            nat_keepalive_timeout,
            nat_invitation_timeout,
        )
        sleep = next(backoff)

        while not stop_event.wait(sleep):
            try:
                transport.get_host_port(recipient)
            except UnknownAddress:
                sleep = next(backoff)
            else:
                break

    # Don't wait to send the first Ping and to start sending messages if the
    # endpoint is known
    sleep = 0
    event_unhealthy.clear()
    event_healthy.set()

    while not stop_event.wait(sleep):
        sleep = nat_keepalive_timeout

        ping_nonce['nonce'] += 1
        messagedata = transport.get_ping(ping_nonce['nonce'])
        message_id = ('ping', ping_nonce['nonce'], recipient)

        # Send Ping a few times before setting the node as unreachable
        acknowledged = udp_utils.retry(
            transport,
            messagedata,
            message_id,
            recipient,
            stop_event,
            [nat_keepalive_timeout] * nat_keepalive_retries,
        )

        if stop_event.is_set():
            return

        if not acknowledged:
            log.debug(
                'node is unresponsive',
                node=pex(transport.address),
                to=pex(recipient),
                current_state=last_state,
                new_state=NODE_NETWORK_UNREACHABLE,
                retries=nat_keepalive_retries,
                timeout=nat_keepalive_timeout,
            )

            # The node is not healthy, clear the event to stop all queue
            # tasks
            last_state = NODE_NETWORK_UNREACHABLE
            transport.set_node_network_state(
                recipient,
                last_state,
            )
            event_healthy.clear()
            event_unhealthy.set()

            # Retry until recovery, used for:
            # - Checking node status.
            # - Nat punching.
            acknowledged = udp_utils.retry(
                transport,
                messagedata,
                message_id,
                recipient,
                stop_event,
                repeat(nat_invitation_timeout),
            )

        if acknowledged:
            current_state = views.get_node_network_status(
                views.state_from_raiden(transport.raiden),
                recipient,
            )
            if last_state != NODE_NETWORK_REACHABLE:
                log.debug(
                    'node answered',
                    node=pex(transport.raiden.address),
                    to=pex(recipient),
                    current_state=current_state,
                    new_state=NODE_NETWORK_REACHABLE,
                )

                last_state = NODE_NETWORK_REACHABLE
                transport.set_node_network_state(
                    recipient,
                    last_state,
                )
                event_unhealthy.clear()
                event_healthy.set()
def __repr__(self): return '<{} {}>'.format(self.__class__.__name__, pex(self.address))
def __repr__(self): return '<ActionInitChain block_number:{} block_hash:{} chain_id:{}>'.format( self.block_number, pex(self.block_hash), self.chain_id, )
def __repr__(self): return '<ReceiveUnlock msgid:{} secrethash:{} balance_proof:{}>'.format( self.message_identifier, pex(self.secrethash), self.balance_proof, )
def __repr__(self): return '<ReceiveProcessed msgid:{} sender:{}>'.format( self.message_identifier, pex(self.sender), )
def __repr__(self): return '<ActionNewTokenNetwork network:{} token:{}>'.format( pex(self.payment_network_identifier), self.token_network, )
def __repr__(self): return '<SendRevealSecret msgid:{} secrethash:{} recipient:{}>'.format( self.message_identifier, pex(self.secrethash), pex(self.recipient), )
def __repr__(self): return '<SendLockedTransfer msgid:{} transfer:{} recipient:{}>'.format( self.message_identifier, self.transfer, pex(self.recipient), )
def __repr__(self): return '<EventWithdrawFailed id:{} secrethash:{} reason:{}>'.format( self.identifier, pex(self.secrethash), self.reason, )
def __repr__(self): return '<ContractReceiveRouteClosed token_network:{} id:{} block:{}>'.format( pex(self.token_network_identifier), self.channel_identifier, self.block_number, )
def __repr__(self): return '<NettingChannelEndState address:{} contract_balance:{} merkletree:{}>'.format( pex(self.address), self.contract_balance, self.merkletree, )
def __repr__(self): return '<TransactionChannelNewBalance participant:{} balance:{} at_block:{}>'.format( pex(self.participant_address), self.contract_balance, self.deposit_block_number, )
def __repr__(self): return '<MerkleTreeState root:{}>'.format( pex(merkleroot(self)), )
def __repr__(self): return '<EventWithdrawSuccess id:{} secrethash:{}>'.format( self.identifier, pex(self.secrethash), )