def test_new_channel_state(private_keys, tester_chain, tester_channelmanager):
    """ Tests the state of a newly created netting channel. """
    pkey0, pkey1 = private_keys

    events = list()
    settle_timeout = 10
    channel = new_nettingcontract(
        pkey0,
        pkey1,
        tester_chain,
        events.append,
        tester_channelmanager,
        settle_timeout,
    )

    # pylint: disable=no-member
    assert channel.settleTimeout(sender=pkey0) == settle_timeout
    # the channel must use the same token as its channel manager
    assert channel.tokenAddress(sender=pkey0) == tester_channelmanager.tokenAddress(sender=pkey0)
    # `opened` records the block of the deploying transaction (previous block)
    assert channel.opened(sender=pkey0) == tester_chain.block.number - 1
    # a freshly created channel is not closed (0 means never closed)
    assert channel.closed(sender=pkey0) == 0

    address_and_balances = channel.addressAndBalance(sender=pkey0)
    address0 = privatekey_to_address(pkey0)
    address1 = privatekey_to_address(pkey1)

    # both participants are registered and start with a zero balance
    assert address_and_balances[0] == address_encoder(address0)
    assert address_and_balances[1] == 0
    assert address_and_balances[2] == address_encoder(address1)
    assert address_and_balances[3] == 0
def test_channelnew_event(
        settle_timeout,
        tester_channelmanager,
        private_keys,
        tester_events):
    """ When a new channel is created the channel new event must be emitted. """
    pkey0 = private_keys[0]
    address0 = privatekey_to_address(pkey0)
    address1 = privatekey_to_address(private_keys[1])

    # pylint: disable=no-member
    netting_channel_address1_hex = tester_channelmanager.newChannel(
        address1,
        settle_timeout,
        sender=pkey0,
    )

    # the very last emitted event must be the ChannelNew for this channel
    last_event = event_decoder(tester_events[-1], tester_channelmanager.translator)
    assert last_event == {
        '_event_type': b'ChannelNew',
        'netting_channel': netting_channel_address1_hex,
        'participant1': address_encoder(address0),
        'participant2': address_encoder(address1),
        'settle_timeout': settle_timeout,
    }
def geth_bare_genesis(genesis_path, private_keys, random_marker):
    """Write a bare genesis block file to `genesis_path`.

    Args:
        genesis_path (str): the path in which the genesis block is written.
        private_keys list(str): iterable list of privatekeys whose
            corresponding accounts will have a premined balance available.
    """
    # deduplicate and sort the keys so the generated genesis is deterministic
    accounts = [
        privatekey_to_address(key)
        for key in sorted(set(private_keys))
    ]

    genesis = GENESIS_STUB.copy()
    genesis['alloc'].update({
        address_encoder(account): {'balance': DEFAULT_BALANCE_BIN}
        for account in accounts
    })
    genesis['config']['clique'] = {'period': 1, 'epoch': 30000}

    # embed the test marker plus the first account (without the '0x' prefix,
    # presumably the clique signer — confirm against clique_extradata)
    genesis['extraData'] = clique_extradata(
        random_marker,
        address_encoder(accounts[0])[2:],
    )

    with open(genesis_path, 'w') as handler:
        json.dump(genesis, handler)
def _check_exists(self):
    """ Raise AddressWithoutCode if there is no contract code deployed at
    `self.address`.
    """
    code = self.client.call(
        'eth_getCode',
        address_encoder(self.address),
        'latest',
    )

    # geth returns the literal string '0x' for an address without code
    if code == '0x':
        message = 'Netting channel address {} does not contain code'.format(
            address_encoder(self.address),
        )
        raise AddressWithoutCode(message)
def nonce(self, address):
    """ Return the next usable transaction nonce for `address`.

    The value is cached and incremented locally for consecutive calls; the
    server is queried again only after `nonce_update_interval` has elapsed
    (or immediately, if the clock went backwards).
    """
    # accept a 40-char hex address and normalize it to binary
    if len(address) == 40:
        address = unhexlify(address)

    with self.nonce_lock:
        initialized = self.nonce_current_value is not None
        query_time = now()

        if self.nonce_last_update > query_time:
            # Python's 2.7 time is not monotonic and it's affected by clock
            # resets, force an update.
            # NOTE(review): subtracting the *interval* from query_time looks
            # odd — `query_time - self.nonce_last_update` may have been
            # intended; confirm before relying on the interval value after
            # a clock reset.
            self.nonce_update_interval = query_time - self.nonce_update_interval
            needs_update = True
        else:
            last_update_interval = query_time - self.nonce_last_update
            needs_update = last_update_interval > self.nonce_update_interval

        # fast path: the cached value is fresh, just bump and return it
        if initialized and not needs_update:
            self.nonce_current_value += 1
            return self.nonce_current_value

        pending_transactions_hex = self.call(
            'eth_getTransactionCount',
            address_encoder(address),
            'pending',
        )
        pending_transactions = quantity_decoder(pending_transactions_hex)
        nonce = pending_transactions + self.nonce_offset

        # we may have hammered the server and not all tx are
        # registered as `pending` yet
        if initialized:
            while nonce < self.nonce_current_value:
                log.debug(
                    'nonce on server too low; retrying',
                    server=nonce,
                    local=self.nonce_current_value,
                )

                query_time = now()
                pending_transactions_hex = self.call(
                    'eth_getTransactionCount',
                    address_encoder(address),
                    'pending',
                )
                pending_transactions = quantity_decoder(pending_transactions_hex)
                nonce = pending_transactions + self.nonce_offset

        self.nonce_current_value = nonce
        self.nonce_last_update = query_time

        return self.nonce_current_value
def test_registry(tester_registry, tester_events, private_keys, tester_chain):
    """ The registry maps each token to exactly one channel manager, emits a
    TokenAdded event per registration, and rejects duplicate or unknown
    tokens.
    """
    privatekey0 = tester.k0

    token_address1 = tester_token_address(private_keys, 100, tester_chain, 0)
    token_address2 = tester_token_address(private_keys, 100, tester_chain, 1)
    unregistered_address = tester_token_address(private_keys, 100, tester_chain, 2)

    tester_chain.head_state.log_listeners.append(tester_events.append)

    contract_address1 = tester_registry.addToken(token_address1, sender=privatekey0)
    channel_manager_address1 = tester_registry.channelManagerByToken(
        token_address1,
        sender=privatekey0,
    )
    assert channel_manager_address1 == contract_address1

    # registering the same token twice must fail
    with pytest.raises(tester.TransactionFailed):
        tester_registry.addToken(token_address1, sender=privatekey0)

    contract_address2 = tester_registry.addToken(token_address2, sender=privatekey0)
    channel_manager_address2 = tester_registry.channelManagerByToken(
        token_address2,
        sender=privatekey0,
    )
    assert channel_manager_address2 == contract_address2

    # querying a token that was never registered must fail
    with pytest.raises(tester.TransactionFailed):
        tester_registry.channelManagerByToken(
            unregistered_address,
            sender=privatekey0,
        )

    addresses = tester_registry.tokenAddresses(sender=privatekey0)
    assert len(addresses) == 2
    assert addresses[0] == address_encoder(token_address1)
    assert addresses[1] == address_encoder(token_address2)

    # exactly one TokenAdded event per successful registration
    assert len(tester_events) == 2
    event0 = event_decoder(tester_events[0], tester_registry.translator)
    event1 = event_decoder(tester_events[1], tester_registry.translator)

    assert event0['_event_type'] == b'TokenAdded'
    assert event0['token_address'] == address_encoder(token_address1)
    assert event0['channel_manager_address'] == contract_address1

    assert event1['_event_type'] == b'TokenAdded'
    assert event1['token_address'] == address_encoder(token_address2)
    assert event1['channel_manager_address'] == contract_address2
def nonce(self, address):
    """ Return the next usable transaction nonce for `address`.

    The value is cached and incremented locally for consecutive calls; the
    server is queried again only after `nonce_update_interval` has elapsed
    (or immediately, if the clock went backwards).
    """
    # accept a 40-char hex address and normalize it to binary
    if len(address) == 40:
        address = unhexlify(address)

    with self.nonce_lock:
        initialized = self.nonce_current_value is not None
        query_time = now()

        if self.nonce_last_update > query_time:
            # Python's 2.7 time is not monotonic and it's affected by clock
            # resets, force an update.
            # NOTE(review): subtracting the *interval* from query_time looks
            # odd — `query_time - self.nonce_last_update` may have been
            # intended; confirm before relying on the interval value after
            # a clock reset.
            self.nonce_update_interval = query_time - self.nonce_update_interval
            needs_update = True
        else:
            last_update_interval = query_time - self.nonce_last_update
            needs_update = last_update_interval > self.nonce_update_interval

        # fast path: the cached value is fresh, just bump and return it
        if initialized and not needs_update:
            self.nonce_current_value += 1
            return self.nonce_current_value

        pending_transactions_hex = self.call(
            'eth_getTransactionCount',
            address_encoder(address),
            'pending',
        )
        pending_transactions = quantity_decoder(pending_transactions_hex)
        nonce = pending_transactions + self.nonce_offset

        # we may have hammered the server and not all tx are
        # registered as `pending` yet
        if initialized:
            while nonce < self.nonce_current_value:
                log.debug(
                    'nonce on server too low; retrying',
                    server=nonce,
                    local=self.nonce_current_value,
                )

                query_time = now()
                pending_transactions_hex = self.call(
                    'eth_getTransactionCount',
                    address_encoder(address),
                    'pending',
                )
                pending_transactions = quantity_decoder(pending_transactions_hex)
                nonce = pending_transactions + self.nonce_offset

        self.nonce_current_value = nonce
        self.nonce_last_update = query_time

        return self.nonce_current_value
def test_deposit(private_keys, tester_channelmanager, tester_chain, tester_token):
    """ A call to deposit must increase the available token amount in the
    netting channel.
    """
    pkey0 = private_keys[0]
    pkey1 = private_keys[1]
    address0 = address_encoder(privatekey_to_address(pkey0))
    address1 = address_encoder(privatekey_to_address(pkey1))

    settle_timeout = 10
    events = list()

    # not using the tester_nettingcontracts fixture because it has a set balance
    channel = new_nettingcontract(
        pkey0,
        pkey1,
        tester_chain,
        events.append,
        tester_channelmanager,
        settle_timeout,
    )

    deposit = 100

    # cannot deposit without approving
    assert channel.deposit(deposit, sender=pkey0) is False

    assert tester_token.approve(channel.address, deposit, sender=pkey0) is True

    # cannot deposit negative values
    with pytest.raises(abi.ValueOutOfBounds):
        channel.deposit(-1, sender=pkey0)

    # balances are still untouched at this point
    zero_state = (address0, 0, address1, 0)
    assert tuple(channel.addressAndBalance(sender=pkey0)) == zero_state

    assert channel.deposit(deposit, sender=pkey0) is True

    # the deposit is credited to participant 0 and held by the channel contract
    deposit_state = (address0, deposit, address1, 0)
    assert tuple(channel.addressAndBalance(sender=pkey0)) == deposit_state
    assert tester_token.balanceOf(channel.address, sender=pkey0) == deposit

    # cannot over deposit (the allowance is depleted)
    assert channel.deposit(deposit, sender=pkey0) is False

    # a fresh approval allows a second deposit
    assert tester_token.approve(channel.address, deposit, sender=pkey0) is True
    assert channel.deposit(deposit, sender=pkey0) is True

    second_deposit_state = (address0, deposit * 2, address1, 0)
    assert tuple(channel.addressAndBalance(sender=pkey0)) == second_deposit_state
def test_deposit(private_keys, tester_channelmanager, tester_chain, tester_token):
    """ A call to deposit must increase the available token amount in the
    netting channel.
    """
    pkey0 = private_keys[0]
    pkey1 = private_keys[1]
    address0 = address_encoder(privatekey_to_address(pkey0))
    address1 = address_encoder(privatekey_to_address(pkey1))

    settle_timeout = 10
    events = list()

    # not using the tester_nettingcontracts fixture because it has a set balance
    channel = new_nettingcontract(
        pkey0,
        pkey1,
        tester_chain,
        events.append,
        tester_channelmanager,
        settle_timeout,
    )

    deposit = 100

    # cannot deposit without approving
    assert channel.deposit(deposit, sender=pkey0) is False

    assert tester_token.approve(channel.address, deposit, sender=pkey0) is True

    # cannot deposit negative values
    with pytest.raises(abi.ValueOutOfBounds):
        channel.deposit(-1, sender=pkey0)

    # balances are still untouched at this point
    zero_state = (address0, 0, address1, 0)
    assert tuple(channel.addressAndBalance(sender=pkey0)) == zero_state

    assert channel.deposit(deposit, sender=pkey0) is True

    # the deposit is credited to participant 0 and held by the channel contract
    deposit_state = (address0, deposit, address1, 0)
    assert tuple(channel.addressAndBalance(sender=pkey0)) == deposit_state
    assert tester_token.balanceOf(channel.address, sender=pkey0) == deposit

    # cannot over deposit (the allowance is depleted)
    assert channel.deposit(deposit, sender=pkey0) is False

    # a fresh approval allows a second deposit
    assert tester_token.approve(channel.address, deposit, sender=pkey0) is True
    assert channel.deposit(deposit, sender=pkey0) is True

    second_deposit_state = (address0, deposit * 2, address1, 0)
    assert tuple(channel.addressAndBalance(sender=pkey0)) == second_deposit_state
def to_dict(self):
    """ Serialize the message into a JSON-friendly dict.

    Binary fields (secret, addresses, locksroot, signature) are hex-encoded;
    numeric fields are passed through unchanged.
    """
    return {
        'type': self.__class__.__name__,
        'message_identifier': self.message_identifier,
        'payment_identifier': self.payment_identifier,
        'secret': data_encoder(self.secret),
        'nonce': self.nonce,
        'token_network_address': address_encoder(self.token_network_address),
        'channel': address_encoder(self.channel),
        'transferred_amount': self.transferred_amount,
        'locked_amount': self.locked_amount,
        'locksroot': data_encoder(self.locksroot),
        'signature': data_encoder(self.signature),
    }
def test_api_open_close_and_settle_channel(api_backend, token_addresses, reveal_timeout):
    """ Open a channel through the REST API, verify the returned state, then
    close it and verify the closed state.
    """
    # let's create a new channel
    partner_address = '0x61c808d82a3ac53231750dadc13c777b59310bd9'
    token_address = token_addresses[0]
    settle_timeout = 1650
    channel_data_obj = {
        'partner_address': partner_address,
        'token_address': address_encoder(token_address),
        'settle_timeout': settle_timeout,
    }

    request = grequests.put(
        api_url_for(api_backend, 'channelsresource'),
        json=channel_data_obj
    )
    response = request.send().response

    balance = 0
    assert_proper_response(response, status_code=HTTPStatus.CREATED)
    response = response.json()
    # note: this aliases channel_data_obj — the mutations below change it too
    expected_response = channel_data_obj
    expected_response['balance'] = balance
    expected_response['state'] = CHANNEL_STATE_OPENED
    expected_response['reveal_timeout'] = reveal_timeout
    # can't know the channel address beforehand but make sure we get one
    assert 'channel_address' in response
    channel_address = response['channel_address']
    expected_response['channel_address'] = response['channel_address']
    assert response == expected_response

    # let's close the channel
    request = grequests.patch(
        api_url_for(
            api_backend,
            'channelsresourcebychanneladdress',
            channel_address=channel_address
        ),
        json={'state': CHANNEL_STATE_CLOSED}
    )
    response = request.send().response
    assert_proper_response(response)

    expected_response = {
        'channel_address': channel_address,
        'partner_address': partner_address,
        'token_address': address_encoder(token_address),
        'settle_timeout': settle_timeout,
        'reveal_timeout': reveal_timeout,
        'state': CHANNEL_STATE_CLOSED,
        'balance': balance
    }
    assert response.json() == expected_response
def test_for_issue_892(
        private_keys,
        settle_timeout,
        tester_channelmanager,
        tester_chain,
        tester_events):
    """
    This is a regression test for issue #892
    (https://github.com/raiden-network/raiden/issues/892) where the
    `getChannelsParticipants()` call was returning an empty list if one
    channel from the channel manager has been settled.
    """
    pairs = itertools.combinations(private_keys, 2)

    participant_pairs = []
    first_pair = True
    for pkey0, pkey1 in pairs:
        address0 = privatekey_to_address(pkey0)
        address1 = privatekey_to_address(pkey1)
        channel_address_hex = tester_channelmanager.newChannel(
            address1,
            settle_timeout,
            sender=pkey0,
        )
        tester_chain.mine()

        # the channel must be discoverable from either participant
        assert tester_channelmanager.getChannelWith(address1, sender=pkey0) == channel_address_hex
        assert tester_channelmanager.getChannelWith(address0, sender=pkey1) == channel_address_hex

        if first_pair:
            # close and settle only the first channel; the remaining ones
            # must still show up in getChannelsParticipants()
            first_pair = False
            nettingchannel = create_nettingchannel_proxy(
                tester_chain,
                channel_address_hex,
                tester_events.append,
            )
            nettingchannel.close(sender=pkey0)
            tester_chain.mine(number_of_blocks=settle_timeout + 2)
            nettingchannel.settle(sender=pkey1)
        else:
            # this is brittle, relying on an implicit ordering of addresses
            participant_pairs.extend((
                address_encoder(address0),
                address_encoder(address1),
            ))

    assert participant_pairs == tester_channelmanager.getChannelsParticipants(sender=pkey0)
def test_api_tokens(api_backend, blockchain_services, token_addresses):
    """ After opening channels for two different tokens the tokens endpoint
    must list both token addresses.
    """
    # open a channel for the first token
    partner_address = '0x61c808d82a3ac53231750dadc13c777b59310bd9'
    token_address1 = token_addresses[0]
    token_address2 = token_addresses[1]
    settle_timeout = 1650
    channel_data_obj = {
        'partner_address': partner_address,
        'token_address': address_encoder(token_address1),
        'settle_timeout': settle_timeout,
    }
    request = grequests.put(
        api_url_for(
            api_backend,
            'channelsresource',
        ),
        json=channel_data_obj,
    )
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CREATED)

    # open a channel for the second token
    partner_address = '0x61c808d82a3ac53231750dadc13c777b59310bd9'
    settle_timeout = 1650
    channel_data_obj = {
        'partner_address': partner_address,
        'token_address': address_encoder(token_address2),
        'settle_timeout': settle_timeout,
    }
    request = grequests.put(api_url_for(
        api_backend,
        'channelsresource',
    ), json=channel_data_obj)
    response = request.send().response
    assert_proper_response(response, HTTPStatus.CREATED)

    # and now let's get the token list
    request = grequests.get(api_url_for(
        api_backend,
        'tokensresource',
    ))
    response = request.send().response
    assert_proper_response(response)
    response = response.json()
    expected_response = [
        address_encoder(token_address1),
        address_encoder(token_address2),
    ]
    # ordering of the token list is not guaranteed, compare as sets
    assert set(response) == set(expected_response)
def normalize_events_list(old_list):
    """Internally the `event_type` key is prefixed with underscore but the API
    returns an object without that prefix.

    Some of the raiden events contain account addresses; those are exported
    hex-encoded (without the leading '0x') for the outside world.
    """
    normalized = []
    for raw_event in old_list:
        entry = dict(raw_event)
        entry['event_type'] = entry.pop('_event_type')
        event_type = entry['event_type']

        # the two event types below are mutually exclusive
        if event_type == 'EventTransferReceivedSuccess':
            entry['initiator'] = address_encoder(entry['initiator'])[2:]
        elif event_type == 'EventTransferSentSuccess':
            entry['target'] = address_encoder(entry['target'])[2:]

        normalized.append(entry)

    return normalized
def _handle_message(self, room, event):
    """ Handle text messages sent to listening rooms """
    if event['type'] != 'm.room.message' or event['content']['msgtype'] != 'm.text':
        # Ignore non-messages and non-text messages
        return

    sender_id = event['sender']

    if sender_id == self._client.user_id:
        # Ignore our own messages
        return

    user = self._client.get_user(sender_id)
    peer_address = self._userids_to_address.get(sender_id)
    if not peer_address:
        try:
            # recover displayname signature
            peer_address = signing.recover_address(
                sender_id.encode(),
                signature=data_decoder(user.get_display_name()),
                hasher=eth_sign_sha3
            )
        except AssertionError:
            log.warning('INVALID MESSAGE', sender_id=sender_id)
            return
        # the recovered address must appear inside the matrix user id,
        # otherwise the displayname signature belongs to someone else
        node_address_hex = address_encoder(peer_address)
        if node_address_hex.lower() not in sender_id:
            log.warning(
                'INVALID SIGNATURE',
                peer_address=node_address_hex,
                sender_id=sender_id
            )
            return
        # cache the validated mapping for subsequent messages
        self._userids_to_address[sender_id] = peer_address

    data = event['content']['body']
    if data.startswith('0x'):
        # hex payload: a binary-packed message
        message = message_from_bytes(data_decoder(data))
    else:
        # otherwise the payload is a JSON-encoded message dict
        message_dict = json.loads(data)
        log.trace('MESSAGE_DATA', data=message_dict)
        message = message_from_dict(message_dict)

    if isinstance(message, SignedMessage) and not message.sender:
        # FIXME: This can't be right
        message.sender = peer_address

    # dispatch by message type; order matters since Delivered and Ping
    # are themselves SignedMessage subclasses (presumably — confirm)
    if isinstance(message, Delivered):
        self._receive_delivered(message)
    elif isinstance(message, Ping):
        log.warning(
            'Not required Ping received',
            message=data,
        )
    elif isinstance(message, SignedMessage):
        self._receive_message(message)
    elif log.isEnabledFor(logging.ERROR):
        log.error(
            'Invalid message',
            message=data,
        )
def start_health_check(self, node_address):
    """ Start monitoring presence of `node_address`.

    Searches the user directory for the peer, keeps only users whose
    displayname signature validates, adds the new ones to the client's
    presence list and ensures a room with the peer exists.
    """
    log.debug('HEALTHCHECK', peer_address=pex(node_address))
    node_address_hex = address_encoder(node_address)

    # only trust directory entries whose displayname signature matches
    users = [
        user
        for user in self._client.search_user_directory(node_address_hex)
        if _validate_userid_signature(user)
    ]

    existing = {presence['user_id'] for presence in self._client.get_presence_list()}
    user_ids_to_add = {u.user_id for u in users}
    # only subscribe to user ids we are not already watching
    user_ids = user_ids_to_add - existing

    if user_ids:
        log.debug('Add to presence list', added_users=user_ids)
        self._client.modify_presence_list(add_user_ids=list(user_ids))

    self._address_to_userids.setdefault(node_address, set()).update(user_ids_to_add)

    # Ensure there is a room for the peer node
    # We use spawn_later to avoid races if the peer is already expecting us and sent an invite
    gevent.spawn_later(1, self._get_room_for_address, node_address, allow_missing_peers=True)
def test_transfer_update_event(tester_channels, tester_events):
    """ The event TransferUpdated is emitted after a successful call to
    updateTransfer.
    """
    pkey0, pkey1, nettingchannel, channel0, channel1 = tester_channels[0]
    address1 = privatekey_to_address(pkey1)

    direct0 = make_direct_transfer_from_channel(
        channel0,
        channel1,
        amount=90,
        pkey=pkey0,
    )

    nettingchannel.close(sender=pkey0)
    previous_events = list(tester_events)

    # hash the packed message without the trailing 65 bytes (the signature
    # field, passed separately below)
    direct0_hash = sha3(direct0.packed().data[:-65])
    nettingchannel.updateTransfer(
        direct0.nonce,
        direct0.transferred_amount,
        direct0.locksroot,
        direct0_hash,
        direct0.signature,
        sender=pkey1,
    )

    # exactly one new event: the TransferUpdated from the counterparty
    assert len(previous_events) + 1 == len(tester_events)

    last_event = event_decoder(tester_events[-1], nettingchannel.translator)
    assert last_event == {
        '_event_type': b'TransferUpdated',
        'node_address': address_encoder(address1),
    }
def __init__(
        self,
        jsonrpc_client,
        discovery_address,
        startgas,
        gasprice,
        poll_timeout=DEFAULT_POLL_TIMEOUT):
    """ Proxy to the on-chain endpoint discovery (EndpointRegistry) contract.

    Raises:
        ValueError: If `discovery_address` is not a valid address.
        AddressWithoutCode: If no contract code exists at
            `discovery_address` (raised by check_address_has_code).
    """
    if not isaddress(discovery_address):
        raise ValueError('discovery_address must be a valid address')

    check_address_has_code(jsonrpc_client, discovery_address, 'Discovery')

    self.address = discovery_address
    self.proxy = jsonrpc_client.new_contract_proxy(
        CONTRACT_MANAGER.get_abi(CONTRACT_ENDPOINT_REGISTRY),
        address_encoder(discovery_address),
    )
    self.client = jsonrpc_client
    self.startgas = startgas
    self.gasprice = gasprice
    self.poll_timeout = poll_timeout
    # sentinel returned by the contract for unknown endpoints
    self.not_found_address = '0x' + '0' * 40
def test_close_event(tester_registry_address, tester_nettingcontracts, tester_events):
    """ The event ChannelClosed is emitted when close is called. """
    pkey0, _, nettingchannel = tester_nettingcontracts[0]
    closer_address = privatekey_to_address(pkey0)

    events_before = list(tester_events)
    nettingchannel.close(sender=pkey0)

    # exactly one new event must have been emitted: the ChannelClosed event
    assert len(events_before) + 1 == len(tester_events)

    close_event = event_decoder(tester_events[-1], nettingchannel.translator)
    assert close_event == {
        '_event_type': b'ChannelClosed',
        'registry_address': address_encoder(tester_registry_address),
        'closing_address': address_encoder(closer_address),
    }
def test_endpointregistry(tester_chain, tester_events):
    """ Register two endpoints for the same account: lookups in both
    directions must return the latest registration, and each registration
    must emit an AddressRegistered event.
    """
    account0 = tester.a0
    sender = address_encoder(account0)

    # compile and deploy a fresh EndpointRegistry contract
    endpointregistry_path = get_contract_path('EndpointRegistry.sol')
    endpointregistry_compiled = _solidity.compile_contract(
        endpointregistry_path,
        "EndpointRegistry"
    )
    tester_chain.head_state.log_listeners.append(tester_events.append)
    endpointregistry_address = tester_chain.contract(
        endpointregistry_compiled['bin'],
        language='evm'
    )
    endpoint_registry = tester.ABIContract(
        tester_chain,
        endpointregistry_compiled['abi'],
        endpointregistry_address
    )

    endpoint_registry.registerEndpoint('127.0.0.1:4001')
    assert endpoint_registry.findAddressByEndpoint('127.0.0.1:4001') == sender
    assert endpoint_registry.findEndpointByAddress(sender) == b'127.0.0.1:4001'

    # a second registration replaces the first for the address lookup
    endpoint_registry.registerEndpoint('192.168.0.1:4002')
    assert endpoint_registry.findAddressByEndpoint('192.168.0.1:4002') == sender
    assert endpoint_registry.findEndpointByAddress(sender) == b'192.168.0.1:4002'

    # one AddressRegistered event per registerEndpoint call
    assert len(tester_events) == 2
    event0 = event_decoder(tester_events[0], endpoint_registry.translator)
    event1 = event_decoder(tester_events[1], endpoint_registry.translator)
    assert event0['_event_type'] == b'AddressRegistered'
    assert event1['_event_type'] == b'AddressRegistered'
def __init__(
        self,
        jsonrpc_client,
        manager_address,
        poll_timeout=DEFAULT_POLL_TIMEOUT,
):
    """ Proxy to a deployed TokenNetwork contract.

    Args:
        jsonrpc_client: client used for all chain interactions.
        manager_address: binary address of the deployed TokenNetwork contract.
        poll_timeout: timeout used for polling operations.

    Raises:
        InvalidAddress: If `manager_address` is not a binary address.
        AddressWithoutCode: If there is no contract code at `manager_address`.
    """
    # pylint: disable=too-many-arguments
    if not isaddress(manager_address):
        # fixed typo in the error message: 'nework' -> 'network'
        raise InvalidAddress(
            'Expected binary address format for token network')

    check_address_has_code(jsonrpc_client, manager_address, 'Channel Manager')

    proxy = jsonrpc_client.new_contract_proxy(
        CONTRACT_MANAGER.get_abi(CONTRACT_TOKEN_NETWORK),
        address_encoder(manager_address),
    )

    # refuse to operate against an incompatible on-chain contract version
    CONTRACT_MANAGER.check_contract_version(
        proxy.functions.contract_version().call(),
        CONTRACT_TOKEN_NETWORK)

    self.address = manager_address
    self.proxy = proxy
    self.client = jsonrpc_client
    self.node_address = privatekey_to_address(self.client.privkey)
    self.poll_timeout = poll_timeout
    # Prevents concurrent deposit, withdraw, close, or settle operations on the same channel
    self.channel_operations_lock = dict()
    self.open_channel_transactions = dict()
def geth_wait_and_check(deploy_client, privatekeys, random_marker):
    """ Wait until the geth cluster is ready.

    Polls the JSON-RPC interface until it answers, verifies the genesis
    extraData carries the expected test marker, then waits until every
    premined account has a non-zero balance.

    Raises:
        RuntimeError: If the genesis marker does not match (likely a port
            clash with a parallel test run).
        ValueError: If the RPC interface never came up, or an account
            stayed at zero balance.
    """
    jsonrpc_running = False

    tries = 5
    while not jsonrpc_running and tries > 0:
        try:
            block = deploy_client.call('eth_getBlockByNumber', '0x0', True)
        except ConnectionError:
            gevent.sleep(0.5)
            tries -= 1
        else:
            jsonrpc_running = True

            # extraData starts with '0x'; the marker occupies the first bytes
            running_marker = block['extraData'][2:len(random_marker) + 2]
            if running_marker != random_marker:
                raise RuntimeError(
                    'the test marker does not match, maybe two tests are running in '
                    'parallel with the same port?'
                )

    if jsonrpc_running is False:
        raise ValueError('geth didnt start the jsonrpc interface')

    for key in sorted(set(privatekeys)):
        address = address_encoder(privatekey_to_address(key))

        # wait (up to ~10s) for the premined balance to become visible
        tries = 10
        balance = '0x0'
        while balance == '0x0' and tries > 0:
            balance = deploy_client.call('eth_getBalance', address, 'latest')
            gevent.sleep(1)
            tries -= 1

        if balance == '0x0':
            raise ValueError('account is with a balance of 0')
def _validate_userid_signature(user: User) -> bool:
    """ Return True when the user's displayname is a signature over the
    user_id and the recovered address is embedded in the user_id.
    """
    # display_name should be an address present in the user_id
    signer_address = signing.recover_address(
        user.user_id.encode(),
        signature=data_decoder(user.get_display_name()),
        hasher=eth_sign_sha3,
    )
    return address_encoder(signer_address).lower() in user.user_id
def test_settle_event(
        settle_timeout,
        tester_registry_address,
        tester_chain,
        tester_events,
        tester_nettingcontracts,
):
    """ The event ChannelSettled is emitted when the channel is settled. """
    pkey0, _, nettingchannel = tester_nettingcontracts[0]

    nettingchannel.close(sender=pkey0)

    # mine past the settlement window so settle() is allowed
    tester_chain.mine(number_of_blocks=settle_timeout + 1)

    previous_events = list(tester_events)
    tester_chain.head_state.log_listeners.append(tester_events.append)
    nettingchannel.settle(sender=pkey0)

    # settle + a transfer per participant
    assert len(previous_events) + 3 == len(tester_events)

    settle_event = event_decoder(tester_events[-1], nettingchannel.translator)
    assert settle_event == {
        '_event_type': b'ChannelSettled',
        'registry_address': address_encoder(tester_registry_address)
    }
def to_dict(self):
    """ Serialize the message into a JSON-friendly dict.

    The binary sender address and signature are hex-encoded.
    """
    return {
        'type': self.__class__.__name__,
        'sender': address_encoder(self.sender),
        'message_identifier': self.message_identifier,
        'signature': data_encoder(self.signature)
    }
def test_api_transfers(
        api_backend,
        api_test_context,
        api_raiden_service):
    """ POSTing a transfer must succeed and echo the transfer back with our
    own address filled in as initiator.
    """
    amount = 200
    identifier = 42
    token_address = '0xea674fdde714fd979de3edf0f56aa9716b898ec8'
    target_address = '0x61c808d82a3ac53231750dadc13c777b59310bd9'

    # the payload the API is expected to return
    transfer = {
        'initiator_address': address_encoder(api_raiden_service.address),
        'target_address': target_address,
        'token_address': token_address,
        'amount': amount,
        'identifier': identifier
    }

    request = grequests.post(
        api_url_for(
            api_backend,
            'transfertotargetresource',
            token_address=token_address,
            target_address=target_address
        ),
        json={'amount': amount, 'identifier': identifier}
    )
    response = request.send().response

    assert_proper_response(response)
    response = response.json()
    assert response == transfer
def __init__(
        self,
        jsonrpc_client,
        registry_address,
        startgas,
        gasprice,
        poll_timeout=DEFAULT_POLL_TIMEOUT):
    """ Proxy to the deployed Registry contract.

    Raises:
        ValueError: If `registry_address` is not a valid address.
        AddressWithoutCode: If no contract code exists at
            `registry_address` (raised by check_address_has_code).
    """
    # pylint: disable=too-many-arguments
    if not isaddress(registry_address):
        raise ValueError('registry_address must be a valid address')

    check_address_has_code(jsonrpc_client, registry_address, 'Registry')

    self.address = registry_address
    self.proxy = jsonrpc_client.new_contract_proxy(
        CONTRACT_MANAGER.get_abi(CONTRACT_REGISTRY),
        address_encoder(registry_address),
    )
    self.client = jsonrpc_client
    self.startgas = startgas
    self.gasprice = gasprice
    self.poll_timeout = poll_timeout

    # caches for channel manager proxies, keyed by manager and token address
    self.address_to_channelmanager = dict()
    self.token_to_channelmanager = dict()
def test_endpointregistry(tester_chain, tester_events):
    """ Register two endpoints for the same account: lookups in both
    directions must return the latest registration, and each registration
    must emit an AddressRegistered event.
    """
    account0 = tester.a0
    sender = address_encoder(account0)

    # compile and deploy a fresh EndpointRegistry contract
    endpointregistry_path = get_contract_path('EndpointRegistry.sol')
    endpointregistry_compiled = _solidity.compile_contract(
        endpointregistry_path,
        "EndpointRegistry")
    tester_chain.head_state.log_listeners.append(tester_events.append)
    endpointregistry_address = tester_chain.contract(
        endpointregistry_compiled['bin'],
        language='evm')
    endpoint_registry = tester.ABIContract(
        tester_chain,
        endpointregistry_compiled['abi'],
        endpointregistry_address)

    endpoint_registry.registerEndpoint('127.0.0.1:4001')
    assert endpoint_registry.findAddressByEndpoint('127.0.0.1:4001') == sender
    assert endpoint_registry.findEndpointByAddress(sender) == b'127.0.0.1:4001'

    # a second registration replaces the first for the address lookup
    endpoint_registry.registerEndpoint('192.168.0.1:4002')
    assert endpoint_registry.findAddressByEndpoint('192.168.0.1:4002') == sender
    assert endpoint_registry.findEndpointByAddress(sender) == b'192.168.0.1:4002'

    # one AddressRegistered event per registerEndpoint call
    assert len(tester_events) == 2
    event0 = event_decoder(tester_events[0], endpoint_registry.translator)
    event1 = event_decoder(tester_events[1], endpoint_registry.translator)
    assert event0['_event_type'] == b'AddressRegistered'
    assert event1['_event_type'] == b'AddressRegistered'
def new_filter(
        jsonrpc_client: JSONRPCClient,
        contract_address: address,
        topics: Optional[List[int]],
        from_block: Union[str, int] = 0,
        to_block: Union[str, int] = 'latest'):
    """ Custom new filter implementation to handle bad encoding from geth rpc. """
    # geth expects block numbers as hex strings
    if isinstance(from_block, int):
        from_block = hex(from_block)
    if isinstance(to_block, int):
        to_block = hex(to_block)

    json_data = {
        'fromBlock': from_block,
        'toBlock': to_block,
        'address': address_encoder(normalize_address(contract_address)),
    }
    if topics is not None:
        json_data['topics'] = [topic_encoder(topic) for topic in topics]

    return jsonrpc_client.call('eth_newFilter', json_data)
def test_api_transfers(api_backend, api_test_context, api_raiden_service):
    """ POSTing a transfer must succeed and echo the transfer back with our
    own address filled in as initiator.
    """
    amount = 200
    identifier = 42
    token_address = '0xea674fdde714fd979de3edf0f56aa9716b898ec8'
    target_address = '0x61c808d82a3ac53231750dadc13c777b59310bd9'

    # the payload the API is expected to return
    transfer = {
        'initiator_address': address_encoder(api_raiden_service.address),
        'target_address': target_address,
        'token_address': token_address,
        'amount': amount,
        'identifier': identifier
    }

    request = grequests.post(
        api_url_for(
            api_backend,
            'transfertotargetresource',
            token_address=token_address,
            target_address=target_address),
        json={
            'amount': amount,
            'identifier': identifier
        })
    response = request.send().response

    assert_proper_response(response)
    response = response.json()
    assert response == transfer
def __init__(
        self,
        jsonrpc_client,
        registry_address,
        poll_timeout=DEFAULT_POLL_TIMEOUT,
):
    """ Proxy to the deployed TokenNetworkRegistry contract.

    Args:
        jsonrpc_client: client used for all chain interactions.
        registry_address: binary address of the deployed registry contract.
        poll_timeout: timeout used for polling operations.

    Raises:
        InvalidAddress: If `registry_address` is not a binary address.
        AddressWithoutCode: If there is no contract code at
            `registry_address`.
    """
    # pylint: disable=too-many-arguments
    if not isaddress(registry_address):
        raise InvalidAddress(
            'Expected binary address format for token network registry')

    check_address_has_code(jsonrpc_client, registry_address, CONTRACT_TOKEN_NETWORK_REGISTRY)

    proxy = jsonrpc_client.new_contract_proxy(
        CONTRACT_MANAGER.get_abi(CONTRACT_TOKEN_NETWORK_REGISTRY),
        address_encoder(registry_address),
    )

    # refuse to operate against an incompatible on-chain contract version
    CONTRACT_MANAGER.check_contract_version(
        proxy.functions.contract_version().call(),
        CONTRACT_TOKEN_NETWORK_REGISTRY)

    self.address = registry_address
    self.proxy = proxy
    self.client = jsonrpc_client
    self.poll_timeout = poll_timeout
    self.node_address = privatekey_to_address(self.client.privkey)

    # caches of token network proxies, keyed by network and token address
    self.address_to_tokennetwork = dict()
    self.token_to_tokennetwork = dict()
def __init__(self, config, chain, default_registry, transport, discovery=None):
    """ Wire up and start the RaidenService for this app.

    Exits the process if another Raiden instance already holds the account's
    lock (detected via a filelock timeout).
    """
    register_error_handler(greenlet_error_handler)
    self.config = config
    self.discovery = discovery

    try:
        self.raiden = RaidenService(
            chain,
            default_registry,
            unhexlify(config['privatekey_hex']),
            transport,
            config,
            discovery,
        )
    except filelock.Timeout:
        # NOTE(review): the local is named `pubkey` but it actually holds
        # the hex-encoded account *address* derived from the private key.
        pubkey = address_encoder(
            privatekey_to_address(unhexlify(self.config['privatekey_hex']))
        )
        print(
            f'FATAL: Another Raiden instance already running for account {pubkey} on '
            f'network id {chain.network_id}'
        )
        sys.exit(1)

    self.start_console = self.config['console']

    # raiden.ui.console:Console assumes that a services
    # attribute is available for auto-registration
    self.services = dict()
def get_connection_managers_info(self):
    """ Return connection manager info as an API response, with the token
    address keys hex-encoded for the outside world.
    """
    raiden_service_result = self.raiden_api.get_connection_managers_info()
    assert isinstance(raiden_service_result, dict)

    # encode token addresses indexes
    encoded_result = {}
    for token_address, info in raiden_service_result.items():
        encoded_result[address_encoder(token_address)] = info

    return api_response(result=encoded_result)
def get_filter_events(
        jsonrpc_client: JSONRPCClient,
        contract_address: address,
        topics: Optional[List[int]],
        from_block: Union[str, int] = 0,
        to_block: Union[str, int] = 'latest') -> List[Dict]:
    """ Query `eth_getLogs` for `contract_address` and decode the results.

    This handles bad encoding from the geth rpc: block numbers are
    hex-encoded on the way out, and addresses/data/topics are decoded to
    binary on the way back.
    """
    # geth expects block numbers as hex strings
    if isinstance(from_block, int):
        from_block = hex(from_block)
    if isinstance(to_block, int):
        to_block = hex(to_block)

    json_data = {
        'fromBlock': from_block,
        'toBlock': to_block,
        'address': address_encoder(normalize_address(contract_address)),
    }

    if topics is not None:
        json_data['topics'] = [
            topic_encoder(topic)
            for topic in topics
        ]

    filter_changes = jsonrpc_client.call('eth_getLogs', json_data)

    # geth could return None
    if filter_changes is None:
        return []

    result = []
    for log_event in filter_changes:
        address = address_decoder(log_event['address'])
        data = data_decoder(log_event['data'])
        # NOTE(review): this rebinds the `topics` parameter; harmless since
        # the parameter is not read after this point, but easy to trip over.
        topics = [
            topic_decoder(topic)
            for topic in log_event['topics']
        ]
        block_number = log_event.get('blockNumber')
        if not block_number:
            block_number = 0
        else:
            # block numbers arrive as '0x…' strings; base 0 handles the prefix
            block_number = int(block_number, 0)

        result.append({
            'topics': topics,
            'data': data,
            'address': address,
            'block_number': block_number,
        })

    return result
def test_close_second_participant_can_close(tester_chain, tester_nettingcontracts):
    """ Second participant can close an unused channel. """
    _, pkey1, nettingchannel = tester_nettingcontracts[0]
    second_address = privatekey_to_address(pkey1)

    closed_block_number = tester_chain.block.number
    nettingchannel.close(sender=pkey1)

    # the close block and the closing address must be recorded on-chain
    assert nettingchannel.closed(sender=pkey1) == closed_block_number
    assert nettingchannel.closingAddress(sender=pkey1) == address_encoder(second_address)
def test_close_first_participant_can_close(tester_chain, tester_nettingcontracts):
    """ First participant can close an unused channel. """
    pkey0, _, nettingchannel = tester_nettingcontracts[0]
    closer_address = privatekey_to_address(pkey0)

    block_at_close = tester_chain.block.number
    nettingchannel.close(sender=pkey0)

    # the contract must record both the closing block and who closed
    assert nettingchannel.closed(sender=pkey0) == block_at_close
    assert nettingchannel.closingAddress(sender=pkey0) == address_encoder(closer_address)
def test_deposit_events(
        private_keys,
        settle_timeout,
        tester_chain,
        tester_channelmanager,
        tester_token,
        tester_events):
    """ A deposit must emit the events Transfer and a ChannelNewBalance. """
    pkey0 = private_keys[0]
    address0 = privatekey_to_address(pkey0)

    nettingchannel = new_nettingcontract(
        pkey0,
        private_keys[1],
        tester_chain,
        tester_events.append,
        tester_channelmanager,
        settle_timeout,
    )

    # deposit a fraction of the available balance
    initial_balance = tester_token.balanceOf(address0, sender=pkey0)
    deposit_amount = initial_balance // 10

    assert tester_token.approve(nettingchannel.address, deposit_amount, sender=pkey0) is True
    assert nettingchannel.deposit(deposit_amount, sender=pkey0) is True

    # the token emits Transfer first, then the channel emits ChannelNewBalance
    transfer_event = event_decoder(tester_events[-2], tester_token.translator)
    newbalance_event = event_decoder(tester_events[-1], nettingchannel.translator)

    assert transfer_event == {
        '_event_type': b'Transfer',
        '_from': address_encoder(address0),
        '_to': nettingchannel.address,
        '_value': deposit_amount,
    }

    assert newbalance_event['_event_type'] == b'ChannelNewBalance'
    assert newbalance_event['token_address'] == address_encoder(tester_token.address)
    assert newbalance_event['participant'] == address_encoder(address0)
    assert newbalance_event['balance'] == deposit_amount
def check_address_has_code(
        client,
        address: address,
        contract_name: str = ''):
    """ Checks that the given address contains code.

    Raises:
        AddressWithoutCode: When `eth_getCode` returns an empty result.
    """
    deployed_code = client.eth_getCode(address, 'latest')

    if not deployed_code:
        name_prefix = '[{}]: '.format(contract_name) if contract_name else ''
        raise AddressWithoutCode('{}Address {} does not contain code'.format(
            name_prefix,
            address_encoder(address),
        ))
def test_api_query_our_address(
        api_backend,
        api_test_context,
        api_raiden_service):
    """ The address resource must report the service's own address. """
    request = grequests.get(
        api_url_for(api_backend, 'addressresource'),
    )
    response = request.send().response

    assert_proper_response(response)
    expected = {'our_address': address_encoder(api_raiden_service.address)}
    assert response.json() == expected
def test_channeldeleted_event(
        settle_timeout,
        tester_channelmanager,
        tester_events,
        tester_nettingcontracts,
        tester_chain):
    """ A channel deleted event must be emitted when the channel is cleaned.

    This happens once a *new* channel with *the same parties* is created,
    overwriting the old one. This behavior may be unexpected due to the weird
    timing.
    """
    pkey0, pkey1, nettingchannel = tester_nettingcontracts[0]
    address0 = privatekey_to_address(pkey0)
    address1 = privatekey_to_address(pkey1)

    # settle the existing channel so the manager entry is stale
    netting_channel_settled(
        tester_chain,
        nettingchannel,
        pkey0,
        settle_timeout,
    )

    tester_chain.head_state.log_listeners.append(tester_events.append)

    # old entry will be deleted when calling newChannel
    tester_channelmanager.newChannel(
        address1,
        settle_timeout,
        sender=pkey0,
    )

    delete_event = event_decoder(tester_events[-2], tester_channelmanager.translator)
    assert delete_event == {
        '_event_type': b'ChannelDeleted',
        'caller_address': address_encoder(address0),
        'partner': address_encoder(address1)
    }
def test_close_event(tester_chain, tester_nettingcontracts, tester_events):
    """ The event ChannelClosed is emitted when close is called. """
    pkey0, _, nettingchannel = tester_nettingcontracts[0]
    closer_address = privatekey_to_address(pkey0)

    event_count_before = len(tester_events)
    nettingchannel.close(sender=pkey0)

    # exactly one new event must have been logged
    assert len(tester_events) == event_count_before + 1

    close_event = event_decoder(tester_events[-1], nettingchannel.translator)
    assert close_event == {
        '_event_type': b'ChannelClosed',
        'closing_address': address_encoder(closer_address),
    }
def register_token(self, token_address):
    """ Register `token_address` with the registry.

    Returns:
        409 CONFLICT when the token already has a channel manager, otherwise
        201 CREATED with the address of the freshly created channel manager.
    """
    manager_address = self.raiden_api.manager_address_if_token_registered(token_address)

    if manager_address is not None:
        return api_error(
            errors='Token is already registered',
            status_code=HTTPStatus.CONFLICT,
        )

    # the early return above guarantees the token is unregistered here; the
    # original re-tested `manager_address is None`, which was always true
    manager_address = self.raiden_api.register_token(token_address)

    return api_response(
        result=dict(channel_manager_address=address_encoder(manager_address)),
        status_code=HTTPStatus.CREATED,
    )
def test_close_called_multiple_times(tester_chain, tester_nettingcontracts):
    """ A channel can be closed only once. """
    pkey0, pkey1, nettingchannel = tester_nettingcontracts[0]
    closer_address = privatekey_to_address(pkey0)

    block_at_close = tester_chain.block.number
    nettingchannel.close(sender=pkey0)

    # neither participant may close an already closed channel
    with pytest.raises(TransactionFailed):
        nettingchannel.close(sender=pkey0)

    with pytest.raises(TransactionFailed):
        nettingchannel.close(sender=pkey1)

    # the original close data must be unchanged
    assert nettingchannel.closed(sender=pkey0) == block_at_close
    assert nettingchannel.closingAddress(sender=pkey0) == address_encoder(closer_address)
def format_data_for_call(
        sender: address = b'',
        to: address = b'',
        value: int = 0,
        data: bytes = b'',
        startgas: int = GAS_PRICE,
        gasprice: int = GAS_PRICE):
    """ Helper to format the transaction data. """
    # NOTE(review): `to` goes through data_encoder, not address_encoder —
    # presumably so an empty `to` (contract creation) is accepted; confirm
    # before changing.
    call_data = dict()
    call_data['from'] = address_encoder(sender)
    call_data['to'] = data_encoder(to)
    call_data['value'] = quantity_encoder(value)
    call_data['gasPrice'] = quantity_encoder(gasprice)
    call_data['gas'] = quantity_encoder(startgas)
    call_data['data'] = data_encoder(data)
    return call_data
def database_paths(tmpdir, private_keys, in_memory_database):
    """ Sqlite database paths for each app. """
    # According to http://www.sqlite.org/inmemorydb.html each memory connection
    # will create a unique in-memory DB, which is exactly what we need in this
    # case for each different Raiden app
    if in_memory_database:
        return [':memory:'] * len(private_keys)

    paths = list()
    for pkey in private_keys:
        # use a short slice of the hex address as a per-app directory name
        short_address = address_encoder(privatekey_to_address(pkey))[2:8]
        app_dir = os.path.join(tmpdir.strpath, short_address)
        if not os.path.exists(app_dir):
            os.makedirs(app_dir)
        paths.append(os.path.join(app_dir, 'log.db'))

    return paths
def eth_getCode(self, code_address: address, block: Union[int, str] = 'latest') -> bytes:
    """ Returns code at a given address.

    Args:
        code_address: An address (20 raw bytes, not hex encoded).
        block: Integer block number, or the string 'latest',
            'earliest' or 'pending'. Default is 'latest'.

    Raises:
        ValueError: If `code_address` is not 20 bytes long.
    """
    # warn first (hex input is 42 bytes and also fails the length check below)
    if code_address.startswith(b'0x'):
        warnings.warn(
            'address seems to be already encoded, this will result '
            'in unexpected behavior'
        )

    if len(code_address) != 20:
        raise ValueError(
            'address length must be 20 (it might be hex encoded)'
        )

    hex_code = self.call('eth_getCode', address_encoder(code_address), block)
    return data_decoder(hex_code)
def all_contract_events_raw(
        rpc: JSONRPCClient,
        contract_address: str,
        start_block: Union[str, int] = 0,
        end_block: Union[str, int] = 'latest') -> List[Dict]:
    """Find all events for a deployed contract given its `contract_address`.

    Args:
        rpc: client instance.
        contract_address: hex encoded contract address.
        start_block: read event-logs starting from this block number (default: 0).
        end_block: read event-logs up to this block number (default: 'latest').

    Returns:
        events
    """
    request = {
        'fromBlock': str(start_block),
        'toBlock': str(end_block),
        'address': address_encoder(normalize_address(contract_address)),
        'topics': [],
    }
    return rpc.call('eth_getLogs', request)
def new_filter(self, fromBlock=None, toBlock=None, address=None, topics=None):
    """ Creates a filter object, based on filter options, to notify when the
    state changes (logs). To check if the state has changed, call
    eth_getFilterChanges.

    Args:
        fromBlock: Starting block number or tag; None uses the default.
        toBlock: Final block number or tag; None uses the default.
        address: Restrict the filter to logs from this address.
        topics: List of topics the logs must match.

    Returns:
        The id of the newly created filter.

    Raises:
        ValueError: If `topics` is provided but is not a list.
    """
    # use explicit None checks: the previous `fromBlock or ''` silently
    # mapped block number 0 to '' because 0 is falsy
    json_data = {
        'fromBlock': block_tag_encoder('' if fromBlock is None else fromBlock),
        'toBlock': block_tag_encoder('' if toBlock is None else toBlock),
    }

    if address is not None:
        json_data['address'] = address_encoder(address)

    if topics is not None:
        if not isinstance(topics, list):
            raise ValueError('topics must be a list')

        json_data['topics'] = [topic_encoder(topic) for topic in topics]

    filter_id = self.call('eth_newFilter', json_data)
    return quantity_decoder(filter_id)