def _test_getnodeaddresses(self):
    """Feed 256 addresses into node0 over p2p and verify getnodeaddresses RPC output."""
    self.nodes[0].add_p2p_connection(P2PInterface())

    # Build one addr message carrying 256 distinct IPv4 addresses.
    msg = msg_addr()
    sent_addrs = []
    for octet in range(256):
        ip = f"123.123.123.{octet}"
        sent_addrs.append(ip)
        entry = CAddress()
        entry.time = 100000000
        entry.nServices = NODE_NETWORK | NODE_WITNESS
        entry.ip = ip
        entry.port = 21102
        msg.addrs.append(entry)
    self.nodes[0].p2p.send_and_ping(msg)

    # Query a subset back via RPC and check every field matches what was sent.
    REQUEST_COUNT = 10
    returned = self.nodes[0].getnodeaddresses(REQUEST_COUNT)
    assert_equal(len(returned), REQUEST_COUNT)
    for record in returned:
        assert_greater_than(record["time"], 1527811200)  # 1st June 2018
        assert_equal(record["services"], NODE_NETWORK | NODE_WITNESS)
        assert record["address"] in sent_addrs
        assert_equal(record["port"], 21102)

    # A negative count must be rejected by the RPC.
    assert_raises_rpc_error(-8, "Address count out of range",
                            self.nodes[0].getnodeaddresses, -1)

    # addrman's size cannot be known reliably after insertion, as hash
    # collisions may occur; so only test that requesting a large number of
    # addresses returns less than that.
    LARGE_REQUEST_COUNT = 10000
    returned = self.nodes[0].getnodeaddresses(LARGE_REQUEST_COUNT)
    assert_greater_than(LARGE_REQUEST_COUNT, len(returned))
def run_test(self):
    """Send an oversized addr message (dropped) and a valid one (relayed)."""
    self.log.info('Create connection that sends addr messages')
    sender = self.nodes[0].add_p2p_connection(P2PInterface())
    msg = msg_addr()

    self.log.info('Send too large addr message')
    # 101 copies of the 10-address fixture exceeds the protocol limit;
    # the node logs the rejection instead of processing it.
    msg.addrs = ADDRS * 101
    with self.nodes[0].assert_debug_log(['message addr size() = 1010']):
        sender.send_and_ping(msg)

    self.log.info('Check that addr message content is relayed and added to addrman')
    receiver = self.nodes[0].add_p2p_connection(AddrReceiver())
    msg.addrs = ADDRS
    expected_logs = [
        'Added 10 addresses from 127.0.0.1: 0 tried',
        'received: addr (301 bytes) peer=0',
        'sending addr (301 bytes) peer=1',
    ]
    with self.nodes[0].assert_debug_log(expected_logs):
        sender.send_and_ping(msg)
        # Bump mock time 30 minutes forward so any delayed addr relay fires now.
        self.nodes[0].setmocktime(int(time.time()) + 30 * 60)
        receiver.sync_with_ping()
def test_getnodeaddresses(self):
    """Check the getnodeaddresses RPC against addresses fed to the node over p2p."""
    self.log.info("Test getnodeaddresses")
    self.nodes[0].add_p2p_connection(P2PInterface())

    # Announce 256 distinct IPv4 addresses via a single p2p addr message.
    msg = msg_addr()
    submitted = [f"123.123.123.{i}" for i in range(256)]
    for ip in submitted:
        ca = CAddress()
        ca.time = 100000000
        ca.nServices = NODE_NETWORK
        ca.ip = ip
        ca.port = 8333
        msg.addrs.append(ca)
    self.nodes[0].p2p.send_and_ping(msg)

    # Fetch some back over RPC; each result must be one of the submitted entries.
    REQUEST_COUNT = 10
    fetched = self.nodes[0].getnodeaddresses(REQUEST_COUNT)
    assert_equal(len(fetched), REQUEST_COUNT)
    for entry in fetched:
        assert_greater_than(entry["time"], 1527811200)  # 1st June 2018
        assert_equal(entry["services"], NODE_NETWORK)
        assert entry["address"] in submitted
        assert_equal(entry["port"], 8333)

    # Negative counts are rejected.
    assert_raises_rpc_error(-8, "Address count out of range",
                            self.nodes[0].getnodeaddresses, -1)

    # addrman's size cannot be known reliably after insertion, as hash
    # collisions may occur; so only test that requesting a large number of
    # addresses returns less than that.
    LARGE_REQUEST_COUNT = 10000
    fetched = self.nodes[0].getnodeaddresses(LARGE_REQUEST_COUNT)
    assert_greater_than(LARGE_REQUEST_COUNT, len(fetched))
def run_test(self):
    """Exercise addr message handling: oversize rejection, relay to peers,
    and token-bucket rate limiting of addr processing."""
    self.log.info('Create connection that sends addr messages')
    addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
    msg = msg_addr()

    self.log.info('Send too-large addr message')
    msg.addrs = ADDRS * 101  # 1010 entries — above the protocol limit, hence the log line below
    with self.nodes[0].assert_debug_log(['addr message size = 1010']):
        addr_source.send_and_ping(msg)

    self.log.info('Check that addr message content is relayed and added to addrman')
    addr_receiver = self.nodes[0].add_p2p_connection(AddrReceiver())
    msg.addrs = ADDRS
    with self.nodes[0].assert_debug_log([
            'Added 10 addresses from 127.0.0.1: 0 tried',
            'received: addr (301 bytes) peer=0',
            'sending addr (301 bytes) peer=1',
    ]):
        addr_source.send_and_ping(msg)
        # Advance mock time 30 minutes so the relay to addr_receiver happens now.
        self.nodes[0].setmocktime(int(time.time()) + 30 * 60)
        addr_receiver.sync_with_ping()

    # The following test is backported. The original test also verified behavior for
    # outbound peers, but lacking add_outbound_p2p_connection, those tests have been
    # removed here.
    # (contype, tokens, no_relay): connection label, initial processing-token
    # budget to assert against, and whether the peer type never relays addrs.
    for contype, tokens, no_relay in [("inbound", 1, False)]:
        self.log.info('Test rate limiting of addr processing for %s peers' % contype)
        # Restart with a fresh peers.dat so the addrman starts empty.
        self.stop_node(0)
        os.remove(os.path.join(self.nodes[0].datadir, "regtest", "peers.dat"))
        self.start_node(0, [])
        self.mocktime = int(time.time())
        self.nodes[0].setmocktime(self.mocktime)
        peer = self.nodes[0].add_p2p_connection(AddrReceiver())

        # Check that we start off with empty addrman
        addr_count_0 = len(self.nodes[0].getnodeaddresses(0))
        assert_equal(addr_count_0, 0)

        # Send 600 addresses. For all but the block-relay-only peer this should
        # result in at least 1 address.
        peer.send_and_ping(self.setup_rand_addr_msg(600))
        addr_count_1 = len(self.nodes[0].getnodeaddresses(0))
        assert_greater_than_or_equal(tokens, addr_count_1)
        assert_greater_than_or_equal(addr_count_0 + 600, addr_count_1)
        assert_equal(addr_count_1 > addr_count_0, tokens > 0)

        # Send 600 more addresses. For the outbound-full-relay peer (which we send a GETADDR, and thus will
        # process up to 1001 incoming addresses), this means more entries will appear.
        peer.send_and_ping(self.setup_rand_addr_msg(600))
        addr_count_2 = len(self.nodes[0].getnodeaddresses(0))
        assert_greater_than_or_equal(tokens, addr_count_2)
        assert_greater_than_or_equal(addr_count_1 + 600, addr_count_2)
        assert_equal(addr_count_2 > addr_count_1, tokens > 600)

        # Send 10 more. As we reached the processing limit for all nodes, this
        # should have no effect.
        peer.send_and_ping(self.setup_rand_addr_msg(10))
        addr_count_3 = len(self.nodes[0].getnodeaddresses(0))
        assert_greater_than_or_equal(tokens, addr_count_3)
        assert_equal(addr_count_2, addr_count_3)

        # Advance the time by 100 seconds, permitting the processing of 10 more
        # addresses. Send 200, but verify that no more than 10 are processed.
        self.mocktime += 100
        self.nodes[0].setmocktime(self.mocktime)
        new_tokens = 0 if no_relay else 10
        tokens += new_tokens
        peer.send_and_ping(self.setup_rand_addr_msg(200))
        addr_count_4 = len(self.nodes[0].getnodeaddresses(0))
        assert_greater_than_or_equal(tokens, addr_count_4)
        assert_greater_than_or_equal(addr_count_3 + new_tokens, addr_count_4)

        # Advance the time by 1000 seconds, permitting the processing of 100 more
        # addresses. Send 200, but verify that no more than 100 are processed
        # (and at least some).
        self.mocktime += 1000
        self.nodes[0].setmocktime(self.mocktime)
        new_tokens = 0 if no_relay else 100
        tokens += new_tokens
        peer.send_and_ping(self.setup_rand_addr_msg(200))
        addr_count_5 = len(self.nodes[0].getnodeaddresses(0))
        assert_greater_than_or_equal(tokens, addr_count_5)
        assert_greater_than_or_equal(addr_count_4 + new_tokens, addr_count_5)
        assert_equal(addr_count_5 > addr_count_4, not no_relay)

        # Drop the connections created for this peer type before the next round.
        self.nodes[0].disconnect_p2ps()
def relay_tests(self):
    """Verify addr relay fan-out to inbound peers and relay behavior to/from
    outbound-full-relay and block-relay-only connections."""
    self.log.info('Test address relay')
    self.log.info('Check that addr message content is relayed and added to addrman')
    addr_source = self.nodes[0].add_p2p_connection(P2PInterface())
    num_receivers = 7
    receivers = []
    for _ in range(num_receivers):
        receivers.append(self.nodes[0].add_p2p_connection(
            AddrReceiver(test_addr_contents=True)))

    # Keep this with length <= 10. Addresses from larger messages are not
    # relayed.
    num_ipv4_addrs = 10
    msg = self.setup_addr_msg(num_ipv4_addrs)
    with self.nodes[0].assert_debug_log([
            'received: addr (301 bytes) peer=1',
    ]):
        self.send_addr_msg(addr_source, msg, receivers)

    total_ipv4_received = sum(r.num_ipv4_received for r in receivers)

    # Every IPv4 address must be relayed to two peers, other than the
    # originating node (addr_source).
    ipv4_branching_factor = 2
    assert_equal(total_ipv4_received, num_ipv4_addrs * ipv4_branching_factor)

    self.nodes[0].disconnect_p2ps()

    self.log.info('Check relay of addresses received from outbound peers')
    # send_getaddr=False: this inbound peer must not solicit addresses itself.
    inbound_peer = self.nodes[0].add_p2p_connection(
        AddrReceiver(test_addr_contents=True, send_getaddr=False))
    full_outbound_peer = self.nodes[0].add_outbound_p2p_connection(
        AddrReceiver(), p2p_idx=0, connection_type="outbound-full-relay")
    msg = self.setup_addr_msg(2)
    self.send_addr_msg(full_outbound_peer, msg, [inbound_peer])

    self.log.info(
        'Check that the first addr message received from an outbound peer is not relayed'
    )
    # Currently, there is a flag that prevents the first addr message received
    # from a new outbound peer to be relayed to others. Originally meant to prevent
    # large GETADDR responses from being relayed, it now typically affects the
    # self-announcement of the outbound peer which is often sent before the
    # GETADDR response.
    assert_equal(inbound_peer.num_ipv4_received, 0)

    # Send an empty ADDR message to initialize address relay on this connection.
    inbound_peer.send_and_ping(msg_addr())

    self.log.info(
        'Check that subsequent addr messages sent from an outbound peer are relayed'
    )
    msg2 = self.setup_addr_msg(2)
    self.send_addr_msg(full_outbound_peer, msg2, [inbound_peer])
    assert_equal(inbound_peer.num_ipv4_received, 2)

    self.log.info('Check address relay to outbound peers')
    block_relay_peer = self.nodes[0].add_outbound_p2p_connection(
        AddrReceiver(), p2p_idx=1, connection_type="block-relay-only")
    msg3 = self.setup_addr_msg(2)
    self.send_addr_msg(inbound_peer, msg3,
                       [full_outbound_peer, block_relay_peer])

    self.log.info('Check that addresses are relayed to full outbound peers')
    assert_equal(full_outbound_peer.num_ipv4_received, 2)
    self.log.info(
        'Check that addresses are not relayed to block-relay-only outbound peers'
    )
    assert_equal(block_relay_peer.num_ipv4_received, 0)

    self.nodes[0].disconnect_p2ps()