def has_full_node(self) -> bool:
    if self.server is None:
        return False
    if "full_node_peer" in self.config:
        full_node_peer = PeerInfo(
            self.config["full_node_peer"]["host"],
            self.config["full_node_peer"]["port"],
        )
        peers = [c.get_peer_info() for c in self.server.get_full_node_connections()]
        # Resolve the configured hostname so connections recorded by IP also match.
        full_node_resolved = PeerInfo(socket.gethostbyname(full_node_peer.host), full_node_peer.port)
        if full_node_peer in peers or full_node_resolved in peers:
            self.log.info(
                f"Will not attempt to connect to other nodes, already connected to {full_node_peer}"
            )
            for connection in self.server.get_full_node_connections():
                if (
                    connection.get_peer_info() != full_node_peer
                    and connection.get_peer_info() != full_node_resolved
                ):
                    self.log.info(f"Closing unnecessary connection to {connection.get_peer_info()}.")
                    asyncio.create_task(connection.close())
            return True
    return False
Example #2
    def _num_needed_peers(self) -> int:
        if self.wallet_state_manager is None:
            return 0
        assert self.server is not None
        diff = self.config["target_peer_count"] - len(
            self.global_connections.get_full_node_connections())
        if diff < 0:
            return 0

        if "full_node_peer" in self.config:
            full_node_peer = PeerInfo(
                self.config["full_node_peer"]["host"],
                self.config["full_node_peer"]["port"],
            )
            peers = [
                c.get_peer_info()
                for c in self.global_connections.get_full_node_connections()
            ]
            full_node_resolved = PeerInfo(
                socket.gethostbyname(full_node_peer.host), full_node_peer.port)
            if full_node_peer in peers or full_node_resolved in peers:
                self.log.info(
                    f"Will not attempt to connect to other nodes, already connected to {full_node_peer}"
                )
                for connection in self.global_connections.get_full_node_connections():
                    if (connection.get_peer_info() != full_node_peer and
                            connection.get_peer_info() != full_node_resolved):
                        self.log.info(
                            f"Closing unnecessary connection to {connection.get_peer_info()}."
                        )
                        self.global_connections.close(connection)
                return 0
        return diff
Example #3
    async def get_local_peerinfo(self) -> Optional[PeerInfo]:
        ip = None
        port = None
        for c in self._all_connections:
            if c.connection_type == NodeType.FULL_NODE:
                port = c.local_port
                break
        if port is None:
            return None

        try:
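            # Ask an external service for this node's public IP; any failure below falls back to None.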
            async with aiohttp.ClientSession() as session:
                async with session.get(
                        "https://checkip.amazonaws.com/") as resp:
                    if resp.status == 200:
                        ip = str(await resp.text())
                        ip = ip.rstrip()
                    else:
                        ip = None
        except Exception:
            ip = None
        if ip is None:
            return None
        peer = PeerInfo(ip, uint16(port))
        if not peer.is_valid():
            return None
        return peer
Example #4
async def main():
    farmer = Farmer()
    harvester_peer = PeerInfo(farmer.config["harvester_peer"]["host"],
                              farmer.config["harvester_peer"]["port"])
    full_node_peer = PeerInfo(farmer.config["full_node_peer"]["host"],
                              farmer.config["full_node_peer"]["port"])
    host, port = parse_host_port(farmer)
    server = ChiaServer(port, farmer, NodeType.FARMER)

    asyncio.get_running_loop().add_signal_handler(signal.SIGINT,
                                                  server.close_all)
    asyncio.get_running_loop().add_signal_handler(signal.SIGTERM,
                                                  server.close_all)

    async def on_connect():
        # Sends a handshake to the harvester
        pool_sks: List[PrivateKey] = [
            PrivateKey.from_bytes(bytes.fromhex(ce))
            for ce in farmer.key_config["pool_sks"]
        ]
        msg = HarvesterHandshake([sk.get_public_key() for sk in pool_sks])
        yield OutboundMessage(NodeType.HARVESTER,
                              Message("harvester_handshake", msg),
                              Delivery.BROADCAST)

    _ = await server.start_server(host, on_connect)
    await asyncio.sleep(1)  # Prevents TCP simultaneous connect with harvester
    _ = await server.start_client(harvester_peer, None)
    _ = await server.start_client(full_node_peer, None)

    await server.await_closed()
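
A minimal sketch of how the coroutine above would typically be launched; the use of asyncio.run here is an assumption, as the original start script may set up its own event loop:

if __name__ == "__main__":
    asyncio.run(main())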
Example #5
    async def test_sync_from_fork_point_and_weight_proof(self, three_nodes, default_1000_blocks, default_400_blocks):
        start = time.time()
        # Must be larger than "sync_block_behind_threshold" in the config
        num_blocks_initial = len(default_1000_blocks) - 50
        blocks_950 = default_1000_blocks[:num_blocks_initial]
        blocks_rest = default_1000_blocks[num_blocks_initial:]
        blocks_400 = default_400_blocks
        full_node_1, full_node_2, full_node_3 = three_nodes
        server_1 = full_node_1.full_node.server
        server_2 = full_node_2.full_node.server
        server_3 = full_node_3.full_node.server

        for block in blocks_950:
            await full_node_1.full_node.respond_block(full_node_protocol.RespondBlock(block))

        # Node 2 syncs from halfway
        for i in range(int(len(default_1000_blocks) / 2)):
            await full_node_2.full_node.respond_block(full_node_protocol.RespondBlock(default_1000_blocks[i]))

        # Node 3 syncs from a different blockchain
        for block in blocks_400:
            await full_node_3.full_node.respond_block(full_node_protocol.RespondBlock(block))

        await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)), full_node_2.full_node.on_connect)
        await server_3.start_client(PeerInfo(self_hostname, uint16(server_1._port)), full_node_3.full_node.on_connect)

        # Also test request proof of weight
        # Have the request header hash
        res = await full_node_1.request_proof_of_weight(
            full_node_protocol.RequestProofOfWeight(blocks_950[-1].height + 1, blocks_950[-1].header_hash)
        )
        assert res is not None
        validated, _ = full_node_1.full_node.weight_proof_handler.validate_weight_proof(
            full_node_protocol.RespondProofOfWeight.from_bytes(res.data).wp
        )
        assert validated

        # Don't have the request header hash
        res = await full_node_1.request_proof_of_weight(
            full_node_protocol.RequestProofOfWeight(blocks_950[-1].height + 1, std_hash(b"12"))
        )
        assert res is None

        # The second node should eventually catch up to the first one, and have the
        # same tip at height num_blocks - 1 (or at least num_blocks - 3, in case we sync to below the tip)
        await time_out_assert(180, node_height_exactly, True, full_node_2, num_blocks_initial - 1)
        await time_out_assert(180, node_height_exactly, True, full_node_3, num_blocks_initial - 1)

        cons = list(server_1.all_connections.values())[:]
        for con in cons:
            await con.close()
        for block in blocks_rest:
            await full_node_3.full_node.respond_block(full_node_protocol.RespondBlock(block))
        await time_out_assert(120, node_height_exactly, True, full_node_3, 999)

        await server_2.start_client(PeerInfo(self_hostname, uint16(server_1._port)), full_node_2.full_node.on_connect)
        await server_3.start_client(PeerInfo(self_hostname, uint16(server_1._port)), full_node_3.full_node.on_connect)
        await server_3.start_client(PeerInfo(self_hostname, uint16(server_2._port)), full_node_3.full_node.on_connect)
        await time_out_assert(180, node_height_exactly, True, full_node_1, 999)
        await time_out_assert(180, node_height_exactly, True, full_node_2, 999)
Example #6
    async def test_addr_manager(self):
        addrman = AddressManagerTest()
        # Test: Does Addrman respond correctly when empty.
        none_peer = await addrman.select_peer()
        assert none_peer is None
        assert await addrman.size() == 0
        # Test: Does Add work as expected.
        peer1 = PeerInfo("250.1.1.1", 8444)
        assert await addrman.add_peer_info([peer1])
        assert await addrman.size() == 1
        peer1_ret = await addrman.select_peer()
        assert peer1_ret.peer_info == peer1

        # Test: Does IP address deduplication work correctly.
        peer1_duplicate = PeerInfo("250.1.1.1", 8444)
        assert not await addrman.add_peer_info([peer1_duplicate])
        assert await addrman.size() == 1

        # Test: New table has one addr and we add a diff addr we should
        # have at least one addr.
        # Note that addrman's size cannot be tested reliably after insertion, as
        # hash collisions may occur. But we can always be sure of at least one
        # success.

        peer2 = PeerInfo("250.1.1.2", 8444)
        assert await addrman.add_peer_info([peer2])
        assert await addrman.size() >= 1

        # Test: AddrMan::Add multiple addresses works as expected
        addrman2 = AddressManagerTest()
        peers = [peer1, peer2]
        assert await addrman2.add_peer_info(peers)
        assert await addrman2.size() >= 1
Example #7
    async def test_addrman_find(self):
        addrman = AddressManagerTest()
        assert await addrman.size() == 0

        peer1 = PeerInfo("250.1.2.1", 8333)
        peer2 = PeerInfo("250.1.2.1", 9999)
        peer3 = PeerInfo("251.255.2.1", 8333)

        source1 = PeerInfo("250.1.2.1", 8444)
        source2 = PeerInfo("250.1.2.2", 8444)

        assert await addrman.add_peer_info([peer1], source1)
        assert not await addrman.add_peer_info([peer2], source2)
        assert await addrman.add_peer_info([peer3], source1)

        # Test: ensure Find returns an IP matching what we searched on.
        info1 = addrman.find_(peer1)
        assert info1[0] is not None and info1[1] is not None
        assert info1[0].peer_info == peer1

        # Test: Find does not discriminate by port number.
        info2 = addrman.find_(peer2)
        assert info2[0] is not None and info2[1] is not None
        assert info2 == info1

        # Test: Find returns another IP matching what we searched on.
        info3 = addrman.find_(peer3)
        assert info3[0] is not None and info3[1] is not None
        assert info3[0].peer_info == peer3
Example #8
async def wallets_prefarm(two_wallet_nodes):
    """
    Farms 10 blocks to each of two wallets (plus a few buffer blocks) and returns a payer and payee wallet.
    """
    farm_blocks = 10
    buffer = 4
    full_nodes, wallets = two_wallet_nodes
    full_node, server = full_nodes[0]
    wallet_node_0, wallet_server_0 = wallets[0]
    wallet_node_1, wallet_server_1 = wallets[1]
    wallet_0 = wallet_node_0.wallet_state_manager.main_wallet
    wallet_1 = wallet_node_1.wallet_state_manager.main_wallet

    ph0 = await wallet_0.get_new_puzzlehash()
    ph1 = await wallet_1.get_new_puzzlehash()

    await wallet_server_0.start_client(
        PeerInfo("localhost", uint16(server._port)), None)
    await wallet_server_1.start_client(
        PeerInfo("localhost", uint16(server._port)), None)

    for i in range(0, farm_blocks):
        await full_node.farm_new_block(FarmNewBlockProtocol(ph0))

    for i in range(0, farm_blocks):
        await full_node.farm_new_block(FarmNewBlockProtocol(ph1))

    for i in range(0, buffer):
        await full_node.farm_new_block(FarmNewBlockProtocol(token_bytes()))

    return wallet_node_0, wallet_node_1, full_node
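
A minimal usage sketch for the helper above, assuming the two_wallet_nodes pytest fixture and the time_out_assert helper used by the other tests in these examples (test name and assertions are illustrative):

import pytest


@pytest.mark.asyncio
async def test_prefarm_balances(two_wallet_nodes):
    wallet_node_0, wallet_node_1, full_node = await wallets_prefarm(two_wallet_nodes)
    wallet_0 = wallet_node_0.wallet_state_manager.main_wallet

    async def has_funds():
        return (await wallet_0.get_confirmed_balance()) > 0

    # The payer wallet should eventually see its farming rewards confirmed.
    await time_out_assert(15, has_funds, True)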
Example #9
    async def test_create_rl_coin(self, two_wallet_nodes):
        num_blocks = 4
        full_nodes, wallets = two_wallet_nodes
        full_node_api = full_nodes[0]
        full_node_server = full_node_api.server
        wallet_node, server_2 = wallets[0]
        wallet_node_1, wallet_server_1 = wallets[1]

        wallet = wallet_node.wallet_state_manager.main_wallet

        ph = await wallet.get_new_puzzlehash()

        await server_2.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)
        await wallet_server_1.start_client(PeerInfo(self_hostname, uint16(full_node_server._port)), None)

        for i in range(0, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))

        rl_admin: RLWallet = await RLWallet.create_rl_admin(wallet_node.wallet_state_manager)

        rl_user: RLWallet = await RLWallet.create_rl_user(wallet_node_1.wallet_state_manager)
        interval = uint64(2)
        limit = uint64(1)
        amount = uint64(100)
        await rl_admin.admin_create_coin(interval, limit, rl_user.rl_info.user_pubkey.hex(), amount, 0)
        origin = rl_admin.rl_info.rl_origin
        admin_pubkey = rl_admin.rl_info.admin_pubkey

        # The user wallet imports the origin coin's details so it can track and spend
        # the rate-limited coin within its (limit per interval) allowance.
        await rl_user.set_user_info(
            interval,
            limit,
            origin.parent_coin_info.hex(),
            origin.puzzle_hash.hex(),
            origin.amount,
            admin_pubkey.hex(),
        )

        for i in range(0, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"\0"))

        for i in range(0, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"\0"))

        await time_out_assert(15, rl_user.get_confirmed_balance, 100)
        balance = await rl_user.rl_available_balance()

        tx_record = await rl_user.generate_signed_transaction(1, 32 * b"\0")

        await wallet_node_1.wallet_state_manager.main_wallet.push_transaction(tx_record)

        for i in range(0, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"\0"))

        balance = await rl_user.get_confirmed_balance()
        print(balance)

        await time_out_assert(15, rl_user.get_confirmed_balance, 99)

        rl_user.rl_get_aggregation_puzzlehash(rl_user.get_new_puzzle())
    async def test_generate_zero_val(self, two_wallet_nodes):
        num_blocks = 10
        full_nodes, wallets = two_wallet_nodes
        full_node_1, server_1 = full_nodes[0]
        wallet_node, server_2 = wallets[0]
        wallet_node_2, server_3 = wallets[1]
        wallet = wallet_node.wallet_state_manager.main_wallet
        wallet2 = wallet_node_2.wallet_state_manager.main_wallet

        ph = await wallet.get_new_puzzlehash()

        await server_2.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
        await server_3.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
        for i in range(1, 4):
            await full_node_1.farm_new_block(FarmNewBlockProtocol(ph))

        funds = sum(
            [
                calculate_base_fee(uint32(i)) + calculate_block_reward(uint32(i))
                for i in range(1, 4 - 2)
            ]
        )
        await time_out_assert(15, wallet.get_confirmed_balance, funds)

        cc_wallet: CCWallet = await CCWallet.create_new_cc(
            wallet_node.wallet_state_manager, wallet, uint64(100)
        )

        ph = await wallet2.get_new_puzzlehash()
        for i in range(1, num_blocks):
            await full_node_1.farm_new_block(FarmNewBlockProtocol(ph))

        await time_out_assert(15, cc_wallet.get_confirmed_balance, 100)
        await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 100)

        assert cc_wallet.cc_info.my_genesis_checker is not None
        colour = cc_wallet.get_colour()

        cc_wallet_2: CCWallet = await CCWallet.create_wallet_for_cc(
            wallet_node_2.wallet_state_manager, wallet2, colour
        )

        assert (
            cc_wallet.cc_info.my_genesis_checker
            == cc_wallet_2.cc_info.my_genesis_checker
        )

        await cc_wallet_2.generate_zero_val_coin()

        for i in range(1, num_blocks):
            await full_node_1.farm_new_block(FarmNewBlockProtocol(ph))

        unspent: List[WalletCoinRecord] = list(
            await cc_wallet_2.wallet_state_manager.get_spendable_coins_for_wallet(
                cc_wallet_2.id()
            )
        )
        assert len(unspent) == 1
        assert unspent.pop().coin.amount == 0
Example #11
    async def test_coin_backup(self, two_wallet_nodes):
        num_blocks = 5
        full_nodes, wallets = two_wallet_nodes
        full_node_1, server_1 = full_nodes[0]
        wallet_node, server_2 = wallets[0]
        wallet = wallet_node.wallet_state_manager.main_wallet

        ph = await wallet.get_new_puzzlehash()

        await server_2.start_client(
            PeerInfo("localhost", uint16(server_1._port)), None)
        for i in range(1, 4):
            await full_node_1.farm_new_block(FarmNewBlockProtocol(ph))

        funds = sum([
            calculate_base_fee(uint32(i)) + calculate_block_reward(uint32(i))
            for i in range(1, 4 - 2)
        ])

        await time_out_assert(15, wallet.get_confirmed_balance, funds)

        cc_wallet: CCWallet = await CCWallet.create_new_cc(
            wallet_node.wallet_state_manager, wallet, uint64(100))

        for i in range(1, num_blocks):
            await full_node_1.farm_new_block(FarmNewBlockProtocol(ph))

        await time_out_assert(15, cc_wallet.get_confirmed_balance, 100)
        await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 100)

        # Write backup to file
        filename = f"test-backup-{token_bytes(16).hex()}"
        file_path = Path(filename)
        await wallet_node.wallet_state_manager.create_wallet_backup(file_path)

        # Close wallet and restart
        db_path = wallet_node.wallet_state_manager.db_path
        wallet_node._close()
        await wallet_node._await_closed()

        # Delete the wallet DB so the restart must recover state from the backup file.
        db_path.unlink()

        # With the database deleted and no backup file supplied, the wallet cannot start.
        started = await wallet_node._start()
        assert started is False

        await wallet_node._start(backup_file=file_path)
        await server_2.start_client(
            PeerInfo("localhost", uint16(server_1._port)), None)

        all_wallets = wallet_node.wallet_state_manager.wallets
        assert len(all_wallets) == 2

        cc_wallet_from_backup = wallet_node.wallet_state_manager.wallets[2]

        await time_out_assert(15, cc_wallet_from_backup.get_confirmed_balance,
                              100)
        if file_path.exists():
            file_path.unlink()
    async def test_generate_zero_val(self, two_wallet_nodes):
        num_blocks = 4
        full_nodes, wallets = two_wallet_nodes
        full_node_api = full_nodes[0]
        full_node_server = full_node_api.server
        wallet_node, server_2 = wallets[0]
        wallet_node_2, server_3 = wallets[1]
        wallet = wallet_node.wallet_state_manager.main_wallet
        wallet2 = wallet_node_2.wallet_state_manager.main_wallet

        ph = await wallet.get_new_puzzlehash()

        await server_2.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
        await server_3.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
        for i in range(1, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))

        funds = sum(
            [
                calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
                for i in range(1, num_blocks - 1)
            ]
        )
        await time_out_assert(15, wallet.get_confirmed_balance, funds)

        cc_wallet: CCWallet = await CCWallet.create_new_cc(wallet_node.wallet_state_manager, wallet, uint64(100))

        ph = await wallet2.get_new_puzzlehash()
        for i in range(1, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))

        await time_out_assert(15, cc_wallet.get_confirmed_balance, 100)
        await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 100)

        assert cc_wallet.cc_info.my_genesis_checker is not None
        colour = cc_wallet.get_colour()

        cc_wallet_2: CCWallet = await CCWallet.create_wallet_for_cc(wallet_node_2.wallet_state_manager, wallet2, colour)

        assert cc_wallet.cc_info.my_genesis_checker == cc_wallet_2.cc_info.my_genesis_checker

        spend_bundle = await cc_wallet_2.generate_zero_val_coin()
        await time_out_assert(15, tx_in_pool, True, full_node_api.full_node.mempool_manager, spend_bundle.name())
        for i in range(1, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))

        async def unspent_count():
            unspent: List[WalletCoinRecord] = list(
                await cc_wallet_2.wallet_state_manager.get_spendable_coins_for_wallet(cc_wallet_2.id())
            )
            return len(unspent)

        await time_out_assert(15, unspent_count, 1)
        unspent: List[WalletCoinRecord] = list(
            await cc_wallet_2.wallet_state_manager.get_spendable_coins_for_wallet(cc_wallet_2.id())
        )
        assert unspent.pop().coin.amount == 0
Example #13
    async def test_simulation_1(self, simulation, extra_node):
        node1, node2, _, _, _, _, _, _, _, server1 = simulation
        await server1.start_client(PeerInfo(self_hostname, uint16(21238)))
        # Use node2 to test node communication, since only node1 extends the chain.
        await time_out_assert(1500, node_height_at_least, True, node2, 7)

        async def has_compact(node1, node2):
            peak_height_1 = node1.full_node.blockchain.get_peak_height()
            headers_1 = await node1.full_node.blockchain.get_header_blocks_in_range(
                0, peak_height_1)
            peak_height_2 = node2.full_node.blockchain.get_peak_height()
            headers_2 = await node2.full_node.blockchain.get_header_blocks_in_range(
                0, peak_height_2)
            # Commented to speed up.
            # cc_eos = [False, False]
            # icc_eos = [False, False]
            # cc_sp = [False, False]
            # cc_ip = [False, False]
            has_compact = [False, False]
            for index, headers in enumerate([headers_1, headers_2]):
                for header in headers.values():
                    for sub_slot in header.finished_sub_slots:
                        if sub_slot.proofs.challenge_chain_slot_proof.normalized_to_identity:
                            # cc_eos[index] = True
                            has_compact[index] = True
                        if (
                            sub_slot.proofs.infused_challenge_chain_slot_proof is not None
                            and sub_slot.proofs.infused_challenge_chain_slot_proof.normalized_to_identity
                        ):
                            # icc_eos[index] = True
                            has_compact[index] = True
                    if (
                        header.challenge_chain_sp_proof is not None
                        and header.challenge_chain_sp_proof.normalized_to_identity
                    ):
                        # cc_sp[index] = True
                        has_compact[index] = True
                    if header.challenge_chain_ip_proof.normalized_to_identity:
                        # cc_ip[index] = True
                        has_compact[index] = True

            # return (
            #     cc_eos == [True, True] and icc_eos == [True, True] and cc_sp == [True, True] and cc_ip == [True, True]
            # )
            return has_compact == [True, True]

        await time_out_assert(1500, has_compact, True, node1, node2)
        node3 = extra_node
        server3 = node3.full_node.server
        peak_height = max(node1.full_node.blockchain.get_peak_height(),
                          node2.full_node.blockchain.get_peak_height())
        await server3.start_client(PeerInfo(self_hostname, uint16(21237)))
        await server3.start_client(PeerInfo(self_hostname, uint16(21238)))
        await time_out_assert(600, node_height_at_least, True, node3,
                              peak_height)
    async def _address_relay(self):
        while not self.is_closed:
            try:
                relay_peer, num_peers = await self.relay_queue.get()
                relay_peer_info = PeerInfo(relay_peer.host, relay_peer.port)
                if not relay_peer_info.is_valid():
                    continue
                # https://en.bitcoin.it/wiki/Satoshi_Client_Node_Discovery#Address_Relay
                connections = self.global_connections.get_full_node_connections()
                hashes = []
                # Include the current day in the hash so the chosen relay targets rotate every 24 hours.
                cur_day = int(time.time()) // (24 * 60 * 60)
                for connection in connections:
                    peer_info = connection.get_peer_info()
                    cur_hash = int.from_bytes(
                        bytes(
                            std_hash(
                                self.key.to_bytes(32, byteorder="big")
                                + peer_info.get_key()
                                + cur_day.to_bytes(3, byteorder="big")
                            )
                        ),
                        byteorder="big",
                    )
                    hashes.append((cur_hash, connection))
                hashes.sort(key=lambda x: x[0])
                # Relay the address to the num_peers connections with the lowest hashes.
                for index, (_, connection) in enumerate(hashes):
                    if index >= num_peers:
                        break
                    peer_info = connection.get_peer_info()
                    pair = (peer_info.host, peer_info.port)
                    async with self.lock:
                        if pair in self.neighbour_known_peers and relay_peer.host in self.neighbour_known_peers[pair]:
                            continue
                        if pair not in self.neighbour_known_peers:
                            self.neighbour_known_peers[pair] = set()
                        self.neighbour_known_peers[pair].add(relay_peer.host)
                    if connection.node_id is None:
                        continue
                    msg = OutboundMessage(
                        NodeType.FULL_NODE,
                        Message(
                            "respond_peers_full_node",
                            full_node_protocol.RespondPeers([relay_peer]),
                        ),
                        Delivery.SPECIFIC,
                        connection.node_id,
                    )
                    self.server.push_message(msg)
            except Exception as e:
                self.log.error(f"Exception in address relay: {e}")
                self.log.error(f"Traceback: {traceback.format_exc()}")
Example #15
    async def test_cleanup(self):
        addrman = AddressManagerTest()
        peer1 = TimestampedPeerInfo("250.250.2.1", 8444, 100000)
        peer2 = TimestampedPeerInfo("250.250.2.2", 9999, time.time())
        source = PeerInfo("252.5.1.1", 8333)
        assert await addrman.add_to_new_table([peer1], source)
        assert await addrman.add_to_new_table([peer2], source)
        await addrman.mark_good(PeerInfo("250.250.2.2", 9999))
        assert await addrman.size() == 2
        for _ in range(5):
            await addrman.attempt(peer1, True, time.time() - 61)
        # After the repeated failed attempts, cleanup should evict peer1, leaving only peer2.
        addrman.cleanup(7 * 3600 * 24, 5)
        assert await addrman.size() == 1
def service_kwargs_for_farmer(root_path):
    service_name = "farmer"
    config = load_config_cli(root_path, "config.yaml", service_name)
    keychain = Keychain()

    connect_peers = [
        PeerInfo(config["full_node_peer"]["host"],
                 config["full_node_peer"]["port"])
    ]

    # TODO: Remove once we have pool server
    config_pool = load_config_cli(root_path, "config.yaml", "pool")
    api = Farmer(config, config_pool, keychain, constants)

    kwargs = dict(
        root_path=root_path,
        api=api,
        node_type=NodeType.FARMER,
        advertised_port=config["port"],
        service_name=service_name,
        server_listen_ports=[config["port"]],
        connect_peers=connect_peers,
        auth_connect_peers=False,
        on_connect_callback=api._on_connect,
    )
    if config["start_rpc_server"]:
        kwargs["rpc_info"] = (FarmerRpcApi, config["rpc_port"])
    return kwargs
    async def test_blocks_load(self, two_nodes):
        num_blocks = 50
        full_node_1, full_node_2, server_1, server_2 = two_nodes
        blocks = bt.get_consecutive_blocks(num_blocks)
        peer = await connect_and_get_peer(server_1, server_2)
        await full_node_1.full_node.respond_block(
            full_node_protocol.RespondBlock(blocks[0]), peer)

        await server_2.start_client(
            PeerInfo(self_hostname, uint16(server_1._port)), None)

        async def num_connections():
            return len(server_2.get_connections())

        await time_out_assert(10, num_connections, 1)

        start_unf = time.time()
        for i in range(1, num_blocks):
            await full_node_1.full_node.respond_block(
                full_node_protocol.RespondBlock(blocks[i]))
            await full_node_2.full_node.respond_block(
                full_node_protocol.RespondBlock(blocks[i]))
        print(
            f"Time taken to process {num_blocks} is {time.time() - start_unf}")
        assert time.time() - start_unf < 100
    async def test_colour_creation(self, two_wallet_nodes):
        num_blocks = 5
        full_nodes, wallets = two_wallet_nodes
        full_node_1, server_1 = full_nodes[0]
        wallet_node, server_2 = wallets[0]
        wallet = wallet_node.wallet_state_manager.main_wallet

        ph = await wallet.get_new_puzzlehash()

        await server_2.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
        for i in range(1, 4):
            await full_node_1.farm_new_block(FarmNewBlockProtocol(ph))

        funds = sum(
            [
                calculate_base_fee(uint32(i)) + calculate_block_reward(uint32(i))
                for i in range(1, 4 - 2)
            ]
        )

        await time_out_assert(15, wallet.get_confirmed_balance, funds)

        cc_wallet: CCWallet = await CCWallet.create_new_cc(
            wallet_node.wallet_state_manager, wallet, uint64(100)
        )

        for i in range(1, num_blocks):
            await full_node_1.farm_new_block(FarmNewBlockProtocol(ph))

        await time_out_assert(15, cc_wallet.get_confirmed_balance, 100)
        await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 100)
def service_kwargs_for_farmer(
    root_path: pathlib.Path,
    config: Dict,
    config_pool: Dict,
    keychain: Keychain,
    consensus_constants: ConsensusConstants,
) -> Dict:

    connect_peers = []
    fnp = config.get("full_node_peer")
    if fnp is not None:
        connect_peers.append(PeerInfo(fnp["host"], fnp["port"]))

    genesis_challenge = bytes32(bytes.fromhex(config["network_genesis_challenges"][config["selected_network"]]))

    farmer = Farmer(config, config_pool, keychain, consensus_constants.replace(GENESIS_CHALLENGE=genesis_challenge))
    peer_api = FarmerAPI(farmer)

    kwargs = dict(
        root_path=root_path,
        node=farmer,
        peer_api=peer_api,
        node_type=NodeType.FARMER,
        advertised_port=config["port"],
        service_name=SERVICE_NAME,
        server_listen_ports=[config["port"]],
        connect_peers=connect_peers,
        auth_connect_peers=False,
        on_connect_callback=farmer.on_connect,
        network_id=genesis_challenge,
    )
    if config["start_rpc_server"]:
        kwargs["rpc_info"] = (FarmerRpcApi, config["rpc_port"])
    return kwargs
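
A minimal sketch of how these kwargs are typically consumed; run_service, DEFAULT_ROOT_PATH and DEFAULT_CONSTANTS are assumptions here, and the exact entry-point imports differ between versions:

def main():
    config = load_config_cli(DEFAULT_ROOT_PATH, "config.yaml", SERVICE_NAME)
    config_pool = load_config_cli(DEFAULT_ROOT_PATH, "config.yaml", "pool")
    keychain = Keychain()
    kwargs = service_kwargs_for_farmer(DEFAULT_ROOT_PATH, config, config_pool, keychain, DEFAULT_CONSTANTS)
    return run_service(**kwargs)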
Example #20
    async def test_fast_sync_wallet(self, wallet_node_starting_height):
        num_blocks = 50
        blocks = bt.get_consecutive_blocks(test_constants, num_blocks, [])
        full_node_1, wallet_node, server_1, server_2 = wallet_node_starting_height

        for i in range(1, len(blocks)):
            async for _ in full_node_1.respond_block(
                    full_node_protocol.RespondBlock(blocks[i])):
                pass

        await server_2.start_client(
            PeerInfo("localhost", uint16(server_1._port)), None)

        start = time.time()
        found = False
        while time.time() - start < 60:
            if (wallet_node.wallet_state_manager.block_records[
                    wallet_node.wallet_state_manager.lca].height >=
                    num_blocks - 6):
                found = True
                break
            await asyncio.sleep(0.1)
        if not found:
            raise Exception(
                f"Took too long to process blocks, stopped at: {time.time() - start}"
            )
    async def test_wallet_coinbase_reorg(self, wallet_node):
        num_blocks = 10
        full_nodes, wallets = wallet_node
        full_node_1, server_1 = full_nodes[0]
        wallet_node, server_2 = wallets[0]
        wallet = wallet_node.wallet_state_manager.main_wallet
        ph = await wallet.get_new_puzzlehash()

        await server_2.start_client(PeerInfo("localhost", uint16(server_1._port)), None)
        for i in range(1, num_blocks):
            await full_node_1.farm_new_block(FarmNewBlockProtocol(ph))

        await asyncio.sleep(3)
        funds = sum(
            [
                calculate_base_fee(uint32(i)) + calculate_block_reward(uint32(i))
                for i in range(1, num_blocks - 2)
            ]
        )
        assert await wallet.get_confirmed_balance() == funds

        await full_node_1.reorg_from_index_to_new_index(
            ReorgProtocol(uint32(5), uint32(num_blocks + 3), token_bytes())
        )
        await asyncio.sleep(3)

        funds = sum(
            [
                calculate_base_fee(uint32(i)) + calculate_block_reward(uint32(i))
                for i in range(1, 5)
            ]
        )

        assert await wallet.get_confirmed_balance() == funds
Example #22
    def _num_needed_peers(self) -> int:
        assert self.server is not None
        diff = self.config["target_peer_count"] - len(
            self.global_connections.get_full_node_connections())
        if diff < 0:
            return 0

        if "full_node_peer" in self.config:
            full_node_peer = PeerInfo(
                self.config["full_node_peer"]["host"],
                self.config["full_node_peer"]["port"],
            )
            peers = [
                c.get_peer_info()
                for c in self.global_connections.get_full_node_connections()
            ]
            if full_node_peer in peers:
                self.log.info(
                    f"Will not attempt to connect to other nodes, already connected to {full_node_peer}"
                )
                for connection in self.global_connections.get_full_node_connections():
                    if connection.get_peer_info() != full_node_peer:
                        self.log.info(
                            f"Closing unnecessary connection to {connection.get_peer_info()}."
                        )
                        self.global_connections.close(connection)
                return 0
        return diff
Example #23
def service_kwargs_for_harvester(root_path=DEFAULT_ROOT_PATH):
    service_name = "harvester"
    config = load_config_cli(root_path, "config.yaml", service_name)

    connect_peers = [
        PeerInfo(config["farmer_peer"]["host"], config["farmer_peer"]["port"])
    ]

    api = Harvester(root_path, constants)

    async def start_callback():
        await api._start()

    def stop_callback():
        api._close()

    async def await_closed_callback():
        await api._await_closed()

    kwargs = dict(
        root_path=root_path,
        api=api,
        node_type=NodeType.HARVESTER,
        advertised_port=config["port"],
        service_name=service_name,
        server_listen_ports=[config["port"]],
        connect_peers=connect_peers,
        auth_connect_peers=True,
        start_callback=start_callback,
        stop_callback=stop_callback,
        await_closed_callback=await_closed_callback,
    )
    if config["start_rpc_server"]:
        kwargs["rpc_info"] = (HarvesterRpcApi, config["rpc_port"])
    return kwargs
Example #24
    def get_peer_info(self) -> Optional[PeerInfo]:
        result = self.ws._writer.transport.get_extra_info("peername")
        if result is None:
            return None
        connection_host = result[0]
        port = self.peer_server_port if self.peer_server_port is not None else self.peer_port
        return PeerInfo(connection_host, port)
    def __init__(
        self,
        server: ChiaServer,
        root_path: Path,
        target_outbound_count: int,
        peer_db_path: str,
        introducer_info: Optional[Dict],
        peer_connect_interval: int,
        log,
    ):
        self.server: ChiaServer = server
        self.message_queue: asyncio.Queue = asyncio.Queue()
        self.is_closed = False
        self.target_outbound_count = target_outbound_count
        self.peer_db_path = path_from_root(root_path, peer_db_path)
        if introducer_info is not None:
            self.introducer_info: Optional[PeerInfo] = PeerInfo(
                introducer_info["host"],
                introducer_info["port"],
            )
        else:
            self.introducer_info = None
        self.peer_connect_interval = peer_connect_interval
        self.log = log
        self.relay_queue = None
        self.address_manager = None
        self.connection_time_pretest: Dict = {}
    async def test_get_wallet_for_colour(self, two_wallet_nodes):
        num_blocks = 3
        full_nodes, wallets = two_wallet_nodes
        full_node_api = full_nodes[0]
        full_node_server = full_node_api.server
        wallet_node, server_2 = wallets[0]
        wallet = wallet_node.wallet_state_manager.main_wallet

        ph = await wallet.get_new_puzzlehash()

        await server_2.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)

        for i in range(1, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))

        funds = sum(
            [
                calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
                for i in range(1, num_blocks - 1)
            ]
        )

        await time_out_assert(15, wallet.get_confirmed_balance, funds)

        cc_wallet: CCWallet = await CCWallet.create_new_cc(wallet_node.wallet_state_manager, wallet, uint64(100))

        for i in range(1, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))

        colour = cc_wallet.get_colour()
        assert await wallet_node.wallet_state_manager.get_wallet_for_colour(colour) == cc_wallet
def service_kwargs_for_timelord(root_path):
    service_name = "timelord"
    config = load_config_cli(root_path, "config.yaml", service_name)

    connect_peers = [
        PeerInfo(config["full_node_peer"]["host"],
                 config["full_node_peer"]["port"])
    ]

    api = Timelord(config, constants["DISCRIMINANT_SIZE_BITS"])

    async def start_callback():
        await api._start()

    def stop_callback():
        api._close()

    async def await_closed_callback():
        await api._await_closed()

    kwargs = dict(
        root_path=root_path,
        api=api,
        node_type=NodeType.TIMELORD,
        advertised_port=config["port"],
        service_name=service_name,
        server_listen_ports=[config["port"]],
        start_callback=start_callback,
        stop_callback=stop_callback,
        await_closed_callback=await_closed_callback,
        connect_peers=connect_peers,
        auth_connect_peers=False,
    )
    return kwargs
    async def test_colour_creation(self, two_wallet_nodes):
        num_blocks = 3
        full_nodes, wallets = two_wallet_nodes
        full_node_api = full_nodes[0]
        full_node_server = full_node_api.server
        wallet_node, server_2 = wallets[0]
        wallet = wallet_node.wallet_state_manager.main_wallet

        ph = await wallet.get_new_puzzlehash()

        await server_2.start_client(PeerInfo("localhost", uint16(full_node_server._port)), None)
        for i in range(1, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(ph))

        funds = sum(
            [
                calculate_pool_reward(uint32(i)) + calculate_base_farmer_reward(uint32(i))
                for i in range(1, num_blocks - 1)
            ]
        )

        await time_out_assert(15, wallet.get_confirmed_balance, funds)

        cc_wallet: CCWallet = await CCWallet.create_new_cc(wallet_node.wallet_state_manager, wallet, uint64(100))
        tx_queue: List[TransactionRecord] = await wallet_node.wallet_state_manager.get_send_queue()
        tx_record = tx_queue[0]
        await time_out_assert(
            15, tx_in_pool, True, full_node_api.full_node.mempool_manager, tx_record.spend_bundle.name()
        )
        for i in range(1, num_blocks):
            await full_node_api.farm_new_transaction_block(FarmNewBlockProtocol(32 * b"0"))

        await time_out_assert(15, cc_wallet.get_confirmed_balance, 100)
        await time_out_assert(15, cc_wallet.get_unconfirmed_balance, 100)
Example #29
    def _start_bg_tasks(self):
        """
        Start a background task that checks connection and reconnects periodically to the farmer.
        """

        farmer_peer = PeerInfo(
            self.config["farmer_peer"]["host"], self.config["farmer_peer"]["port"]
        )

        async def connection_check():
            while not self._is_shutdown:
                if self.server is not None:
                    farmer_retry = True

                    for connection in self.server.global_connections.get_connections():
                        if connection.get_peer_info() == farmer_peer:
                            farmer_retry = False

                    if farmer_retry:
                        # Not currently connected to the configured farmer; try to reconnect.
                        log.info(f"Reconnecting to farmer {farmer_peer}")
                        if not await self.server.start_client(
                            farmer_peer, None, auth=True
                        ):
                            await asyncio.sleep(1)
                await asyncio.sleep(30)

        self.reconnect_task = asyncio.create_task(connection_check())
Example #30
    async def test_blocks_load(self, two_nodes):
        num_blocks = 100
        full_node_1, full_node_2, server_1, server_2 = two_nodes
        blocks = bt.get_consecutive_blocks(test_constants, num_blocks, [], 10)

        await server_2.start_client(
            PeerInfo(server_1._host, uint16(server_1._port)), None
        )

        await asyncio.sleep(2)  # Allow connections to get made

        start_unf = time.time()
        for i in range(1, num_blocks):
            msg = Message("block", peer_protocol.Block(blocks[i]))
            server_1.push_message(
                OutboundMessage(NodeType.FULL_NODE, msg, Delivery.BROADCAST)
            )

        while time.time() - start_unf < 100:
            if (
                max([h.height for h in full_node_2.blockchain.get_current_tips()])
                == num_blocks - 1
            ):
                print(
                    f"Time taken to process {num_blocks} is {time.time() - start_unf}"
                )
                return
            await asyncio.sleep(0.1)

        raise Exception("Took too long to process blocks")