    async def deserialize(self) -> AddressManager:
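        """
        Reconstruct an AddressManager from the stored metadata, nodes and
        new-table entries.
        """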
        address_manager = AddressManager()
        metadata = await self.get_metadata()
        nodes = await self.get_nodes()
        new_table_entries = await self.get_new_table()
        address_manager.clear()

        address_manager.key = int(metadata["key"])
        address_manager.new_count = int(metadata["new_count"])
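        # tried_count is recomputed below as tried entries are re-inserted,
        # so the serialized value is not restored here.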
        # address_manager.tried_count = int(metadata["tried_count"])
        address_manager.tried_count = 0

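        # Node ids below new_count belong to the new tables: restore their
        # host/id lookups and positions in the random-order list.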
        new_table_nodes = [(node_id, info) for node_id, info in nodes
                           if node_id < address_manager.new_count]
        for n, info in new_table_nodes:
            address_manager.map_addr[info.peer_info.host] = n
            address_manager.map_info[n] = info
            info.random_pos = len(address_manager.random_pos)
            address_manager.random_pos.append(n)
        address_manager.id_count = len(new_table_nodes)
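        # Remaining ids are tried entries: re-place each one into its tried
        # bucket, skipping any entry whose slot is already occupied.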
        tried_table_nodes = [(node_id, info) for node_id, info in nodes
                             if node_id >= address_manager.new_count]
        # lost_count = 0
        for node_id, info in tried_table_nodes:
            tried_bucket = info.get_tried_bucket(address_manager.key)
            tried_bucket_pos = info.get_bucket_position(
                address_manager.key, False, tried_bucket)
            if address_manager.tried_matrix[tried_bucket][
                    tried_bucket_pos] == -1:
                info.random_pos = len(address_manager.random_pos)
                info.is_tried = True
                id_count = address_manager.id_count
                address_manager.random_pos.append(id_count)
                address_manager.map_info[id_count] = info
                address_manager.map_addr[info.peer_info.host] = id_count
                address_manager.tried_matrix[tried_bucket][
                    tried_bucket_pos] = id_count
                address_manager.id_count += 1
                address_manager.tried_count += 1
            # else:
            #    lost_count += 1

        # address_manager.tried_count -= lost_count
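        # Re-attach new-table entries to their buckets, respecting the
        # per-address reference limit.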
        for node_id, bucket in new_table_entries:
            if 0 <= node_id < address_manager.new_count:
                info = address_manager.map_info[node_id]
                bucket_pos = info.get_bucket_position(address_manager.key,
                                                      True, bucket)
                if (address_manager.new_matrix[bucket][bucket_pos] == -1
                        and info.ref_count < NEW_BUCKETS_PER_ADDRESS):
                    info.ref_count += 1
                    address_manager.new_matrix[bucket][bucket_pos] = node_id

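        # Drop entries that ended up in neither table.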
        for node_id, info in list(address_manager.map_info.items()):
            if not info.is_tried and info.ref_count == 0:
                address_manager.delete_new_entry_(node_id)
        address_manager.load_used_table_positions()
        return address_manager

    @classmethod
    async def _deserialize(cls, peers_file_path: Path) -> AddressManager:
        """
        Create an address manager using data deserialized from a peers file.
        """
        peer_data: Optional[PeerDataSerialization] = None
        address_manager = AddressManager()
        start_time = timer()
        try:
            peer_data = await cls._read_peers(peers_file_path)
        except Exception:
            log.exception(
                f"Unable to deserialize peers from {peers_file_path}")

        if peer_data is not None:
            metadata: Dict[str, str] = {
                key: value
                for key, value in peer_data.metadata
            }
            nodes: List[Tuple[int, ExtendedPeerInfo]] = [
                (node_id, ExtendedPeerInfo.from_string(info_str))
                for node_id, info_str in peer_data.nodes
            ]
            new_table_entries: List[Tuple[int, int]] = [
                (node_id, bucket) for node_id, bucket in peer_data.new_table
            ]
            log.debug(
                f"Deserializing peer data took {timer() - start_time} seconds")

            address_manager.key = int(metadata["key"])
            address_manager.new_count = int(metadata["new_count"])
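            # tried_count is recounted below from the tried entries.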
            # address_manager.tried_count = int(metadata["tried_count"])
            address_manager.tried_count = 0

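            # Restore new-table nodes (ids below new_count) into the lookup
            # maps and the random-order list.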
            new_table_nodes = [(node_id, info) for node_id, info in nodes
                               if node_id < address_manager.new_count]
            for n, info in new_table_nodes:
                address_manager.map_addr[info.peer_info.host] = n
                address_manager.map_info[n] = info
                info.random_pos = len(address_manager.random_pos)
                address_manager.random_pos.append(n)
            address_manager.id_count = len(new_table_nodes)
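            # Re-place tried entries into their buckets; entries whose slot
            # is already occupied are dropped.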
            tried_table_nodes = [(node_id, info) for node_id, info in nodes
                                 if node_id >= address_manager.new_count]
            # lost_count = 0
            for node_id, info in tried_table_nodes:
                tried_bucket = info.get_tried_bucket(address_manager.key)
                tried_bucket_pos = info.get_bucket_position(
                    address_manager.key, False, tried_bucket)
                if address_manager.tried_matrix[tried_bucket][
                        tried_bucket_pos] == -1:
                    info.random_pos = len(address_manager.random_pos)
                    info.is_tried = True
                    id_count = address_manager.id_count
                    address_manager.random_pos.append(id_count)
                    address_manager.map_info[id_count] = info
                    address_manager.map_addr[info.peer_info.host] = id_count
                    address_manager.tried_matrix[tried_bucket][
                        tried_bucket_pos] = id_count
                    address_manager.id_count += 1
                    address_manager.tried_count += 1
                # else:
                #    lost_count += 1

            # address_manager.tried_count -= lost_count
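            # Re-attach new-table entries to their buckets, respecting the
            # per-address reference limit.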
            for node_id, bucket in new_table_entries:
                if 0 <= node_id < address_manager.new_count:
                    info = address_manager.map_info[node_id]
                    bucket_pos = info.get_bucket_position(
                        address_manager.key, True, bucket)
                    if (address_manager.new_matrix[bucket][bucket_pos] == -1
                            and info.ref_count < NEW_BUCKETS_PER_ADDRESS):
                        info.ref_count += 1
                        address_manager.new_matrix[bucket][
                            bucket_pos] = node_id

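            # Drop entries that ended up in neither table.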
            for node_id, info in list(address_manager.map_info.items()):
                if not info.is_tried and info.ref_count == 0:
                    address_manager.delete_new_entry_(node_id)

            address_manager.load_used_table_positions()

        return address_manager