Example #1
    def __init__(self,
                 my_peer,
                 endpoint,
                 network,
                 rqc_settings: RemoteQueryCommunitySettings = None,
                 metadata_store=None,
                 **kwargs):
        super().__init__(my_peer, endpoint, network=network, **kwargs)

        self.rqc_settings = rqc_settings
        self.mds: MetadataStore = metadata_store

        # This object stores requests for "select" queries that we sent to other hosts.
        # We keep track of the peers we actually requested data from, so random hosts cannot push spam at us.
        # It also keeps track of the hosts we responded to, because those hosts may push
        # updates back to us, and we need to allow that.
        self.request_cache = RequestCache()

        self.add_message_handler(RemoteSelectPayload, self.on_remote_select)
        self.add_message_handler(RemoteSelectPayloadEva,
                                 self.on_remote_select_eva)
        self.add_message_handler(SelectResponsePayload,
                                 self.on_remote_select_response)

        self.eva_init()
        self.eva_register_receive_callback(self.on_receive)
        self.eva_register_send_complete_callback(self.on_send_complete)
        self.eva_register_error_callback(self.on_error)
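
A RequestCache started in a constructor like this one must also be shut down when the community unloads; a minimal companion sketch, matching the unload() shown in Example #11 below:

    async def unload(self):
        # Cancel pending request-cache timeouts before the community shuts down.
        await self.request_cache.shutdown()
        await super().unload()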
Example #2
    def __init__(self, *args, **kwargs) -> None:
        """
        Initialize the community.
        :param persistence: The database that stores transactions, will be created if not provided.
        :param database_path: The path at which the database will be created. Defaults to the current working directory.
        """
        self.settings = kwargs.pop('settings', BandwidthAccountingSettings())
        self.database = kwargs.pop('database', None)
        self.database_path = Path(kwargs.pop('database_path', ''))
        self.random = Random()

        super().__init__(*args, **kwargs)

        self.request_cache = RequestCache()
        self.my_pk = self.my_peer.public_key.key_to_bin()

        if not self.database:
            self.database = BandwidthDatabase(self.database_path, self.my_pk)

        self.add_message_handler(BandwidthTransactionPayload,
                                 self.received_transaction)
        self.add_message_handler(BandwidthTransactionQueryPayload,
                                 self.received_query)

        self.register_task("query_peers",
                           self.query_random_peer,
                           interval=self.settings.outgoing_query_interval)

        self.logger.info(
            "Started bandwidth accounting community with public key %s",
            hexlify(self.my_pk))
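
A hedged usage sketch for the constructor above; the class name BandwidthAccountingCommunity is inferred from the log message, and my_peer, endpoint and network are assumed to come from the hosting IPv8 instance:

from pathlib import Path

community = BandwidthAccountingCommunity(
    my_peer, endpoint, network,
    settings=BandwidthAccountingSettings(),
    database_path=Path("./bandwidth_db"),
)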
Example #3
    def __init__(self, endpoint):
        my_peer = Peer(default_eccrypto.generate_key(u"very-low"))
        self.signature_length = default_eccrypto.get_signature_length(
            my_peer.public_key)
        super().__init__(my_peer, endpoint, Network())
        self.request_cache = RequestCache()
        # Listen to all incoming packets (not just the fake community_id).
        self.endpoint.add_listener(self)
        self.churn_strategy = TrackerChurn(self)
        self.churn_task = self.register_task("churn",
                                             self.churn_strategy.take_step,
                                             interval=10)
Example #4
    def __init__(self, *args, **kwargs):
        if kwargs.get("work_dir"):
            self.work_dir = kwargs.pop("work_dir")
        super().__init__(*args, **kwargs)
        self._req = RequestCache()

        for base in self.__class__.__bases__:
            if issubclass(base, MessageStateMachine):
                base.setup_messages(self)
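
A hedged sketch of the mixin contract the loop above appears to rely on; the MessageStateMachine shape shown here is an assumption, not the library's actual class:

class MessageStateMachine:
    """Assumed base class: every message-carrying mixin implements setup_messages()."""

    @classmethod
    def setup_messages(cls, community):
        # A concrete mixin would call community.add_message_handler(<its payload>, <its handler>)
        # here; raising keeps the contract explicit in this sketch.
        raise NotImplementedError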
Example #5
    def __init__(self, *args, **kwargs):
        working_directory = kwargs.pop("working_directory", "")
        self.persistence = kwargs.pop("persistence", None)
        db_name = kwargs.pop("db_name", self.DB_NAME)
        self.settings = kwargs.pop("settings", TrustChainSettings())
        self.receive_block_lock = RLock()

        super(TrustChainCommunity, self).__init__(*args, **kwargs)
        self.request_cache = RequestCache()
        self.logger = logging.getLogger(self.__class__.__name__)

        if not self.persistence:
            db_path = (
                os.path.join(working_directory, db_name)
                if working_directory != ":memory:"
                else working_directory
            )
            self.persistence = self.DB_CLASS(
                db_path, self.my_peer.public_key.key_to_bin()
            )
        self.relayed_broadcasts = set()
        self.relayed_broadcasts_order = deque()
        self.logger.debug(
            "The trustchain community started with Public Key: %s",
            hexlify(self.my_peer.public_key.key_to_bin()),
        )
        self.shutting_down = False
        self.listeners_map = {}  # Map of block_type -> [callbacks]
        self.register_task("db_cleanup", self.do_db_cleanup, interval=600)

        self.add_message_handler(HalfBlockPayload, self.received_half_block)
        self.add_message_handler(CrawlRequestPayload, self.received_crawl_request)
        self.add_message_handler(CrawlResponsePayload, self.received_crawl_response)
        self.add_message_handler(HalfBlockPairPayload, self.received_half_block_pair)
        self.add_message_handler(
            HalfBlockBroadcastPayload, self.received_half_block_broadcast
        )
        self.add_message_handler(
            HalfBlockPairBroadcastPayload, self.received_half_block_pair_broadcast
        )
        self.add_message_handler(
            EmptyCrawlResponsePayload, self.received_empty_crawl_response
        )
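
A hedged sketch of a listener object compatible with the listeners_map hooks used by this community; the method names follow the calls made in Example #14 below (BLOCK_CLASS, should_sign, received_block, on_counter_signed_block), and TrustChainBlock is assumed to be importable from the surrounding codebase:

class MyBlockListener:
    BLOCK_CLASS = TrustChainBlock  # stored into persistence.block_types by add_listener()

    def should_sign(self, block):
        # Counter-sign every incoming half block of this type.
        return True

    def received_block(self, block):
        print("Received block:", block)

    def on_counter_signed_block(self, block):
        print("Counter-signed block:", block)

Registration would then look like community.add_listener(MyBlockListener(), [b'my_block_type']), where the block type is an illustrative value.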
Example #6
class MyCommunity(Community):
    community_id = community_id

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.request_cache = RequestCache()

    def send_intro_request(self, target_addr):
        packet = self.create_introduction_request(target_addr, new_style=self.network.is_new_style(target_addr))
        cache = NumberCache(self.request_cache, 'intro-req', self.global_time)
        cache.future = Future()
        self.request_cache.add(cache)
        self.endpoint.send(target_addr, packet)
        return cache.future

    def introduction_response_callback(self, peer, dist, payload):
        if self.request_cache.has('intro-req', payload.identifier):
            cache = self.request_cache.pop('intro-req', payload.identifier)
            if not cache.future.done():
                cache.future.set_result(payload)
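
A hedged usage sketch: the Future stored on the cache resolves with the introduction response payload once introduction_response_callback fires, so a caller can await it directly (the helper name is illustrative):

    async def ping_peer(self, target_addr):
        # Send an introduction request and wait for the matching response payload.
        response = await self.send_intro_request(target_addr)
        print("Introduction response received, identifier:", response.identifier)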
Example #7
    def __init__(self, my_peer, endpoint, network, metadata_store, settings=None, notifier=None):
        super(RemoteQueryCommunity, self).__init__(my_peer, endpoint, network)

        self.notifier = notifier
        self.max_peers = 60

        self.settings = settings or RemoteQueryCommunitySettings()

        self.mds = metadata_store

        # This set contains all the peers that we queried for subscribed channels over time.
        # It is emptied regularly. Its purpose is to act as a filter so we never query the same
        # peer twice; if we ever do, it should happen only very rarely.
        # TODO: use a Bloom filter here instead. We actually *want* it to become all-false-positives eventually.
        self.queried_subscribed_channels_peers = set()
        self.queried_peers_limit = 1000

        self.add_message_handler(RemoteSelectPayload, self.on_remote_select)
        self.add_message_handler(SelectResponsePayload, self.on_remote_select_response)

        self.request_cache = RequestCache()
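
A hedged sketch of how the filter set above might be consulted on a walk step; the method name, the availability of send_remote_select on this community (as in Examples #9 and #11), and the query keyword arguments are all assumptions:

    def maybe_query_subscribed_channels(self, peer):
        # Reset the filter once it grows past the limit, then skip peers we already queried.
        if len(self.queried_subscribed_channels_peers) >= self.queried_peers_limit:
            self.queried_subscribed_channels_peers.clear()
        if peer.mid in self.queried_subscribed_channels_peers:
            return
        self.queried_subscribed_channels_peers.add(peer.mid)
        self.send_remote_select(peer, subscribed=True, first=0, last=50)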
Example #8
    def __init__(self,
                 my_peer,
                 endpoint,
                 network,
                 metadata_store,
                 notifier=None):
        super(GigaChannelCommunity, self).__init__(my_peer, endpoint, network)
        self.metadata_store = metadata_store
        self.add_message_handler(self.NEWS_PUSH_MESSAGE, self.on_blob)
        self.add_message_handler(self.SEARCH_REQUEST, self.on_search_request)
        self.add_message_handler(self.SEARCH_RESPONSE, self.on_search_response)
        self.request_cache = RequestCache()
        self.notifier = notifier

        self.gossip_blob = None
        self.gossip_blob_personal_channel = None

        # We regularly regenerate the gossip blobs to account for changes in the local DB
        self.register_task("Renew channel gossip cache",
                           self.prepare_gossip_blob_cache,
                           interval=600)
Example #9
class RemoteQueryCommunity(Community):
    """
    Community for general-purpose SELECT-like queries into a remote Channels database.
    """
    def __init__(self,
                 my_peer,
                 endpoint,
                 network,
                 metadata_store,
                 settings=None,
                 **kwargs):
        super().__init__(my_peer, endpoint, network=network, **kwargs)

        self.settings = settings or RemoteQueryCommunitySettings()
        self.mds = metadata_store

        # This object stores requests for "select" queries that we sent to other hosts.
        # We keep track of the peers we actually requested data from, so random hosts cannot push spam at us.
        # It also keeps track of the hosts we responded to, because those hosts may push
        # updates back to us, and we need to allow that.
        self.request_cache = RequestCache()

        self.add_message_handler(RemoteSelectPayload, self.on_remote_select)
        self.add_message_handler(SelectResponsePayload,
                                 self.on_remote_select_response)

    def send_remote_select(self, peer, processing_callback=None, **kwargs):

        request = SelectRequest(self.request_cache, hexlify(peer.mid), kwargs,
                                processing_callback)
        self.request_cache.add(request)

        self.logger.info(f"Select to {hexlify(peer.mid)} with ({kwargs})")
        self.ez_send(
            peer,
            RemoteSelectPayload(request.number,
                                json.dumps(kwargs).encode('utf8')))

    async def process_rpc_query(self, json_bytes: bytes):
        """
        Retrieve the result of a database query from a third party, encoded as raw JSON bytes (through `dumps`).
        :raises TypeError: if the JSON contains invalid keys.
        :raises ValueError: if no JSON could be decoded.
        :raises pony.orm.dbapiprovider.OperationalError: if an illegal query was performed.
        """
        request_sanitized = sanitize_query(json.loads(json_bytes),
                                           self.settings.max_response_size)
        return await self.mds.MetadataNode.get_entries_threaded(
            **request_sanitized)

    def send_db_results(self, peer, request_payload_id, db_results):
        index = 0
        while index < len(db_results):
            data, index = entries_to_chunk(db_results,
                                           self.settings.maximum_payload_size,
                                           start_index=index)
            self.ez_send(peer, SelectResponsePayload(request_payload_id, data))

    @lazy_wrapper(RemoteSelectPayload)
    async def on_remote_select(self, peer, request_payload):
        try:
            db_results = await self.process_rpc_query(request_payload.json)

            # When we send our response to a host, we open a window of opportunity
            # for it to push back updates
            if db_results and not self.request_cache.has(
                    hexlify(peer.mid), request_payload.id):
                self.request_cache.add(
                    PushbackWindow(self.request_cache, hexlify(peer.mid),
                                   request_payload.id))

            self.send_db_results(peer, request_payload.id, db_results)
        except (OperationalError, TypeError, ValueError) as error:
            self.logger.error(f"Remote select. The error occurred: {error}")

    @lazy_wrapper(SelectResponsePayload)
    async def on_remote_select_response(self, peer, response_payload):
        """
        Match the the response that we received from the network to a query cache
        and process it by adding the corresponding entries to the MetadataStore database.
        This processes both direct responses and pushback (updates) responses
        """
        self.logger.info(f"Response from {hexlify(peer.mid)}")

        # ACHTUNG! The returned request cache can be either a SelectRequest or a PushbackWindow
        request = self.request_cache.get(hexlify(peer.mid),
                                         response_payload.id)
        if request is None:
            return

        # Check for limit on the number of packets per request
        if request.packets_limit > 1:
            request.packets_limit -= 1
        else:
            self.request_cache.pop(hexlify(peer.mid), response_payload.id)

        processing_results = await self.mds.process_compressed_mdblob_threaded(
            response_payload.raw_blob)
        self.logger.info(f"Response result: {processing_results}")

        # If we know about newer versions of the received entries, push the updates back
        if isinstance(
                request,
                SelectRequest) and self.settings.push_updates_back_enabled:
            newer_entities = [
                md for md, result in processing_results
                if result == GOT_NEWER_VERSION
            ]
            self.send_db_results(peer, response_payload.id, newer_entities)

        # Query back the sender for preview contents for the new channels
        # TODO: maybe transform this into a processing_callback?
        if self.settings.channel_query_back_enabled:
            new_channels = [
                md for md, result in processing_results
                if result in (UNKNOWN_CHANNEL, UNKNOWN_COLLECTION)
            ]
            for channel in new_channels:
                request_dict = {
                    "channel_pk": hexlify(channel.public_key),
                    "origin_id": channel.id_,
                    "first": 0,
                    "last": self.settings.max_channel_query_back,
                }
                self.send_remote_select(peer=peer, **request_dict)

        if isinstance(request, SelectRequest) and request.processing_callback:
            request.processing_callback(request, processing_results)

    async def unload(self):
        await self.request_cache.shutdown()
        await super().unload()
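
The community above references two request-cache entry types that are defined elsewhere. A hedged sketch of their likely shape, built on py-ipv8's NumberCache/RandomNumberCache (the constructor details, timeout behaviour and packets_limit value are assumptions):

from ipv8.requestcache import NumberCache, RandomNumberCache


class SelectRequest(RandomNumberCache):
    """Tracks a remote select that we initiated, keyed by a random number."""

    def __init__(self, request_cache, prefix, request_kwargs, processing_callback=None):
        super().__init__(request_cache, prefix)
        self.request_kwargs = request_kwargs
        self.processing_callback = processing_callback
        self.packets_limit = 10  # assumed cap on response packets per request

    def on_timeout(self):
        pass


class PushbackWindow(NumberCache):
    """Lets the peer we answered push updates back, keyed by the original request id."""

    def __init__(self, request_cache, prefix, original_request_id):
        super().__init__(request_cache, prefix, original_request_id)
        self.packets_limit = 10  # assumed

    def on_timeout(self):
        pass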
Example #10
class GigaChannelCommunity(Community):
    """
    Community to gossip around gigachannels.
    """

    master_peer = Peer(
        unhexlify(
            "4c69624e61434c504b3a75d4e05e6e1eff04861806a3ca7af9e5dbb43ef212eb60e319449fc7b9f7d20"
            "f67503f32c822d4333694bf9862756c5c04552585f820af2ce2e153389fa35fee"
        ))

    NEWS_PUSH_MESSAGE = 1
    SEARCH_REQUEST = 2
    SEARCH_RESPONSE = 3

    def __init__(self,
                 my_peer,
                 endpoint,
                 network,
                 metadata_store,
                 notifier=None):
        super(GigaChannelCommunity, self).__init__(my_peer, endpoint, network)
        self.metadata_store = metadata_store
        self.add_message_handler(self.NEWS_PUSH_MESSAGE, self.on_blob)
        self.add_message_handler(self.SEARCH_REQUEST, self.on_search_request)
        self.add_message_handler(self.SEARCH_RESPONSE, self.on_search_response)
        self.request_cache = RequestCache()
        self.notifier = notifier

        self.gossip_blob = None
        self.gossip_blob_personal_channel = None

        # We regularly regenerate the gossip blobs to account for changes in the local DB
        self.register_task("Renew channel gossip cache",
                           self.prepare_gossip_blob_cache,
                           interval=600)

    async def unload(self):
        await self.request_cache.shutdown()
        await super(GigaChannelCommunity, self).unload()

    def _prepare_gossip_blob_cache(self):
        # Choose some random entries and try to pack them into maximum_payload_size bytes
        with db_session:
            # Generate and cache the gossip blob for the personal channel
            personal_channels = list(
                self.metadata_store.ChannelMetadata.get_my_channels().where(
                    lambda g: g.num_entries > 0).random(1))
            personal_channel = personal_channels[
                0] if personal_channels else None
            md_list = (
                [personal_channel] +
                list(personal_channel.get_random_contents(max_entries - 1))
                if personal_channel else None)
            self.gossip_blob_personal_channel = (
                entries_to_chunk(md_list, maximum_payload_size)[0]
                if md_list and len(md_list) > 1 else None)

            # Generate and cache the gossip blob for a subscribed channel
            # TODO: when the health table will be there, send popular torrents instead
            channel_l = list(
                self.metadata_store.ChannelMetadata.get_random_channels(
                    1, only_subscribed=True, only_downloaded=True))
            md_list = channel_l + list(channel_l[0].get_random_contents(
                max_entries - 1)) if channel_l else None
            self.gossip_blob = entries_to_chunk(
                md_list, maximum_payload_size)[0] if md_list else None
        self.metadata_store.disconnect_thread()

    async def prepare_gossip_blob_cache(self):
        await get_event_loop().run_in_executor(None,
                                               self._prepare_gossip_blob_cache)

    def send_random_to(self, peer):
        """
        Send random entries from our subscribed channels to another peer.

        To speed up propagation of original content, we send two distinct packets on each walk step:
        the first packet contains the user's personal channel, the second one a random subscribed channel.

        :param peer: the peer to send to
        :type peer: Peer
        :returns: None
        """

        # Send personal channel
        if self.gossip_blob_personal_channel:
            self.endpoint.send(
                peer.address,
                self.ezr_pack(
                    self.NEWS_PUSH_MESSAGE,
                    RawBlobPayload(self.gossip_blob_personal_channel)))

        # Send subscribed channel
        if self.gossip_blob:
            self.endpoint.send(
                peer.address,
                self.ezr_pack(self.NEWS_PUSH_MESSAGE,
                              RawBlobPayload(self.gossip_blob)))

    def _update_db_with_blob(self, raw_blob):
        result = None
        try:
            with db_session:
                try:
                    result = self.metadata_store.process_compressed_mdblob(
                        raw_blob)
                except (TransactionIntegrityError, CacheIndexError) as err:
                    self._logger.error(
                        "DB transaction error when tried to process payload: %s",
                        str(err))
        # Unfortunately, we have to catch the exception twice, because Pony can raise it both on exit
        # from db_session and on the line of code that triggered it.
        except (TransactionIntegrityError, CacheIndexError) as err:
            self._logger.error(
                "DB transaction error when tried to process payload: %s",
                str(err))
        finally:
            self.metadata_store.disconnect_thread()
        return result

    @lazy_wrapper(RawBlobPayload)
    async def on_blob(self, peer, blob):
        """
        Callback for when a MetadataBlob message comes in.

        :param peer: the peer that sent us the blob
        :param blob: payload raw data
        """
        def _process_received_blob():
            md_results = self._update_db_with_blob(blob.raw_blob)
            if not md_results:
                self.metadata_store.disconnect_thread()
                return None, None
            # Update votes counters
            with db_session:
                # This check ensures, in a somewhat hackish way, that we do not bump responses
                # sent by respond_with_updated_metadata
                if len(md_results) > 1:
                    for c in [
                            md for md, _ in md_results
                            if md and (md.metadata_type == CHANNEL_TORRENT)
                    ]:
                        self.metadata_store.vote_bump(
                            c.public_key, c.id_,
                            peer.public_key.key_to_bin()[10:])
                        # We only want to bump the leading channel entry in the payload, since the rest is content
                        break

            with db_session:
                # Get the list of new channels for notifying the GUI
                new_channels = [
                    md.to_simple_dict() for md, result in md_results
                    if md and md.metadata_type == CHANNEL_TORRENT
                    and result == UNKNOWN_CHANNEL and md.origin_id == 0
                    and md.num_entries > 0
                ]
            result = gen_have_newer_results_blob(md_results), new_channels
            self.metadata_store.disconnect_thread()
            return result

        reply_blob, new_channels = await get_event_loop().run_in_executor(
            None, _process_received_blob)

        # Notify the GUI about the discovered torrents and channels
        if self.notifier and new_channels:
            self.notifier.notify(NTFY.CHANNEL_DISCOVERED, {
                "results": new_channels,
                "uuid": str(CHANNELS_VIEW_UUID)
            })

        # Check whether the peer that sent us this metadata has an older version of it than
        # we do, and if so, queue the newer version to be sent back to it.
        self.respond_with_updated_metadata(peer, reply_blob)

    def respond_with_updated_metadata(self, peer, reply_blob):
        if reply_blob:
            self.endpoint.send(
                peer.address,
                self.ezr_pack(self.NEWS_PUSH_MESSAGE,
                              RawBlobPayload(reply_blob)))

    def send_search_request(self,
                            txt_filter,
                            metadata_type=None,
                            sort_by=None,
                            sort_asc=0,
                            hide_xxx=True,
                            uuid=None):
        """
        Sends request to max_search_peers from peer list. The request is cached in request cached. The past cache is
        cleared before adding a new search request to prevent incorrect results being pushed to the GUI.
        Returns: request cache number which uniquely identifies each search request
        """
        sort_by = sort_by or "HEALTH"
        peers = self.get_peers()
        search_candidates = sample(
            peers,
            max_search_peers) if len(peers) > max_search_peers else peers
        search_request_cache = SearchRequestCache(self.request_cache, uuid,
                                                  search_candidates)
        self.request_cache.clear()
        self.request_cache.add(search_request_cache)

        search_request_payload = SearchRequestPayload(
            search_request_cache.number,
            txt_filter.encode('utf8'),
            metadata_type_to_v1_field.get(
                metadata_type, "").encode('utf8'),  # Compatibility with v1.0
            sort_by.encode('utf8'),
            sort_asc,
            hide_xxx,
        )
        self._logger.info("Started remote search for query:%s", txt_filter)

        for peer in search_candidates:
            self.endpoint.send(
                peer.address,
                self.ezr_pack(self.SEARCH_REQUEST, search_request_payload))
        return search_request_cache.number

    @lazy_wrapper(SearchRequestPayload)
    async def on_search_request(self, peer, request):
        # Caution: beware of potential SQL injection!
        # Since the 'txt_filter' string is passed as-is to fetch the results, there is a chance of
        # SQL injection. But since we use Pony, which is supposed to do proper variable binding, it should
        # be relatively safe.
        txt_filter = request.txt_filter.decode('utf8')

        # Check if the txt_filter is a simple query
        if not is_simple_match_query(txt_filter):
            self.logger.error("Dropping a complex remote search query:%s",
                              txt_filter)
            return

        metadata_type = v1_md_field_to_metadata_type.get(
            request.metadata_type.decode('utf8'),
            frozenset((REGULAR_TORRENT, CHANNEL_TORRENT)))
        # If we get a hex-encoded public key in the txt_filter field, we drop the filter,
        # and instead query by public_key. However, we only do this if there is no channel_pk or
        # origin_id attributes set, because it is only for support of GigaChannel v1.0 channel preview requests.
        channel_pk = None
        normal_filter = txt_filter.replace('"', '').replace("*", "")
        if (metadata_type == frozenset((REGULAR_TORRENT, COLLECTION_NODE))
                and is_hex_string(normal_filter)
                and len(normal_filter) % 2 == 0
                and is_channel_public_key(normal_filter)):
            channel_pk = database_blob(unhexlify(normal_filter))
            txt_filter = None

        request_dict = {
            "first": 1,
            "last": max_entries,
            "sort_by": request.sort_by.decode('utf8'),
            "sort_desc":
            not request.sort_asc if request.sort_asc is not None else None,
            "txt_filter": txt_filter,
            "hide_xxx": request.hide_xxx,
            "metadata_type": metadata_type,
            "exclude_legacy": True,
            "channel_pk": channel_pk,
        }

        def _get_search_results():
            with db_session:
                db_results = self.metadata_store.MetadataNode.get_entries(
                    **request_dict)
                result = entries_to_chunk(
                    db_results[:max_entries],
                    maximum_payload_size)[0] if db_results else None
            self.metadata_store.disconnect_thread()
            return result

        result_blob = await get_event_loop().run_in_executor(
            None, _get_search_results)

        if result_blob:
            self.endpoint.send(
                peer.address,
                self.ezr_pack(self.SEARCH_RESPONSE,
                              SearchResponsePayload(request.id, result_blob)))

    @lazy_wrapper(SearchResponsePayload)
    async def on_search_response(self, peer, response):
        search_request_cache = self.request_cache.get(u"remote-search-request",
                                                      response.id)
        if not search_request_cache or not search_request_cache.process_peer_response(
                peer):
            return

        def _process_received_blob():
            md_results = self._update_db_with_blob(response.raw_blob)
            if not md_results:
                self.metadata_store.disconnect_thread()
                return None, None

            with db_session:
                result = (
                    [
                        md.to_simple_dict() for (md, action) in md_results
                        if (md and (
                            md.metadata_type in
                            [CHANNEL_TORRENT, REGULAR_TORRENT]) and action in [
                                UNKNOWN_CHANNEL, UNKNOWN_TORRENT,
                                UPDATED_OUR_VERSION, UNKNOWN_COLLECTION
                            ])
                    ],
                    gen_have_newer_results_blob(md_results),
                )
            self.metadata_store.disconnect_thread()
            return result

        search_results, reply_blob = await get_event_loop().run_in_executor(
            None, _process_received_blob)

        if self.notifier and search_results:
            self.notifier.notify(NTFY.REMOTE_QUERY_RESULTS, {
                "uuid": search_request_cache.uuid,
                "results": search_results
            })

        # Send the updated metadata if any to the responding peer
        self.respond_with_updated_metadata(peer, reply_blob)
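
A hedged sketch of the SearchRequestCache used above; the u"remote-search-request" prefix follows its lookup in on_search_response, while the per-peer bookkeeping and the timeout value are assumptions:

from ipv8.requestcache import RandomNumberCache


class SearchRequestCache(RandomNumberCache):
    def __init__(self, request_cache, uuid, peers):
        super().__init__(request_cache, u"remote-search-request")
        self.uuid = uuid
        self.peers = set(peers)

    @property
    def timeout_delay(self):
        return 30.0  # assumed

    def on_timeout(self):
        pass

    def process_peer_response(self, peer):
        # Accept at most one response from each peer that we actually queried.
        if peer not in self.peers:
            return False
        self.peers.remove(peer)
        return True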
Example #11
class RemoteQueryCommunity(TriblerCommunity, EVAProtocolMixin):
    """
    Community for general-purpose SELECT-like queries into a remote Channels database.
    """
    def __init__(self,
                 my_peer,
                 endpoint,
                 network,
                 rqc_settings: RemoteQueryCommunitySettings = None,
                 metadata_store=None,
                 **kwargs):
        super().__init__(my_peer, endpoint, network=network, **kwargs)

        self.rqc_settings = rqc_settings
        self.mds: MetadataStore = metadata_store

        # This object stores requests for "select" queries that we sent to other hosts.
        # We keep track of the peers we actually requested data from, so random hosts cannot push spam at us.
        # It also keeps track of the hosts we responded to, because those hosts may push
        # updates back to us, and we need to allow that.
        self.request_cache = RequestCache()

        self.add_message_handler(RemoteSelectPayload, self.on_remote_select)
        self.add_message_handler(RemoteSelectPayloadEva,
                                 self.on_remote_select_eva)
        self.add_message_handler(SelectResponsePayload,
                                 self.on_remote_select_response)

        self.eva_init()
        self.eva_register_receive_callback(self.on_receive)
        self.eva_register_send_complete_callback(self.on_send_complete)
        self.eva_register_error_callback(self.on_error)

    def on_receive(self, peer, binary_info, binary_data, nonce):
        self.logger.debug(
            f"EVA data received: peer {hexlify(peer.mid)}, info {binary_info}")
        packet = (peer.address, binary_data)
        self.on_packet(packet)

    def on_send_complete(self, peer, binary_info, binary_data, nonce):
        self.logger.debug(
            f"EVA outgoing transfer complete: peer {hexlify(peer.mid)},  info {binary_info}"
        )

    def on_error(self, peer, exception):
        self.logger.warning(
            f"EVA transfer error: peer {hexlify(peer.mid)}, exception: {exception}"
        )

    def send_remote_select(self,
                           peer,
                           processing_callback=None,
                           force_eva_response=False,
                           **kwargs):
        request_class = EvaSelectRequest if force_eva_response else SelectRequest
        request = request_class(
            self.request_cache,
            hexlify(peer.mid),
            kwargs,
            peer,
            processing_callback=processing_callback,
            timeout_callback=self._on_query_timeout,
        )
        self.request_cache.add(request)

        self.logger.debug(f"Select to {hexlify(peer.mid)} with ({kwargs})")
        args = (request.number, convert_to_json(kwargs).encode('utf8'))
        if force_eva_response:
            self.ez_send(peer, RemoteSelectPayloadEva(*args))
        else:
            self.ez_send(peer, RemoteSelectPayload(*args))
        return request

    async def process_rpc_query(self, json_bytes: bytes):
        """
        Retrieve the result of a database query from a third party, encoded as raw JSON bytes (through `dumps`).
        :raises TypeError: if the JSON contains invalid keys.
        :raises ValueError: if no JSON could be decoded.
        :raises pony.orm.dbapiprovider.OperationalError: if an illegal query was performed.
        """
        request_sanitized = sanitize_query(json.loads(json_bytes),
                                           self.rqc_settings.max_response_size)
        return await self.mds.get_entries_threaded(**request_sanitized)

    def send_db_results(self,
                        peer,
                        request_payload_id,
                        db_results,
                        force_eva_response=False):

        # Special case of empty results list - sending empty lz4 archive
        if len(db_results) == 0:
            self.ez_send(
                peer,
                SelectResponsePayload(request_payload_id, LZ4_EMPTY_ARCHIVE))
            return

        index = 0
        while index < len(db_results):
            transfer_size = (self.eva_protocol.binary_size_limit
                             if force_eva_response else
                             self.rqc_settings.maximum_payload_size)
            data, index = entries_to_chunk(db_results,
                                           transfer_size,
                                           start_index=index,
                                           include_health=True)
            payload = SelectResponsePayload(request_payload_id, data)
            if force_eva_response or (len(data) >
                                      self.rqc_settings.maximum_payload_size):
                self.eva_send_binary(peer, struct.pack('>i',
                                                       request_payload_id),
                                     self.ezr_pack(payload.msg_id, payload))
            else:
                self.ez_send(peer, payload)

    @lazy_wrapper(RemoteSelectPayloadEva)
    async def on_remote_select_eva(self, peer, request_payload):
        await self._on_remote_select_basic(peer,
                                           request_payload,
                                           force_eva_response=True)

    @lazy_wrapper(RemoteSelectPayload)
    async def on_remote_select(self, peer, request_payload):
        await self._on_remote_select_basic(peer, request_payload)

    async def _on_remote_select_basic(self,
                                      peer,
                                      request_payload,
                                      force_eva_response=False):
        try:
            db_results = await self.process_rpc_query(request_payload.json)

            # When we send our response to a host, we open a window of opportunity
            # for it to push back updates
            if db_results and not self.request_cache.has(
                    hexlify(peer.mid), request_payload.id):
                self.request_cache.add(
                    PushbackWindow(self.request_cache, hexlify(peer.mid),
                                   request_payload.id))

            self.send_db_results(peer, request_payload.id, db_results,
                                 force_eva_response)
        except (OperationalError, TypeError, ValueError) as error:
            self.logger.error(f"Remote select. The error occurred: {error}")

    @lazy_wrapper(SelectResponsePayload)
    async def on_remote_select_response(self, peer, response_payload):
        """
        Match the the response that we received from the network to a query cache
        and process it by adding the corresponding entries to the MetadataStore database.
        This processes both direct responses and pushback (updates) responses
        """
        self.logger.debug(f"Response from {hexlify(peer.mid)}")

        # ACHTUNG! The returned request cache can be either a SelectRequest or a PushbackWindow
        request = self.request_cache.get(hexlify(peer.mid),
                                         response_payload.id)
        if request is None:
            return

        # Check for limit on the number of packets per request
        if request.packets_limit > 1:
            request.packets_limit -= 1
        else:
            self.request_cache.pop(hexlify(peer.mid), response_payload.id)

        processing_results = await self.mds.process_compressed_mdblob_threaded(
            response_payload.raw_blob)
        self.logger.debug(f"Response result: {processing_results}")

        if isinstance(
                request,
                EvaSelectRequest) and not request.processing_results.done():
            request.processing_results.set_result(processing_results)

        # If we know about newer versions of the received entries, push the updates back
        if isinstance(
                request,
                SelectRequest) and self.rqc_settings.push_updates_back_enabled:
            newer_entities = [
                r.md_obj for r in processing_results
                if r.obj_state == ObjState.LOCAL_VERSION_NEWER
            ]
            self.send_db_results(peer, response_payload.id, newer_entities)

        if self.rqc_settings.channel_query_back_enabled:
            for result in processing_results:
                # Query back the sender for preview contents for the new channels
                # The fact that the object is previously unknown is indicated by process_payload in the
                # .obj_state property of returned ProcessingResults objects.
                if result.obj_state == ObjState.NEW_OBJECT and result.md_obj.metadata_type in (
                        CHANNEL_TORRENT,
                        COLLECTION_NODE,
                ):
                    request_dict = {
                        "metadata_type": [COLLECTION_NODE, REGULAR_TORRENT],
                        "channel_pk": result.md_obj.public_key,
                        "origin_id": result.md_obj.id_,
                        "first": 0,
                        "last": self.rqc_settings.max_channel_query_back,
                    }
                    self.send_remote_select(peer=peer, **request_dict)

                # Query back for missing dependencies, e.g. thumbnail/description.
                # The fact that some dependency is missing is checked by the lower layer during
                # the query to process_payload and indicated through .missing_deps property of the
                # ProcessingResults objects returned by process_payload.
                for dep_query_dict in result.missing_deps:
                    self.send_remote_select(peer=peer, **dep_query_dict)

        if isinstance(request, SelectRequest) and request.processing_callback:
            request.processing_callback(request, processing_results)

        # Remember that at least one packet was received from the queried peer.
        if isinstance(request, SelectRequest):
            request.peer_responded = True

    def _on_query_timeout(self, request_cache):
        if not request_cache.peer_responded:
            self.logger.debug(
                "Remote query timeout, deleting peer: %s %s %s",
                str(request_cache.peer.address),
                hexlify(request_cache.peer.mid),
                str(request_cache.request_kwargs),
            )
            self.network.remove_peer(request_cache.peer)

    async def unload(self):
        await self.request_cache.shutdown()
        await super().unload()
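
A hedged usage sketch: with force_eva_response=True the returned request is an EvaSelectRequest whose processing_results Future is resolved in on_remote_select_response above, so a caller can await the processed entries directly (the helper name is illustrative):

    async def fetch_remote_entries(self, peer, **query):
        request = self.send_remote_select(peer, force_eva_response=True, **query)
        return await request.processing_results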
Example #12
    def __init__(self, *args, **kwargs):
        super(MyCommunity, self).__init__(*args, **kwargs)
        self.request_cache = RequestCache()
Example #13
class MyCommunity(Community):
    master_peer = Peer(ECCrypto().generate_key(u"medium"))

    def __init__(self, *args, **kwargs):
        super(MyCommunity, self).__init__(*args, **kwargs)
        self.request_cache = RequestCache()

    def unload(self):
        self.request_cache.shutdown()
        super(MyCommunity, self).unload()

    def finish_ping(self, cache, include=True):
        global RESULTS
        print(cache.hostname, cache.address, time.time() - cache.starttime)
        if include:
            if (cache.hostname, cache.address) in RESULTS:
                RESULTS[(cache.hostname,
                         cache.address)].append(time.time() - cache.starttime)
            else:
                RESULTS[(cache.hostname,
                         cache.address)] = [time.time() - cache.starttime]
        elif (cache.hostname, cache.address) not in RESULTS:
            RESULTS[(cache.hostname, cache.address)] = []

        self.next_ping()

    def next_ping(self):
        global CHECK_QUEUE
        if CHECK_QUEUE:
            hostname, address = CHECK_QUEUE.pop()
            packet = self.create_introduction_request(address)
            self.request_cache.add(
                PingCache(self, hostname, address, time.time()))
            self.endpoint.send(address, packet)
        else:
            reactor.callFromThread(reactor.stop)

    def introduction_response_callback(self, peer, dist, payload):
        if self.request_cache.has(u"introping", payload.identifier):
            cache = self.request_cache.pop(u"introping", payload.identifier)
            self.finish_ping(cache)

    def started(self):
        global CHECK_QUEUE

        dnsmap = {}
        for (address, port) in _DNS_ADDRESSES:
            try:
                ip = gethostbyname(address)
                dnsmap[(ip, port)] = address
            except Exception:
                # DNS resolution failed; skip this tracker.
                pass

        UNKNOWN_NAME = '*'

        for (ip, port) in _DEFAULT_ADDRESSES:
            hostname = dnsmap.get((ip, port), None)
            if not hostname:
                hostname = UNKNOWN_NAME
                UNKNOWN_NAME = UNKNOWN_NAME + '*'
            CHECK_QUEUE.append((hostname, (ip, port)))

        CHECK_QUEUE = CHECK_QUEUE * CONST_REQUESTS

        self.next_ping()
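
A hedged sketch of the PingCache used above; the u"introping" prefix and the finish_ping(cache, include=False) timeout path follow the calls in the snippet, while keying on the community's global time and the timeout value are assumptions:

from ipv8.requestcache import NumberCache


class PingCache(NumberCache):
    def __init__(self, community, hostname, address, starttime):
        # Key the cache on the identifier of the outgoing introduction request.
        super(PingCache, self).__init__(community.request_cache, u"introping",
                                        community.global_time)
        self.community = community
        self.hostname = hostname
        self.address = address
        self.starttime = starttime

    @property
    def timeout_delay(self):
        return 5.0  # assumed

    def on_timeout(self):
        # Record the missing response and move on to the next tracker in the queue.
        self.community.finish_ping(self, include=False)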
Example #14
class TrustChainCommunity(Community):
    """
    Community for reputation based on TrustChain tamper-proof interaction history.
    """
    community_id = unhexlify('5ad767b05ae592a02488272ca2a86b847d4562e1')

    UNIVERSAL_BLOCK_LISTENER = b'UNIVERSAL_BLOCK_LISTENER'
    DB_CLASS = TrustChainDB
    DB_NAME = 'trustchain'
    version = b'\x02'

    def __init__(self, *args, **kwargs):
        working_directory = kwargs.pop('working_directory', '')
        self.persistence = kwargs.pop('persistence', None)
        db_name = kwargs.pop('db_name', self.DB_NAME)
        self.settings = kwargs.pop('settings', TrustChainSettings())
        self.receive_block_lock = RLock()

        super(TrustChainCommunity, self).__init__(*args, **kwargs)
        self.request_cache = RequestCache()
        self.logger = logging.getLogger(self.__class__.__name__)

        if not self.persistence:
            db_path = os.path.join(
                working_directory, db_name
            ) if working_directory != ":memory:" else working_directory
            self.persistence = self.DB_CLASS(
                db_path, self.my_peer.public_key.key_to_bin())
        self.relayed_broadcasts = set()
        self.relayed_broadcasts_order = deque()
        self.logger.debug(
            "The trustchain community started with Public Key: %s",
            hexlify(self.my_peer.public_key.key_to_bin()))
        self.shutting_down = False
        self.listeners_map = {}  # Map of block_type -> [callbacks]
        self.register_task("db_cleanup", self.do_db_cleanup, interval=600)

        self.add_message_handler(HalfBlockPayload, self.received_half_block)
        self.add_message_handler(CrawlRequestPayload,
                                 self.received_crawl_request)
        self.add_message_handler(CrawlResponsePayload,
                                 self.received_crawl_response)
        self.add_message_handler(HalfBlockPairPayload,
                                 self.received_half_block_pair)
        self.add_message_handler(HalfBlockBroadcastPayload,
                                 self.received_half_block_broadcast)
        self.add_message_handler(HalfBlockPairBroadcastPayload,
                                 self.received_half_block_pair_broadcast)
        self.add_message_handler(EmptyCrawlResponsePayload,
                                 self.received_empty_crawl_response)

    def do_db_cleanup(self):
        """
        Cleanup the database if necessary.
        """
        blocks_in_db = self.persistence.get_number_of_known_blocks()
        if blocks_in_db > self.settings.max_db_blocks:
            my_pk = self.my_peer.public_key.key_to_bin()
            self.persistence.remove_old_blocks(
                blocks_in_db - self.settings.max_db_blocks, my_pk)

    def add_listener(self, listener, block_types):
        """
        Add a listener for specific block types.
        """
        for block_type in block_types:
            if block_type not in self.listeners_map:
                self.listeners_map[block_type] = []
            self.listeners_map[block_type].append(listener)
            self.persistence.block_types[block_type] = listener.BLOCK_CLASS

    def remove_listener(self, listener, block_types):
        for block_type in block_types:
            if block_type in self.listeners_map and listener in self.listeners_map[
                    block_type]:
                self.listeners_map[block_type].remove(listener)
            if block_type in self.persistence.block_types:
                self.persistence.block_types.pop(block_type, None)

    def get_block_class(self, block_type):
        """
        Get the block class for a specific block type.
        """
        if block_type not in self.listeners_map or not self.listeners_map[
                block_type]:
            return TrustChainBlock

        return self.listeners_map[block_type][0].BLOCK_CLASS

    async def should_sign(self, block):
        """
        Return whether we should sign the block in the passed message.
        @param block: the block we want to sign or not.
        """
        if block.type not in self.listeners_map:
            return False  # There are no listeners for this block

        for listener in self.listeners_map[block.type]:
            should_sign = await maybe_coroutine(listener.should_sign, block)
            if should_sign:
                return True

        return False

    def on_counter_signed(self, block):
        """
        We just counter signed a block. Inform the listeners.
        """
        if block.type in self.listeners_map:
            for listener in self.listeners_map[block.type]:
                listener.on_counter_signed_block(block)

    def get_counter_tx(self, block):
        """
        Return some counter tx content.
        """
        if block.type not in self.listeners_map:
            return block.transaction  # There are no listeners for this block

        if len(self.listeners_map[block.type]) > 1:
            self.logger.warning(
                "There should only be one listener when returning a counter transaction!"
            )
            return block.transaction

        listener = self.listeners_map[block.type][0]
        if not hasattr(listener, "get_counter_tx"):
            return block.transaction

        return listener.get_counter_tx(block)

    def _add_broadcasted_blockid(self, block_id):
        self.relayed_broadcasts.add(block_id)
        self.relayed_broadcasts_order.append(block_id)
        if len(self.relayed_broadcasts) > self.settings.broadcast_history_size:
            to_remove = self.relayed_broadcasts_order.popleft()
            self.relayed_broadcasts.remove(to_remove)

    def send_block(self, block, address=None, ttl=1):
        """
        Send a block to a specific address, or do a broadcast to known peers if no peer is specified.
        """
        global_time = self.claim_global_time()
        dist = GlobalTimeDistributionPayload(global_time)

        if address:
            self.logger.debug("Sending block to (%s:%d) (%s)", address[0],
                              address[1], block)
            payload = HalfBlockPayload.from_half_block(block)
            packet = self._ez_pack(self._prefix, 1, [dist, payload], False)
            self.endpoint.send(address, packet)
        else:
            self.logger.debug("Broadcasting block %s", block)
            payload = HalfBlockBroadcastPayload.from_half_block(block, ttl)
            packet = self._ez_pack(self._prefix, 5, [dist, payload], False)
            peers = self.get_peers()
            for peer in random.sample(
                    peers, min(len(peers), self.settings.broadcast_fanout)):
                self.endpoint.send(peer.address, packet)
            self._add_broadcasted_blockid(block.block_id)

    def send_block_pair(self, block1, block2, address=None, ttl=1):
        """
        Send a half block pair to a specific address, or do a broadcast to known peers if no peer is specified.
        """
        global_time = self.claim_global_time()
        dist = GlobalTimeDistributionPayload(global_time)

        if address:
            self.logger.debug("Sending block pair to (%s:%d) (%s and %s)",
                              address[0], address[1], block1, block2)
            payload = HalfBlockPairPayload.from_half_blocks(block1, block2)
            packet = self._ez_pack(self._prefix, 4, [dist, payload], False)
            self.endpoint.send(address, packet)
        else:
            self.logger.debug("Broadcasting blocks %s and %s", block1, block2)
            payload = HalfBlockPairBroadcastPayload.from_half_blocks(
                block1, block2, ttl)
            packet = self._ez_pack(self._prefix, 6, [dist, payload], False)
            peers = self.get_peers()
            for peer in random.sample(
                    peers, min(len(peers), self.settings.broadcast_fanout)):
                self.endpoint.send(peer.address, packet)
            self._add_broadcasted_blockid(block1.block_id)

    def self_sign_block(self, block_type=b'unknown', transaction=None):
        return self.sign_block(self.my_peer,
                               block_type=block_type,
                               transaction=transaction)

    def create_source_block(self, block_type=b'unknown', transaction=None):
        """
        Create a source block without any initial counterparty to sign.

        :param block_type: The type of the block to be constructed, as a string
        :param transaction: A dictionary describing the interaction in this block
        :return: A future that fires with a (block, None) tuple
        """
        return self.sign_block(peer=None,
                               public_key=ANY_COUNTERPARTY_PK,
                               block_type=block_type,
                               transaction=transaction)

    def create_link(self,
                    source,
                    block_type,
                    additional_info=None,
                    public_key=None):
        """
        Create a Link Block to a source block

        :param source: The source block which had no initial counterparty to sign
        :param block_type: The type of the block to be constructed, as a string
        :param additional_info: a dictionary with supplementary information concerning the transaction
        :param public_key: The public key of the counterparty (usually of the source's owner)
        :return: None
        """
        public_key = source.public_key if public_key is None else public_key

        return self.sign_block(self.my_peer,
                               linked=source,
                               public_key=public_key,
                               block_type=block_type,
                               additional_info=additional_info)

    @synchronized
    def sign_block(self,
                   peer,
                   public_key=EMPTY_PK,
                   block_type=b'unknown',
                   transaction=None,
                   linked=None,
                   additional_info=None):
        """
        Create, sign, persist and send a block signed message
        :param peer: The peer with whom you have interacted, as an IPv8 peer
        :param public_key: The public key of the other party you transact with
        :param block_type: The type of the block to be constructed, as a string
        :param transaction: A dictionary describing the interaction in this block
        :param linked: The block that the requester is asking us to sign
        :param additional_info: Stores additional information on the transaction
        """
        # NOTE to the future: This method reads from the database, increments and then writes back. If in some future
        # this method is allowed to execute in parallel, be sure to lock from before .create up to after .add_block

        # In this particular case there must be an implicit transaction due to the following assert
        assert peer is not None or peer is None and linked is None and public_key == ANY_COUNTERPARTY_PK, \
            "Peer, linked block should not be provided when creating a no counterparty source block. Public key " \
            "should be that reserved for any counterpary."
        assert transaction is None and linked is not None or transaction is not None and linked is None, \
            "Either provide a linked block or a transaction, not both %s, %s" % (peer, self.my_peer)
        assert (additional_info is None or additional_info is not None and linked is not None
                and transaction is None), \
            "Either no additional info is provided or one provides it for a linked block"
        assert (linked is None or linked.link_public_key
                == self.my_peer.public_key.key_to_bin()
                or linked.link_public_key == ANY_COUNTERPARTY_PK
                ), "Cannot counter sign block not addressed to self"
        assert linked is None or linked.link_sequence_number == UNKNOWN_SEQ, \
            "Cannot counter sign block that is not a request"
        assert transaction is None or isinstance(
            transaction, dict), "Transaction should be a dictionary"
        assert additional_info is None or isinstance(
            additional_info, dict), "Additional info should be a dictionary"

        self.persistence_integrity_check()

        if linked and linked.link_public_key != ANY_COUNTERPARTY_PK:
            block_type = linked.type

        block = self.get_block_class(block_type).create(
            block_type,
            transaction,
            self.persistence,
            self.my_peer.public_key.key_to_bin(),
            link=linked,
            additional_info=additional_info,
            link_pk=public_key)
        block.sign(self.my_peer.key)

        validation = block.validate(self.persistence)
        self.logger.info("Signed block to %s (%s) validation result %s",
                         hexlify(block.link_public_key)[-8:], block,
                         validation)
        if validation[0] != ValidationResult.partial_next and validation[
                0] != ValidationResult.valid:
            self.logger.error("Signed block did not validate?! Result %s",
                              repr(validation))
            return fail(RuntimeError("Signed block did not validate."))

        if not self.persistence.contains(block):
            self.persistence.add_block(block)
            self.notify_listeners(block)

        # This is a source block with no counterparty
        if not peer and public_key == ANY_COUNTERPARTY_PK:
            if block.type not in self.settings.block_types_bc_disabled:
                self.send_block(block)
            return succeed((block, None))

        # If there is a counterparty to sign, we send it
        self.send_block(block, address=peer.address)

        # We broadcast the block in the network if we initiated a transaction
        if block.type not in self.settings.block_types_bc_disabled and not linked:
            self.send_block(block)

        if peer == self.my_peer:
            # We created a self-signed block
            if block.type not in self.settings.block_types_bc_disabled:
                self.send_block(block)

            return succeed(
                (block,
                 None)) if public_key == ANY_COUNTERPARTY_PK else succeed(
                     (block, linked))
        elif not linked:
            # We keep track of this outstanding sign request.
            sign_future = Future()
            self.request_cache.add(
                HalfBlockSignCache(self, block, sign_future, peer.address))
            return sign_future
        else:
            # We return a future that fires immediately with both half blocks.
            if block.type not in self.settings.block_types_bc_disabled:
                self.send_block_pair(linked, block)

            return succeed((linked, block))

    @synchronized
    @lazy_wrapper_unsigned(GlobalTimeDistributionPayload, HalfBlockPayload)
    async def received_half_block(self, source_address, dist, payload):
        """
        We've received a half block, either because we sent a SIGNED message to someone or we are crawling
        """
        peer = Peer(payload.public_key, source_address)
        block = self.get_block_class(payload.type).from_payload(
            payload, self.serializer)
        try:
            await self.process_half_block(block, peer)
        except RuntimeError as e:
            self.logger.info("Failed to process half block (error %s)", e)

    @synchronized
    @lazy_wrapper_unsigned(GlobalTimeDistributionPayload,
                           HalfBlockBroadcastPayload)
    def received_half_block_broadcast(self, source_address, dist, payload):
        """
        We received a half block, part of a broadcast. Disseminate it further.
        """
        block = self.get_block_class(payload.type).from_payload(
            payload, self.serializer)
        self.validate_persist_block(block)

        if block.block_id not in self.relayed_broadcasts and payload.ttl > 1:
            self.send_block(block, ttl=payload.ttl - 1)

    @synchronized
    @lazy_wrapper_unsigned(GlobalTimeDistributionPayload, HalfBlockPairPayload)
    def received_half_block_pair(self, source_address, dist, payload):
        """
        We received a block pair message.
        """
        block1, block2 = self.get_block_class(payload.type1).from_pair_payload(
            payload, self.serializer)
        self.validate_persist_block(block1)
        self.validate_persist_block(block2)

    @synchronized
    @lazy_wrapper_unsigned(GlobalTimeDistributionPayload,
                           HalfBlockPairBroadcastPayload)
    def received_half_block_pair_broadcast(self, source_address, dist,
                                           payload):
        """
        We received a half block pair, part of a broadcast. Disseminate it further.
        """
        block1, block2 = self.get_block_class(payload.type1).from_pair_payload(
            payload, self.serializer)
        self.validate_persist_block(block1)
        self.validate_persist_block(block2)

        if block1.block_id not in self.relayed_broadcasts and payload.ttl > 1:
            self.send_block_pair(block1, block2, ttl=payload.ttl - 1)

    def validate_persist_block(self, block):
        """
        Validate a block and if it's valid, persist it. Return the validation result.
        :param block: The block to validate and persist.
        :return: [ValidationResult]
        """
        validation = block.validate(self.persistence)
        if validation[0] == ValidationResult.invalid:
            pass
        elif not self.persistence.contains(block):
            self.persistence.add_block(block)
            self.notify_listeners(block)

        return validation

    def notify_listeners(self, block):
        """
        Notify listeners of a specific new block.
        """
        # Call the listeners associated to the universal block, if there are any
        for listener in self.listeners_map.get(self.UNIVERSAL_BLOCK_LISTENER,
                                               []):
            listener.received_block(block)

        # Avoid proceeding any further if the type of the block coincides with the UNIVERSAL_BLOCK_LISTENER
        if block.type not in self.listeners_map or self.shutting_down or block.type == self.UNIVERSAL_BLOCK_LISTENER:
            return

        for listener in self.listeners_map[block.type]:
            listener.received_block(block)

    @synchronized
    async def process_half_block(self, blk, peer):
        """
        Process a received half block.
        """
        validation = self.validate_persist_block(blk)
        self.logger.info("Block validation result %s, %s, (%s)", validation[0],
                         validation[1], blk)
        if validation[0] == ValidationResult.invalid:
            raise RuntimeError(
                f"Block could not be validated: {validation[0]}, {validation[1]}"
            )

        # Check if we are waiting for this signature response
        link_block_id_int = int(hexlify(blk.linked_block_id), 16) % 100000000
        if self.request_cache.has('sign', link_block_id_int):
            cache = self.request_cache.pop('sign', link_block_id_int)
            cache.sign_future.set_result(
                (blk, self.persistence.get_linked(blk)))

        # Is this a request, addressed to us, and have we not signed it already?
        if (blk.link_sequence_number != UNKNOWN_SEQ
                or blk.link_public_key != self.my_peer.public_key.key_to_bin()
                or self.persistence.get_linked(blk) is not None):
            return

        self.logger.info("Received request block addressed to us (%s)", blk)

        try:
            should_sign = await maybe_coroutine(self.should_sign, blk)
        except Exception as e:
            self.logger.error(
                "Error while determining whether to sign (error: %s)", e)
            return

        if not should_sign:
            self.logger.info("Not signing block %s", blk)
            return

        # It is important that the request matches up with its previous block; gaps cannot be tolerated at
        # this point. We already dropped invalid blocks, so here we delay this message if the result is partial,
        # partial_previous or no-info, and send a crawl request to the requester to (hopefully) close the gap.
        if (validation[0] == ValidationResult.partial_previous
                or validation[0] == ValidationResult.partial
                or validation[0] == ValidationResult.no_info
            ) and self.settings.validation_range > 0:
            self.logger.info(
                "Request block could not be validated sufficiently, crawling requester. %s",
                validation)
            # Note that this code does not cover the scenario where we obtain this block indirectly.
            if not self.request_cache.has("crawl", blk.hash_number):
                try:
                    await self.send_crawl_request(
                        peer,
                        blk.public_key,
                        max(GENESIS_SEQ, (blk.sequence_number -
                                          self.settings.validation_range)),
                        max(GENESIS_SEQ, blk.sequence_number - 1),
                        for_half_block=blk)
                except Exception as e:
                    self.logger.error(
                        "Error while sending crawl request (error: %s)", e)
                    return
                return await self.process_half_block(blk, peer)
        else:
            blocks = self.sign_block(peer,
                                     linked=blk,
                                     additional_info=self.get_counter_tx(blk))
            self.on_counter_signed(blk)
            return blocks

    def crawl_chain(self, peer, latest_block_num=0):
        """
        Crawl the whole chain of a specific peer.
        :param latest_block_num: The latest block number of the peer in question, if available.
        """
        if self.request_cache.has("chaincrawl",
                                  ChainCrawlCache.get_number_for(peer)):
            self.logger.debug(
                "Skipping crawl of peer %s, another crawl is pending", peer)
            return succeed(None)

        crawl_future = Future()
        cache = ChainCrawlCache(self,
                                peer,
                                crawl_future,
                                known_chain_length=latest_block_num)
        self.request_cache.add(cache)
        ensure_future(self.send_next_partial_chain_crawl_request(cache))
        return crawl_future

    def crawl_lowest_unknown(self, peer, latest_block_num=None):
        """
        Crawl the lowest unknown block of a specific peer.
        :param latest_block_num: The latest block number of the peer in question, if available
        """
        sq = self.persistence.get_lowest_sequence_number_unknown(
            peer.public_key.key_to_bin())
        if latest_block_num and sq == latest_block_num + 1:
            return []  # We don't have to crawl this node since we have its whole chain
        return self.send_crawl_request(peer, peer.public_key.key_to_bin(), sq,
                                       sq)

    def send_crawl_request(self,
                           peer,
                           public_key,
                           start_seq_num,
                           end_seq_num,
                           for_half_block=None):
        """
        Send a crawl request to a specific peer.
        """
        crawl_id = for_half_block.hash_number if for_half_block else \
            RandomNumberCache.find_unclaimed_identifier(self.request_cache, "crawl")
        crawl_future = Future()
        self.request_cache.add(CrawlRequestCache(self, crawl_id, crawl_future))
        self.logger.info(
            "Requesting crawl of node %s (blocks %d to %d) with id %d",
            hexlify(peer.public_key.key_to_bin())[-8:], start_seq_num,
            end_seq_num, crawl_id)

        global_time = self.claim_global_time()
        auth = BinMemberAuthenticationPayload(
            self.my_peer.public_key.key_to_bin())
        payload = CrawlRequestPayload(public_key, start_seq_num, end_seq_num,
                                      crawl_id)
        dist = GlobalTimeDistributionPayload(global_time)

        packet = self._ez_pack(self._prefix, 2, [auth, dist, payload])
        self.endpoint.send(peer.address, packet)

        return crawl_future

    @task
    async def perform_partial_chain_crawl(self, cache, start, stop):
        """
        Perform a partial crawl request for a specific range, when crawling a chain.
        :param cache: The cache that stores progress regarding the chain crawl.
        :param start: The sequence number of the first block to be requested.
        :param stop: The sequence number of the last block to be requested.
        """
        if cache.current_request_range != (start, stop):
            # We are performing a new request
            cache.current_request_range = start, stop
            cache.current_request_attempts = 0
        elif cache.current_request_attempts == 3:
            # We already tried the same request three times, bail out
            self.request_cache.pop("chaincrawl", cache.number)
            cache.crawl_future.set_result(None)
            return

        cache.current_request_attempts += 1
        await self.send_crawl_request(cache.peer,
                                      cache.peer.public_key.key_to_bin(),
                                      start, stop)
        await self.send_next_partial_chain_crawl_request(cache)

    async def send_next_partial_chain_crawl_request(self, cache):
        """
        Send the next partial crawl request, if we are not done yet.
        :param cache: The cache that stores progress regarding the chain crawl.
        """
        lowest_unknown = self.persistence.get_lowest_sequence_number_unknown(
            cache.peer.public_key.key_to_bin())
        if cache.known_chain_length and cache.known_chain_length == lowest_unknown - 1:
            # At this point, we have all the blocks we need
            self.request_cache.pop("chaincrawl", cache.number)
            cache.crawl_future.set_result(None)
            return

        if not cache.known_chain_length:
            # Do we know the chain length of the crawled peer? If not, make sure we get to know this first.
            blocks = await self.send_crawl_request(
                cache.peer, cache.peer.public_key.key_to_bin(), -1, -1)
            if not blocks:
                self.request_cache.pop("chaincrawl", cache.number)
                cache.crawl_future.set_result(None)
                return

            cache.known_chain_length = blocks[0].sequence_number
            await self.send_next_partial_chain_crawl_request(cache)
            return

        latest_block = self.persistence.get_latest(
            cache.peer.public_key.key_to_bin())
        if not latest_block:
            # We have no knowledge of this peer but we have the length of the chain.
            # Simply send a request from the genesis block to the known chain length.
            self.perform_partial_chain_crawl(cache, 1,
                                             cache.known_chain_length)
            return
        elif latest_block and lowest_unknown == latest_block.sequence_number + 1:
            # It seems that we filled all gaps in the database; check whether we can do one final request
            if latest_block.sequence_number < cache.known_chain_length:
                self.perform_partial_chain_crawl(
                    cache, latest_block.sequence_number + 1,
                    cache.known_chain_length)
            else:
                self.request_cache.pop("chaincrawl", cache.number)
                cache.crawl_future.set_result(None)
            return

        start, stop = self.persistence.get_lowest_range_unknown(
            cache.peer.public_key.key_to_bin())
        self.perform_partial_chain_crawl(cache, start, stop)

    @synchronized
    @lazy_wrapper(GlobalTimeDistributionPayload, CrawlRequestPayload)
    def received_crawl_request(self, peer, dist, payload):
        self.logger.info("Received crawl request from node %s for range %d-%d",
                         hexlify(peer.public_key.key_to_bin())[-8:],
                         payload.start_seq_num, payload.end_seq_num)
        start_seq_num = payload.start_seq_num
        end_seq_num = payload.end_seq_num

        # It could be that our start_seq_num and end_seq_num are negative. If so, convert them to positive numbers,
        # based on the last block of one's chain.
        if start_seq_num < 0:
            last_block = self.persistence.get_latest(payload.public_key)
            start_seq_num = max(GENESIS_SEQ, last_block.sequence_number + start_seq_num + 1) \
                if last_block else GENESIS_SEQ
        if end_seq_num < 0:
            last_block = self.persistence.get_latest(payload.public_key)
            end_seq_num = max(GENESIS_SEQ, last_block.sequence_number + end_seq_num + 1) \
                if last_block else GENESIS_SEQ

        blocks = self.persistence.crawl(payload.public_key,
                                        start_seq_num,
                                        end_seq_num,
                                        limit=self.settings.max_crawl_batch)
        total_count = len(blocks)

        if total_count == 0:
            global_time = self.claim_global_time()
            response_payload = EmptyCrawlResponsePayload(payload.crawl_id)
            dist = GlobalTimeDistributionPayload(global_time)
            packet = self._ez_pack(self._prefix, 7, [dist, response_payload],
                                   False)
            self.endpoint.send(peer.address, packet)
        else:
            self.send_crawl_responses(blocks, peer, payload.crawl_id)

    def send_crawl_responses(self, blocks, peer, crawl_id):
        """
        Answer a peer with crawl responses.
        """
        for ind, block in enumerate(blocks):
            self.send_crawl_response(block, crawl_id, ind + 1, len(blocks),
                                     peer)
        self.logger.info("Sent %d blocks", len(blocks))

    @synchronized
    def sanitize_database(self):
        """
        DANGER! USING THIS MAY CAUSE DOUBLE SPENDING IN THE NETWORK.
                ONLY USE IF YOU KNOW WHAT YOU ARE DOING.

        This method removes all of the invalid blocks in our own chain.
        """
        self.logger.error("Attempting to recover %s", self.DB_CLASS.__name__)
        block = self.persistence.get_latest(
            self.my_peer.public_key.key_to_bin())
        if not block:
            # There is nothing to sanitize; we're at the genesis block.
            self.logger.debug(
                "No latest block found when trying to recover database!")
            return
        validation = self.validate_persist_block(block)
        while validation[0] != ValidationResult.partial_next and validation[
                0] != ValidationResult.valid:
            # The latest block is invalid, remove it.
            self.persistence.remove_block(block)
            self.logger.error("Removed invalid block %d from our chain",
                              block.sequence_number)
            block = self.persistence.get_latest(
                self.my_peer.public_key.key_to_bin())
            if not block:
                # Back to the genesis
                break
            validation = self.validate_persist_block(block)
        self.logger.error("Recovered database, our last block is now %d",
                          block.sequence_number if block else 0)

    def persistence_integrity_check(self):
        """
        Perform an integrity check of our own chain. Recover it if needed.
        """
        block = self.persistence.get_latest(
            self.my_peer.public_key.key_to_bin())
        if not block:
            return
        validation = self.validate_persist_block(block)
        if validation[0] != ValidationResult.partial_next and validation[
                0] != ValidationResult.valid:
            self.logger.error("Our chain did not validate. Result %s",
                              repr(validation))
            self.sanitize_database()

    def send_crawl_response(self, block, crawl_id, index, total_count, peer):
        self.logger.debug("Sending block for crawl request to %s (%s)", peer,
                          block)

        # Don't answer with any invalid blocks.
        validation = self.validate_persist_block(block)
        if validation[0] == ValidationResult.invalid and total_count > 0:
            # Don't answer with this invalid block; check the integrity of our own chain instead.
            self.logger.error(
                "Not sending crawl response, the block is invalid. Result %s",
                repr(validation))
            self.persistence_integrity_check()
            return

        global_time = self.claim_global_time()
        payload = CrawlResponsePayload.from_crawl(block, crawl_id, index,
                                                  total_count)
        dist = GlobalTimeDistributionPayload(global_time)

        packet = self._ez_pack(self._prefix, 3, [dist, payload], False)
        self.endpoint.send(peer.address, packet)

    @synchronized
    @lazy_wrapper_unsigned_wd(GlobalTimeDistributionPayload,
                              CrawlResponsePayload)
    async def received_crawl_response(self, source_address, dist, payload,
                                      data):
        await self.received_half_block(
            source_address,
            data[:-12])  # We cut off a few bytes to make it a BlockPayload

        block = self.get_block_class(payload.type).from_payload(
            payload, self.serializer)
        cache = self.request_cache.get("crawl", payload.crawl_id)
        if cache:
            cache.received_block(block, payload.total_count)

    @lazy_wrapper_unsigned_wd(GlobalTimeDistributionPayload,
                              EmptyCrawlResponsePayload)
    def received_empty_crawl_response(self, source_address, dist, payload,
                                      data):
        cache = self.request_cache.get("crawl", payload.crawl_id)
        if cache:
            self.logger.info(
                "Received empty crawl response for crawl with ID %d",
                payload.crawl_id)
            cache.received_empty_response()

    def get_chain_length(self):
        """
        Return the length of your own chain.
        """
        latest_block = self.persistence.get_latest(
            self.my_peer.public_key.key_to_bin())
        return 0 if not latest_block else latest_block.sequence_number

    @synchronized
    def create_introduction_request(self,
                                    socket_address,
                                    extra_bytes=b'',
                                    new_style=False):
        extra_bytes = struct.pack('>l', self.get_chain_length())
        return super().create_introduction_request(socket_address, extra_bytes,
                                                   new_style)

    @synchronized
    def create_introduction_response(self,
                                     lan_socket_address,
                                     socket_address,
                                     identifier,
                                     introduction=None,
                                     extra_bytes=b'',
                                     prefix=None,
                                     new_style=False):
        extra_bytes = struct.pack('>l', self.get_chain_length())
        return super(TrustChainCommunity, self).create_introduction_response(
            lan_socket_address, socket_address, identifier, introduction,
            extra_bytes, prefix, new_style)

    @synchronized
    def introduction_response_callback(self, peer, dist, payload):
        chain_length = None
        if payload.extra_bytes:
            chain_length = struct.unpack('>l', payload.extra_bytes)[0]

        if peer.address in self.network.blacklist:  # Do not crawl addresses in our blacklist (trackers)
            return

        # Check if we have pending crawl requests for this peer
        has_intro_crawl = self.request_cache.has(
            "introcrawltimeout", IntroCrawlTimeout.get_number_for(peer))
        has_chain_crawl = self.request_cache.has(
            "chaincrawl", ChainCrawlCache.get_number_for(peer))
        if has_intro_crawl or has_chain_crawl:
            self.logger.debug(
                "Skipping crawl of peer %s, another crawl is pending", peer)
            return

        if self.settings.crawler:
            self.crawl_chain(peer, latest_block_num=chain_length)
        else:
            known_blocks = self.persistence.get_number_of_known_blocks(
                public_key=peer.public_key.key_to_bin())
            if known_blocks < 1000 or random.random() > 0.5:
                self.request_cache.add(IntroCrawlTimeout(self, peer))
                self.crawl_lowest_unknown(peer, latest_block_num=chain_length)

    async def unload(self):
        self.logger.debug("Unloading the TrustChain Community.")
        self.shutting_down = True

        await self.request_cache.shutdown()

        await super(TrustChainCommunity, self).unload()

        # Close the persistence layer
        self.persistence.close()
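
A minimal, hedged usage sketch for the community above: a block listener decides whether incoming proposal blocks get counter-signed, and sign_block starts a new agreement. The BlockListener import path, the add_listener helper and the example block type are assumptions based on the upstream py-ipv8 layout; sign_block and the future it returns are the methods shown above.

from ipv8.attestation.trustchain.listener import BlockListener  # assumed import path


class ExampleBlockListener(BlockListener):
    """Illustrative listener: agree to counter-sign everything and log persisted blocks."""

    def should_sign(self, block):
        # Consulted (possibly as a coroutine) by process_half_block() before counter-signing.
        return True

    def received_block(self, block):
        # Invoked from notify_listeners() whenever a block of our type is persisted.
        print("Stored block", block)


# Assuming `community` is a loaded TrustChainCommunity and `peer` a verified counterparty:
community.add_listener(ExampleBlockListener(), [b'example_type'])
sign_future = community.sign_block(peer,
                                   public_key=peer.public_key.key_to_bin(),
                                   block_type=b'example_type',
                                   transaction={'amount': 42})
# sign_future fires with both half blocks once the counterparty's signature arrives
# (see the HalfBlockSignCache branch of sign_block above).
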
Ejemplo n.º 15
0
class RemoteQueryCommunity(Community):
    """
    Community for general-purpose SELECT-like queries into a remote Channels database
    """

    master_peer = Peer(
        unhexlify(
            "4c69624e61434c504b3a667b8dee4645475512c0780990cfaca234ad19c5dabcb065751776"
            "b75a4b4210c06e2eb4d8bbf4a775ed735eb16bbc3e44193479ad7426d7cd1067807f95b696"
        )
    )

    def __init__(self, my_peer, endpoint, network, metadata_store, settings=None, notifier=None):
        super(RemoteQueryCommunity, self).__init__(my_peer, endpoint, network)

        self.notifier = notifier
        self.max_peers = 60

        self.settings = settings or RemoteQueryCommunitySettings()

        self.mds = metadata_store

        # This set contains all the peers that we queried for subscribed channels over time.
        # It is emptied regularly. The purpose of this set is to work as a filter so we never query the same
        # peer twice. If we do, this should happen really rarely
        # TODO: use Bloom filter here instead. We actually *want* it to be all-false-positives eventually.
        self.queried_subscribed_channels_peers = set()
        self.queried_peers_limit = 1000

        self.add_message_handler(RemoteSelectPayload, self.on_remote_select)
        self.add_message_handler(SelectResponsePayload, self.on_remote_select_response)

        self.request_cache = RequestCache()

    def get_random_peers(self, sample_size=None):
        # Randomly sample sample_size peers from the complete list of our peers
        all_peers = self.get_peers()
        if sample_size is not None and sample_size < len(all_peers):
            return sample(all_peers, sample_size)
        return all_peers

    def send_remote_select(self, peer, **kwargs):
        request = SelectRequest(self.request_cache, hexlify(peer.mid), kwargs)
        self.request_cache.add(request)
        self.ez_send(peer, RemoteSelectPayload(request.number, json.dumps(kwargs).encode('utf8')))

    def send_remote_select_to_many(self, **kwargs):
        for p in self.get_random_peers(self.settings.max_query_peers):
            self.send_remote_select(p, **kwargs)

    def send_remote_select_subscribed_channels(self, peer):
        request_dict = {
            "metadata_type": [CHANNEL_TORRENT],
            "subscribed": True,
            "attribute_ranges": (("num_entries", 1, None),),
        }
        self.send_remote_select(peer, **request_dict)

    @lazy_wrapper(RemoteSelectPayload)
    async def on_remote_select(self, peer, request):
        request_sanitized = sanitize_query(json.loads(request.json), self.settings.max_response_size)
        db_results = await self.mds.MetadataNode.get_entries_threaded(**request_sanitized)
        if not db_results:
            return

        index = 0
        while index < len(db_results):
            data, index = entries_to_chunk(db_results, self.settings.maximum_payload_size, start_index=index)
            self.ez_send(peer, SelectResponsePayload(request.id, data))

    @lazy_wrapper(SelectResponsePayload)
    async def on_remote_select_response(self, peer, response):

        request = self.request_cache.get(hexlify(peer.mid), response.id)
        if request is None:
            return

        # Check for limit on the number of packets per request
        if request.packets_limit > 1:
            request.packets_limit -= 1
        else:
            self.request_cache.pop(hexlify(peer.mid), response.id)

        # We use responses for requests about subscribed channels to bump our local channels ratings
        peer_vote = peer if request.request_kwargs.get("subscribed", None) is True else None

        result = await self.mds.process_compressed_mdblob_threaded(response.raw_blob, peer_vote_for_channels=peer_vote)
        # Maybe move this callback to MetadataStore side?
        if self.notifier:
            new_channels = [
                md.to_simple_dict()
                for md, result in result
                if md and md.metadata_type == CHANNEL_TORRENT and result == UNKNOWN_CHANNEL and md.origin_id == 0
            ]
            if new_channels:
                self.notifier.notify(
                    NTFY.CHANNEL_DISCOVERED, {"results": new_channels, "uuid": str(CHANNELS_VIEW_UUID)}
                )

    def introduction_response_callback(self, peer, dist, payload):
        if peer.address in self.network.blacklist or peer.mid in self.queried_subscribed_channels_peers:
            return
        if len(self.queried_subscribed_channels_peers) >= self.queried_peers_limit:
            self.queried_subscribed_channels_peers.clear()
        self.queried_subscribed_channels_peers.add(peer.mid)
        self.send_remote_select_subscribed_channels(peer)

    async def unload(self):
        await self.request_cache.shutdown()
        await super(RemoteQueryCommunity, self).unload()
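
A short, hedged sketch of driving the community above: the same keyword arguments used by send_remote_select_subscribed_channels can be fanned out to a sample of peers, and the responses are merged into the local metadata store by on_remote_select_response. Only keywords already visible in the snippet are used; `community` is assumed to be a loaded RemoteQueryCommunity.

# Fan a subscribed-channels query out to at most settings.max_query_peers peers:
community.send_remote_select_to_many(metadata_type=[CHANNEL_TORRENT],
                                     subscribed=True,
                                     attribute_ranges=(("num_entries", 1, None),))

# Or query a single randomly sampled peer:
for peer in community.get_random_peers(sample_size=1):
    community.send_remote_select_subscribed_channels(peer)
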
Ejemplo n.º 16
0
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.request_cache = RequestCache()
Ejemplo n.º 17
0
class BandwidthAccountingCommunity(Community):
    """
    Community around bandwidth accounting and payouts.
    """
    community_id = unhexlify('79b25f2867739261780faefede8f25038de9975d')
    DB_NAME = 'bandwidth'
    version = b'\x02'

    def __init__(self, *args, **kwargs) -> None:
        """
        Initialize the community.
        :param database: The database that stores transactions; it will be created if not provided.
        :param database_path: The path at which the database will be created. Defaults to the current working directory.
        """
        self.settings = kwargs.pop('settings', BandwidthAccountingSettings())
        self.database = kwargs.pop('database', None)
        self.database_path = Path(kwargs.pop('database_path', ''))
        self.random = Random()

        super().__init__(*args, **kwargs)

        self.request_cache = RequestCache()
        self.my_pk = self.my_peer.public_key.key_to_bin()

        if not self.database:
            self.database = BandwidthDatabase(self.database_path, self.my_pk)

        self.add_message_handler(BandwidthTransactionPayload,
                                 self.received_transaction)
        self.add_message_handler(BandwidthTransactionQueryPayload,
                                 self.received_query)

        self.register_task("query_peers",
                           self.query_random_peer,
                           interval=self.settings.outgoing_query_interval)

        self.logger.info(
            "Started bandwidth accounting community with public key %s",
            hexlify(self.my_pk))

    def construct_signed_transaction(self, peer: Peer,
                                     amount: int) -> BandwidthTransactionData:
        """
        Construct a new signed bandwidth transaction.
        :param peer: The counterparty of the transaction.
        :param amount: The amount of bytes to payout.
        :return: A signed BandwidthTransaction.
        """
        peer_pk = peer.public_key.key_to_bin()
        latest_tx = self.database.get_latest_transaction(self.my_pk, peer_pk)
        total_amount = latest_tx.amount + amount if latest_tx else amount
        next_seq_num = latest_tx.sequence_number + 1 if latest_tx else 1
        tx = BandwidthTransactionData(next_seq_num, self.my_pk, peer_pk,
                                      EMPTY_SIGNATURE, EMPTY_SIGNATURE,
                                      total_amount)
        tx.sign(self.my_peer.key, as_a=True)
        return tx

    def do_payout(self, peer: Peer, amount: int) -> Future:
        """
        Conduct a payout with a given amount of bytes to a peer.
        :param peer: The counterparty of the payout.
        :param amount: The amount of bytes to payout.
        :return: A Future that fires when the counterparty has acknowledged the payout.
        """
        tx = self.construct_signed_transaction(peer, amount)
        with db_session:
            self.database.BandwidthTransaction.insert(tx)
        cache = self.request_cache.add(BandwidthTransactionSignCache(self, tx))
        self.send_transaction(tx, peer.address, cache.number)

        return cache.future

    def send_transaction(self, transaction: BandwidthTransactionData,
                         address: Address, request_id: int) -> None:
        """
        Send a provided transaction to another party.
        :param transaction: The BandwidthTransaction to send to the other party.
        :param address: The IP address and port to send the transaction to.
        :param request_id: The identifier of the message, which is usually provided by a request cache.
        """
        payload = BandwidthTransactionPayload.from_transaction(
            transaction, request_id)
        packet = self._ez_pack(self._prefix, 1, [payload], False)
        self.endpoint.send(address, packet)

    def received_transaction(self, source_address: Address,
                             data: bytes) -> None:
        """
        Callback when we receive a transaction from another peer.
        :param source_address: The network address of the peer that has sent us the transaction.
        :param data: The serialized, raw data in the packet.
        """
        payload = self._ez_unpack_noauth(BandwidthTransactionPayload,
                                         data,
                                         global_time=False)
        tx = BandwidthTransactionData.from_payload(payload)

        if not tx.is_valid():
            self.logger.info("Transaction %s not valid, ignoring it", tx)
            return

        if payload.public_key_a == self.my_pk or payload.public_key_b == self.my_pk:
            # This transaction involves this peer.
            latest_tx = self.database.get_latest_transaction(
                tx.public_key_a, tx.public_key_b)
            if payload.public_key_b == self.my_peer.public_key.key_to_bin():
                from_peer = Peer(payload.public_key_a, source_address)
                if latest_tx:
                    # Check if the amount in the received transaction is higher than the amount of the latest one
                    # in the database.
                    if payload.amount > latest_tx.amount:
                        # Sign it, store it, and send it back
                        tx.sign(self.my_peer.key, as_a=False)
                        self.database.BandwidthTransaction.insert(tx)
                        self.send_transaction(tx, from_peer.address,
                                              payload.request_id)
                    else:
                        self.logger.info(
                            "Received older bandwidth transaction from peer %s:%d - "
                            "sending back the latest one", *from_peer.address)
                        self.send_transaction(latest_tx, from_peer.address,
                                              payload.request_id)
                else:
                    # This transaction is the first one with party A. Sign it, store it, and send it back.
                    tx.sign(self.my_peer.key, as_a=False)
                    self.database.BandwidthTransaction.insert(tx)
                    from_peer = Peer(payload.public_key_a, source_address)
                    self.send_transaction(tx, from_peer.address,
                                          payload.request_id)
            elif payload.public_key_a == self.my_peer.public_key.key_to_bin():
                # It seems that we initiated this transaction. Check if we are waiting for it.
                cache = self.request_cache.get("bandwidth-tx-sign",
                                               payload.request_id)
                if not cache:
                    self.logger.info(
                        "Received bandwidth transaction %s without associated cache entry, ignoring it",
                        tx)
                    return

                if not latest_tx or (latest_tx
                                     and latest_tx.amount >= tx.amount):
                    self.database.BandwidthTransaction.insert(tx)

                cache.future.set_result(tx)
        else:
            # This transaction involves two unknown peers. We can add it to our database.
            self.database.BandwidthTransaction.insert(tx)

    def query_random_peer(self) -> None:
        """
        Query a random neighbouring peer and ask for their bandwidth transactions.
        """
        peers = list(self.network.verified_peers)
        if peers:
            random_peer = self.random.choice(peers)
            self.query_transactions(random_peer)

    def query_transactions(self, peer: Peer) -> None:
        """
        Ask a specific peer for their bandwidth transactions.
        :param peer: The peer to send the query to.
        """
        self.logger.info("Querying the transactions of peer %s:%d",
                         *peer.address)
        payload = BandwidthTransactionQueryPayload()
        packet = self._ez_pack(self._prefix, 2, [payload], False)
        self.endpoint.send(peer.address, packet)

    def received_query(self, source_address: Address, data: bytes) -> None:
        """
        We received a query from another peer.
        :param source_address: The network address of the peer that has sent us the query.
        :param data: The serialized, raw data in the packet.
        """
        my_txs = self.database.get_my_latest_transactions(
            limit=self.settings.max_tx_returned_in_query)
        self.logger.debug("Sending %d bandwidth transaction(s) to peer %s:%d",
                          len(my_txs), *source_address)
        for tx in my_txs:
            self.send_transaction(tx, source_address, 0)

    def get_statistics(self) -> Dict:
        """
        Return a dictionary with bandwidth statistics, including the total amount of bytes given and taken, and the
        number of unique peers you helped/that helped you.
        :return: A dictionary with statistics.
        """
        my_pk = self.my_peer.public_key.key_to_bin()
        return {
            "id": hexlify(my_pk),
            "total_given": self.database.get_total_given(my_pk),
            "total_taken": self.database.get_total_taken(my_pk),
            "num_peers_helped": self.database.get_num_peers_helped(my_pk),
            "num_peers_helped_by": self.database.get_num_peers_helped_by(my_pk)
        }

    async def unload(self) -> None:
        """
        Unload this community by shutting down the request cache and database.
        """
        self.logger.info("Unloading the bandwidth accounting community.")

        await self.request_cache.shutdown()
        self.database.shutdown()

        await super().unload()
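
A hedged usage sketch for the bandwidth community above: do_payout returns a Future that resolves with the counter-signed transaction once the counterparty answers, and get_statistics summarizes the local database. The payout amount and the surrounding coroutine are illustrative; `community` and `peer` are assumed to be a loaded community and a verified peer.

async def reward_peer(community, peer):
    # Pay out 250 MiB worth of bandwidth and wait for the counterparty's signature.
    tx = await community.do_payout(peer, 250 * 1024 * 1024)
    print("Counterparty signed transaction with sequence number", tx.sequence_number)
    print(community.get_statistics())
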
Ejemplo n.º 18
0
class EndpointServer(Community):
    """
    Make some small modifications to the Community to allow it to use a dynamic prefix.
    We will also only answer introduction requests.
    """
    community_id = os.urandom(20)

    def __init__(self, endpoint):
        my_peer = Peer(default_eccrypto.generate_key(u"very-low"))
        self.signature_length = default_eccrypto.get_signature_length(
            my_peer.public_key)
        super().__init__(my_peer, endpoint, Network())
        self.request_cache = RequestCache()
        self.endpoint.add_listener(
            self
        )  # Listen to all incoming packets (not just the fake community_id).
        self.churn_strategy = TrackerChurn(self)
        self.churn_task = self.register_task("churn",
                                             self.churn_strategy.take_step,
                                             interval=10)

    def on_packet(self, packet, warn_unknown=False):
        source_address, data = packet
        try:
            probable_peer = self.network.get_verified_by_address(
                source_address)
            if probable_peer:
                probable_peer.last_response = time.time()
            if data[22] == 246:
                self.on_generic_introduction_request(source_address, data,
                                                     data[:22])
            if data[22] == 245:
                self.on_generic_introduction_response(source_address, data,
                                                      data[:22])
            elif warn_unknown:
                self.logger.warning("Tracker received unknown message %s",
                                    str(data[22]))
        except Exception:
            traceback.print_exc()

    def on_generic_introduction_request(self, source_address, data, prefix):
        auth, dist, payload = self._ez_unpack_auth(IntroductionRequestPayload,
                                                   data)
        peer = Peer(auth.public_key_bin, source_address)
        peer.address = UDPv4LANAddress(*payload.source_lan_address)
        peer.last_response = time.time()

        service_id = prefix[2:]
        self.on_peer_introduction_request(peer, source_address, service_id)

        self.network.add_verified_peer(peer)
        self.network.discover_services(peer, [
            service_id,
        ])

        intro_peers = [
            p for p in self.network.get_peers_for_service(service_id)
            if not p == peer
        ]
        if intro_peers:
            intro_peer = random.choice(intro_peers)
        else:
            intro_peer = None

        packet = self.create_introduction_response(payload.destination_address,
                                                   peer.address,
                                                   payload.identifier,
                                                   introduction=intro_peer,
                                                   prefix=prefix)
        self.endpoint.send(peer.address, packet)

    def send_ping(self, peer):
        service = random.choice(tuple(
            self.network.get_services_for_peer(peer)))
        prefix = b'\x00' + self.version + service
        packet = self.create_introduction_request(peer.address, prefix=prefix)
        cache = TrackerPing(self.request_cache, self.global_time, self.network,
                            peer, service)
        self.request_cache.add(cache)
        self.endpoint.send(peer.address, packet)

    def on_generic_introduction_response(self, source_address, data, prefix):
        auth, dist, payload = self._ez_unpack_auth(IntroductionResponsePayload,
                                                   data)
        if not self.request_cache.has('tracker-ping', payload.identifier):
            return

        self.request_cache.pop('tracker-ping', payload.identifier)
        if payload.peer_limit_reached:
            peer = Peer(auth.public_key_bin, source_address)
            services = self.network.get_services_for_peer(peer)
            services.discard(prefix[2:])
            if not services:
                self.network.remove_peer(peer)

    def on_peer_introduction_request(self, peer, source_address, service_id):
        """
        A hook to collect anonymized statistics about total peer count
        """

    def get_peer_for_introduction(self, exclude=None, new_style=False):
        """
        We explicitly provide create_introduction_response with a peer.
        If on_generic_introduction_request provides None, this method should not suggest a peer either,
        especially since the peer returned by get_peer_for_introduction would be meant for the DiscoveryCommunity.
        """
        return None

    async def unload(self):
        await self.request_cache.shutdown()
        await super().unload()
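
A minimal, hedged sketch of standing the tracker community above up on a UDP endpoint. The UDPEndpoint import path is an assumption based on the upstream ipv8 layout, and open() may be synchronous in older releases; the essential point is that EndpointServer attaches itself to whatever endpoint it is given via endpoint.add_listener().

from asyncio import ensure_future, get_event_loop

from ipv8.messaging.interfaces.udp.endpoint import UDPEndpoint  # assumed import path


async def start_tracker(port=8090):
    endpoint = UDPEndpoint(port=port)
    await endpoint.open()  # may be a plain (non-awaitable) call in older ipv8 versions
    EndpointServer(endpoint)  # registers itself as an endpoint listener


ensure_future(start_tracker())
get_event_loop().run_forever()
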
Ejemplo n.º 19
0
class MyCommunity(Community):
    community_id = os.urandom(20)

    def __init__(self, *args, **kwargs):
        super(MyCommunity, self).__init__(*args, **kwargs)
        self.request_cache = RequestCache()

    async def unload(self):
        await self.request_cache.shutdown()
        await super(MyCommunity, self).unload()

    def finish_ping(self, cache, include=True):
        global RESULTS
        print(cache.hostname, cache.address, time.time() - cache.starttime)  # noqa: T001
        if include:
            if (cache.hostname, cache.address) in RESULTS:
                RESULTS[(cache.hostname, cache.address)].append(time.time() - cache.starttime)
            else:
                RESULTS[(cache.hostname, cache.address)] = [time.time() - cache.starttime]
        elif (cache.hostname, cache.address) not in RESULTS:
            RESULTS[(cache.hostname, cache.address)] = []

        self.next_ping()

    def next_ping(self):
        global CHECK_QUEUE
        if CHECK_QUEUE:
            hostname, address = CHECK_QUEUE.pop()
            packet = self.create_introduction_request(UDPv4Address(*address))
            self.request_cache.add(PingCache(self, hostname, address, time.time()))
            self.endpoint.send(address, packet)
        else:
            get_event_loop().stop()

    def introduction_response_callback(self, peer, dist, payload):
        if self.request_cache.has(u"introping", payload.identifier):
            cache = self.request_cache.pop(u"introping", payload.identifier)
            self.finish_ping(cache)

    def started(self):
        global CHECK_QUEUE

        dnsmap = {}
        for (address, port) in DISPERSY_BOOTSTRAPPER['init']['dns_addresses']:
            try:
                ip = gethostbyname(address)
                dnsmap[(ip, port)] = address
            except OSError:
                pass

        unknown_name = '*'

        for (ip, port) in DISPERSY_BOOTSTRAPPER['init']['ip_addresses']:
            hostname = dnsmap.get((ip, port), None)
            if not hostname:
                hostname = unknown_name
                unknown_name = unknown_name + '*'
            CHECK_QUEUE.append((hostname, (ip, port)))

        CHECK_QUEUE = CHECK_QUEUE * CONST_REQUESTS

        self.next_ping()
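
The class above relies on module-level globals (RESULTS, CHECK_QUEUE, CONST_REQUESTS) and on an IPv8 instance that are not part of the snippet. A hedged sketch of that missing scaffolding follows; the import paths, the manual attachment of the overlay and the request count are assumptions, and the full script would also need the helpers used inside the class (PingCache, UDPv4Address, gethostbyname).

from asyncio import ensure_future, get_event_loop

from ipv8.configuration import DISPERSY_BOOTSTRAPPER, get_default_configuration
from ipv8.keyvault.crypto import default_eccrypto
from ipv8.peer import Peer
from ipv8_service import IPv8

RESULTS = {}         # (hostname, address) -> list of measured round-trip times
CHECK_QUEUE = []     # filled by MyCommunity.started() with (hostname, address) pairs
CONST_REQUESTS = 10  # how many pings to send to each bootstrap node


async def main():
    configuration = get_default_configuration()
    configuration['overlays'] = []  # we attach MyCommunity by hand below
    ipv8 = IPv8(configuration)
    await ipv8.start()
    community = MyCommunity(Peer(default_eccrypto.generate_key("very-low")),
                            ipv8.endpoint, ipv8.network)
    ipv8.overlays.append(community)
    community.started()


ensure_future(main())
get_event_loop().run_forever()
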