Example #1
    def __init__(self, *args, **kwargs) -> None:
        """
        Initialize the community.
        :param database: The database that stores transactions; it will be created if not provided.
        :param database_path: The path at which the database will be created. Defaults to the current working directory.
        """
        self.settings = kwargs.pop('settings', BandwidthAccountingSettings())
        self.database = kwargs.pop('database', None)
        self.database_path = Path(kwargs.pop('database_path', ''))
        self.random = Random()

        super().__init__(*args, **kwargs)

        self.request_cache = RequestCache()
        self.my_pk = self.my_peer.public_key.key_to_bin()

        if not self.database:
            self.database = BandwidthDatabase(self.database_path, self.my_pk)

        self.add_message_handler(BandwidthTransactionPayload,
                                 self.received_transaction)
        self.add_message_handler(BandwidthTransactionQueryPayload,
                                 self.received_query)

        self.register_task("query_peers",
                           self.query_random_peer,
                           interval=self.settings.outgoing_query_interval)

        self.logger.info(
            "Started bandwidth accounting community with public key %s",
            hexlify(self.my_pk))
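The register_task("query_peers", ...) call above comes from py-ipv8's TaskManager, which the Community classes in these examples build on. Below is a minimal, self-contained sketch of that periodic-task pattern on its own; the task name and the print callback are purely illustrative, and the TaskManager / shutdown_task_manager API is assumed to match recent py-ipv8 releases.

import asyncio

from ipv8.taskmanager import TaskManager


async def main():
    tasks = TaskManager()
    # Invoke the callback every 30 seconds, mirroring
    # register_task("query_peers", ..., interval=...) in the example above.
    tasks.register_task("query_peers",
                        lambda: print("querying a random peer"),
                        interval=30.0)
    await asyncio.sleep(65)  # let the task fire a couple of times
    await tasks.shutdown_task_manager()  # cancel the periodic task cleanly


asyncio.run(main())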
Example #2
    def __init__(self,
                 my_peer,
                 endpoint,
                 network,
                 rqc_settings: RemoteQueryCommunitySettings = None,
                 metadata_store=None,
                 **kwargs):
        super().__init__(my_peer, endpoint, network=network, **kwargs)

        self.rqc_settings = rqc_settings
        self.mds: MetadataStore = metadata_store

        # This object stores requests for "select" queries that we sent to other hosts.
        # We keep track of the peers we actually requested data from, so nobody can randomly push spam at us.
        # It also keeps track of the hosts we responded to: those hosts may push updates
        # back at us, so we need to allow that.
        self.request_cache = RequestCache()

        self.add_message_handler(RemoteSelectPayload, self.on_remote_select)
        self.add_message_handler(RemoteSelectPayloadEva,
                                 self.on_remote_select_eva)
        self.add_message_handler(SelectResponsePayload,
                                 self.on_remote_select_response)

        self.eva_init()
        self.eva_register_receive_callback(self.on_receive)
        self.eva_register_send_complete_callback(self.on_send_complete)
        self.eva_register_error_callback(self.on_error)
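The comment block above spells out what the RequestCache is for: remember which requests we sent, match the responses to them, and drop anything unsolicited. The following is a minimal, self-contained sketch of that add/has/pop lifecycle using py-ipv8's ipv8.requestcache module directly; the "ping" prefix and the PingCache class are made up for illustration and are not part of the community above.

import asyncio

from ipv8.requestcache import NumberCache, RequestCache


class PingCache(NumberCache):
    """Remembers one outstanding request so the matching reply can be accepted."""

    def __init__(self, request_cache, number):
        super().__init__(request_cache, "ping", number)

    @property
    def timeout_delay(self):
        return 10.0  # seconds until on_timeout fires if no reply arrives

    def on_timeout(self):
        print("request", self.number, "timed out")


async def main():
    request_cache = RequestCache()

    # Sending side: register the request before it goes out on the wire.
    request_cache.add(PingCache(request_cache, 42))

    # Receiving side: only accept replies that match something we registered.
    if request_cache.has("ping", 42):
        request_cache.pop("ping", 42)

    await request_cache.shutdown()  # cancels any pending timeouts


asyncio.run(main())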
Example #3
File: community.py  Project: devos50/noodle
    def __init__(self, *args, **kwargs):
        if kwargs.get("work_dir"):
            self.work_dir = kwargs.pop("work_dir")
        super().__init__(*args, **kwargs)
        self._req = RequestCache()

        # Let every MessageStateMachine base class register its message handlers on this community.
        for base in self.__class__.__bases__:
            if issubclass(base, MessageStateMachine):
                base.setup_messages(self)
예제 #4
0
    def __init__(self, endpoint):
        my_peer = Peer(default_eccrypto.generate_key(u"very-low"))
        self.signature_length = default_eccrypto.get_signature_length(
            my_peer.public_key)
        super().__init__(my_peer, endpoint, Network())
        self.request_cache = RequestCache()
        self.endpoint.add_listener(
            self
        )  # Listen to all incoming packets (not just the fake community_id).
        self.churn_strategy = TrackerChurn(self)
        self.churn_task = self.register_task("churn",
                                             self.churn_strategy.take_step,
                                             interval=10)
Example #5
    def __init__(self, *args, **kwargs):
        working_directory = kwargs.pop("working_directory", "")
        self.persistence = kwargs.pop("persistence", None)
        db_name = kwargs.pop("db_name", self.DB_NAME)
        self.settings = kwargs.pop("settings", TrustChainSettings())
        self.receive_block_lock = RLock()

        super(TrustChainCommunity, self).__init__(*args, **kwargs)
        self.request_cache = RequestCache()
        self.logger = logging.getLogger(self.__class__.__name__)

        if not self.persistence:
            db_path = (
                os.path.join(working_directory, db_name)
                if working_directory != ":memory:"
                else working_directory
            )
            self.persistence = self.DB_CLASS(
                db_path, self.my_peer.public_key.key_to_bin()
            )
        self.relayed_broadcasts = set()
        self.relayed_broadcasts_order = deque()
        self.logger.debug(
            "The trustchain community started with Public Key: %s",
            hexlify(self.my_peer.public_key.key_to_bin()),
        )
        self.shutting_down = False
        self.listeners_map = {}  # Map of block_type -> [callbacks]
        self.register_task("db_cleanup", self.do_db_cleanup, interval=600)

        self.add_message_handler(HalfBlockPayload, self.received_half_block)
        self.add_message_handler(CrawlRequestPayload, self.received_crawl_request)
        self.add_message_handler(CrawlResponsePayload, self.received_crawl_response)
        self.add_message_handler(HalfBlockPairPayload, self.received_half_block_pair)
        self.add_message_handler(
            HalfBlockBroadcastPayload, self.received_half_block_broadcast
        )
        self.add_message_handler(
            HalfBlockPairBroadcastPayload, self.received_half_block_pair_broadcast
        )
        self.add_message_handler(
            EmptyCrawlResponsePayload, self.received_empty_crawl_response
        )
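Example #5 keeps listeners_map, a map of block_type -> [callbacks]. The sketch below shows how such a map is typically filled and consulted; add_listener, notify_listeners and received_block are plausible names used for illustration, not a quotation of the TrustChain code.

    def add_listener(self, listener, block_types):
        # Register a callback object for one or more block types.
        for block_type in block_types:
            self.listeners_map.setdefault(block_type, []).append(listener)

    def notify_listeners(self, block):
        # Hand a newly received block to every listener registered for its type.
        for listener in self.listeners_map.get(block.type, []):
            listener.received_block(block)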
Example #6
    def __init__(self, my_peer, endpoint, network, metadata_store, settings=None, notifier=None):
        super(RemoteQueryCommunity, self).__init__(my_peer, endpoint, network)

        self.notifier = notifier
        self.max_peers = 60

        self.settings = settings or RemoteQueryCommunitySettings()

        self.mds = metadata_store

        # This set contains all the peers that we queried for subscribed channels over time.
        # It is emptied regularly. The purpose of this set is to work as a filter so we never query the same
        # peer twice; if we do, it should happen really rarely.
        # TODO: use Bloom filter here instead. We actually *want* it to be all-false-positives eventually.
        self.queried_subscribed_channels_peers = set()
        self.queried_peers_limit = 1000

        self.add_message_handler(RemoteSelectPayload, self.on_remote_select)
        self.add_message_handler(SelectResponsePayload, self.on_remote_select_response)

        self.request_cache = RequestCache()
Example #7
    def __init__(self,
                 my_peer,
                 endpoint,
                 network,
                 metadata_store,
                 notifier=None):
        super(GigaChannelCommunity, self).__init__(my_peer, endpoint, network)
        self.metadata_store = metadata_store
        self.add_message_handler(self.NEWS_PUSH_MESSAGE, self.on_blob)
        self.add_message_handler(self.SEARCH_REQUEST, self.on_search_request)
        self.add_message_handler(self.SEARCH_RESPONSE, self.on_search_response)
        self.request_cache = RequestCache()
        self.notifier = notifier

        self.gossip_blob = None
        self.gossip_blob_personal_channel = None

        # We regularly regenerate the gossip blobs to account for changes in the local DB
        self.register_task("Renew channel gossip cache",
                           self.prepare_gossip_blob_cache,
                           interval=600)
Example #8
    def __init__(self, *args, **kwargs):
        super(MyCommunity, self).__init__(*args, **kwargs)
        self.request_cache = RequestCache()
Example #9
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.request_cache = RequestCache()
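Every example above creates a RequestCache in __init__; the matching teardown usually lives in unload(). A sketch of that convention, assuming the async unload/shutdown API of recent py-ipv8 releases:

    async def unload(self):
        await self.request_cache.shutdown()  # stop pending timeouts before the community goes away
        await super().unload()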