Example 1
    def handle_version(self, version, services, addr_to, addr_from, nonce,
                       sub_version, mode, best_share_hash):
        if self.other_version is not None:
            raise PeerMisbehavingError('more than one version message')
        if version < 4:
            raise PeerMisbehavingError('peer too old')

        self.other_version = version
        self.other_sub_version = sub_version[:512]
        self.other_services = services

        if nonce == self.node.nonce:
            raise PeerMisbehavingError('was connected to self')
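        # A nonce already present in self.node.peers indicates a duplicate
        # connection to an existing peer; close it rather than flag misbehaviour.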
        if nonce in self.node.peers:
            if p2pool.DEBUG:
                print 'Detected duplicate connection, disconnecting from %s:%i' % self.addr
            self.transport.loseConnection()
            return

        self.nonce = nonce
        self.connected2 = True

        self.timeout_delayed.cancel()
        self.timeout_delayed = reactor.callLater(100, self._timeout)

        old_dataReceived = self.dataReceived
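        # Wrap dataReceived so that any incoming bytes reset the 100-second
        # inactivity timeout before being passed to the original handler.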

        def new_dataReceived(data):
            if self.timeout_delayed is not None:
                self.timeout_delayed.reset(100)
            old_dataReceived(data)

        self.dataReceived = new_dataReceived

        self.factory.proto_connected(self)

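        # Two periodic loops: one sends pings, the other advertises our
        # address. Each lambda returns an exponentially distributed delay,
        # which run_repeatedly presumably uses as the wait before the next call.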
        self._stop_thread = deferral.run_repeatedly(
            lambda: [self.send_ping(),
                     random.expovariate(1 / 100)][-1])

        self._stop_thread2 = deferral.run_repeatedly(lambda: [
            self.send_addrme(port=self.node.port),
            random.expovariate(1 / (100 * len(self.node.peers) + 1))
        ][-1])

        if best_share_hash is not None:
            self.node.handle_share_hashes([best_share_hash], self)
Example 2
 def handle_version(self, version, services, addr_to, addr_from, nonce, sub_version, mode, best_share_hash):
     if self.other_version is not None:
         raise PeerMisbehavingError('more than one version message')
     if version < 4:
         raise PeerMisbehavingError('peer too old')
     
     self.other_version = version
     self.other_sub_version = sub_version[:512]
     self.other_services = services
     
     if nonce == self.node.nonce:
         raise PeerMisbehavingError('was connected to self')
     if nonce in self.node.peers:
         if p2pool.DEBUG:
             print 'Detected duplicate connection, disconnecting from %s:%i' % self.addr
         self.transport.loseConnection()
         return
     
     self.nonce = nonce
     self.connected2 = True
     
     self.timeout_delayed.cancel()
     self.timeout_delayed = reactor.callLater(100, self._timeout)
     
     old_dataReceived = self.dataReceived
     def new_dataReceived(data):
         if self.timeout_delayed is not None:
             self.timeout_delayed.reset(100)
         old_dataReceived(data)
     self.dataReceived = new_dataReceived
     
     self.factory.proto_connected(self)
     
     self._stop_thread = deferral.run_repeatedly(lambda: [
         self.send_ping(),
     random.expovariate(1/100)][-1])
     
     self._stop_thread2 = deferral.run_repeatedly(lambda: [
         self.send_addrme(port=self.node.port),
     random.expovariate(1/(100*len(self.node.peers) + 1))][-1])
     
     if best_share_hash is not None:
         self.node.handle_share_hashes([best_share_hash], self)
Example 3
 def start(self):
     if self.running:
         raise ValueError('already running')
     
     self.clientfactory.start()
     self.serverfactory.start()
     self.singleclientconnectors = [reactor.connectTCP(addr, port, SingleClientFactory(self)) for addr, port in self.connect_addrs]
     
     self.running = True
     
     self._stop_thinking = deferral.run_repeatedly(self._think)
Example 4
    def start(self):
        if self.running:
            raise ValueError('already running')

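        # Bring up the client and server factories, then open one direct TCP
        # connection for each explicitly configured peer address.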
        self.clientfactory.start()
        self.serverfactory.start()
        self.singleclientconnectors = [
            reactor.connectTCP(addr, port, SingleClientFactory(self))
            for addr, port in self.connect_addrs
        ]

        self.running = True

        self._stop_thinking = deferral.run_repeatedly(self._think)
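        # Once an hour, clear recorded peer transgressions via a Twisted LoopingCall.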
        self.forgiveness_task = task.LoopingCall(self.forgive_transgressions)
        self.forgiveness_task.start(3600.)
Example 5
 def start(self):
     assert not self.running
     self.running = True
     self._stop_thinking = deferral.run_repeatedly(self._think)
Example 6
    def handle_version(self, version, services, addr_to, addr_from, nonce,
                       sub_version, mode, best_share_hash):
        if self.other_version is not None:
            raise PeerMisbehavingError('more than one version message')
        if version < 1300:
            raise PeerMisbehavingError('peer too old')

        self.other_version = version
        self.other_sub_version = sub_version[:512]
        self.other_services = services

        if nonce == self.node.nonce:
            raise PeerMisbehavingError('was connected to self')
        if nonce in self.node.peers:
            if p2pool.DEBUG:
                print 'Detected duplicate connection, disconnecting from %s:%i' % self.addr
            self.disconnect()
            return

        self.nonce = nonce
        self.connected2 = True

        self.timeout_delayed.cancel()
        self.timeout_delayed = reactor.callLater(100, self._timeout)

        old_dataReceived = self.dataReceived

        def new_dataReceived(data):
            if self.timeout_delayed is not None:
                self.timeout_delayed.reset(100)
            old_dataReceived(data)

        self.dataReceived = new_dataReceived

        self.factory.proto_connected(self)

        self._stop_thread = deferral.run_repeatedly(
            lambda: [self.send_ping(),
                     random.expovariate(1 / 100)][-1])

        if self.node.advertise_ip:
            self._stop_thread2 = deferral.run_repeatedly(lambda: [
                self.sendAdvertisement(),
                random.expovariate(1 / (100 * len(self.node.peers) + 1))
            ][-1])

        if best_share_hash is not None:
            self.node.handle_share_hashes([best_share_hash], self)

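        # Keep the peer's view of our known transactions in sync: announce
        # newly seen tx hashes with have_tx and retract dropped ones with losing_tx.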
        def update_remote_view_of_my_known_txs(before, after):
            added = set(after) - set(before)
            removed = set(before) - set(after)
            if added:
                self.send_have_tx(tx_hashes=list(added))
            if removed:
                self.send_losing_tx(tx_hashes=list(removed))

                # cache forgotten txs here for a little while so latency of "losing_tx" packets doesn't cause problems
                key = max(
                    self.known_txs_cache) + 1 if self.known_txs_cache else 0
                self.known_txs_cache[key] = dict(
                    (h, before[h]) for h in removed)
                reactor.callLater(20, self.known_txs_cache.pop, key)

        watch_id = self.node.known_txs_var.transitioned.watch(
            update_remote_view_of_my_known_txs)
        self.connection_lost_event.watch(
            lambda: self.node.known_txs_var.transitioned.unwatch(watch_id))

        self.send_have_tx(tx_hashes=self.node.known_txs_var.value.keys())

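        # Mirror our mining transactions on the peer with remember_tx / forget_tx,
        # tracking an estimate of the bytes the peer holds for us so it stays
        # within max_remembered_txs_size.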
        def update_remote_view_of_my_mining_txs(before, after):
            added = set(after) - set(before)
            removed = set(before) - set(after)
            if added:
                self.remote_remembered_txs_size += sum(
                    100 + bitcoin_data.tx_type.packed_size(after[x])
                    for x in added)
                assert self.remote_remembered_txs_size <= self.max_remembered_txs_size
                fragment(
                    self.send_remember_tx,
                    tx_hashes=[x for x in added if x in self.remote_tx_hashes],
                    txs=[
                        after[x] for x in added
                        if x not in self.remote_tx_hashes
                    ])
            if removed:
                self.send_forget_tx(tx_hashes=list(removed))
                self.remote_remembered_txs_size -= sum(
                    100 + bitcoin_data.tx_type.packed_size(before[x])
                    for x in removed)

        watch_id2 = self.node.mining_txs_var.transitioned.watch(
            update_remote_view_of_my_mining_txs)
        self.connection_lost_event.watch(
            lambda: self.node.mining_txs_var.transitioned.unwatch(watch_id2))

        self.remote_remembered_txs_size += sum(
            100 + bitcoin_data.tx_type.packed_size(x)
            for x in self.node.mining_txs_var.value.values())
        assert self.remote_remembered_txs_size <= self.max_remembered_txs_size
        fragment(self.send_remember_tx,
                 tx_hashes=[],
                 txs=self.node.mining_txs_var.value.values())
Example 7
 def start(self):
     assert not self.running
     self.running = True
     self._stop_thinking = deferral.run_repeatedly(self._think)
Example 8
 def handle_version(self, version, services, addr_to, addr_from, nonce, sub_version, mode, best_share_hash):
     if self.other_version is not None:
         raise PeerMisbehavingError('more than one version message')
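     # Minimum accepted protocol version depends on the network: 1300 for bitcoin, 8 for everything else.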
     if version < (1300 if self.node.net.NAME == 'bitcoin' else 8):
         raise PeerMisbehavingError('peer too old')
     
     self.other_version = version
     self.other_sub_version = sub_version[:512]
     self.other_services = services
     
     if nonce == self.node.nonce:
         raise PeerMisbehavingError('was connected to self')
     if nonce in self.node.peers:
         if p2pool.DEBUG:
             print 'Detected duplicate connection, disconnecting from %s:%i' % self.addr
         self.disconnect()
         return
     
     self.nonce = nonce
     self.connected2 = True
     
     self.timeout_delayed.cancel()
     self.timeout_delayed = reactor.callLater(100, self._timeout)
     
     old_dataReceived = self.dataReceived
     def new_dataReceived(data):
         if self.timeout_delayed is not None:
             self.timeout_delayed.reset(100)
         old_dataReceived(data)
     self.dataReceived = new_dataReceived
     
     self.factory.proto_connected(self)
     
     self._stop_thread = deferral.run_repeatedly(lambda: [
         self.send_ping(),
     random.expovariate(1/100)][-1])
     
     self._stop_thread2 = deferral.run_repeatedly(lambda: [
         self.send_addrme(port=self.node.serverfactory.listen_port.getHost().port) if self.node.serverfactory.listen_port is not None else None,
     random.expovariate(1/(100*len(self.node.peers) + 1))][-1])
     
     if best_share_hash is not None:
         self.node.handle_share_hashes([best_share_hash], self)
     
     def update_remote_view_of_my_known_txs(before, after):
         added = set(after) - set(before)
         removed = set(before) - set(after)
         if added:
             self.send_have_tx(tx_hashes=list(added))
         if removed:
             self.send_losing_tx(tx_hashes=list(removed))
             
             # cache forgotten txs here for a little while so latency of "losing_tx" packets doesn't cause problems
             key = max(self.known_txs_cache) + 1 if self.known_txs_cache else 0
             self.known_txs_cache[key] = dict((h, before[h]) for h in removed)
             reactor.callLater(20, self.known_txs_cache.pop, key)
     watch_id = self.node.known_txs_var.transitioned.watch(update_remote_view_of_my_known_txs)
     self.connection_lost_event.watch(lambda: self.node.known_txs_var.transitioned.unwatch(watch_id))
     
     self.send_have_tx(tx_hashes=self.node.known_txs_var.value.keys())
     
     def update_remote_view_of_my_mining_txs(before, after):
         added = set(after) - set(before)
         removed = set(before) - set(after)
         if added:
             self.remote_remembered_txs_size += sum(100 + bitcoin_data.tx_type.packed_size(after[x]) for x in added)
             assert self.remote_remembered_txs_size <= self.max_remembered_txs_size
             fragment(self.send_remember_tx, tx_hashes=[x for x in added if x in self.remote_tx_hashes], txs=[after[x] for x in added if x not in self.remote_tx_hashes])
         if removed:
             self.send_forget_tx(tx_hashes=list(removed))
             self.remote_remembered_txs_size -= sum(100 + bitcoin_data.tx_type.packed_size(before[x]) for x in removed)
     watch_id2 = self.node.mining_txs_var.transitioned.watch(update_remote_view_of_my_mining_txs)
     self.connection_lost_event.watch(lambda: self.node.mining_txs_var.transitioned.unwatch(watch_id2))
     
     self.remote_remembered_txs_size += sum(100 + bitcoin_data.tx_type.packed_size(x) for x in self.node.mining_txs_var.value.values())
     assert self.remote_remembered_txs_size <= self.max_remembered_txs_size
     fragment(self.send_remember_tx, tx_hashes=[], txs=self.node.mining_txs_var.value.values())