Example #1
    def __init__(self, env, controller):
        super().__init__()
        # Initialise the Peer class
        Peer.DEFAULT_PORTS = env.coin.PEER_DEFAULT_PORTS
        self.env = env
        self.controller = controller
        self.loop = controller.loop
        if env.irc and env.coin.IRC_PREFIX:
            self.irc = IRC(env, self)
        else:
            self.irc = None

        # Our clearnet and Tor Peers, if any
        self.myselves = [Peer(ident.host, env.server_features(), 'env')
                         for ident in env.identities]
        self.retry_event = asyncio.Event()
        # Peers have one entry per hostname.  Once connected, the
        # ip_addr property is either None, an onion peer, or the
        # IP address that was connected to.  Adding a peer will evict
        # any other peers with the same host name or IP address.
        self.peers = set()
        self.permit_onion_peer_time = time.time()
        self.proxy = SocksProxy(env.tor_proxy_host, env.tor_proxy_port,
                                loop=self.loop)
        self.import_peers()
Example #2
    def __init__(self, env, controller):
        super().__init__()
        # Initialise the Peer class
        Peer.DEFAULT_PORTS = env.coin.PEER_DEFAULT_PORTS
        self.env = env
        self.controller = controller
        self.loop = controller.loop
        self.myselves = peers_from_env(env)
        self.retry_event = asyncio.Event()
        # Peers have one entry per hostname.  Once connected, the
        # ip_addr property is either None, an onion peer, or the
        # IP address that was connected to.  Adding a peer will evict
        # any other peers with the same host name or IP address.
        self.peers = set()
        self.permit_onion_peer_time = time.time()
        self.proxy = SocksProxy(env.tor_proxy_host, env.tor_proxy_port,
                                loop=self.loop)
        self.import_peers()
Example #3
    def __init__(self, env, controller):
        super().__init__()
        # Initialise the Peer class
        Peer.DEFAULT_PORTS = env.coin.PEER_DEFAULT_PORTS
        self.env = env
        self.controller = controller
        self.loop = controller.loop
        self.irc = IRC(env, self)
        self.myself = peer_from_env(env)
        # value is max outgoing connections at a time
        self.semaphore = asyncio.BoundedSemaphore(value=8)
        self.retry_event = asyncio.Event()
        # Peers have one entry per hostname.  Once connected, the
        # ip_addr property is either None, an onion peer, or the
        # IP address that was connected to.  Adding a peer will evict
        # any other peers with the same host name or IP address.
        self.peers = set()
        self.onion_peers = []
        self.last_tor_retry_time = 0
        self.tor_proxy = SocksProxy(env.tor_proxy_host, env.tor_proxy_port,
                                    loop=self.loop)
        self.import_peers()
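
Example #3 caps concurrent outgoing connections with asyncio.BoundedSemaphore(value=8) (see the "max outgoing connections at a time" comment above). The snippet below is an illustrative sketch of that pattern only, not code from the project: the peer names and the connect() coroutine are invented for the demonstration.

import asyncio

MAX_OUTGOING = 8  # mirrors BoundedSemaphore(value=8) in Example #3

async def connect(peer, semaphore):
    # Hypothetical connection attempt; real code would open a socket here.
    async with semaphore:
        await asyncio.sleep(0.1)  # stand-in for network I/O
        print('finished attempt for', peer)

async def main():
    semaphore = asyncio.BoundedSemaphore(value=MAX_OUTGOING)
    peers = ['peer{}'.format(n) for n in range(20)]
    # At most MAX_OUTGOING connect() calls hold the semaphore at any moment.
    await asyncio.gather(*(connect(peer, semaphore) for peer in peers))

asyncio.run(main())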
Example #4
class PeerManager(util.LoggedClass):
    '''Looks after the DB of peer network servers.

    Attempts to maintain a connection with up to 8 peers.
    Issues a 'peers.subscribe' RPC to them and tells them our data.
    '''
    def __init__(self, env, controller):
        super().__init__()
        # Initialise the Peer class
        Peer.DEFAULT_PORTS = env.coin.PEER_DEFAULT_PORTS
        self.env = env
        self.controller = controller
        self.loop = controller.loop

        # Our clearnet and Tor Peers, if any
        self.myselves = [
            Peer(ident.host, env.server_features(), 'env')
            for ident in env.identities
        ]
        self.retry_event = asyncio.Event()
        # Peers have one entry per hostname.  Once connected, the
        # ip_addr property is either None, an onion peer, or the
        # IP address that was connected to.  Adding a peer will evict
        # any other peers with the same host name or IP address.
        self.peers = set()
        self.permit_onion_peer_time = time.time()
        self.proxy = SocksProxy(env.tor_proxy_host,
                                env.tor_proxy_port,
                                loop=self.loop)
        self.import_peers()

    def my_clearnet_peer(self):
        '''Returns the clearnet peer representing this server, if any.'''
        clearnet = [peer for peer in self.myselves if not peer.is_tor]
        return clearnet[0] if clearnet else None

    def info(self):
        '''The number of peers.'''
        self.set_peer_statuses()
        counter = Counter(peer.status for peer in self.peers)
        return {
            'bad': counter[PEER_BAD],
            'good': counter[PEER_GOOD],
            'never': counter[PEER_NEVER],
            'stale': counter[PEER_STALE],
            'total': len(self.peers),
        }

    def set_peer_statuses(self):
        '''Set peer statuses.'''
        cutoff = time.time() - STALE_SECS
        for peer in self.peers:
            if peer.bad:
                peer.status = PEER_BAD
            elif peer.last_good > cutoff:
                peer.status = PEER_GOOD
            elif peer.last_good:
                peer.status = PEER_STALE
            else:
                peer.status = PEER_NEVER

    def rpc_data(self):
        '''Peer data for the peers RPC method.'''
        self.set_peer_statuses()
        descs = ['good', 'stale', 'never', 'bad']

        def peer_data(peer):
            data = peer.serialize()
            data['status'] = descs[peer.status]
            return data

        def peer_key(peer):
            return (peer.bad, -peer.last_good)

        return [peer_data(peer) for peer in sorted(self.peers, key=peer_key)]

    def add_peers(self, peers, limit=2, check_ports=False, source=None):
        '''Add a limited number of peers that are not already present.'''
        retry = False
        new_peers = []
        for peer in peers:
            if not peer.is_public:
                continue
            matches = peer.matches(self.peers)
            if not matches:
                new_peers.append(peer)
            elif check_ports:
                for match in matches:
                    if match.check_ports(peer):
                        self.logger.info('ports changed for {}'.format(peer))
                        retry = True

        if new_peers:
            retry = True
            source = source or new_peers[0].source
            if limit:
                random.shuffle(new_peers)
                use_peers = new_peers[:limit]
            else:
                use_peers = new_peers
            for n, peer in enumerate(use_peers):
                self.logger.info(
                    'accepted new peer {:d}/{:d} {} from {} '.format(
                        n + 1, len(use_peers), peer, source))
            self.peers.update(use_peers)

        if retry:
            self.retry_event.set()

    def permit_new_onion_peer(self):
        '''Accept a new onion peer only once per random time interval.'''
        now = time.time()
        if now < self.permit_onion_peer_time:
            return False
        self.permit_onion_peer_time = now + random.randrange(0, 1200)
        return True

    async def on_add_peer(self, features, source_info):
        '''Add a peer (but only if the peer resolves to the source).'''
        if not source_info:
            self.log_info('ignored add_peer request: no source info')
            return False
        source = source_info[0]
        peers = Peer.peers_from_features(features, source)
        if not peers:
            self.log_info('ignored add_peer request: no peers given')
            return False

        # Just look at the first peer, require it
        peer = peers[0]
        host = peer.host
        if peer.is_tor:
            permit = self.permit_new_onion_peer()
            reason = 'rate limiting'
        else:
            try:
                infos = await self.loop.getaddrinfo(host,
                                                    80,
                                                    type=socket.SOCK_STREAM)
            except socket.gaierror:
                permit = False
                reason = 'address resolution failure'
            else:
                permit = any(source == info[-1][0] for info in infos)
                reason = 'source-destination mismatch'

        if permit:
            self.log_info('accepted add_peer request from {} for {}'.format(
                source, host))
            self.add_peers([peer], check_ports=True)
        else:
            self.log_warning(
                'rejected add_peer request from {} for {} ({})'.format(
                    source, host, reason))

        return permit

    def on_peers_subscribe(self, is_tor):
        '''Returns the server peers as a list of (ip, host, details) tuples.

        We return all peers we've connected to in the last day.
        Additionally, if we don't have onion routing, we return a few
        hard-coded onion servers.
        '''
        cutoff = time.time() - STALE_SECS
        recent = [
            peer for peer in self.peers
            if peer.last_good > cutoff and not peer.bad and peer.is_public
        ]
        onion_peers = []

        # Always report ourselves if valid (even if not public)
        peers = set(myself for myself in self.myselves
                    if myself.last_good > cutoff)

        # Bucket the clearnet peers and select up to two from each
        buckets = defaultdict(list)
        for peer in recent:
            if peer.is_tor:
                onion_peers.append(peer)
            else:
                buckets[peer.bucket()].append(peer)
        for bucket_peers in buckets.values():
            random.shuffle(bucket_peers)
            peers.update(bucket_peers[:2])

        # Add up to 20% onion peers (but up to 10 is OK anyway)
        random.shuffle(onion_peers)
        max_onion = 50 if is_tor else max(10, len(peers) // 4)

        peers.update(onion_peers[:max_onion])

        return [peer.to_tuple() for peer in peers]

    def import_peers(self):
        '''Import hard-coded peers from a file or the coin defaults.'''
        self.add_peers(self.myselves)

        # Add the hard-coded ones unless only returning self
        if self.env.peer_discovery != self.env.PD_SELF:
            coin_peers = self.env.coin.PEERS
            peers = [
                Peer.from_real_name(real_name, 'coins.py')
                for real_name in coin_peers
            ]
            self.add_peers(peers, limit=None)

    def ensure_future(self, coro, callback=None):
        '''Schedule the coro to be run.'''
        return self.controller.ensure_future(coro, callback=callback)

    async def main_loop(self):
        '''Main loop performing peer maintenance.  This includes

          1) Forgetting unreachable peers.
          2) Verifying connectivity of new peers.
          3) Retrying old peers at regular intervals.
        '''
        if self.env.peer_discovery != self.env.PD_ON:
            self.logger.info('peer discovery is disabled')
            return

        # Wait a few seconds after starting the proxy detection loop
        # for proxy detection to succeed
        self.ensure_future(self.proxy.auto_detect_loop())
        await self.proxy.tried_event.wait()

        self.logger.info(
            'beginning peer discovery; force use of proxy: {}'.format(
                self.env.force_proxy))

        while True:
            timeout = self.loop.call_later(WAKEUP_SECS, self.retry_event.set)
            await self.retry_event.wait()
            self.retry_event.clear()
            timeout.cancel()
            await self.retry_peers()

    def is_coin_onion_peer(self, peer):
        '''Return true if this peer is a hard-coded onion peer.'''
        return peer.is_tor and any(peer.host in real_name
                                   for real_name in self.env.coin.PEERS)

    async def retry_peers(self):
        '''Retry peers that are close to getting stale.'''
        # Exponential backoff of retries
        now = time.time()
        nearly_stale_time = (now - STALE_SECS) + WAKEUP_SECS * 2

        def should_retry(peer):
            # Retry a peer whose ports might have updated
            if peer.other_port_pairs:
                return True
            # Retry a good connection if it is about to turn stale
            if peer.try_count == 0:
                return peer.last_good < nearly_stale_time
            # Retry a failed connection if enough time has passed
            return peer.last_try < now - WAKEUP_SECS * 2**peer.try_count

        peers = [peer for peer in self.peers if should_retry(peer)]

        for peer in peers:
            peer.try_count += 1
            pairs = peer.connection_port_pairs()
            if peer.bad or not pairs:
                self.maybe_forget_peer(peer)
            else:
                self.retry_peer(peer, pairs)

    def retry_peer(self, peer, port_pairs):
        peer.last_try = time.time()
        kind, port = port_pairs[0]
        sslc = ssl.SSLContext(ssl.PROTOCOL_TLS) if kind == 'SSL' else None

        if self.env.force_proxy or peer.is_tor:
            # Only attempt a proxy connection if the proxy is up
            if not self.proxy.is_up():
                return
            create_connection = self.proxy.create_connection
        else:
            create_connection = self.loop.create_connection

        # Use our listening Host/IP for outgoing connections so our
        # peers see the correct source.
        host = self.env.cs_host(for_rpc=False)
        if isinstance(host, list):
            host = host[0]
        local_addr = (host, None) if host else None

        protocol_factory = partial(PeerSession, peer, self, kind)
        coro = create_connection(protocol_factory,
                                 peer.host,
                                 port,
                                 ssl=sslc,
                                 local_addr=local_addr)
        callback = partial(self.connection_done, peer, port_pairs)
        self.ensure_future(coro, callback)

    def connection_done(self, peer, port_pairs, future):
        '''Called when a connection attempt succeeds or fails.

        If failed, log it and try remaining port pairs.  If none,
        release the connection count semaphore.
        '''
        exception = future.exception()
        if exception:
            kind, port = port_pairs[0]
            self.logger.info('failed connecting to {} at {} port {:d} '
                             'in {:.1f}s: {}'.format(
                                 peer, kind, port,
                                 time.time() - peer.last_try, exception))
            port_pairs = port_pairs[1:]
            if port_pairs:
                self.retry_peer(peer, port_pairs)
            else:
                self.maybe_forget_peer(peer)

    def set_verification_status(self, peer, kind, good):
        '''Called when a verification succeeded or failed.'''
        now = time.time()
        if self.env.force_proxy or peer.is_tor:
            how = 'via {} over Tor'.format(kind)
        else:
            how = 'via {} at {}'.format(kind, peer.ip_addr)
        status = 'verified' if good else 'failed to verify'
        elapsed = now - peer.last_try
        self.log_info('{} {} {} in {:.1f}s'.format(status, peer, how, elapsed))

        if good:
            peer.try_count = 0
            peer.last_good = now
            peer.source = 'peer'
            # At most 2 matches if we're a host name, potentially several if
            # we're an IP address (several instances can share a NAT).
            matches = peer.matches(self.peers)
            for match in matches:
                if match.ip_address:
                    if len(matches) > 1:
                        self.peers.remove(match)
                elif peer.host in match.features['hosts']:
                    match.update_features_from_peer(peer)
        else:
            self.maybe_forget_peer(peer)

    def maybe_forget_peer(self, peer):
        '''Forget the peer if appropriate, e.g. long-term unreachable.'''
        if peer.last_good and not peer.bad:
            try_limit = 10
        else:
            try_limit = 3
        forget = peer.try_count >= try_limit

        if forget:
            desc = 'bad' if peer.bad else 'unreachable'
            self.logger.info('forgetting {} peer: {}'.format(desc, peer))
            self.peers.discard(peer)

        return forget
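
The failed-connection branch of should_retry() in Example #4 implements exponential backoff: a peer is retried only after WAKEUP_SECS * 2**try_count seconds have elapsed since its last attempt. A minimal standalone sketch of that schedule follows; the WAKEUP_SECS value here is a placeholder, since the real constant is defined elsewhere in the module and not shown above.

# Standalone illustration of the retry schedule used by should_retry().
WAKEUP_SECS = 5  # placeholder; the module's actual constant is not shown above

def retry_delay(try_count):
    # Minimum wait after the try_count-th consecutive failure, mirroring:
    # peer.last_try < now - WAKEUP_SECS * 2**peer.try_count
    return WAKEUP_SECS * 2 ** try_count

for try_count in range(1, 8):
    print('after failure {}: wait at least {}s'.format(
        try_count, retry_delay(try_count)))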