Code example #1
from collections import OrderedDict  # any insertion-ordered mapping works here


class BatchRequest(object):
    """Gathers the data for a batch of (filename, begin, length)
       requests and joins the chunks in the order they were added."""

    def __init__(self, parent, start):
        self.parent = parent
        self.numactive = 0
        self.start = start
        # ordered so get_result() can join chunks in request order
        self.requests = OrderedDict()

    def add_request(self, filename, begin, length):
        r = (filename, begin, length)
        assert r not in self.requests
        # the parent is expected to answer asynchronously; a synchronous
        # got_request() here would be clobbered by the assignment below
        self.parent._add_request(filename, begin, length, self)
        self.requests[r] = None
        self.numactive += 1

    def got_request(self, filename, begin, length, data):
        self.requests[(filename, begin, length)] = data
        self.numactive -= 1

    def get_result(self):
        # None until every outstanding request has been answered
        if self.numactive > 0:
            return None
        chunks = []
        for k in self.requests.itervalues():
            chunks.append(k)
        return ''.join(chunks)
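A minimal harness showing how a parent might drive a BatchRequest. The StubParent below is hypothetical: it stands in for whatever storage layer implements _add_request, queuing requests and answering them later, which mirrors the asynchronous contract the class assumes.

class StubParent(object):
    # hypothetical parent: queue requests, fulfill them on flush()
    def __init__(self):
        self.queued = []

    def _add_request(self, filename, begin, length, batch):
        self.queued.append((filename, begin, length, batch))

    def flush(self):
        for filename, begin, length, batch in self.queued:
            batch.got_request(filename, begin, length, 'x' * length)

parent = StubParent()
batch = BatchRequest(parent, start=0)
batch.add_request('a.dat', 0, 4)
batch.add_request('a.dat', 4, 2)
print(batch.get_result())  # None: two requests still outstanding
parent.flush()
print(batch.get_result())  # 'xxxxxx': chunks joined in request order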
Code example #2
    def __init__(self, make_upload, downloader, choker,
                 numpieces, ratelimiter,
                 rawserver, config, my_id, add_task, infohash, context,
                 addcontactfunc, reported_port, tracker_ips, log_prefix):
        """
            @param my_id: my peer id.
            @param tracker_ips: list of tracker ip addresses.
               ConnectionManager does not drop connections from the tracker.
               This allows trackers to perform NAT checks even when there
               are max_allow_in connections.
            @param log_prefix: string used as the prefix for all
               log entries generated by the ConnectionManager and its
               created Connectors.
        """
        self.make_upload = make_upload
        self.downloader = downloader
        self.choker = choker
        # aaargh
        self.piece_size = downloader.storage.piece_size
        self.numpieces = numpieces
        self.ratelimiter = ratelimiter
        self.rawserver = rawserver
        self.my_id = my_id
        self.config = config
        self.add_task = add_task
        self.infohash = infohash
        self.context = context
        self.addcontact = addcontactfunc
        self.reported_port = reported_port
        self.everinc = False
        self.tracker_ips = tracker_ips
        self.log_prefix = log_prefix
        self.closed = False        

        # submitted
        self.pending_connections = {}

        # transport connected
        self.connectors = set()

        # protocol active
        # we do a lot of iterating and few mutations, so use a list
        self.complete_connectors = [] # set()

        # use a dict for a little semi-randomness        
        self.spares = {} # OrderedDict()
        self.cached_peers = OrderedDict()
        self.cache_limit = 300

        self.connector_ips = {} # { ip: count }
        self.connector_ids = set()

        self.reopen(reported_port)        
        self.banned = set()
        self.add_task(config['keepalive_interval'], self.send_keepalives)
        self.add_task(config['pex_interval'], self.send_pex)

        self.throttled = False
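The constructor above is an earlier revision. The full class in the next example adds a private flag that suppresses peer exchange for private torrents, replaces the plain { ip: count } dicts with DictWithInts, and keeps handles on the keepalive and PEX tasks (_ka_task, _pex_task) so cleanup() can cancel them.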
Code example #3
class ConnectionManager(InternetSubscriber):
    def __init__(self, make_upload, downloader, choker, numpieces, ratelimiter,
                 rawserver, config, private, my_id, add_task, infohash,
                 context, addcontactfunc, reported_port, tracker_ips,
                 log_prefix):
        """
            @param downloader: MultiDownload for this torrent.
            @param my_id: my peer id.
            @param tracker_ips: list of tracker ip addresses.
               ConnectionManager does not drop connections from the tracker.
               This allows trackers to perform NAT checks even when there
               are max_allow_in connections.
            @param log_prefix: string used as the prefix for all
               log entries generated by the ConnectionManager and its
               created Connectors.
        """
        self.make_upload = make_upload
        self.downloader = downloader
        self.choker = choker
        # aaargh
        self.piece_size = downloader.storage.piece_size
        self.numpieces = numpieces
        self.ratelimiter = ratelimiter
        self.rawserver = rawserver
        self.my_id = my_id
        self.private = private
        self.config = config
        self.add_task = add_task
        self.infohash = infohash
        self.context = context
        self.addcontact = addcontactfunc
        self.reported_port = reported_port
        self.everinc = False
        self.tracker_ips = tracker_ips
        self.log_prefix = log_prefix
        self.logger = logging.getLogger(self.log_prefix)
        self.closed = False

        # submitted
        self.pending_connections = {}

        # transport connected
        self.connectors = set()

        # protocol active
        # we do a lot of iterating and few mutations, so use a list
        self.complete_connectors = []  # set()

        # use a dict for a little semi-randomness
        self.spares = {}  # OrderedDict()
        self.cached_peers = OrderedDict()
        self.cache_limit = 300

        self.connector_ips = DictWithInts()
        self.connector_ids = DictWithInts()

        self.banned = set()

        self._ka_task = self.add_task(config['keepalive_interval'],
                                      self.send_keepalives)
        self._pex_task = None
        if not self.private:
            self._pex_task = self.add_task(config['pex_interval'],
                                           self.send_pex)

        self.reopen(reported_port)

    def cleanup(self):
        if not self.closed:
            self.close_connections()
        del self.context
        self.cached_peers.clear()
        if self._ka_task.active():
            self._ka_task.cancel()
        if self._pex_task and self._pex_task.active():
            self._pex_task.cancel()

    def reopen(self, port):
        self.closed = False
        self.reported_port = port
        self.unthrottle_connections()
        for addr in self.cached_peers:
            self._fire_cached_connection(addr)
        self.rawserver.internet_watcher.add_subscriber(self)

    def internet_active(self):
        for addr in self.cached_peers.iterkeys():
            self._fire_cached_connection(addr)

    def remove_addr_from_cache(self, addr):
        # could have been an incoming connection
        # or could have been dropped by the cache limit
        if addr in self.cached_peers:
            del self.cached_peers[addr]

    def try_one_connection(self):
        keys = self.cached_peers.keys()
        if not keys:
            return False
        addr = random.choice(keys)
        self._fire_cached_connection(addr)
        return True

    def _fire_cached_connection(self, addr):
        v = self.cached_peers[addr]
        complete, (id, handler, a, kw) = v
        return self._start_connection(addr, id, handler, *a, **kw)

    def cache_complete_peer(self, addr, pid, handler, *a, **kw):
        self.cache_peer(addr, pid, handler, 1, *a, **kw)

    def cache_incomplete_peer(self, addr, pid, handler, *a, **kw):
        self.cache_peer(addr, pid, handler, 0, *a, **kw)

    def cache_peer(self, addr, pid, handler, complete, *a, **kw):
        # obey the cache size limit
        if (addr not in self.cached_peers
                and len(self.cached_peers) >= self.cache_limit):
            for k, v in self.cached_peers.iteritems():
                if not v[0]:
                    del self.cached_peers[k]
                    break
            else:
                # cache full of completes, delete a random peer.
                # yes, this can cache an incomplete when the cache is full of
                # completes, but only 1 because of the filter above.
                oldaddr = self.cached_peers.keys()[0]
                del self.cached_peers[oldaddr]
        elif not complete:
            if addr in self.cached_peers and self.cached_peers[addr][0]:
                # don't overwrite a complete with an incomplete.
                return
        self.cached_peers[addr] = (complete, (pid, handler, a, kw))

    def send_keepalives(self):
        self._ka_task = self.add_task(self.config['keepalive_interval'],
                                      self.send_keepalives)
        for c in self.complete_connectors:
            c.send_keepalive()

    def send_pex(self):
        self._pex_task = self.add_task(self.config['pex_interval'],
                                       self.send_pex)
        pex_set = set()
        for c in self.complete_connectors:
            if c.listening_port:
                pex_set.add((c.ip, c.listening_port))
        for c in self.complete_connectors:
            c.send_pex(pex_set)

    def hashcheck_succeeded(self, i):
        for c in self.complete_connectors:
            # should we send a have message if peer already has the piece?
            # yes! it is low bandwidth and useful for that peer.
            c.send_have(i)

    def find_connection_in_common(self, addr):
        for c in self.complete_connectors:
            if addr in c.remote_pex_set:
                return c

    # returns False if the connection info has been pushed on to self.spares
    # other filters and a successful connection return True
    def start_connection(self, addr, id=None, encrypt=False, lan=False):
        """@param addr: domain name/ip address and port pair.
           @param id: peer id.
           """
        return self._start_connection(addr,
                                      id,
                                      GaurdedInitialConnection,
                                      encrypt=encrypt,
                                      lan=lan)

    def start_http_connection(self, url):
        r = urlparse.urlparse(url)
        host = r[1]
        if ':' in host:
            host, port = host.split(':')
            port = int(port)
        else:
            port = 80
        df = self.rawserver.gethostbyname(host)
        df.addCallback(self._connect_http, port, url)
        df.addLogback(self.logger.warning, "Resolve failed")

    def _connect_http(self, ip, port, url):
        self._start_connection((ip, port),
                               url,
                               HTTPInitialConnection,
                               urgent=True)

    def _start_connection(self, addr, pid, handler, *a, **kw):
        """@param addr: domain name/ip address and port pair.
           @param pid: peer id.
           """
        if self.closed:
            return True
        if addr[0] in self.banned:
            return True
        if pid == self.my_id:
            return True

        for v in self.connectors:
            if pid and v.id == pid:
                return True
            if self.config['one_connection_per_ip'] and v.ip == addr[0]:
                return True

        total_outstanding = len(self.connectors)
        # it's possible the pending connections could eventually complete,
        # so we have to account for those when enforcing max_initiate
        total_outstanding += len(self.pending_connections)

        if total_outstanding >= self.config['max_initiate']:
            self.spares[(addr, pid)] = (handler, a, kw)
            return False

        # if these fail, I'm getting a very weird addr object
        assert isinstance(addr, tuple)
        assert isinstance(addr[0], str)
        assert isinstance(addr[1], int)
        if ONLY_LOCAL and addr[0] != "127.0.0.1" and not addr[0].startswith(
                "192.168") and addr[1] != 80:
            return True

        if GLOBAL_FILTER and not GLOBAL_FILTER(addr[0], addr[1], "out"):
            return True

        if addr not in self.cached_peers:
            self.cache_incomplete_peer(addr, pid, handler, *a, **kw)

        # sometimes we try to connect to a peer we're already trying to
        # connect to
        #assert addr not in self.pending_connections
        if addr in self.pending_connections:
            return True

        kw['log_prefix'] = self.log_prefix
        timeout = 30
        if use_timeout_order:
            timeout = timeout_order[0]
        kw.setdefault('timeout', timeout)
        h = handler(self, pid, *a, **kw)
        self.pending_connections[addr] = (h, (addr, pid, handler, a, kw))
        urgent = kw.pop('urgent', False)
        connector = self.rawserver.start_connection(
            addr,
            h,
            self.context,
            # we'll handle timeouts.
            # not so fond of this.
            timeout=None,
            urgent=urgent)
        h.connector = connector

        return True

    def _resubmit_connection(self, addr):
        # we leave it on pending_connections.
        # so the standard connection_failed handling occurs.
        h, info = self.pending_connections[addr]
        addr, pid, handler, a, kw = info

        self.spares[(addr, pid)] = (handler, a, kw)

    def _cancel_connection(self, addr):
        if addr not in self.pending_connections:
            # already made
            return

        # we leave it on pending_connections.
        # so the standard connection_failed handling occurs.
        h, info = self.pending_connections[addr]
        addr, pid, handler, a, kw = info

        if use_timeout_order and h.timeout < timeout_order[-1]:
            for t in timeout_order:
                if t > h.timeout:
                    h.timeout = t
                    break
            else:
                h.timeout = timeout_order[-1]
            # this feels odd
            kw['timeout'] = h.timeout
            self.spares[(addr, pid)] = (handler, a, kw)

        # do this last, since twisted might fire the event handler from inside
        # the function
        # HMM:
        # should be stopConnecting, but I've seen this fail.
        # close does the same thing, but disconnects in the case where the
        # connection was made. Not sure how that occurs without add being in
        # self.pending_connections
        # Maybe this was fixed recently in CRLR.
        #h.connector.stopConnecting()
        h.connector.close()

    def connection_handshake_completed(self, connector):

        self.connector_ips.add(connector.ip)
        self.connector_ids.add(connector.id)

        self.complete_connectors.append(connector)

        connector.upload = self.make_upload(connector)
        connector.download = self.downloader.make_download(connector)
        self.choker.connection_made(connector)
        if connector.uses_dht:
            connector.send_port(self.reported_port)

        if self.config['resolve_hostnames']:
            df = self.rawserver.gethostbyaddr(connector.ip)

            def save_hostname(hostname_tuple):
                hostname, aliases, ips = hostname_tuple
                connector.hostname = hostname

            df.addCallback(save_hostname)
            df.addErrback(lambda fuckoff: None)

    def got_port(self, connector):
        if self.addcontact and connector.uses_dht and \
           connector.dht_port != None:
            self.addcontact(connector.connection.ip, connector.dht_port)

    def ever_got_incoming(self):
        return self.everinc

    def how_many_connections(self):
        return len(self.complete_connectors)

    def replace_connection(self):
        if self.closed:
            return
        while self.spares:
            k, v = self.spares.popitem()
            addr, id = k
            handler, a, kw = v
            started = self._start_connection(addr, id, handler, *a, **kw)
            if not started:
                # start_connection decided to push this connection back on to
                # self.spares because a limit was hit. break now or loop
                # forever
                break

    def throttle_connections(self):
        self.throttled = True
        for c in iter_rand_pos(self.connectors):
            c.connection.pause_reading()

    def unthrottle_connections(self):
        self.throttled = False
        for c in iter_rand_pos(self.connectors):
            c.connection.resume_reading()
            # arg. resume actually flushes the buffers in iocpreactor, so
            # we have to check the state constantly
            if self.throttled:
                break

    def close_connection(self, id):
        for c in self.connectors:
            if c.id == id and not c.closed:
                c.connection.close()
                c.closed = True

    def close_connections(self):
        self.rawserver.internet_watcher.remove_subscriber(self)
        self.closed = True

        pending = self.pending_connections.values()
        # drop connections which could be made after we're not interested
        for h, info in pending:
            h.connector.close()

        for c in self.connectors:
            if not c.closed:
                c.connection.close()
                c.closed = True

    def singleport_connection(self, connector):
        """hand-off from SingleportListener once the infohash is known and
           thus we can map a connection on to a particular Torrent."""

        if connector.ip in self.banned:
            return False
        m = self.config['max_allow_in']
        if (m and len(self.connectors) >= m
                and connector.ip not in self.tracker_ips):
            return False
        self._add_connection(connector)
        if self.closed:
            return False
        connector.set_parent(self)
        connector.connection.context = self.context
        return True

    def _add_connection(self, connector):
        self.connectors.add(connector)

        if self.closed:
            connector.connection.close()
        elif self.throttled:
            connector.connection.pause_reading()

    def ban(self, ip):
        self.banned.add(ip)

    def connection_lost(self, connector):
        assert isinstance(connector, Connector)
        self.connectors.remove(connector)

        if self.ratelimiter:
            self.ratelimiter.dequeue(connector)

        if connector.complete:
            self.connector_ips.remove(connector.ip)
            self.connector_ids.remove(connector.id)

            self.complete_connectors.remove(connector)
            self.choker.connection_lost(connector)
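ConnectionManager leans on a few helpers that never appear in these excerpts. Two of them, DictWithInts and iter_rand_pos, can be inferred from their call sites; the sketches below are assumptions consistent with that usage (reference counting for connector_ips and connector_ids, random-start iteration for throttling), not the project's actual implementations.

import random


class DictWithInts(object):
    # sketch of a dict-backed multiset: key -> reference count, so an ip
    # stays present until every connector using it has been removed
    def __init__(self):
        self._counts = {}

    def add(self, key):
        self._counts[key] = self._counts.get(key, 0) + 1

    def remove(self, key):
        self._counts[key] -= 1
        if self._counts[key] == 0:
            del self._counts[key]


def iter_rand_pos(collection):
    # sketch: yield items starting at a random offset so repeated
    # throttle/unthrottle passes don't always hit the same connectors first
    items = list(collection)
    if items:
        start = random.randrange(len(items))
        for i in range(len(items)):
            yield items[(i + start) % len(items)]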