def is_snubbed(self):
    """Report whether this peer has stalled on us.

    If we are interested, unchoked, and have received nothing for longer
    than the downloader's snub_time, withdraw all outstanding requests
    and handle the silence exactly as if the peer had choked us.
    Returns True when the peer counts as snubbed.
    """
    if (self.interested and not self.choked
            and clock() - self.last2 > self.downloader.snub_time):
        # Cancel everything still on the wire for this peer, then
        # treat it just like a choke.
        for request in self.active_requests:
            self.connection.send_cancel(*request)
        self.got_choke()
    return clock() - self.last > self.downloader.snub_time
def is_snubbed(self):
    """Return True when this peer has gone quiet for longer than snub_time.

    Side effect: if we are interested and unchoked but starved, every
    outstanding request is cancelled and the silence is handled as a choke.
    """
    if (
        self.interested
        and not self.choked
        and clock() - self.last2 > self.downloader.snub_time
    ):
        for index, begin, length in self.active_requests:
            self.connection.send_cancel(index, begin, length)
        self.got_choke()  # treat it just like a choke
    return clock() - self.last > self.downloader.snub_time
def _round_robin(self):
    """Periodic choker tick: super-seed HAVE dispatch, rotation, rechoke.

    Re-schedules itself every 5 seconds. In super-seed mode it hands out
    fresh HAVE messages chosen by the picker; otherwise it periodically
    rotates the connection list so a different choked-but-interested peer
    gets priority, then recomputes chokes.
    """
    self.schedule(self._round_robin, 5)
    if self.super_seed:
        cons = list(range(len(self.connections)))
        to_close = []
        # How many more uploads we still want to hand pieces to.
        count = self.config['min_uploads'] - self.last_preferred
        if count > 0:  # optimization
            shuffle(cons)
        for c in cons:
            i = self.picker.next_have(self.connections[c], count > 0)
            if i is None:
                continue
            if i < 0:
                # Picker signalled this connection should be dropped.
                to_close.append(self.connections[c])
                continue
            self.connections[c].send_have(i)
            count -= 1
        for c in to_close:
            c.close()
    if self.last_round_robin + self.round_robin_period < clock():
        self.last_round_robin = clock()
        for i in range(1, len(self.connections)):
            c = self.connections[i]
            u = c.get_upload()
            if u.is_choked() and u.is_interested():
                # Rotate the list so this peer moves to the front.
                self.connections = self.connections[
                    i:] + self.connections[:i]
                break
    self._rechoke()
def _round_robin(self):
    """Periodic choker tick: super-seed HAVE dispatch, rotation, rechoke.

    Re-schedules itself every 5 seconds. In super-seed mode it hands out
    fresh HAVE messages chosen by the picker; otherwise it periodically
    rotates the connection list so a different choked-but-interested peer
    gets priority, then recomputes chokes.

    Fixes for Python 3: shuffle() needs a mutable sequence, so the range
    is materialized with list(); xrange no longer exists.
    """
    self.schedule(self._round_robin, 5)
    if self.super_seed:
        # list() is required: random.shuffle on a bare range() raises
        # TypeError under Python 3.
        cons = list(range(len(self.connections)))
        to_close = []
        count = self.config['min_uploads'] - self.last_preferred
        if count > 0:  # optimization
            shuffle(cons)
        for c in cons:
            i = self.picker.next_have(self.connections[c], count > 0)
            if i is None:
                continue
            if i < 0:
                # Picker signalled this connection should be dropped.
                to_close.append(self.connections[c])
                continue
            self.connections[c].send_have(i)
            count -= 1
        for c in to_close:
            c.close()
    if self.last_round_robin + self.round_robin_period < clock():
        self.last_round_robin = clock()
        for i in range(1, len(self.connections)):  # xrange -> range (py3)
            c = self.connections[i]
            u = c.get_upload()
            if u.is_choked() and u.is_interested():
                # Rotate the list so this peer moves to the front.
                self.connections = self.connections[i:] + self.connections[:i]
                break
    self._rechoke()
def __init__(self, storage, picker, backlog, max_rate_period, numpieces, chunksize, measurefunc, snub_time, kickbans_ok, kickfunc, banfunc):
    """Per-torrent download coordinator.

    storage: storage wrapper (piece_length / request_size read here).
    picker: piece-selection strategy object.
    backlog: max outstanding requests per peer.
    max_rate_period: window in seconds for rate measurement.
    numpieces: total number of pieces in the torrent.
    chunksize: bytes requested per chunk.
    measurefunc: callback fed each received byte count.
    snub_time: seconds of silence before a peer counts as snubbed.
    kickbans_ok: whether kicking/banning misbehaving peers is allowed.
    kickfunc / banfunc: callbacks that kick or ban a connection.
    """
    self.storage = storage
    self.picker = picker
    self.backlog = backlog
    self.max_rate_period = max_rate_period
    self.measurefunc = measurefunc
    self.totalmeasure = Measure(max_rate_period * storage.piece_length / storage.request_size)
    self.numpieces = numpieces
    self.chunksize = chunksize
    self.snub_time = snub_time
    self.kickfunc = kickfunc
    self.banfunc = banfunc
    self.disconnectedseeds = {}
    self.downloads = []
    self.perip = {}
    self.gotbaddata = {}
    self.kicked = {}
    self.banned = {}
    self.kickbans_ok = kickbans_ok
    self.kickbans_halted = False
    self.super_seeding = False
    self.endgamemode = False
    self.endgame_queued_pieces = []
    self.all_requests = []
    # Bytes thrown away (duplicates / unrequested); was the long literal
    # 0L, which is a SyntaxError under Python 3.
    self.discarded = 0
    # self.download_rate = 25000  # 25K/s test rate
    self.download_rate = 0
    self.bytes_requested = 0
    self.last_time = clock()
    self.queued_out = {}
    self.requeueing = False
    self.paused = False
def __init__(self, storage, picker, backlog, max_rate_period, numpieces, chunksize, measurefunc, snub_time, kickbans_ok, kickfunc, banfunc):
    """Per-torrent download coordinator.

    storage: storage wrapper (piece_length / request_size read here).
    picker: piece-selection strategy object.
    backlog: max outstanding requests per peer.
    max_rate_period: window in seconds for rate measurement.
    numpieces: total number of pieces in the torrent.
    chunksize: bytes requested per chunk.
    measurefunc: callback fed each received byte count.
    snub_time: seconds of silence before a peer counts as snubbed.
    kickbans_ok: whether kicking/banning misbehaving peers is allowed.
    kickfunc / banfunc: callbacks that kick or ban a connection.
    """
    self.storage = storage
    self.picker = picker
    self.backlog = backlog
    self.max_rate_period = max_rate_period
    self.measurefunc = measurefunc
    # NOTE(review): under Python 3 the '/' here is true division, so the
    # Measure window may be a float — presumably fine, but confirm.
    self.totalmeasure = Measure(max_rate_period * storage.piece_length / storage.request_size)
    self.numpieces = numpieces
    self.chunksize = chunksize
    self.snub_time = snub_time
    self.kickfunc = kickfunc
    self.banfunc = banfunc
    self.disconnectedseeds = {}
    self.downloads = []
    self.perip = {}
    self.gotbaddata = {}
    self.kicked = {}
    self.banned = {}
    self.kickbans_ok = kickbans_ok
    self.kickbans_halted = False
    self.super_seeding = False
    self.endgamemode = False
    self.endgame_queued_pieces = []
    self.all_requests = []
    # Bytes thrown away (duplicate / unrequested data).
    self.discarded = 0
    # self.download_rate = 25000  # 25K/s test rate
    self.download_rate = 0
    self.bytes_requested = 0
    self.last_time = clock()
    self.queued_out = {}
    self.requeueing = False
    self.paused = False
def old_style_init(self):
    """Run the queued initialization tasks, reporting progress via statusfunc.

    Each entry of self.initialize_tasks is (message, fraction, init, step):
    init() decides whether the task runs; step() returns a progress value,
    or None when the task is finished.

    Returns False if the shutdown flag gets set mid-task, True otherwise.
    """
    while self.initialize_tasks:
        # 'step' was originally named 'next', shadowing the builtin.
        msg, done, init, step = self.initialize_tasks.pop(0)
        if init():
            self.statusfunc(activity=msg, fractionDone=done)
            t = clock() + STATS_INTERVAL
            x = 0
            while x is not None:
                if t < clock():
                    # Throttle status updates to one per STATS_INTERVAL.
                    t = clock() + STATS_INTERVAL
                    self.statusfunc(fractionDone=x)
                self.unpauseflag.wait()  # block here while paused
                if self.flag.isSet():
                    return False  # shutdown requested
                x = step()
    self.statusfunc(fractionDone=0)
    return True
def num_disconnected_seeds(self):
    """Return how many seeds disconnected recently, expiring stale records.

    Entries older than EXPIRE_TIME are dropped before counting.
    """
    # First expire old ones. 'peerid' avoids shadowing the builtin id();
    # iterate over a snapshot so the dict is never read while shrinking.
    expired = []
    for peerid, t in list(self.disconnectedseeds.items()):
        if clock() - t > EXPIRE_TIME:  # expire old seeds after so long
            expired.append(peerid)
    for peerid in expired:
        # self.picker.seed_disappeared()
        del self.disconnectedseeds[peerid]
    return len(self.disconnectedseeds)
def num_disconnected_seeds(self):
    """Count recently-disconnected seeds, dropping entries older than EXPIRE_TIME."""
    # first expire old ones
    expired = []
    for id, t in list(self.disconnectedseeds.items()):
        if clock() - t > EXPIRE_TIME:  #Expire old seeds after so long
            expired.append(id)
    for id in expired:
        # self.picker.seed_disappeared()
        del self.disconnectedseeds[id]
    return len(self.disconnectedseeds)
def next_have(self, connection, looser_upload):
    """Pick the next piece to advertise (HAVE) to a peer while super-seeding.

    Returns the piece index to announce, None to announce nothing yet,
    or -1 to signal that the connection should be closed.
    """
    if self.seed_time is None:
        self.seed_time = clock()
        return None
    if clock(
    ) < self.seed_time + 10:  # wait 10 seconds after seeing the first peers
        return None  # to give time to grab have lists
    if not connection.upload.super_seeding:
        return None
    olddl = self.seed_connections.get(connection)
    if olddl is None:
        # New connection: see if this IP was seen before (reconnect).
        ip = connection.get_ip()
        olddl = self.past_ips.get(ip)
        if olddl is not None:  # peer reconnected
            self.seed_connections[connection] = olddl
    if olddl is not None:
        if looser_upload:
            num = 1  # send a new have even if it hasn't spread that piece elsewhere
        else:
            num = 2
        if self.seed_got_haves[olddl] < num:
            # The last piece we fed this peer hasn't propagated enough yet.
            return None
        if not connection.upload.was_ever_interested:  # it never downloaded it?
            connection.upload.skipped_count += 1
            if connection.upload.skipped_count >= 3:  # probably another stealthed seed
                return -1  # signal to close it
    for tier in self.interests:
        for piece in tier:
            if not connection.download.have[piece]:
                seedint = self.level_in_interests[piece]
                self.level_in_interests[
                    piece] += 1  # tweak it up one, so you don't duplicate effort
                if seedint == len(self.interests) - 1:
                    self.interests.append([])
                self._shift_over(piece, self.interests[seedint],
                                 self.interests[seedint + 1])
                self.seed_got_haves[piece] = 0  # reset this
                self.seed_connections[connection] = piece
                connection.upload.seed_have_list.append(piece)
                return piece
    return -1  # something screwy; terminate connection
def send_unchoke(self):
    """Send UNCHOKE to the peer, or cancel a queued-but-unsent CHOKE.

    Records the unchoke time in self.just_unchoked so the rate limiter
    can measure request latency later; 0 means "do not time this one"
    (partial message pending, peer not interested, or requests active).
    """
    if self.send_choke_queued:
        # The queued choke never went out, so choke + unchoke cancel out.
        self.send_choke_queued = False
        if DEBUG:
            print('CHOKE SUPPRESSED')  # fixed: was a Python 2 print statement
    else:
        self._send_message(UNCHOKE)
        if (
            self.partial_message or self.just_unchoked is None
            or not self.upload.interested or self.download.active_requests
        ):
            self.just_unchoked = 0
        else:
            self.just_unchoked = clock()
def __init__(self, config, schedule, picker, done=lambda: False): self.config = config self.round_robin_period = config['round_robin_period'] self.schedule = schedule self.picker = picker self.connections = [] self.last_preferred = 0 self.last_round_robin = clock() self.done = done self.super_seed = False self.paused = False schedule(self._round_robin, 5)
def __init__(self, config, schedule, picker, done = lambda: False):
    """Choker state: connection list, rotation timing, super-seed flag.

    Immediately schedules the first _round_robin tick 5 seconds out.
    """
    self.config = config
    self.round_robin_period = config['round_robin_period']
    self.schedule = schedule
    self.picker = picker
    self.connections = []
    self.last_preferred = 0
    self.last_round_robin = clock()
    self.done = done
    self.super_seed = False
    self.paused = False
    schedule(self._round_robin, 5)
def got_piece(self, index, begin, piece):
    """Handle an incoming block of piece data from this peer.

    Returns True when the whole piece is now complete and verified,
    False otherwise (including when the data was unrequested/duplicate).
    """
    length = len(piece)
    try:
        self.active_requests.remove((index, begin, length))
    except ValueError:
        # We never asked for this block (or already cancelled it).
        self.downloader.discarded += length
        return False
    if self.downloader.endgamemode:
        self.downloader.all_requests.remove((index, begin, length))
    self.last = clock()
    self.last2 = clock()
    self.measure.update_rate(length)
    self.downloader.measurefunc(length)
    if not self.downloader.storage.piece_came_in(index, begin, piece,
                                                 self.guard):
        # Hash check (or storage) rejected the piece.
        self.downloader.piece_flunked(index)
        return False
    if self.downloader.storage.do_I_have(index):
        self.downloader.picker.complete(index)
    if self.downloader.endgamemode:
        # In endgame every peer may hold a duplicate request for this
        # block; cancel it everywhere and refresh their request sets.
        for d in self.downloader.downloads:
            if d is not self:
                if d.interested:
                    if d.choked:
                        assert not d.active_requests
                        d.fix_download_endgame()
                    else:
                        try:
                            d.active_requests.remove(
                                (index, begin, length))
                        except ValueError:
                            continue
                        d.connection.send_cancel(index, begin, length)
                        d.fix_download_endgame()
                else:
                    assert not d.active_requests
    self._request_more()
    self.downloader.check_complete(index)
    return self.downloader.storage.do_I_have(index)
def send_unchoke(self):
    """Send UNCHOKE, or swallow it against a still-queued CHOKE.

    self.just_unchoked gets the unchoke timestamp for later latency
    measurement, or 0 when timing would be meaningless (message partially
    sent, peer uninterested, or requests already active).
    """
    if self.send_choke_queued:
        # A choke was queued but never sent; this unchoke neutralizes it.
        self.send_choke_queued = False
        if DEBUG:
            print('CHOKE SUPPRESSED')  # py3 fix: print is a function
    else:
        self._send_message(UNCHOKE)
        if (self.partial_message or self.just_unchoked is None
                or not self.upload.interested or self.download.active_requests):
            self.just_unchoked = 0
        else:
            self.just_unchoked = clock()
def next_have(self, connection, looser_upload):
    """Choose the next HAVE to announce to a peer in super-seed mode.

    Returns a piece index, None (nothing to announce yet), or -1
    (terminate the connection).
    """
    if self.seed_time is None:
        self.seed_time = clock()
        return None
    if clock() < self.seed_time + 10:  # wait 10 seconds after seeing the first peers
        return None  # to give time to grab have lists
    if not connection.upload.super_seeding:
        return None
    olddl = self.seed_connections.get(connection)
    if olddl is None:
        # Unknown connection — check whether this IP reconnected.
        ip = connection.get_ip()
        olddl = self.past_ips.get(ip)
        if olddl is not None:  # peer reconnected
            self.seed_connections[connection] = olddl
    if olddl is not None:
        if looser_upload:
            num = 1  # send a new have even if it hasn't spread that piece elsewhere
        else:
            num = 2
        if self.seed_got_haves[olddl] < num:
            # Previous piece has not propagated enough; hold off.
            return None
        if not connection.upload.was_ever_interested:  # it never downloaded it?
            connection.upload.skipped_count += 1
            if connection.upload.skipped_count >= 3:  # probably another stealthed seed
                return -1  # signal to close it
    for tier in self.interests:
        for piece in tier:
            if not connection.download.have[piece]:
                seedint = self.level_in_interests[piece]
                self.level_in_interests[piece] += 1  # tweak it up one, so you don't duplicate effort
                if seedint == len(self.interests) - 1:
                    self.interests.append([])
                self._shift_over(piece, self.interests[seedint],
                                 self.interests[seedint + 1])
                self.seed_got_haves[piece] = 0  # reset this
                self.seed_connections[connection] = piece
                connection.upload.seed_have_list.append(piece)
                return piece
    return -1  # something screwy; terminate connection
def got_piece(self, index, begin, piece):
    """Accept one block of piece data; return True when the piece verifies complete.

    Unrequested or already-cancelled blocks are counted as discarded and
    rejected. In endgame mode the duplicate requests held by other peers
    are cancelled once the block arrives.
    """
    length = len(piece)
    try:
        self.active_requests.remove((index, begin, length))
    except ValueError:
        # Not something we were waiting for — count and drop it.
        self.downloader.discarded += length
        return False
    if self.downloader.endgamemode:
        self.downloader.all_requests.remove((index, begin, length))
    self.last = clock()
    self.last2 = clock()
    self.measure.update_rate(length)
    self.downloader.measurefunc(length)
    if not self.downloader.storage.piece_came_in(index, begin, piece, self.guard):
        # Storage/hash check failed for this piece.
        self.downloader.piece_flunked(index)
        return False
    if self.downloader.storage.do_I_have(index):
        self.downloader.picker.complete(index)
    if self.downloader.endgamemode:
        for d in self.downloader.downloads:
            if d is not self:
                if d.interested:
                    if d.choked:
                        assert not d.active_requests
                        d.fix_download_endgame()
                    else:
                        try:
                            d.active_requests.remove((index, begin, length))
                        except ValueError:
                            continue
                        d.connection.send_cancel(index, begin, length)
                        d.fix_download_endgame()
                else:
                    assert not d.active_requests
    self._request_more()
    self.downloader.check_complete(index)
    return self.downloader.storage.do_I_have(index)
def expire_downloaders(self):
    """Sweep out peers not seen since the last pass, prune empty torrents, reschedule.

    Peers whose last-seen time predates the previous sweep are deleted.
    Unless keep_dead is set, torrents left with no peers (and not on the
    allowed list) are removed entirely. Finally the sweep re-queues itself.
    """
    for infohash in list(self.times.keys()):
        for peer_id, last_seen in list(self.times[infohash].items()):
            if last_seen < self.prevtime:
                self.delete_peer(infohash, peer_id)
    self.prevtime = clock()
    if self.keep_dead != 1:
        for infohash, peers in list(self.downloads.items()):
            if len(peers) == 0 and (self.allowed is None
                                    or infohash not in self.allowed):
                del self.times[infohash]
                del self.downloads[infohash]
                del self.seedcount[infohash]
    self.rawserver.add_task(self.expire_downloaders,
                            self.timeout_downloaders_interval)
def queue_limit(self):
    """Return how many more chunks may be requested under the rate cap.

    With no rate cap configured the limit is effectively infinite. The
    outstanding-bytes budget is decayed by elapsed time; when it dips
    below zero, peers parked in queued_out get a chance to request more.
    """
    if not self.download_rate:
        return 10e10  # that's a big queue!
    now = clock()
    self.bytes_requested -= (now - self.last_time) * self.download_rate
    self.last_time = now
    if not self.requeueing and self.queued_out and self.bytes_requested < 0:
        # Budget is available again: wake the parked downloads in
        # random order, guarding against re-entry via requeueing.
        self.requeueing = True
        pending = list(self.queued_out.keys())
        shuffle(pending)
        self.queued_out = {}
        for download in pending:
            download._request_more()
        self.requeueing = False
    ceiling = 5 * self.download_rate
    if -self.bytes_requested > ceiling:
        # Cap the accumulated credit at 5 seconds' worth of rate.
        self.bytes_requested = -ceiling
    return max(int(-self.bytes_requested / self.chunksize), 0)
def queue_limit(self): if not self.download_rate: return 10e10 # that's a big queue! t = clock() self.bytes_requested -= (t - self.last_time) * self.download_rate self.last_time = t if not self.requeueing and self.queued_out and self.bytes_requested < 0: self.requeueing = True q = self.queued_out.keys() shuffle(q) self.queued_out = {} for d in q: d._request_more() self.requeueing = False if -self.bytes_requested > 5*self.download_rate: self.bytes_requested = -5*self.download_rate return max(int(-self.bytes_requested/self.chunksize),0)
def got_request(self, i, p, l):
    """Forward a peer request to the uploader; report unchoke-to-request latency.

    When this is the first request after an unchoke we timed, feed the
    elapsed time to the rate limiter and clear the timestamp.
    """
    self.upload.got_request(i, p, l)
    if not self.just_unchoked:
        return
    self.connecter.ratelimiter.ping(clock() - self.just_unchoked)
    self.just_unchoked = 0
def add_data(self, infohash, event, ip, paramslist):
    """Process one announce from a peer and update tracker state.

    infohash: torrent identifier. event: 'started' / 'completed' /
    'stopped' / 'snooped' / None. ip: source address of the request.
    paramslist: dict of query parameters, each value a list.

    Raises ValueError on malformed input. Returns rsize, the number of
    peers the caller should include in the response.
    """
    peers = self.downloads.setdefault(infohash, {})
    ts = self.times.setdefault(infohash, {})
    self.completed.setdefault(infohash, 0)
    self.seedcount.setdefault(infohash, 0)

    def params(key, default=None, l=paramslist):
        # Query values arrive as lists; take the first occurrence.
        if key in l:
            return l[key][0]
        return default

    myid = params('peer_id', '')
    if len(myid) != 20:
        raise ValueError('id not of length 20')
    if event not in ['started', 'completed', 'stopped', 'snooped', None]:
        raise ValueError('invalid event')
    port = int(params('port', ''))
    if port < 0 or port > 65535:
        raise ValueError('invalid port')
    left = int(params('left', ''))
    if left < 0:
        raise ValueError('invalid amount left')
    uploaded = int(params('uploaded', ''))
    downloaded = int(params('downloaded', ''))

    peer = peers.get(myid)
    islocal = local_IPs.includes(ip)
    mykey = params('key')
    if peer:
        # A returning peer must prove identity by key or source IP.
        auth = peer.get('key', -1) == mykey or peer.get('ip') == ip

    gip = params('ip')  # IP the peer claims for itself
    if is_valid_ip(gip) and (islocal or not self.only_local_override_ip):
        ip1 = gip
    else:
        ip1 = ip

    if params('numwant') is not None:
        rsize = min(int(params('numwant')), self.response_size)
    else:
        rsize = self.response_size

    if event == 'stopped':
        if peer:
            if auth:
                self.delete_peer(infohash, myid)
    elif not peer:
        # Brand-new peer record.
        ts[myid] = clock()
        peer = {'ip': ip, 'port': port, 'left': left}
        if mykey:
            peer['key'] = mykey
        if gip:
            peer['given ip'] = gip
        if port:
            if not self.natcheck or islocal:
                peer['nat'] = 0
                self.natcheckOK(infohash, myid, ip1, port, left)
            else:
                # Asynchronous connect-back test; result arrives via
                # self.connectback_result.
                NatCheck(self.connectback_result, infohash, myid, ip1, port,
                         self.rawserver)
        else:
            peer['nat'] = 2**30
        if event == 'completed':
            self.completed[infohash] += 1
        if not left:
            self.seedcount[infohash] += 1
        peers[myid] = peer
    else:
        if not auth:
            return rsize  # return w/o changing stats
        ts[myid] = clock()
        if not left and peer['left']:
            # Peer transitioned downloader -> seed.
            self.completed[infohash] += 1
            self.seedcount[infohash] += 1
            if not peer.get('nat', -1):
                for bc in self.becache[infohash]:
                    bc[1][myid] = bc[0][myid]
                    del bc[0][myid]
        elif left and not peer['left']:
            # Peer transitioned seed -> downloader (re-download).
            self.completed[infohash] -= 1
            self.seedcount[infohash] -= 1
            if not peer.get('nat', -1):
                for bc in self.becache[infohash]:
                    bc[0][myid] = bc[1][myid]
                    del bc[1][myid]
        peer['left'] = left
        if port:
            recheck = False
            if ip != peer['ip']:
                peer['ip'] = ip
                recheck = True
            if gip != peer.get('given ip'):
                if gip:
                    peer['given ip'] = gip
                elif 'given ip' in peer:
                    del peer['given ip']
                recheck = True
            natted = peer.get('nat', -1)
            if recheck:
                if natted == 0:
                    # Address changed: purge stale cache entries.
                    l = self.becache[infohash]
                    y = not peer['left']
                    for x in l:
                        del x[y][myid]
                if natted >= 0:
                    del peer['nat']  # restart NAT testing
            if natted and natted < self.natcheck:
                recheck = True
            if recheck:
                if not self.natcheck or islocal:
                    peer['nat'] = 0
                    self.natcheckOK(infohash, myid, ip1, port, left)
                else:
                    NatCheck(self.connectback_result, infohash, myid, ip1,
                             port, self.rawserver)
    return rsize
def add_disconnected_seed(self, id):
    """Record (or refresh) the time at which a seed with this id was last seen."""
    # if not self.disconnectedseeds.has_key(id):
    #     self.picker.seed_seen_recently()
    self.disconnectedseeds[id] = clock()
def peerlist(self, infohash, stopped, tracker, is_seed, return_type, rsize):
    """Build the announce response dict for one torrent.

    stopped: peer is leaving (gets an empty list). tracker: request came
    from another tracker (multitracker exchange). is_seed: requester is a
    seed (seeds are not sent other seeds). return_type selects the peer
    encoding (0 full dicts, 1 no-id dicts, 2 compact string). rsize: max
    peers to return.
    """
    data = {}  # return data
    seeds = self.seedcount[infohash]
    data['complete'] = seeds
    data['incomplete'] = len(self.downloads[infohash]) - seeds
    if (self.config['allowed_controls']
            and 'warning message' in self.allowed[infohash]):
        data['warning message'] = self.allowed[infohash]['warning message']
    if tracker:
        data['interval'] = self.config['multitracker_reannounce_interval']
        if not rsize:
            return data
        cache = self.cached_t.setdefault(infohash, None)
        if (not cache or len(cache[1]) < rsize or cache[0] +
                self.config['min_time_between_cache_refreshes'] < clock()):
            # Rebuild the tracker-to-tracker cache from the becache
            # (downloaders + seeds with confirmed reachability).
            bc = self.becache.setdefault(infohash,
                                         [[{}, {}], [{}, {}], [{}, {}]])
            cache = [
                clock(),
                list(bc[0][0].values()) + list(bc[0][1].values())
            ]
            self.cached_t[infohash] = cache
            shuffle(cache[1])
        # Work on the peer list itself; entries handed out are consumed.
        cache = cache[1]
        data['peers'] = cache[-rsize:]
        del cache[-rsize:]
        return data
    data['interval'] = self.reannounce_interval
    if stopped or not rsize:  # save some bandwidth
        data['peers'] = []
        return data
    bc = self.becache.setdefault(infohash, [[{}, {}], [{}, {}], [{}, {}]])
    len_l = len(bc[0][0])  # downloaders ("leechers")
    len_s = len(bc[0][1])  # seeds
    if not (len_l + len_s):  # caches are empty!
        data['peers'] = []
        return data
    # Portion of the response that should come from downloaders.
    l_get_size = int(float(rsize) * (len_l) / (len_l + len_s))
    cache = self.cached.setdefault(infohash, [None, None, None])[return_type]
    if cache and (not cache[1] or (is_seed and len(cache[1]) < rsize)
                  or len(cache[1]) < l_get_size
                  or cache[0] + self.config['min_time_between_cache_refreshes']
                  < self.cachetime):
        cache = None  # stale or too small — force a rebuild
    if not cache:
        peers = self.downloads[infohash]
        vv = [[], [], []]
        for key, ip, port in self.t2tlist.harvest(
                infohash):  # empty if disabled
            if key not in peers:
                vv[0].append({'ip': ip, 'port': port, 'peer id': key})
                vv[1].append({'ip': ip, 'port': port})
                vv[2].append(compact_peer_info(ip, port))
        cache = [
            self.cachetime,
            list(bc[return_type][0].values()) + vv[return_type],
            list(bc[return_type][1].values())
        ]
        shuffle(cache[1])
        shuffle(cache[2])
        self.cached[infohash][return_type] = cache
        # Share the harvested multitracker peers with the other encodings.
        for rr in range(len(self.cached[infohash])):
            if rr != return_type:
                try:
                    self.cached[infohash][rr][1].extend(vv[rr])
                except:
                    pass
    if len(cache[1]) < l_get_size:
        # Not enough downloaders cached: hand out everything.
        peerdata = cache[1]
        if not is_seed:
            peerdata.extend(cache[2])
        cache[1] = []
        cache[2] = []
    else:
        if not is_seed:
            # Take the seed share first, then top up with downloaders.
            peerdata = cache[2][l_get_size - rsize:]
            del cache[2][l_get_size - rsize:]
            rsize -= len(peerdata)
        else:
            peerdata = []
        if rsize:
            peerdata.extend(cache[1][-rsize:])
            del cache[1][-rsize:]
    if return_type == 2:
        peerdata = ''.join(peerdata)
    data['peers'] = peerdata
    return data
def got_request(self, i, p, l):
    """Pass a peer's block request to the uploader.

    If this is the first request after a timed unchoke, report the
    unchoke-to-request latency to the rate limiter and clear the timer.
    """
    self.upload.got_request(i, p, l)
    if self.just_unchoked:
        self.connecter.ratelimiter.ping(clock() - self.just_unchoked)
        self.just_unchoked = 0
def got_unchoke(self):
    """Handle an UNCHOKE from the peer: resume requesting if interested."""
    if not self.choked:
        return
    self.choked = False
    if self.interested:
        self._request_more(new_unchoke=True)
    # Data (or the permission to get it) just flowed; reset the snub timer.
    self.last2 = clock()
def send_interested(self):
    """Tell the peer we are interested (idempotent).

    If we are already unchoked, reset the snub timer so the fresh
    interest is not immediately judged as stalled.
    """
    if self.interested:
        return
    self.interested = True
    self.connection.send_interested()
    if not self.choked:
        self.last2 = clock()
def got_unchoke(self):
    """Peer unchoked us: start requesting again if we are interested."""
    if self.choked:
        self.choked = False
        if self.interested:
            self._request_more(new_unchoke=True)
        # Reset the snub timer — the peer just became responsive.
        self.last2 = clock()
def send_interested(self):
    """Declare interest to the peer exactly once; refresh the snub timer if unchoked."""
    if not self.interested:
        self.interested = True
        self.connection.send_interested()
        if not self.choked:
            self.last2 = clock()
def add_disconnected_seed(self, id):
    """Stamp the time at which a seed with this peer id disconnected."""
    # if not self.disconnectedseeds.has_key(id):
    #     self.picker.seed_seen_recently()
    self.disconnectedseeds[id]=clock()
def __init__(self, config, rawserver):
    """Tracker setup: load config and saved state, then schedule periodic work.

    config: parsed option dict. rawserver: event loop providing add_task.
    Side effects: reads the favicon and dfile state files, may redirect
    stdout to a log file, installs a SIGHUP handler for log rotation, and
    schedules the save-state and expire-downloaders tasks.
    """
    self.config = config
    self.response_size = config['response_size']
    self.dfile = config['dfile']
    self.natcheck = config['nat_check']
    favicon = config['favicon']
    self.parse_dir_interval = config['parse_dir_interval']
    self.favicon = None
    if favicon:
        try:
            h = open(favicon, 'r')
            self.favicon = h.read()
            h.close()
        except:
            print(
                "**warning** specified favicon file -- %s -- does not exist."
                % favicon)
    self.rawserver = rawserver
    self.cached = {
    }  # format: infohash: [[time1, l1, s1], [time2, l2, s2], [time3, l3, s3]]
    self.cached_t = {}  # format: infohash: [time, cache]
    self.times = {}
    self.state = {}
    self.seedcount = {}
    self.allowed_IPs = None
    self.banned_IPs = None
    if config['allowed_ips'] or config['banned_ips']:
        self.allowed_ip_mtime = 0
        self.banned_ip_mtime = 0
        self.read_ip_lists()
    self.only_local_override_ip = config['only_local_override_ip']
    if self.only_local_override_ip == 2:
        self.only_local_override_ip = not config['nat_check']
    if exists(self.dfile):
        # Restore previously saved peer state; reset on any corruption.
        try:
            h = open(self.dfile, 'rb')
            ds = h.read()
            h.close()
            tempstate = bdecode(ds)
            if 'peers' not in tempstate:
                tempstate = {'peers': tempstate}
            statefiletemplate(tempstate)
            self.state = tempstate
        except:
            print('**warning** statefile ' + self.dfile +
                  ' corrupt; resetting')
    self.downloads = self.state.setdefault('peers', {})
    self.completed = self.state.setdefault('completed', {})
    self.becache = {}  # format: infohash: [[l1, s1], [l2, s2], [l3, s3]]
    for infohash, ds in list(self.downloads.items()):
        self.seedcount[infohash] = 0
        for x, y in list(ds.items()):
            ip = y['ip']
            if ((self.allowed_IPs and not self.allowed_IPs.includes(ip))
                    or (self.banned_IPs and self.banned_IPs.includes(ip))):
                del ds[x]
                continue
            if not y['left']:
                self.seedcount[infohash] += 1
            if y.get('nat', -1):
                continue
            # NOTE(review): key 'given_ip' (underscore) never matches the
            # 'given ip' (space) key stored by add_data — looks like a
            # long-standing bug; confirm before changing behavior.
            gip = y.get('given_ip')
            if is_valid_ip(gip) and (not self.only_local_override_ip
                                     or local_IPs.includes(ip)):
                ip = gip
            self.natcheckOK(infohash, x, ip, y['port'], y['left'])
    for x in list(self.downloads.keys()):
        self.times[x] = {}
        for y in list(self.downloads[x].keys()):
            self.times[x][y] = 0
    self.trackerid = createPeerID('-T-')
    seed(self.trackerid)
    self.reannounce_interval = config['reannounce_interval']
    self.save_dfile_interval = config['save_dfile_interval']
    self.show_names = config['show_names']
    rawserver.add_task(self.save_state, self.save_dfile_interval)
    self.prevtime = clock()
    self.timeout_downloaders_interval = config[
        'timeout_downloaders_interval']
    rawserver.add_task(self.expire_downloaders,
                       self.timeout_downloaders_interval)
    self.logfile = None
    self.log = None
    if (config['logfile']) and (config['logfile'] != '-'):
        # Redirect stdout into the log file so print-based logging works.
        try:
            self.logfile = config['logfile']
            self.log = open(self.logfile, 'a')
            sys.stdout = self.log
            print("# Log Started: ", isotime())
        except:
            print("**warning** could not redirect stdout to log file: ",
                  sys.exc_info()[0])
    if config['hupmonitor']:

        def huphandler(signum, frame, self=self):
            # SIGHUP: reopen the log (supports external log rotation).
            try:
                self.log.close()
                self.log = open(self.logfile, 'a')
                sys.stdout = self.log
                print("# Log reopened: ", isotime())
            except:
                print("**warning** could not reopen logfile")

        signal.signal(signal.SIGHUP, huphandler)
    self.allow_get = config['allow_get']
    self.t2tlist = T2TList(config['multitracker_enabled'], self.trackerid,
                           config['multitracker_reannounce_interval'],
                           config['multitracker_maxpeers'],
                           config['http_timeout'], self.rawserver)
    if config['allowed_list']:
        if config['allowed_dir']:
            print(
                '**warning** allowed_dir and allowed_list options cannot be used together'
            )
            print('**warning** disregarding allowed_dir')
            config['allowed_dir'] = ''
        self.allowed = self.state.setdefault('allowed_list', {})
        self.allowed_list_mtime = 0
        self.parse_allowed()
        self.remove_from_state('allowed', 'allowed_dir_files')
        if config['multitracker_allowed'] == 'autodetect':
            config['multitracker_allowed'] = 'none'
        config['allowed_controls'] = 0
    elif config['allowed_dir']:
        self.allowed = self.state.setdefault('allowed', {})
        self.allowed_dir_files = self.state.setdefault(
            'allowed_dir_files', {})
        self.allowed_dir_blocked = {}
        self.parse_allowed()
        self.remove_from_state('allowed_list')
    else:
        self.allowed = None
        self.remove_from_state('allowed', 'allowed_dir_files',
                               'allowed_list')
        if config['multitracker_allowed'] == 'autodetect':
            config['multitracker_allowed'] = 'none'
        config['allowed_controls'] = 0
    # Work around servers that fail to decode '+' as space.
    self.uq_broken = unquote('+') != ' '
    self.keep_dead = config['keep_dead']
    self.Filter = Filter(rawserver.add_task)
    aggregator = config['aggregator']
    if aggregator == '0':
        self.is_aggregator = False
        self.aggregator_key = None
    else:
        self.is_aggregator = True
        if aggregator == '1':
            self.aggregator_key = None
        else:
            self.aggregator_key = aggregator
        self.natcheck = False
    send = config['aggregate_forward']
    if not send:
        self.aggregate_forward = None
    else:
        try:
            self.aggregate_forward, self.aggregate_password = send.split(
                ',')
        except:
            self.aggregate_forward = send
            self.aggregate_password = None
    self.dedicated_seed_id = config['dedicated_seed_id']
    self.is_seeded = {}
    self.cachetime = 0
    self.cachetimeupdate()