def is_snubbed(self, just_check = False):
    """Report whether this peer has been silent for longer than snub_time.

    Side effect (skipped when `just_check` is set, in 'node' app mode, or
    for helper/coordinator connections): an unchoked peer that has gone
    silent is treated as if it had choked us — every outstanding request
    is cancelled and got_choke() runs the normal choke bookkeeping.
    """
    snub_time = self.downloader.snub_time
    if not self.choked and not just_check and self.app_mode != 'node':
        silent = clock() - self.last2 > snub_time
        if silent:
            raw = self.connection.connection
            if not raw.is_helper_con() and not raw.is_coordinator_con():
                for piece, offset, size in self.active_requests:
                    self.connection.send_cancel(piece, offset, size)
                self.got_choke()
    return clock() - self.last > snub_time
def next_have(self, connection, looser_upload):
    """Pick the next piece index to advertise (HAVE) to `connection` while
    super-seeding.

    Returns the piece number to send, -1 when the peer should be dropped,
    or None when no HAVE should go out right now.
    """
    if DEBUG:
        log('pp::next_have: ---')
    if self.seed_time is None:
        # First call after seeding starts: begin the grace period.
        self.seed_time = clock()
        return
    if clock() < self.seed_time + 10:
        # Stay quiet for the first 10 seconds of seeding.
        return
    if not connection.upload.super_seeding:
        return
    if connection in self.seed_connections:
        # This peer was already handed a piece; require evidence it spread
        # (num other peers announced HAVE for it) before revealing another.
        if looser_upload:
            num = 1
        else:
            num = 2
        if self.seed_got_haves[self.seed_connections[connection]] < num:
            return
        if not connection.upload.was_ever_interested:
            # Peer never requested what we advertised; give up after 3 tries.
            connection.upload.skipped_count += 1
            if connection.upload.skipped_count >= 3:
                return -1
    for tier in self.interests:
        for piece in tier:
            if not connection.download.have[piece]:
                # Promote the piece one interest level (growing the tier
                # list if needed) and record the assignment to this peer.
                seedint = self.level_in_interests[piece]
                self.level_in_interests[piece] += 1
                if seedint == len(self.interests) - 1:
                    self.interests.append([])
                self._shift_over(piece, self.interests[seedint], self.interests[seedint + 1])
                self.seed_got_haves[piece] = 0
                self.seed_connections[connection] = piece
                connection.upload.seed_have_list.append(piece)
                return piece
    # Every piece we track is already held by this peer.
    return -1
def _round_robin(self): self.schedule(self._round_robin, 5) if self.super_seed: cons = range(len(self.connections)) to_close = [] count = self.config['min_uploads'] - self.last_preferred if count > 0: shuffle(cons) for c in cons: if self.seeding_manager is None or self.seeding_manager.is_conn_eligible(c): i = self.picker.next_have(self.connections[c], count > 0) if i is None: continue if i < 0: to_close.append(self.connections[c]) continue self.connections[c].send_have(i) count -= 1 else: to_close.append(self.connections[c]) for c in to_close: c.close() if self.last_round_robin + self.round_robin_period < clock(): self.last_round_robin = clock() for i in xrange(1, len(self.connections)): c = self.connections[i] if self.seeding_manager is None or self.seeding_manager.is_conn_eligible(c): u = c.get_upload() if u.is_choked() and u.is_interested(): self.connections = self.connections[i:] + self.connections[:i] break self._rechoke()
def is_snubbed(self, just_check=False):
    """True when nothing has arrived from this peer for over snub_time
    seconds.  May also locally choke a stalled, unchoked peer (suppressed
    by `just_check`, in 'node' mode, and for helper/coordinator links)."""
    threshold = self.downloader.snub_time
    eligible = (not self.choked and not just_check
                and self.app_mode != 'node'
                and clock() - self.last2 > threshold
                and not self.connection.connection.is_helper_con()
                and not self.connection.connection.is_coordinator_con())
    if eligible:
        # Treat the stalled peer as if it had choked us: drop every
        # outstanding request and run the normal choke bookkeeping.
        for index, begin, length in self.active_requests:
            self.connection.send_cancel(index, begin, length)
        self.got_choke()
    return clock() - self.last > threshold
def got_piece(self, index, begin, hashlist, piece):
    """Handle an incoming PIECE chunk from this peer.

    Returns True when piece `index` is now complete and accepted by
    storage, False otherwise (unsolicited chunk, failed check, or the
    piece is still incomplete).
    """
    if self.bad_performance_counter:
        self.bad_performance_counter -= 1
        if DEBUG:
            print >> sys.stderr, 'decreased bad_performance_counter to', self.bad_performance_counter
    length = len(piece)
    try:
        self.active_requests.remove((index, begin, length))
    except ValueError:
        # We never asked for this chunk (or already cancelled it).
        self.downloader.discarded += length
        return False
    if self.downloader.endgamemode:
        self.downloader.all_requests.remove((index, begin, length))
        if DEBUG:
            print >> sys.stderr, 'Downloader: got_piece: removed one request from all_requests', len(self.downloader.all_requests), 'remaining'
    # Any received data counts as activity for snub detection and rates.
    self.last = clock()
    self.last2 = clock()
    self.measure.update_rate(length)
    self.short_term_measure.update_rate(length)
    self.downloader.measurefunc(length)
    if not self.downloader.storage.piece_came_in(index, begin, hashlist, piece, self.guard):
        self.downloader.piece_flunked(index)
        return False
    self.downloader.picker.got_piece(index, begin, length)
    if self.downloader.storage.do_I_have(index):
        self.downloader.picker.complete(index)
    if self.downloader.endgamemode:
        # Endgame: this chunk was requested from several peers — cancel it
        # everywhere else it is still outstanding.
        for d in self.downloader.downloads:
            if d is not self:
                if d.interested:
                    if d.choked:
                        d.fix_download_endgame()
                    else:
                        try:
                            d.active_requests.remove((index, begin, length))
                        except ValueError:
                            continue
                        d.connection.send_cancel(index, begin, length)
                        d.fix_download_endgame()
    self._request_more()
    self.downloader.check_complete(index)
    self.connection.total_downloaded += length
    return self.downloader.storage.do_I_have(index)
def __init__(self, bgapp, title):
    """Build the peer-statistics frame: a 17-column report ListCtrl plus a
    StaticText used for VOD debug output (see updateStats())."""
    wx.Frame.__init__(self, None, title=title, pos=(50, 10), size=(1100, 450))
    self.bgapp = bgapp
    self.spewwait = clock()  # last refresh time; updateStats() throttles on it
    self.Bind(wx.EVT_CLOSE, self.OnClose)
    self.SetBackgroundColour(wx.Colour(255, 255, 255))
    fw = 12  # base column width unit
    spewList = wx.ListCtrl(self, pos=(0, 0), size=(1000, 300), style=wx.LC_REPORT | wx.LC_HRULES | wx.LC_VRULES)
    # (label, format or None for the ListCtrl default, width) per column.
    column_spec = [
        ('Optimistic Unchoke', wx.LIST_FORMAT_CENTER, fw * 2),
        ('Peer ID', None, 0),
        ('IP', None, fw * 11),
        ('Local/Remote', wx.LIST_FORMAT_CENTER, fw * 2),
        ('Up', wx.LIST_FORMAT_RIGHT, fw * 2),
        ('Interested', wx.LIST_FORMAT_CENTER, fw * 2),
        ('Choking', wx.LIST_FORMAT_CENTER, fw * 2),
        ('Down', wx.LIST_FORMAT_RIGHT, fw * 8),
        ('Interesting', wx.LIST_FORMAT_CENTER, fw * 2),
        ('Choked', wx.LIST_FORMAT_CENTER, fw * 2),
        ('Snubbed', wx.LIST_FORMAT_CENTER, fw * 2),
        ('Downloaded', wx.LIST_FORMAT_RIGHT, fw * 5),
        ('Uploaded', wx.LIST_FORMAT_RIGHT, fw * 5),
        ('Completed', wx.LIST_FORMAT_RIGHT, fw * 6),
        ('Peer Download Speed', wx.LIST_FORMAT_RIGHT, fw * 10),
        ('Requested Piece', wx.LIST_FORMAT_CENTER, fw * 6),
        ('Received Piece', wx.LIST_FORMAT_CENTER, fw * 6),
    ]
    for col, (label, fmt, width) in enumerate(column_spec):
        if fmt is None:
            spewList.InsertColumn(col, label, width=width)
        else:
            spewList.InsertColumn(col, label, format=fmt, width=width)
    self.spewList = spewList
    labelVOD = wx.StaticText(self, -1, 'static text')
    self.labelVOD = labelVOD
    gridSizer = wx.FlexGridSizer(cols=1, vgap=5)
    gridSizer.Add(spewList, -1, wx.EXPAND)
    gridSizer.Add(labelVOD, -1, wx.EXPAND)
    self.SetSizer(gridSizer)
    self.bgapp.statFrame = self
def got_piece(self, index, begin, hashlist, piece):
    """Process a PIECE chunk received from this peer; return True only when
    the whole piece `index` is now present and accepted by storage."""
    if self.bad_performance_counter:
        self.bad_performance_counter -= 1
        if DEBUG:
            print >> sys.stderr, 'decreased bad_performance_counter to', self.bad_performance_counter
    length = len(piece)
    try:
        self.active_requests.remove((index, begin, length))
    except ValueError:
        # Chunk was not requested (or was cancelled): count and discard.
        self.downloader.discarded += length
        return False
    if self.downloader.endgamemode:
        self.downloader.all_requests.remove((index, begin, length))
        if DEBUG:
            print >> sys.stderr, 'Downloader: got_piece: removed one request from all_requests', len(self.downloader.all_requests), 'remaining'
    # Refresh activity timestamps (snub detection) and rate meters.
    self.last = clock()
    self.last2 = clock()
    self.measure.update_rate(length)
    self.short_term_measure.update_rate(length)
    self.downloader.measurefunc(length)
    if not self.downloader.storage.piece_came_in(index, begin, hashlist, piece, self.guard):
        self.downloader.piece_flunked(index)
        return False
    self.downloader.picker.got_piece(index, begin, length)
    if self.downloader.storage.do_I_have(index):
        self.downloader.picker.complete(index)
    if self.downloader.endgamemode:
        # Endgame duplicates: cancel this chunk on every other peer that
        # still has it outstanding.
        for d in self.downloader.downloads:
            if d is not self:
                if d.interested:
                    if d.choked:
                        d.fix_download_endgame()
                    else:
                        try:
                            d.active_requests.remove((index, begin, length))
                        except ValueError:
                            continue
                        d.connection.send_cancel(index, begin, length)
                        d.fix_download_endgame()
    self._request_more()
    self.downloader.check_complete(index)
    self.connection.total_downloaded += length
    return self.downloader.storage.do_I_have(index)
def num_disconnected_seeds(self):
    """Drop entries older than EXPIRE_TIME from the disconnected-seed
    table and return how many seeds remain recorded."""
    stale = [seed_id for seed_id, seen_at in self.disconnectedseeds.items()
             if clock() - seen_at > EXPIRE_TIME]
    for seed_id in stale:
        del self.disconnectedseeds[seed_id]
    return len(self.disconnectedseeds)
def got_unchoke(self):
    """Handle an UNCHOKE message: clear the choked flag, start requesting
    if we are interested, and refresh the activity timestamp."""
    if not self.choked:
        # Redundant unchoke from the peer; nothing to do.
        if DEBUG:
            log('downloader::got_unchoke: already unchoked: ip', self.connection.get_ip())
        return
    if DEBUG:
        log('downloader::got_unchoke: got unchoke: ip', self.connection.get_ip(), 'interested', self.interested)
    self.choked = False
    if self.interested:
        self._request_more(new_unchoke=True)
    self.last2 = clock()
def update(self, spew, statistics=None, vod_stats=None):
    """Refresh the live-stats label, at most once every 2 seconds.

    Builds a multi-line summary of the VOD video status; the held pieces
    are rendered as 'first-last' runs at the end.
    """
    if clock() - self.clock > 2:
        self.clock = clock()
        info = ''
        if vod_stats is not None and vod_stats['videostatus'] is not None:
            vs = vod_stats['videostatus']
            info += 'br: ' + str(vs.bitrate / 1024)
            info += ', plen: ' + str(vs.piecelen / 1024)
            info += ', first: ' + str(vs.first_piece)
            info += ', last: ' + str(vs.last_piece)
            info += ', have: ' + str(vs.numhave)
            info += '\nprebuf: ' + str(vs.prebuffering)
            info += ', playing: ' + str(vs.playing)
            info += ', paused: ' + str(vs.paused)
            info += '\nlive_first: ' + str(vs.live_first_piece)
            info += ', live_last: ' + str(vs.live_last_piece)
            info += '\nppos: ' + str(vs.playback_pos)
            if vs.live_first_piece is not None:
                info += ' -' + str(vs.dist_range(vs.live_first_piece, vs.playback_pos))
            if vs.live_last_piece is not None:
                info += ', +' + str(vs.dist_range(vs.playback_pos, vs.live_last_piece))
            info += '\nhr: ' + str(vs.get_high_range())
            info += '\nlpos: ' + str(vs.live_startpos)
            info += '\npieces: '
            have = vs.have[:]
            if len(have):
                # Compress sorted piece numbers into runs, e.g. "3-7 9-9".
                p = None
                f = None
                t = None
                for i in sorted(have):
                    if p is None:
                        f = i
                    elif i != p + 1:
                        t = p
                        info += str(f) + '-' + str(t) + ' '
                        f = i
                    p = i
                info += str(f) + '-' + str(i)
        self.lbl_info.SetLabel(info)
def __init__(self, bgapp, title):
    """Build the minimal live-stats frame: a single StaticText inside a
    one-column flex sizer; registers itself on the background app."""
    wx.Frame.__init__(self, None, title=title, pos=(50, 10), size=(1100, 450))
    self.bgapp = bgapp
    self.clock = clock()  # last refresh time; update() throttles on it
    self.Bind(wx.EVT_CLOSE, self.OnClose)
    self.SetBackgroundColour(wx.Colour(255, 255, 255))
    self.lbl_info = wx.StaticText(self, -1, 'info')
    sizer = wx.FlexGridSizer(cols=1, vgap=5)
    sizer.Add(self.lbl_info, -1, wx.EXPAND)
    self.SetSizer(sizer)
    self.bgapp.live_frame = self
def update(self, spew, statistics = None, vod_stats = None):
    """Throttled (2 s) refresh of the live VOD status label; the held
    pieces are rendered as 'first-last' runs at the end."""
    if clock() - self.clock > 2:
        self.clock = clock()
        info = ''
        if vod_stats is not None and vod_stats['videostatus'] is not None:
            vs = vod_stats['videostatus']
            info += 'br: ' + str(vs.bitrate / 1024)
            info += ', plen: ' + str(vs.piecelen / 1024)
            info += ', first: ' + str(vs.first_piece)
            info += ', last: ' + str(vs.last_piece)
            info += ', have: ' + str(vs.numhave)
            info += '\nprebuf: ' + str(vs.prebuffering)
            info += ', playing: ' + str(vs.playing)
            info += ', paused: ' + str(vs.paused)
            info += '\nlive_first: ' + str(vs.live_first_piece)
            info += ', live_last: ' + str(vs.live_last_piece)
            info += '\nppos: ' + str(vs.playback_pos)
            if vs.live_first_piece is not None:
                info += ' -' + str(vs.dist_range(vs.live_first_piece, vs.playback_pos))
            if vs.live_last_piece is not None:
                info += ', +' + str(vs.dist_range(vs.playback_pos, vs.live_last_piece))
            info += '\nhr: ' + str(vs.get_high_range())
            info += '\nlpos: ' + str(vs.live_startpos)
            info += '\npieces: '
            have = vs.have[:]
            if len(have):
                # Compress sorted piece numbers into runs, e.g. "3-7 9-9".
                p = None
                f = None
                t = None
                for i in sorted(have):
                    if p is None:
                        f = i
                    elif i != p + 1:
                        t = p
                        info += str(f) + '-' + str(t) + ' '
                        f = i
                    p = i
                info += str(f) + '-' + str(i)
        self.lbl_info.SetLabel(info)
def __init__(self, infohash, storage, picker, backlog, max_rate_period,
             numpieces, chunksize, measurefunc, snub_time, kickbans_ok,
             kickfunc, banfunc, scheduler=None):
    """Per-torrent download coordinator.

    `backlog` caps outstanding requests per peer; `snub_time` is the
    silence interval after which a peer counts as snubbed; `kickfunc` /
    `banfunc` punish misbehaving peers; `scheduler` runs
    dlr_periodic_check() every second.
    """
    self.infohash = infohash
    self.b64_infohash = b64encode(infohash)  # cached base64 form (logging/keys)
    self.storage = storage
    self.picker = picker
    self.backlog = backlog
    self.max_rate_period = max_rate_period
    self.measurefunc = measurefunc
    self.totalmeasure = Measure(max_rate_period * storage.piece_length / storage.request_size)
    self.numpieces = numpieces
    self.chunksize = chunksize
    self.snub_time = snub_time
    self.kickfunc = kickfunc
    self.banfunc = banfunc
    self.disconnectedseeds = {}  # seed id -> time it was recorded (see num_disconnected_seeds)
    self.downloads = []          # per-peer download objects (appended elsewhere)
    self.perip = {}
    self.gotbaddata = {}
    self.kicked = {}
    self.banned = {}
    self.kickbans_ok = kickbans_ok
    self.kickbans_halted = False
    self.super_seeding = False
    self.endgamemode = False
    self.endgame_queued_pieces = []
    self.all_requests = []
    self.discarded = 0L          # bytes received but thrown away
    self.download_rate = 0       # 0 means no rate limit (see queue_limit)
    self.bytes_requested = 0
    self.last_time = clock()
    self.queued_out = {}
    self.requeueing = False
    self.paused = False
    self.scheduler = scheduler
    self.scheduler(self.dlr_periodic_check, 1)
    if self.picker is not None:
        if self.picker.helper is not None:
            # Let the helper (proxy download) reach back to this downloader.
            self.picker.helper.set_downloader(self)
def _round_robin(self): self.schedule(self._round_robin, 5) if self.super_seed: cons = range(len(self.connections)) to_close = [] count = self.config['min_uploads'] - self.last_preferred if count > 0: shuffle(cons) for c in cons: if self.seeding_manager is None or self.seeding_manager.is_conn_eligible( c): i = self.picker.next_have(self.connections[c], count > 0) if i is None: continue if i < 0: to_close.append(self.connections[c]) continue self.connections[c].send_have(i) count -= 1 else: to_close.append(self.connections[c]) for c in to_close: c.close() if self.last_round_robin + self.round_robin_period < clock(): self.last_round_robin = clock() for i in xrange(1, len(self.connections)): c = self.connections[i] if self.seeding_manager is None or self.seeding_manager.is_conn_eligible( c): u = c.get_upload() if u.is_choked() and u.is_interested(): self.connections = self.connections[ i:] + self.connections[:i] break self._rechoke()
def __init__(self, config, schedule, picker, done=lambda: False):
    """Choker: decides which peer connections get (un)choked.

    `schedule` is the task scheduler; the round-robin pass starts
    immediately and reschedules itself every 5 seconds.  `done` reports
    whether the download has finished.
    """
    self.app_mode = globalConfig.get_mode()
    self.config = config
    self.round_robin_period = config['round_robin_period']
    self.schedule = schedule
    self.picker = picker
    self.connections = []    # peer connections; list order drives rotation
    self.last_preferred = 0  # count of rate-preferred unchokes last pass
    self.last_round_robin = clock()
    self.done = done
    self.super_seed = False
    self.paused = False
    schedule(self._round_robin, 5)
    self.seeding_manager = None
def __init__(self, config, schedule, picker, done = lambda : False):
    """Choker: decides which peer connections get (un)choked.

    `schedule` is the task scheduler; the round-robin pass starts
    immediately and reschedules itself every 5 seconds.  `done` reports
    whether the download has finished.
    """
    self.app_mode = globalConfig.get_mode()
    self.config = config
    self.round_robin_period = config['round_robin_period']
    self.schedule = schedule
    self.picker = picker
    self.connections = []    # peer connections; list order drives rotation
    self.last_preferred = 0  # count of rate-preferred unchokes last pass
    self.last_round_robin = clock()
    self.done = done
    self.super_seed = False
    self.paused = False
    schedule(self._round_robin, 5)
    self.seeding_manager = None
def update(self):
    """Refresh the exponentially-weighted rate estimate (kB/s) and
    reschedule the next probe after `probing_period`."""
    if DEBUG:
        print >> sys.stderr, 'RatePredictor:update'
    self.raw_server.add_task(self.update, self.probing_period)
    measured = self.rate_measure.get_rate() / 1000.0
    now = clock()
    if self.value is None or now - self.timestamp > self.max_period:
        # No usable history: restart from the raw measurement.
        self.value = measured
    else:
        # EWMA blend of the new sample with the running estimate.
        self.value = self.alpha * measured + (1 - self.alpha) * self.value
    if self.max_rate > 0 and self.value > self.max_rate:
        self.value = self.max_rate
    self.timestamp = now
def expire_downloaders(self):
    """Periodic tracker cleanup: drop peers that have not announced since
    the previous sweep, forget empty torrents (unless keep_dead), then
    reschedule itself."""
    for x in self.times.keys():
        for myid, t in self.times[x].items():
            if t < self.prevtime:
                # Peer has not re-announced since the last sweep.
                self.delete_peer(x, myid)
    self.prevtime = clock()
    if self.keep_dead != 1:
        for key, value in self.downloads.items():
            # Torrents with no peers left are forgotten entirely, unless
            # they are on the explicit allowed list.
            if len(value) == 0 and (self.allowed is None or not self.allowed.has_key(key)):
                del self.times[key]
                del self.downloads[key]
                del self.seedcount[key]
    self.rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval)
def queue_limit(self):
    """Return how many chunks may currently be requested under the
    configured download-rate budget (effectively unlimited when no rate
    limit is set).  Also drains the requeue backlog when budget frees up."""
    if not self.download_rate:
        # No rate limit configured.
        return 100000000000.0
    now = clock()
    # Replenish the byte budget for the time elapsed since the last call.
    self.bytes_requested -= (now - self.last_time) * self.download_rate
    self.last_time = now
    if not self.requeueing and self.queued_out and self.bytes_requested < 0:
        # Budget is available again: let queued-out peers request more.
        self.requeueing = True
        pending = self.queued_out.keys()
        shuffle(pending)
        self.queued_out = {}
        for download in pending:
            download._request_more()
        self.requeueing = False
    if -self.bytes_requested > 5 * self.download_rate:
        # Cap the accumulated budget at 5 seconds' worth.
        self.bytes_requested = -5 * self.download_rate
    return max(int(-self.bytes_requested / self.chunksize), 0)
def __init__(self, infohash, storage, picker, backlog, max_rate_period,
             numpieces, chunksize, measurefunc, snub_time, kickbans_ok,
             kickfunc, banfunc, scheduler = None):
    """Per-torrent download coordinator.

    `backlog` caps outstanding requests per peer; `snub_time` is the
    silence interval after which a peer counts as snubbed; `kickfunc` /
    `banfunc` punish misbehaving peers; `scheduler` runs
    dlr_periodic_check() every second.
    """
    self.infohash = infohash
    self.b64_infohash = b64encode(infohash)  # cached base64 form (logging/keys)
    self.storage = storage
    self.picker = picker
    self.backlog = backlog
    self.max_rate_period = max_rate_period
    self.measurefunc = measurefunc
    self.totalmeasure = Measure(max_rate_period * storage.piece_length / storage.request_size)
    self.numpieces = numpieces
    self.chunksize = chunksize
    self.snub_time = snub_time
    self.kickfunc = kickfunc
    self.banfunc = banfunc
    self.disconnectedseeds = {}  # seed id -> time it was recorded (see num_disconnected_seeds)
    self.downloads = []          # per-peer download objects (appended elsewhere)
    self.perip = {}
    self.gotbaddata = {}
    self.kicked = {}
    self.banned = {}
    self.kickbans_ok = kickbans_ok
    self.kickbans_halted = False
    self.super_seeding = False
    self.endgamemode = False
    self.endgame_queued_pieces = []
    self.all_requests = []
    self.discarded = 0L          # bytes received but thrown away
    self.download_rate = 0       # 0 means no rate limit (see queue_limit)
    self.bytes_requested = 0
    self.last_time = clock()
    self.queued_out = {}
    self.requeueing = False
    self.paused = False
    self.scheduler = scheduler
    self.scheduler(self.dlr_periodic_check, 1)
    if self.picker is not None:
        if self.picker.helper is not None:
            # Let the helper (proxy download) reach back to this downloader.
            self.picker.helper.set_downloader(self)
def add_disconnected_seed(self, id):
    """Record (or refresh) the time at which seed `id` disconnected; the
    entry later expires via num_disconnected_seeds()."""
    now = clock()
    self.disconnectedseeds[id] = now
def updateStats(self, spew, statistics=None, vod_stats=None):
    """Refresh the per-peer ListCtrl and the VOD debug label.

    `spew` is a list of per-peer stat dicts; `statistics` optionally
    carries kicked/banned peer lists and storage state; `vod_stats`
    carries video-on-demand status.  GUI refreshes are throttled to at
    most one per second via self.spewwait.
    """
    if spew is not None and clock() - self.spewwait > 1:
        self.spewwait = clock()
        spewList = self.spewList
        # Rows: one per peer + blank separator + TOTALS row.
        spewlen = len(spew) + 2
        if statistics is not None:
            kickbanlen = len(statistics.peers_kicked) + len(statistics.peers_banned)
            if kickbanlen:
                spewlen += kickbanlen + 1  # plus one blank separator row
        else:
            kickbanlen = 0
        # Grow/shrink the list control to exactly spewlen rows.
        for x in range(spewlen - spewList.GetItemCount()):
            i = wx.ListItem()
            spewList.InsertItem(i)
        for x in range(spewlen, spewList.GetItemCount()):
            spewList.DeleteItem(len(spew) + 1)
        tot_uprate = 0.0
        tot_downrate = 0.0
        tot_downloaded = 0
        for x in range(len(spew)):
            if spew[x]['optimistic'] == 1:
                a = '*'
            else:
                a = ' '
            spewList.SetStringItem(x, 0, a)
            spewList.SetStringItem(x, 1, spew[x]['id'])
            spewList.SetStringItem(x, 2, spew[x]['ip'])
            spewList.SetStringItem(x, 3, spew[x]['direction'])
            if spew[x]['uprate'] > 100:
                spewList.SetStringItem(x, 4, '%.0f kB/s' % (float(spew[x]['uprate']) / 1000))
            else:
                spewList.SetStringItem(x, 4, ' ')
            tot_uprate += spew[x]['uprate']
            if spew[x]['uinterested'] == 1:
                a = '*'
            else:
                a = ' '
            spewList.SetStringItem(x, 5, a)
            if spew[x]['uchoked'] == 1:
                a = '*'
            else:
                a = ' '
            spewList.SetStringItem(x, 6, a)
            bitrate = None
            if vod_stats['videostatus'] is not None:
                bitrate = vod_stats['videostatus'].bitrate
            if spew[x]['downrate'] > 100:
                str_downrate = '%.0f' % (spew[x]['downrate'] / 1024.0)
                if 'short_downrate' in spew[x]:
                    if bitrate is None:
                        # BUGFIX: was "/ 1024 / 0.0", which raised
                        # ZeroDivisionError whenever the bitrate was unknown.
                        str_downrate += ' (%.0f)' % (spew[x]['short_downrate'] / 1024.0)
                    else:
                        str_downrate += ' (%.0f, %.1f)' % (spew[x]['short_downrate'] / 1024.0, spew[x]['short_downrate'] / float(bitrate))
                spewList.SetStringItem(x, 7, str_downrate)
            else:
                spewList.SetStringItem(x, 7, ' ')
            tot_downrate += spew[x]['downrate']
            if spew[x]['dinterested'] == 1:
                a = '*'
            else:
                a = ' '
            spewList.SetStringItem(x, 8, a)
            if spew[x]['dchoked'] == 1:
                a = '*'
            else:
                a = ' '
            spewList.SetStringItem(x, 9, a)
            if spew[x]['snubbed'] == 1:
                a = '*'
            else:
                a = ' '
            spewList.SetStringItem(x, 10, a)
            tot_downloaded += spew[x]['dtotal']
            spewList.SetStringItem(x, 11, '%.2f MiB' % (float(spew[x]['dtotal']) / 1048576))
            if spew[x]['utotal'] is not None:
                a = '%.2f MiB' % (float(spew[x]['utotal']) / 1048576)
            else:
                a = ''
            spewList.SetStringItem(x, 12, a)
            spewList.SetStringItem(x, 13, '%.1f%%' % (float(int(spew[x]['completed'] * 1000)) / 10))
            if spew[x]['speed'] is not None:
                a = '%.0f' % (float(spew[x]['speed']) / 1024)
                if 'speed_proxy' in spew[x]:
                    a += ' | p:%.0f' % (float(spew[x]['speed_proxy']) / 1024)
                if 'speed_non_proxy' in spew[x]:
                    a += ' | r:%.0f' % (float(spew[x]['speed_non_proxy']) / 1024)
            else:
                a = ''
            spewList.SetStringItem(x, 14, a)
            spewList.SetStringItem(x, 15, str(spew[x]['last_requested_piece']))
            spewList.SetStringItem(x, 16, str(spew[x]['last_received_piece']))
        # Blank separator row, then the TOTALS row.
        x = len(spew)
        for i in range(17):
            spewList.SetStringItem(x, i, '')
        x += 1
        spewList.SetStringItem(x, 2, ' TOTALS:')
        spewList.SetStringItem(x, 4, '%.0f kB/s' % (float(tot_uprate) / 1024))
        spewList.SetStringItem(x, 7, '%.0f kB/s' % (float(tot_downrate) / 1024))
        spewList.SetStringItem(x, 11, '%.2f MiB' % (float(tot_downloaded) / 1048576))
        spewList.SetStringItem(x, 12, '')
        for i in [0, 1, 3, 5, 6, 8, 9, 10, 13, 14, 15, 16]:
            spewList.SetStringItem(x, i, '')
        if kickbanlen:
            # Blank separator, then one row per kicked/banned peer.
            x += 1
            for i in range(17):
                spewList.SetStringItem(x, i, '')
            for peer in statistics.peers_kicked:
                x += 1
                spewList.SetStringItem(x, 2, peer[0])
                spewList.SetStringItem(x, 1, peer[1])
                spewList.SetStringItem(x, 4, 'KICKED')
                # BUGFIX: the clear-loop previously included column 17,
                # but only columns 0..16 exist.
                for i in [0, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]:
                    spewList.SetStringItem(x, i, '')
            for peer in statistics.peers_banned:
                x += 1
                spewList.SetStringItem(x, 2, peer[0])
                spewList.SetStringItem(x, 1, peer[1])
                spewList.SetStringItem(x, 4, 'BANNED')
                for i in [0, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]:
                    spewList.SetStringItem(x, i, '')
        # NOTE(review): the flattened source is ambiguous about whether the
        # VOD debug section sat inside the 1 s throttle; kept inside here,
        # matching the sibling update() method — confirm against history.
        if vod_stats is not None:
            info = ''
            if DEBUG_PROXY_BUF:
                for pos, data in vod_stats['proxybuf'].iteritems():
                    length = len(data)
                    info += str(pos) + ' '
                    for i in xrange(length / 131072):
                        info += '-'
                    info += str(pos + length - 1) + '\n'
                info += 'buf: ' + str(vod_stats['outbuf']) + '\n'
            if DEBUG_VIDEOSTATUS:
                if vod_stats['videostatus'] is not None:
                    vs = vod_stats['videostatus']
                    info += ' >> idx: ' + str(vs.fileindex)
                    info += ', br: ' + str(vs.bitrate / 1024)
                    info += ', len: ' + str(vs.piecelen / 1024)
                    info += ', first: ' + str(vs.first_piece)
                    info += ', last: ' + str(vs.last_piece)
                    info += ', have: ' + str(vs.numhave)
                    info += ', comp: %.2f' % vs.completed
                    info += ', prebuf: ' + str(vs.prebuffering)
                    info += ', pos: ' + str(vs.playback_pos)
                    info += ', hp: ' + str(vs.prebuf_high_priority_pieces)
                    info += ', pp: ' + str(vs.prebuf_missing_pieces)
                    have = vs.have[:]
                    have.sort()
                    info += ', pieces: ' + str(have)
                for vs in vod_stats['extra_videostatus']:
                    info += '\n index: ' + str(vs.fileindex)
                    info += ', first piece: ' + str(vs.first_piece)
                    info += ', last piece: ' + str(vs.last_piece)
                    info += ', numhave: ' + str(vs.numhave)
                    info += ', completed: %.2f' % vs.completed
                    info += ', prebuf: ' + str(vs.prebuffering)
                    info += ', hp: ' + str(vs.prebuf_high_priority_pieces)
                    info += ', pp: ' + str(vs.prebuf_missing_pieces)
                    have = vs.have[:]
                    have.sort()
                    info += ', pieces: ' + str(have)
            if DEBUG_PIECES:
                if statistics is not None:
                    for piece in xrange(len(statistics.storage_inactive_list)):
                        inactive = statistics.storage_inactive_list[piece]
                        if inactive is None:
                            inactive = 'all'
                        elif inactive == 1:
                            inactive = 'none'
                        else:
                            inactive = str(len(inactive))
                        info += '\n' + str(piece) + ': inactive=' + inactive + ' active=' + str(statistics.storage_active_list[piece]) + ' dirty=' + str(statistics.storage_dirty_list[piece])
            self.labelVOD.SetLabel(info)
def add_data(self, infohash, event, ip, paramslist):
    """Process one tracker announce for `infohash` from `ip`.

    Validates the announce parameters, then creates/updates/deletes the
    peer record (including NAT-check bookkeeping and seed counting).
    Returns the response size (number of peers to hand back).
    Raises ValueError on malformed announces.
    """
    peers = self.downloads.setdefault(infohash, {})
    ts = self.times.setdefault(infohash, {})
    self.completed.setdefault(infohash, 0)
    self.seedcount.setdefault(infohash, 0)

    def params(key, default = None, l = paramslist):
        # Announce parameters arrive as {key: [value, ...]}.
        if l.has_key(key):
            return l[key][0]
        return default

    myid = params('peer_id', '')
    if len(myid) != 20:
        raise ValueError, 'id not of length 20'
    if event not in ('started', 'completed', 'stopped', 'snooped', None):
        raise ValueError, 'invalid event'
    port = long(params('port', ''))
    if port < 0 or port > 65535:
        raise ValueError, 'invalid port'
    left = long(params('left', ''))
    if left < 0:
        raise ValueError, 'invalid amount left'
    uploaded = long(params('uploaded', ''))
    downloaded = long(params('downloaded', ''))
    peer = peers.get(myid)
    islocal = local_IPs.includes(ip)
    mykey = params('key')
    if peer:
        # A returning peer must present the same key or come from the same IP.
        auth = peer.get('key', -1) == mykey or peer.get('ip') == ip
    else:
        auth = None
    gip = params('ip')
    # A client-supplied IP overrides the observed one only when allowed.
    if is_valid_ip(gip) and (islocal or not self.only_local_override_ip):
        ip1 = gip
    else:
        ip1 = ip
    if params('numwant') is not None:
        rsize = min(int(params('numwant')), self.response_size)
    else:
        rsize = self.response_size
    if DEBUG:
        log('itracker::add_data: infohash', infohash, 'event', event, 'ip', ip, 'gip', gip, 'port', port, 'myid', myid, 'mykey', mykey, 'auth', auth)
    if event == 'stopped':
        if peer:
            if auth:
                if DEBUG:
                    log('itracker::add_data: delete peer: infohash', infohash, 'myid', myid)
                self.delete_peer(infohash, myid)
    elif not peer:
        # First announce from this peer id: create the record.
        ts[myid] = clock()
        peer = {'ip': ip, 'port': port, 'left': left}
        if mykey:
            peer['key'] = mykey
        if gip:
            peer['given ip'] = gip
        if port:
            if not self.natcheck or islocal:
                peer['nat'] = 0
                self.natcheckOK(infohash, myid, ip1, port, left)
            else:
                NatCheck(self.connectback_result, infohash, myid, ip1, port, self.rawserver)
        else:
            # No port announced: mark as unreachable (2**30 failures).
            peer['nat'] = 1073741824
        if event == 'completed':
            self.completed[infohash] += 1
        if not left:
            self.seedcount[infohash] += 1
        if DEBUG:
            log('itracker::add_data: add new peer: myid', myid, 'peer', peer)
        peers[myid] = peer
    else:
        if not auth:
            # Unauthenticated update: ignore but still answer normally.
            return rsize
        ts[myid] = clock()
        if not left and peer['left']:
            # Peer transitioned from leecher to seed.
            self.completed[infohash] += 1
            self.seedcount[infohash] += 1
            if not peer.get('nat', -1):
                for bc in self.becache[infohash]:
                    bc[1][myid] = bc[0][myid]
                    del bc[0][myid]
        if peer['left']:
            peer['left'] = left
        if port:
            recheck = False
            if ip != peer['ip']:
                peer['ip'] = ip
                recheck = True
            if gip != peer.get('given ip'):
                if gip:
                    peer['given ip'] = gip
                elif peer.has_key('given ip'):
                    del peer['given ip']
                recheck = True
            natted = peer.get('nat', -1)
            if recheck:
                if natted == 0:
                    # Address changed: purge the stale becache entries.
                    l = self.becache[infohash]
                    y = not peer['left']
                    for x in l:
                        try:
                            del x[y][myid]
                        except KeyError:
                            pass
                if not self.natcheck or islocal:
                    # Restart NAT testing from scratch.
                    del peer['nat']
            if natted and natted < self.natcheck:
                recheck = True
            if recheck:
                if not self.natcheck or islocal:
                    peer['nat'] = 0
                    self.natcheckOK(infohash, myid, ip1, port, left)
                else:
                    NatCheck(self.connectback_result, infohash, myid, ip1, port, self.rawserver)
    return rsize
def updateStats(self, spew, statistics = None, vod_stats = None):
    """Refresh the per-peer ListCtrl and the VOD debug label.

    `spew` is a list of per-peer stat dicts; `statistics` optionally
    carries kicked/banned peer lists and storage state; `vod_stats`
    carries video-on-demand status.  GUI refreshes are throttled to at
    most one per second via self.spewwait.
    """
    if spew is not None and clock() - self.spewwait > 1:
        self.spewwait = clock()
        spewList = self.spewList
        # Rows: one per peer + blank separator + TOTALS row.
        spewlen = len(spew) + 2
        if statistics is not None:
            kickbanlen = len(statistics.peers_kicked) + len(statistics.peers_banned)
            if kickbanlen:
                spewlen += kickbanlen + 1  # plus one blank separator row
        else:
            kickbanlen = 0
        # Grow/shrink the list control to exactly spewlen rows.
        for x in range(spewlen - spewList.GetItemCount()):
            i = wx.ListItem()
            spewList.InsertItem(i)
        for x in range(spewlen, spewList.GetItemCount()):
            spewList.DeleteItem(len(spew) + 1)
        tot_uprate = 0.0
        tot_downrate = 0.0
        tot_downloaded = 0
        for x in range(len(spew)):
            if spew[x]['optimistic'] == 1:
                a = '*'
            else:
                a = ' '
            spewList.SetStringItem(x, 0, a)
            spewList.SetStringItem(x, 1, spew[x]['id'])
            spewList.SetStringItem(x, 2, spew[x]['ip'])
            spewList.SetStringItem(x, 3, spew[x]['direction'])
            if spew[x]['uprate'] > 100:
                spewList.SetStringItem(x, 4, '%.0f kB/s' % (float(spew[x]['uprate']) / 1000))
            else:
                spewList.SetStringItem(x, 4, ' ')
            tot_uprate += spew[x]['uprate']
            if spew[x]['uinterested'] == 1:
                a = '*'
            else:
                a = ' '
            spewList.SetStringItem(x, 5, a)
            if spew[x]['uchoked'] == 1:
                a = '*'
            else:
                a = ' '
            spewList.SetStringItem(x, 6, a)
            bitrate = None
            if vod_stats['videostatus'] is not None:
                bitrate = vod_stats['videostatus'].bitrate
            if spew[x]['downrate'] > 100:
                str_downrate = '%.0f' % (spew[x]['downrate'] / 1024.0)
                if 'short_downrate' in spew[x]:
                    if bitrate is None:
                        # BUGFIX: was "/ 1024 / 0.0", which raised
                        # ZeroDivisionError whenever the bitrate was unknown.
                        str_downrate += ' (%.0f)' % (spew[x]['short_downrate'] / 1024.0)
                    else:
                        str_downrate += ' (%.0f, %.1f)' % (spew[x]['short_downrate'] / 1024.0, spew[x]['short_downrate'] / float(bitrate))
                spewList.SetStringItem(x, 7, str_downrate)
            else:
                spewList.SetStringItem(x, 7, ' ')
            tot_downrate += spew[x]['downrate']
            if spew[x]['dinterested'] == 1:
                a = '*'
            else:
                a = ' '
            spewList.SetStringItem(x, 8, a)
            if spew[x]['dchoked'] == 1:
                a = '*'
            else:
                a = ' '
            spewList.SetStringItem(x, 9, a)
            if spew[x]['snubbed'] == 1:
                a = '*'
            else:
                a = ' '
            spewList.SetStringItem(x, 10, a)
            tot_downloaded += spew[x]['dtotal']
            spewList.SetStringItem(x, 11, '%.2f MiB' % (float(spew[x]['dtotal']) / 1048576))
            if spew[x]['utotal'] is not None:
                a = '%.2f MiB' % (float(spew[x]['utotal']) / 1048576)
            else:
                a = ''
            spewList.SetStringItem(x, 12, a)
            spewList.SetStringItem(x, 13, '%.1f%%' % (float(int(spew[x]['completed'] * 1000)) / 10))
            if spew[x]['speed'] is not None:
                a = '%.0f' % (float(spew[x]['speed']) / 1024)
                if 'speed_proxy' in spew[x]:
                    a += ' | p:%.0f' % (float(spew[x]['speed_proxy']) / 1024)
                if 'speed_non_proxy' in spew[x]:
                    a += ' | r:%.0f' % (float(spew[x]['speed_non_proxy']) / 1024)
            else:
                a = ''
            spewList.SetStringItem(x, 14, a)
            spewList.SetStringItem(x, 15, str(spew[x]['last_requested_piece']))
            spewList.SetStringItem(x, 16, str(spew[x]['last_received_piece']))
        # Blank separator row, then the TOTALS row.
        x = len(spew)
        for i in range(17):
            spewList.SetStringItem(x, i, '')
        x += 1
        spewList.SetStringItem(x, 2, ' TOTALS:')
        spewList.SetStringItem(x, 4, '%.0f kB/s' % (float(tot_uprate) / 1024))
        spewList.SetStringItem(x, 7, '%.0f kB/s' % (float(tot_downrate) / 1024))
        spewList.SetStringItem(x, 11, '%.2f MiB' % (float(tot_downloaded) / 1048576))
        spewList.SetStringItem(x, 12, '')
        for i in [0, 1, 3, 5, 6, 8, 9, 10, 13, 14, 15, 16]:
            spewList.SetStringItem(x, i, '')
        if kickbanlen:
            # Blank separator, then one row per kicked/banned peer.
            x += 1
            for i in range(17):
                spewList.SetStringItem(x, i, '')
            for peer in statistics.peers_kicked:
                x += 1
                spewList.SetStringItem(x, 2, peer[0])
                spewList.SetStringItem(x, 1, peer[1])
                spewList.SetStringItem(x, 4, 'KICKED')
                # BUGFIX: the clear-loop previously included column 17,
                # but only columns 0..16 exist.
                for i in [0, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]:
                    spewList.SetStringItem(x, i, '')
            for peer in statistics.peers_banned:
                x += 1
                spewList.SetStringItem(x, 2, peer[0])
                spewList.SetStringItem(x, 1, peer[1])
                spewList.SetStringItem(x, 4, 'BANNED')
                for i in [0, 3, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]:
                    spewList.SetStringItem(x, i, '')
        # NOTE(review): the flattened source is ambiguous about whether the
        # VOD debug section sat inside the 1 s throttle; kept inside here,
        # matching the sibling update() method — confirm against history.
        if vod_stats is not None:
            info = ''
            if DEBUG_PROXY_BUF:
                for pos, data in vod_stats['proxybuf'].iteritems():
                    length = len(data)
                    info += str(pos) + ' '
                    for i in xrange(length / 131072):
                        info += '-'
                    info += str(pos + length - 1) + '\n'
                info += 'buf: ' + str(vod_stats['outbuf']) + '\n'
            if DEBUG_VIDEOSTATUS:
                if vod_stats['videostatus'] is not None:
                    vs = vod_stats['videostatus']
                    info += ' >> idx: ' + str(vs.fileindex)
                    info += ', br: ' + str(vs.bitrate / 1024)
                    info += ', len: ' + str(vs.piecelen / 1024)
                    info += ', first: ' + str(vs.first_piece)
                    info += ', last: ' + str(vs.last_piece)
                    info += ', have: ' + str(vs.numhave)
                    info += ', comp: %.2f' % vs.completed
                    info += ', prebuf: ' + str(vs.prebuffering)
                    info += ', pos: ' + str(vs.playback_pos)
                    info += ', hp: ' + str(vs.prebuf_high_priority_pieces)
                    info += ', pp: ' + str(vs.prebuf_missing_pieces)
                    have = vs.have[:]
                    have.sort()
                    info += ', pieces: ' + str(have)
                for vs in vod_stats['extra_videostatus']:
                    info += '\n index: ' + str(vs.fileindex)
                    info += ', first piece: ' + str(vs.first_piece)
                    info += ', last piece: ' + str(vs.last_piece)
                    info += ', numhave: ' + str(vs.numhave)
                    info += ', completed: %.2f' % vs.completed
                    info += ', prebuf: ' + str(vs.prebuffering)
                    info += ', hp: ' + str(vs.prebuf_high_priority_pieces)
                    info += ', pp: ' + str(vs.prebuf_missing_pieces)
                    have = vs.have[:]
                    have.sort()
                    info += ', pieces: ' + str(have)
            if DEBUG_PIECES:
                if statistics is not None:
                    for piece in xrange(len(statistics.storage_inactive_list)):
                        inactive = statistics.storage_inactive_list[piece]
                        if inactive is None:
                            inactive = 'all'
                        elif inactive == 1:
                            inactive = 'none'
                        else:
                            inactive = str(len(inactive))
                        info += '\n' + str(piece) + ': inactive=' + inactive + ' active=' + str(statistics.storage_active_list[piece]) + ' dirty=' + str(statistics.storage_dirty_list[piece])
            self.labelVOD.SetLabel(info)
def peerlist(self, infohash, stopped, tracker, is_seed, return_type, rsize):
    """Assemble the announce response for one torrent.

    Parameters:
      infohash    -- torrent id; key into self.downloads / self.seedcount / caches
      stopped     -- peer sent event=stopped; no peer list is returned
      tracker     -- request comes from a peered tracker (multitracker mode)
      is_seed     -- requesting peer already has the complete file
      return_type -- 0: dicts incl. 'peer id', 1: dicts without id,
                     2: compact entries (joined into a single string)
      rsize       -- maximum number of peers to hand out

    Returns a dict with 'complete', 'incomplete', 'interval' and 'peers'.
    The shuffled caches are consumed destructively from the tail, so
    successive announces receive different peers.
    """
    data = {}
    seeds = self.seedcount[infohash]
    data['complete'] = seeds
    # everything tracked for this torrent that is not a seed is a downloader
    data['incomplete'] = len(self.downloads[infohash]) - seeds
    # optional operator-supplied warning attached to an allowed torrent
    if self.config['tracker_allowed_controls'] and self.allowed[infohash].has_key('warning message'):
        data['warning message'] = self.allowed[infohash]['warning message']
    if tracker:
        # request from a peered tracker: serve from the tracker-to-tracker cache
        data['interval'] = self.config['tracker_multitracker_reannounce_interval']
        if not rsize:
            return data
        cache = self.cached_t.setdefault(infohash, None)
        # refresh when missing, too small, or older than the configured minimum
        if not cache or len(cache[1]) < rsize or cache[0] + self.config['tracker_min_time_between_cache_refreshes'] < clock():
            bc = self.becache.setdefault(infohash, [[{}, {}], [{}, {}], [{}, {}]])
            # cache entry = [timestamp, downloader peers + seed peers]
            cache = [clock(), bc[0][0].values() + bc[0][1].values()]
            self.cached_t[infohash] = cache
            shuffle(cache[1])
        cache = cache[1]
        # hand out (and remove) up to rsize peers from the shuffled tail
        data['peers'] = cache[-rsize:]
        del cache[-rsize:]
        return data
    data['interval'] = self.reannounce_interval
    if stopped or not rsize:
        # departing peer (or none requested): save the bandwidth
        data['peers'] = []
        return data
    # becache layout: one [downloaders, seeds] bucket pair per return_type format
    bc = self.becache.setdefault(infohash, [[{}, {}], [{}, {}], [{}, {}]])
    len_l = len(bc[0][0])   # downloaders available for responses
    len_s = len(bc[0][1])   # seeds available for responses
    if not len_l + len_s:
        # caches are empty, nothing to hand out
        data['peers'] = []
        return data
    # downloaders' proportional share of the response
    l_get_size = int(float(rsize) * len_l / (len_l + len_s))
    cache = self.cached.setdefault(infohash, [None, None, None])[return_type]
    # invalidate a cache that is empty, too small for this request, or stale
    if cache and (not cache[1] or is_seed and len(cache[1]) < rsize or len(cache[1]) < l_get_size or cache[0] + self.config['tracker_min_time_between_cache_refreshes'] < self.cachetime):
        cache = None
    if not cache:
        peers = self.downloads[infohash]
        # vv: peers harvested from peered trackers, built in all three formats
        vv = [[], [], []]
        for key, ip, port in self.t2tlist.harvest(infohash):   # empty if disabled
            if not peers.has_key(key):
                vv[0].append({'ip': ip, 'port': port, 'peer id': key})
                vv[1].append({'ip': ip, 'port': port})
                vv[2].append(compact_peer_info(ip, port))
        # cache entry = [timestamp, shuffled downloaders (+harvested), shuffled seeds]
        cache = [self.cachetime, bc[return_type][0].values() + vv[return_type], bc[return_type][1].values()]
        shuffle(cache[1])
        shuffle(cache[2])
        self.cached[infohash][return_type] = cache
        # best-effort: share the harvested peers with the other formats' caches
        for rr in xrange(len(self.cached[infohash])):
            if rr != return_type:
                try:
                    self.cached[infohash][rr][1].extend(vv[rr])
                except:
                    pass   # that format's cache slot may still be None
    if len(cache[1]) < l_get_size:
        # not enough downloaders cached: drain everything we have
        peerdata = cache[1]
        if not is_seed:
            peerdata.extend(cache[2])
        cache[1] = []
        cache[2] = []
    else:
        if not is_seed:
            # fill the non-downloader share with seeds, consumed from the tail
            peerdata = cache[2][l_get_size - rsize:]
            del cache[2][l_get_size - rsize:]
            rsize -= len(peerdata)
        else:
            # a seed gets no other seeds
            peerdata = []
        if rsize:
            peerdata.extend(cache[1][-rsize:])
            del cache[1][-rsize:]
    if return_type == 2:
        # compact format: concatenate the per-peer entries into one string
        peerdata = ''.join(peerdata)
    data['peers'] = peerdata
    return data
def __init__(self, config, rawserver):
    """Initialize tracker state: load the persisted state file, apply IP
    allow/ban lists, schedule periodic state saving and downloader expiry,
    and configure logging, multitracker peering and the allowed-torrents
    mode.

    config    -- dict of 'tracker_*' settings
    rawserver -- event loop providing add_task() scheduling
    """
    self.config = config
    self.response_size = config['tracker_response_size']
    self.dfile = config['tracker_dfile']   # persisted state file
    self.natcheck = config['tracker_nat_check']
    favicon = config['tracker_favicon']
    self.parse_dir_interval = config['tracker_parse_dir_interval']
    self.favicon = None
    if favicon:
        try:
            h = open(favicon, 'rb')
            self.favicon = h.read()
            h.close()
        except:
            # NOTE(review): any read error lands here, not only a missing file
            print '**warning** specified favicon file -- %s -- does not exist.' % favicon
    self.rawserver = rawserver
    self.cached = {}     # per-infohash response caches, one slot per return_type
    self.cached_t = {}   # per-infohash cache for multitracker (tracker) requests
    self.times = {}      # per-infohash, per-peer last-announce timestamps
    self.state = {}
    self.seedcount = {}  # per-infohash seed counts
    self.allowed_IPs = None
    self.banned_IPs = None
    if config['tracker_allowed_ips'] or config['tracker_banned_ips']:
        self.allowed_ip_mtime = 0
        self.banned_ip_mtime = 0
        self.read_ip_lists()
    self.only_local_override_ip = config['tracker_only_local_override_ip']
    if self.only_local_override_ip == 2:
        # "auto" mode: honor client-supplied ip= only when NAT checking is off
        self.only_local_override_ip = not config['tracker_nat_check']
    if exists(self.dfile):
        try:
            h = open(self.dfile, 'rb')
            if self.config['tracker_dfile_format'] == ITRACKDBFORMAT_BENCODE:
                ds = h.read()
                tempstate = bdecode(ds)
            else:
                tempstate = pickle.load(h)
            h.close()
            if not tempstate.has_key('peers'):
                # legacy layout: the whole file was the peers dict
                tempstate = {'peers': tempstate}
            statefiletemplate(tempstate)   # structural validation; raises if corrupt
            self.state = tempstate
        except:
            print '**warning** statefile ' + self.dfile + ' corrupt; resetting'
    self.downloads = self.state.setdefault('peers', {})
    self.completed = self.state.setdefault('completed', {})
    self.becache = {}
    for infohash, ds in self.downloads.items():
        self.seedcount[infohash] = 0
        for x, y in ds.items():
            ip = y['ip']
            # drop restored peers that no longer pass the IP allow/ban lists
            if self.allowed_IPs and not self.allowed_IPs.includes(ip) or self.banned_IPs and self.banned_IPs.includes(ip):
                del ds[x]
                continue
            if not y['left']:
                self.seedcount[infohash] += 1
            if y.get('nat', -1):
                # only peers recorded as NAT-check-OK (nat == 0) are re-cached
                continue
            gip = y.get('given_ip')
            if is_valid_ip(gip) and (not self.only_local_override_ip or local_IPs.includes(ip)):
                ip = gip
            self.natcheckOK(infohash, x, ip, y['port'], y['left'])
    # zero every announce timestamp: all restored peers must reannounce
    for x in self.downloads.keys():
        self.times[x] = {}
        for y in self.downloads[x].keys():
            self.times[x][y] = 0
    self.trackerid = createPeerID('-T-')
    seed(self.trackerid)   # seed the RNG used to shuffle peer lists
    self.reannounce_interval = config['tracker_reannounce_interval']
    self.save_dfile_interval = config['tracker_save_dfile_interval']
    self.show_names = config['tracker_show_names']
    rawserver.add_task(self.save_state, self.save_dfile_interval)
    self.prevtime = clock()
    self.timeout_downloaders_interval = config['tracker_timeout_downloaders_interval']
    rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval)
    self.logfile = None
    self.log = None
    if config['tracker_logfile'] and config['tracker_logfile'] != '-':
        try:
            self.logfile = config['tracker_logfile']
            self.log = open(self.logfile, 'a')
            # from here on, all print output becomes the tracker log
            sys.stdout = self.log
            print '# Log Started: ', isotime()
        except:
            print '**warning** could not redirect stdout to log file: ', sys.exc_info()[0]
    if config['tracker_hupmonitor']:
        # reopen the log on SIGHUP so external log rotation works
        def huphandler(signum, frame, self = self):
            try:
                self.log.close()
                self.log = open(self.logfile, 'a')
                sys.stdout = self.log
                print '# Log reopened: ', isotime()
            except:
                print '**warning** could not reopen logfile'
        signal.signal(signal.SIGHUP, huphandler)
    self.allow_get = config['tracker_allow_get']
    self.t2tlist = T2TList(config['tracker_multitracker_enabled'], self.trackerid, config['tracker_multitracker_reannounce_interval'], config['tracker_multitracker_maxpeers'], config['tracker_multitracker_http_timeout'], self.rawserver)
    # exactly one of three modes: explicit allowed list, allowed dir, or open tracker
    if config['tracker_allowed_list']:
        if config['tracker_allowed_dir']:
            print '**warning** allowed_dir and allowed_list options cannot be used together'
            print '**warning** disregarding allowed_dir'
            config['tracker_allowed_dir'] = ''
        self.allowed = self.state.setdefault('allowed_list', {})
        self.allowed_list_mtime = 0
        self.parse_allowed()
        self.remove_from_state('allowed', 'allowed_dir_files')
        if config['tracker_multitracker_allowed'] == ITRACKMULTI_ALLOW_AUTODETECT:
            config['tracker_multitracker_allowed'] = ITRACKMULTI_ALLOW_NONE
        config['tracker_allowed_controls'] = 0
    elif config['tracker_allowed_dir']:
        self.allowed = self.state.setdefault('allowed', {})
        self.allowed_dir_files = self.state.setdefault('allowed_dir_files', {})
        self.allowed_dir_blocked = {}
        self.parse_allowed()
        self.remove_from_state('allowed_list')
    else:
        # open tracker: no allowed-torrents restriction
        self.allowed = None
        self.remove_from_state('allowed', 'allowed_dir_files', 'allowed_list')
        if config['tracker_multitracker_allowed'] == ITRACKMULTI_ALLOW_AUTODETECT:
            config['tracker_multitracker_allowed'] = ITRACKMULTI_ALLOW_NONE
        config['tracker_allowed_controls'] = 0
    self.uq_broken = unquote('+') != ' '   # detect urllib unquote that mishandles '+'
    self.keep_dead = config['tracker_keep_dead']
    self.Filter = Filter(rawserver.add_task)
    aggregator = config['tracker_aggregator']
    if aggregator == 0:
        self.is_aggregator = False
        self.aggregator_key = None
    else:
        self.is_aggregator = True
        if aggregator == 1:
            self.aggregator_key = None
        else:
            # any other value is used as the aggregator key
            self.aggregator_key = aggregator
        self.natcheck = False   # aggregators do not NAT-check
    send = config['tracker_aggregate_forward']
    if not send:
        self.aggregate_forward = None
    else:
        try:
            # either a (url, password) pair ...
            self.aggregate_forward, self.aggregate_password = send
        except:
            # ... or a bare url with no password
            self.aggregate_forward = send
            self.aggregate_password = None
    self.cachetime = 0
    self.track_cachetimeupdate()
def __init__(self, config, rawserver):
    """Initialize tracker state (load persisted dfile, IP lists, periodic
    tasks, logging, multitracker peering, allowed-torrents mode).

    NOTE(review): this is a token-for-token duplicate of the __init__
    defined earlier in this file; if both live in the same class body the
    later definition silently wins.  Consider removing one copy.

    config    -- dict of 'tracker_*' settings
    rawserver -- event loop providing add_task() scheduling
    """
    self.config = config
    self.response_size = config['tracker_response_size']
    self.dfile = config['tracker_dfile']   # persisted state file
    self.natcheck = config['tracker_nat_check']
    favicon = config['tracker_favicon']
    self.parse_dir_interval = config['tracker_parse_dir_interval']
    self.favicon = None
    if favicon:
        try:
            h = open(favicon, 'rb')
            self.favicon = h.read()
            h.close()
        except:
            # NOTE(review): any read error lands here, not only a missing file
            print '**warning** specified favicon file -- %s -- does not exist.' % favicon
    self.rawserver = rawserver
    self.cached = {}     # per-infohash response caches, one slot per return_type
    self.cached_t = {}   # per-infohash cache for multitracker (tracker) requests
    self.times = {}      # per-infohash, per-peer last-announce timestamps
    self.state = {}
    self.seedcount = {}  # per-infohash seed counts
    self.allowed_IPs = None
    self.banned_IPs = None
    if config['tracker_allowed_ips'] or config['tracker_banned_ips']:
        self.allowed_ip_mtime = 0
        self.banned_ip_mtime = 0
        self.read_ip_lists()
    self.only_local_override_ip = config['tracker_only_local_override_ip']
    if self.only_local_override_ip == 2:
        # "auto" mode: honor client-supplied ip= only when NAT checking is off
        self.only_local_override_ip = not config['tracker_nat_check']
    if exists(self.dfile):
        try:
            h = open(self.dfile, 'rb')
            if self.config['tracker_dfile_format'] == ITRACKDBFORMAT_BENCODE:
                ds = h.read()
                tempstate = bdecode(ds)
            else:
                tempstate = pickle.load(h)
            h.close()
            if not tempstate.has_key('peers'):
                # legacy layout: the whole file was the peers dict
                tempstate = {'peers': tempstate}
            statefiletemplate(tempstate)   # structural validation; raises if corrupt
            self.state = tempstate
        except:
            print '**warning** statefile ' + self.dfile + ' corrupt; resetting'
    self.downloads = self.state.setdefault('peers', {})
    self.completed = self.state.setdefault('completed', {})
    self.becache = {}
    for infohash, ds in self.downloads.items():
        self.seedcount[infohash] = 0
        for x, y in ds.items():
            ip = y['ip']
            # drop restored peers that no longer pass the IP allow/ban lists
            if self.allowed_IPs and not self.allowed_IPs.includes(ip) or self.banned_IPs and self.banned_IPs.includes(ip):
                del ds[x]
                continue
            if not y['left']:
                self.seedcount[infohash] += 1
            if y.get('nat', -1):
                # only peers recorded as NAT-check-OK (nat == 0) are re-cached
                continue
            gip = y.get('given_ip')
            if is_valid_ip(gip) and (not self.only_local_override_ip or local_IPs.includes(ip)):
                ip = gip
            self.natcheckOK(infohash, x, ip, y['port'], y['left'])
    # zero every announce timestamp: all restored peers must reannounce
    for x in self.downloads.keys():
        self.times[x] = {}
        for y in self.downloads[x].keys():
            self.times[x][y] = 0
    self.trackerid = createPeerID('-T-')
    seed(self.trackerid)   # seed the RNG used to shuffle peer lists
    self.reannounce_interval = config['tracker_reannounce_interval']
    self.save_dfile_interval = config['tracker_save_dfile_interval']
    self.show_names = config['tracker_show_names']
    rawserver.add_task(self.save_state, self.save_dfile_interval)
    self.prevtime = clock()
    self.timeout_downloaders_interval = config['tracker_timeout_downloaders_interval']
    rawserver.add_task(self.expire_downloaders, self.timeout_downloaders_interval)
    self.logfile = None
    self.log = None
    if config['tracker_logfile'] and config['tracker_logfile'] != '-':
        try:
            self.logfile = config['tracker_logfile']
            self.log = open(self.logfile, 'a')
            # from here on, all print output becomes the tracker log
            sys.stdout = self.log
            print '# Log Started: ', isotime()
        except:
            print '**warning** could not redirect stdout to log file: ', sys.exc_info()[0]
    if config['tracker_hupmonitor']:
        # reopen the log on SIGHUP so external log rotation works
        def huphandler(signum, frame, self=self):
            try:
                self.log.close()
                self.log = open(self.logfile, 'a')
                sys.stdout = self.log
                print '# Log reopened: ', isotime()
            except:
                print '**warning** could not reopen logfile'
        signal.signal(signal.SIGHUP, huphandler)
    self.allow_get = config['tracker_allow_get']
    self.t2tlist = T2TList(config['tracker_multitracker_enabled'], self.trackerid, config['tracker_multitracker_reannounce_interval'], config['tracker_multitracker_maxpeers'], config['tracker_multitracker_http_timeout'], self.rawserver)
    # exactly one of three modes: explicit allowed list, allowed dir, or open tracker
    if config['tracker_allowed_list']:
        if config['tracker_allowed_dir']:
            print '**warning** allowed_dir and allowed_list options cannot be used together'
            print '**warning** disregarding allowed_dir'
            config['tracker_allowed_dir'] = ''
        self.allowed = self.state.setdefault('allowed_list', {})
        self.allowed_list_mtime = 0
        self.parse_allowed()
        self.remove_from_state('allowed', 'allowed_dir_files')
        if config['tracker_multitracker_allowed'] == ITRACKMULTI_ALLOW_AUTODETECT:
            config['tracker_multitracker_allowed'] = ITRACKMULTI_ALLOW_NONE
        config['tracker_allowed_controls'] = 0
    elif config['tracker_allowed_dir']:
        self.allowed = self.state.setdefault('allowed', {})
        self.allowed_dir_files = self.state.setdefault('allowed_dir_files', {})
        self.allowed_dir_blocked = {}
        self.parse_allowed()
        self.remove_from_state('allowed_list')
    else:
        # open tracker: no allowed-torrents restriction
        self.allowed = None
        self.remove_from_state('allowed', 'allowed_dir_files', 'allowed_list')
        if config['tracker_multitracker_allowed'] == ITRACKMULTI_ALLOW_AUTODETECT:
            config['tracker_multitracker_allowed'] = ITRACKMULTI_ALLOW_NONE
        config['tracker_allowed_controls'] = 0
    self.uq_broken = unquote('+') != ' '   # detect urllib unquote that mishandles '+'
    self.keep_dead = config['tracker_keep_dead']
    self.Filter = Filter(rawserver.add_task)
    aggregator = config['tracker_aggregator']
    if aggregator == 0:
        self.is_aggregator = False
        self.aggregator_key = None
    else:
        self.is_aggregator = True
        if aggregator == 1:
            self.aggregator_key = None
        else:
            # any other value is used as the aggregator key
            self.aggregator_key = aggregator
        self.natcheck = False   # aggregators do not NAT-check
    send = config['tracker_aggregate_forward']
    if not send:
        self.aggregate_forward = None
    else:
        try:
            # either a (url, password) pair ...
            self.aggregate_forward, self.aggregate_password = send
        except:
            # ... or a bare url with no password
            self.aggregate_forward = send
            self.aggregate_password = None
    self.cachetime = 0
    self.track_cachetimeupdate()
def peerlist(self, infohash, stopped, tracker, is_seed, return_type, rsize):
    """Build the announce response (peer list plus counters) for one torrent.

    NOTE(review): this is a token-for-token duplicate of the peerlist
    defined earlier in this file (only the original line wrapping
    differed); if both are methods of the same class, the later one wins.

    infohash/stopped/tracker/is_seed/return_type/rsize: see the earlier copy.
    Returns a dict with 'complete', 'incomplete', 'interval' and 'peers';
    peers are drained destructively from shuffled caches.
    """
    data = {}
    seeds = self.seedcount[infohash]
    data['complete'] = seeds
    # non-seed entries in self.downloads are the incomplete peers
    data['incomplete'] = len(self.downloads[infohash]) - seeds
    # optional operator-supplied warning for an allowed torrent
    if self.config['tracker_allowed_controls'] and self.allowed[infohash].has_key('warning message'):
        data['warning message'] = self.allowed[infohash]['warning message']
    if tracker:
        # multitracker peer request: served from the tracker-to-tracker cache
        data['interval'] = self.config['tracker_multitracker_reannounce_interval']
        if not rsize:
            return data
        cache = self.cached_t.setdefault(infohash, None)
        # rebuild when missing, too small, or past the refresh interval
        if not cache or len(cache[1]) < rsize or cache[0] + self.config['tracker_min_time_between_cache_refreshes'] < clock():
            bc = self.becache.setdefault(infohash, [[{}, {}], [{}, {}], [{}, {}]])
            # [timestamp, downloaders + seeds]
            cache = [clock(), bc[0][0].values() + bc[0][1].values()]
            self.cached_t[infohash] = cache
            shuffle(cache[1])
        cache = cache[1]
        # serve (and consume) up to rsize peers from the tail
        data['peers'] = cache[-rsize:]
        del cache[-rsize:]
        return data
    data['interval'] = self.reannounce_interval
    if stopped or not rsize:
        data['peers'] = []
        return data
    # becache: per return_type format, a [downloaders, seeds] bucket pair
    bc = self.becache.setdefault(infohash, [[{}, {}], [{}, {}], [{}, {}]])
    len_l = len(bc[0][0])   # downloaders
    len_s = len(bc[0][1])   # seeds
    if not len_l + len_s:
        data['peers'] = []
        return data
    # downloaders' proportional share of the response size
    l_get_size = int(float(rsize) * len_l / (len_l + len_s))
    cache = self.cached.setdefault(infohash, [None, None, None])[return_type]
    # invalidate empty / undersized / stale caches
    if cache and (not cache[1] or is_seed and len(cache[1]) < rsize or len(cache[1]) < l_get_size or cache[0] + self.config['tracker_min_time_between_cache_refreshes'] < self.cachetime):
        cache = None
    if not cache:
        peers = self.downloads[infohash]
        # peers harvested from peered trackers, in all three formats
        vv = [[], [], []]
        for key, ip, port in self.t2tlist.harvest(infohash):   # empty if disabled
            if not peers.has_key(key):
                vv[0].append({'ip': ip, 'port': port, 'peer id': key})
                vv[1].append({'ip': ip, 'port': port})
                vv[2].append(compact_peer_info(ip, port))
        # [timestamp, shuffled downloaders (+harvested), shuffled seeds]
        cache = [self.cachetime, bc[return_type][0].values() + vv[return_type], bc[return_type][1].values()]
        shuffle(cache[1])
        shuffle(cache[2])
        self.cached[infohash][return_type] = cache
        # best-effort propagation of harvested peers to the other format caches
        for rr in xrange(len(self.cached[infohash])):
            if rr != return_type:
                try:
                    self.cached[infohash][rr][1].extend(vv[rr])
                except:
                    pass   # other slot may still be None
    if len(cache[1]) < l_get_size:
        # too few downloaders: drain both buckets entirely
        peerdata = cache[1]
        if not is_seed:
            peerdata.extend(cache[2])
        cache[1] = []
        cache[2] = []
    else:
        if not is_seed:
            # seeds fill the remaining (rsize - l_get_size) slots
            peerdata = cache[2][l_get_size - rsize:]
            del cache[2][l_get_size - rsize:]
            rsize -= len(peerdata)
        else:
            peerdata = []
        if rsize:
            peerdata.extend(cache[1][-rsize:])
            del cache[1][-rsize:]
    if return_type == 2:
        # compact: one concatenated string instead of a list
        peerdata = ''.join(peerdata)
    data['peers'] = peerdata
    return data
def add_data(self, infohash, event, ip, paramslist):
    """Process one announce for one torrent: validate parameters, then
    create, update or delete the peer record for this peer id, keeping
    the completed/seed counters and the becache response buckets in sync
    and (re)starting NAT checks when a peer's address changes.

    infohash   -- torrent id
    event      -- 'started' | 'completed' | 'stopped' | 'snooped' | None
    ip         -- source IP of the connection
    paramslist -- parsed query parameters: {key: [value, ...]}

    Returns rsize, the number of peers the caller should include in the
    response.  Raises ValueError on malformed announce parameters.
    """
    peers = self.downloads.setdefault(infohash, {})
    ts = self.times.setdefault(infohash, {})
    self.completed.setdefault(infohash, 0)
    self.seedcount.setdefault(infohash, 0)

    def params(key, default=None, l=paramslist):
        # first value for key, or default when absent
        if l.has_key(key):
            return l[key][0]
        return default

    myid = params('peer_id', '')
    if len(myid) != 20:
        raise ValueError, 'id not of length 20'
    if event not in ('started', 'completed', 'stopped', 'snooped', None):
        raise ValueError, 'invalid event'
    port = long(params('port', ''))
    if port < 0 or port > 65535:
        raise ValueError, 'invalid port'
    left = long(params('left', ''))
    if left < 0:
        raise ValueError, 'invalid amount left'
    # values unused below, but long('') raises ValueError, so these two
    # lines reject announces that omit uploaded/downloaded
    uploaded = long(params('uploaded', ''))
    downloaded = long(params('downloaded', ''))
    peer = peers.get(myid)
    islocal = local_IPs.includes(ip)
    mykey = params('key')
    if peer:
        # an update authenticates via matching key, or same source IP
        auth = peer.get('key', -1) == mykey or peer.get('ip') == ip
    else:
        auth = None
    gip = params('ip')   # IP address the client claims for itself
    # honor the claimed IP only from local peers, or when override is allowed
    if is_valid_ip(gip) and (islocal or not self.only_local_override_ip):
        ip1 = gip
    else:
        ip1 = ip
    if params('numwant') is not None:
        rsize = min(int(params('numwant')), self.response_size)
    else:
        rsize = self.response_size
    if DEBUG:
        log('itracker::add_data: infohash', infohash, 'event', event, 'ip', ip, 'gip', gip, 'port', port, 'myid', myid, 'mykey', mykey, 'auth', auth)
    if event == 'stopped':
        # departing peer: remove it, but only if the announce authenticates
        if peer:
            if auth:
                if DEBUG:
                    log('itracker::add_data: delete peer: infohash', infohash, 'myid', myid)
                self.delete_peer(infohash, myid)
    elif not peer:
        # first announce from this peer id: create the record
        ts[myid] = clock()
        peer = {'ip': ip, 'port': port, 'left': left}
        if mykey:
            peer['key'] = mykey
        if gip:
            peer['given ip'] = gip
        if port:
            if not self.natcheck or islocal:
                # NAT checking disabled (or local peer): accept immediately
                peer['nat'] = 0
                self.natcheckOK(infohash, myid, ip1, port, left)
            else:
                # asynchronous connect-back test; result arrives via callback
                NatCheck(self.connectback_result, infohash, myid, ip1, port, self.rawserver)
        else:
            # no port announced: mark as unreachable (large sentinel value)
            peer['nat'] = 1073741824
        if event == 'completed':
            self.completed[infohash] += 1
        if not left:
            self.seedcount[infohash] += 1
        if DEBUG:
            log('itracker::add_data: add new peer: myid', myid, 'peer', peer)
        peers[myid] = peer
    else:
        # update of an existing peer record
        if not auth:
            return rsize   # silently ignore unauthenticated updates
        ts[myid] = clock()
        if not left and peer['left']:
            # downloader -> seed transition
            self.completed[infohash] += 1
            self.seedcount[infohash] += 1
            if not peer.get('nat', -1):
                # move becache entries from downloader bucket [0] to seed bucket [1]
                for bc in self.becache[infohash]:
                    bc[1][myid] = bc[0][myid]
                    del bc[0][myid]
        if peer['left']:
            # NOTE(review): 'left' is only updated while non-zero, so a peer
            # recorded as seed never reverts to downloader here
            peer['left'] = left
        if port:
            recheck = False
            if ip != peer['ip']:
                peer['ip'] = ip
                recheck = True
            if gip != peer.get('given ip'):
                if gip:
                    peer['given ip'] = gip
                elif peer.has_key('given ip'):
                    del peer['given ip']
                recheck = True
            natted = peer.get('nat', -1)
            if recheck:
                if natted == 0:
                    # address changed: purge this peer's becache entries
                    l = self.becache[infohash]
                    y = not peer['left']   # False = downloader bucket, True = seed bucket
                    for x in l:
                        try:
                            del x[y][myid]
                        except KeyError:
                            pass
                if not self.natcheck or islocal:
                    # clear NAT status before re-accepting below
                    # NOTE(review): raises KeyError if 'nat' is absent — confirm intended
                    del peer['nat']
            if natted and natted < self.natcheck:
                # presumably 'nat' counts failed checks; retry while below the limit — confirm
                recheck = True
            if recheck:
                if not self.natcheck or islocal:
                    peer['nat'] = 0
                    self.natcheckOK(infohash, myid, ip1, port, left)
                else:
                    NatCheck(self.connectback_result, infohash, myid, ip1, port, self.rawserver)
    return rsize