def __init__(self, downloader, connection):
    # 2fastbt_
    SingleDownloadHelperInterface.__init__(self)
    # _2fastbt
    self.downloader = downloader
    self.connection = connection
    self.choked = True
    self.interested = False
    self.active_requests = []
    self.measure = Measure(downloader.max_rate_period)
    self.peermeasure = Measure(downloader.max_rate_period)
    self.have = Bitfield(downloader.numpieces)
    self.last = -1000
    self.last2 = -1000
    self.example_interest = None
    self.backlog = 2
    self.ip = connection.get_ip()
    self.guard = BadDataGuard(self)
    # 2fastbt_
    self.helper = downloader.picker.helper
    self.proxy_have = Bitfield(downloader.numpieces)
    # _2fastbt

    # boudewijn: VOD needs a download measurement that is not
    # averaged over a 'long' period. downloader.max_rate_period is
    # (by default) 20 seconds because this matches the unchoke
    # policy.
    self.short_term_measure = Measure(5)

    # boudewijn: each download maintains a counter for the number
    # of high priority piece requests that did not get any
    # response within x seconds.
    self.bad_performance_counter = 0
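# The constructor above keeps several Measure instances: two over
# downloader.max_rate_period (20 seconds by default, matching the unchoke policy)
# and a 5-second short-term one for VOD. The real Measure class is not part of
# this listing; the class below is only a minimal sketch of such a sliding-window
# byte-rate estimator, with illustrative names.
import time

class WindowRateMeasureSketch:
    def __init__(self, window):
        self.window = window           # averaging period in seconds
        self.samples = []              # (timestamp, nbytes) pairs

    def update_rate(self, nbytes):
        now = time.time()
        self.samples.append((now, nbytes))
        # forget samples that fell out of the averaging window
        self.samples = [(t, n) for (t, n) in self.samples if now - t <= self.window]

    def get_rate(self):
        now = time.time()
        self.samples = [(t, n) for (t, n) in self.samples if now - t <= self.window]
        # bytes per second over the window (approximate while the window fills up)
        return sum(n for (t, n) in self.samples) / float(self.window)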
def __init__(self, connection, ratelimiter, totalup, choker, storage, picker, config,
             is_supporter_seed=False):  # SmoothIT: supporter seed
    self.connection = connection
    self.ratelimiter = ratelimiter
    self.totalup = totalup
    self.choker = choker
    self.storage = storage
    self.picker = picker
    self.config = config
    self.max_slice_length = config['max_slice_length']
    self.choked = True
    self.cleared = True
    self.interested = False
    self.super_seeding = False
    self.buffer = []
    self.measure = Measure(config['max_rate_period'], config['upload_rate_fudge'])
    self.was_ever_interested = False
    if storage.get_amount_left() == 0:
        if choker.super_seed:
            self.super_seeding = True   # flag, and don't send bitfield
            self.seed_have_list = []    # set from piecepicker
            self.skipped_count = 0
        else:
            if config['breakup_seed_bitfield']:
                bitfield, msgs = storage.get_have_list_cloaked()
                connection.send_bitfield(bitfield)
                for have in msgs:
                    connection.send_have(have)
            else:
                connection.send_bitfield(storage.get_have_list())
    else:
        if storage.do_I_have_anything():
            connection.send_bitfield(storage.get_have_list())
    self.piecedl = None
    self.piecebuf = None
    # Merkle
    self.hashlist = []
    # SmoothIT_
    self.is_supporter_seed = is_supporter_seed
    print >>sys.stderr, "Uploader: am_supporter_seed=%s" % self.is_supporter_seed
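# The breakup_seed_bitfield branch above relies on storage.get_have_list_cloaked(),
# which is not included in this listing. The sketch below only illustrates the idea
# suggested by the call site: advertise an incomplete bitfield and deliver the
# withheld pieces afterwards as individual HAVE messages, so a fresh peer does not
# immediately see a full seed. The withholding policy here is an assumption, not
# the real implementation.
import random

def get_have_list_cloaked_sketch(have_flags, withhold=4):
    """have_flags: list of booleans, one per piece (sketch input, not a real Bitfield)."""
    owned = [i for i, flag in enumerate(have_flags) if flag]
    hidden = set(random.sample(owned, min(withhold, len(owned))))
    cloaked = [flag and (i not in hidden) for i, flag in enumerate(have_flags)]
    return cloaked, sorted(hidden)

# e.g. cloaked, haves = get_have_list_cloaked_sketch([True] * 8, withhold=2)
# 'cloaked' hides two random pieces; 'haves' lists their indices, to be announced
# afterwards one by one (the constructor above does connection.send_have(have)).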
def __init__(self, infohash, storage, picker, backlog, max_rate_period, numpieces,
             chunksize, measurefunc, snub_time, kickbans_ok, kickfunc, banfunc,
             scheduler=None, supporter_ips=[]):
    self.supporter_ips = supporter_ips
    self.infohash = infohash
    self.b64_infohash = b64encode(infohash)
    self.storage = storage
    self.picker = picker
    self.backlog = backlog
    self.max_rate_period = max_rate_period
    self.measurefunc = measurefunc
    self.totalmeasure = Measure(max_rate_period*storage.piece_length/storage.request_size)
    self.numpieces = numpieces
    self.chunksize = chunksize
    self.snub_time = snub_time
    self.kickfunc = kickfunc
    self.banfunc = banfunc
    self.disconnectedseeds = {}
    self.downloads = []
    self.perip = {}
    self.gotbaddata = {}
    self.kicked = {}
    self.banned = {}
    self.kickbans_ok = kickbans_ok
    self.kickbans_halted = False
    self.super_seeding = False
    self.endgamemode = False
    self.endgame_queued_pieces = []
    self.all_requests = []
    self.discarded = 0L
    self.download_rate = 0
    # self.download_rate = 25000  # 25K/s test rate
    self.bytes_requested = 0
    self.last_time = clock()
    self.queued_out = {}
    self.requeueing = False
    self.paused = False
    self.scheduler = scheduler

    # SmoothIT_
    self.logger = logging.getLogger("Tribler.Downloader")
    # _SmoothIT

    # hack: we should not import this since it is not part of the
    # core nor should we import here, but otherwise we will get
    # import errors
    #
    # _event_reporter stores events that are logged somewhere...
    from BaseLib.Core.Statistics.StatusReporter import get_reporter_instance
    self._event_reporter = get_reporter_instance()

    # check periodically
    self.scheduler(self.dlr_periodic_check, 1)
    self.support_required = True
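# The constructor above schedules self.dlr_periodic_check once per second and keeps
# download_rate, bytes_requested, last_time and queued_out, which suggests a simple
# byte-budget style download limiter. dlr_periodic_check itself is not part of this
# listing; the function below is only an illustrative sketch of such a periodic
# budget check, not the actual Tribler implementation.
def periodic_rate_check_sketch(state, clock, reschedule, interval=1):
    """state needs: download_rate (bytes/s, 0 = unlimited), bytes_requested, last_time."""
    now = clock()
    elapsed = now - state.last_time
    state.last_time = now
    if state.download_rate > 0:
        # budget earned since the previous check is spent against outstanding requests
        budget = state.download_rate * elapsed
        state.bytes_requested = max(0, state.bytes_requested - budget)
    else:
        # unlimited rate: nothing is ever held back
        state.bytes_requested = 0
    # re-arm, mirroring scheduler(callback, delay) as used above
    reschedule(lambda: periodic_rate_check_sketch(state, clock, reschedule, interval),
               interval)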
def __init__(self, downloader, connection):
    # 2fastbt_
    SingleDownloadHelperInterface.__init__(self)
    # _2fastbt

    # SmoothIT_
    self.logger = logging.getLogger("Tribler.SingleDownload")
    self.support_required = True
    # _SmoothIT

    self.downloader = downloader
    self.connection = connection
    self.choked = True
    self.interested = False
    self.active_requests = []
    self.measure = Measure(downloader.max_rate_period)
    self.peermeasure = Measure(downloader.max_rate_period)
    self.have = Bitfield(downloader.numpieces)
    self.last = -1000
    self.last2 = -1000
    self.example_interest = None
    self.backlog = 2
    self.ip = connection.get_ip()
    self.guard = BadDataGuard(self)
    # 2fastbt_
    self.helper = downloader.picker.helper
    # _2fastbt

    # boudewijn: VOD needs a download measurement that is not
    # averaged over a 'long' period. downloader.max_rate_period is
    # (by default) 20 seconds because this matches the unchoke
    # policy.
    self.short_term_measure = Measure(5)

    # boudewijn: each download maintains a counter for the number
    # of high priority piece requests that did not get any
    # response within x seconds.
    self.bad_performance_counter = 0

    # SmoothIT_: collect block stats
    # hold statistics of received blocks, format:
    #   (piece_index, block_offset, block_size, sender_ip, sender_port, sender_id)
    self.block_stats = []
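# Each received block is meant to be recorded in self.block_stats in the field
# order spelled out by the comment above. A hypothetical helper showing one such
# record being appended (the helper name is not from the original code):
def record_block_sketch(block_stats, piece_index, block_offset, block_size,
                        sender_ip, sender_port, sender_id):
    block_stats.append((piece_index, block_offset, block_size,
                        sender_ip, sender_port, sender_id))

# e.g. record_block_sketch(self.block_stats, 17, 16384, 16384,
#                          "10.0.0.5", 6881, peer_id)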
def __init__(self, downloader, url):
    # 2fastbt_
    SingleDownloadHelperInterface.__init__(self)
    # _2fastbt
    self.downloader = downloader
    self.baseurl = url
    try:
        (scheme, self.netloc, path, pars, query, fragment) = urlparse(url)
    except:
        self.downloader.errorfunc('cannot parse http seed address: ' + url)
        return
    if scheme != 'http':
        self.downloader.errorfunc('http seed url not http: ' + url)
        return
    try:
        self.connection = HTTPConnection(self.netloc)
    except:
        self.downloader.errorfunc('cannot connect to http seed: ' + url)
        return
    self.seedurl = path
    if pars:
        self.seedurl += ';' + pars
    self.seedurl += '?'
    if query:
        self.seedurl += query + '&'
    self.seedurl += 'info_hash=' + quote(self.downloader.infohash)
    self.measure = Measure(downloader.max_rate_period)
    self.index = None
    self.url = ''
    self.requests = []
    self.request_size = 0
    self.endflag = False
    self.error = None
    self.retry_period = 30
    self._retry_period = None
    self.errorcount = 0
    self.goodseed = False
    self.active = False
    self.cancelled = False
    self.resched(randint(2, 10))
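# The URL assembly above splits the HTTP seed address with urlparse() and re-appends
# params and query before adding the URL-quoted info_hash. A self-contained trace of
# the same logic, with a made-up seed address and hash (illustrative values only):
from urlparse import urlparse      # Python 2 module, as in the surrounding code
from urllib import quote

def build_seedurl_sketch(url, infohash):
    scheme, netloc, path, pars, query, fragment = urlparse(url)
    seedurl = path
    if pars:
        seedurl += ';' + pars
    seedurl += '?'
    if query:
        seedurl += query + '&'
    seedurl += 'info_hash=' + quote(infohash)
    return seedurl

# build_seedurl_sketch('http://seed.example.org/stream.avi;v=1?auth=abc', '\xab' * 20)
# -> '/stream.avi;v=1?auth=abc&info_hash=%AB%AB%AB...' (20-byte infohash URL-quoted)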
class SVCTransporter(MovieOnDemandTransporter):
    """ Takes care of providing a bytestream interface based on the available pieces. """

    # seconds to prebuffer if bitrate is known (always for SVC)
    PREBUF_SEC_VOD = 10

    # max number of seconds in queue to player
    # Arno: < 2008-07-15: St*pid vlc apparently can't handle lots of data pushed to it
    # Arno: 2008-07-15: 0.8.6h apparently can
    BUFFER_TIME = 5.0

    # polling interval to refill buffer
    #REFILL_INTERVAL = BUFFER_TIME * 0.75
    # Arno: there's no guarantee we got enough (=BUFFER_TIME secs worth) to write to output bug!
    REFILL_INTERVAL = 0.1

    # amount of time (seconds) to push a packet into
    # the player queue ahead of schedule
    VLC_BUFFER_SIZE = 0
    PIECE_DUE_SKEW = 0.1 + VLC_BUFFER_SIZE

    # Arno: If we don't know playtime and FFMPEG gave no decent bitrate, this is the minimum
    # bitrate (in KByte/s) that the playback bitrate-estimator must have to make us
    # set the bitrate in movieselector.
    MINPLAYBACKRATE = 32*1024

    # maximum delay between pops before we force a restart (seconds)
    MAX_POP_TIME = 60

    def __init__(self, bt1download, videostatus, videoinfo, videoanalyserpath, vodeventfunc):
        # dirty hack to get the Tribler Session
        from BaseLib.Core.Session import Session
        session = Session.get_instance()

        if session.get_overlay():
            # see comment in else section on importing...
            from BaseLib.Core.CacheDB.SqliteVideoPlaybackStatsCacheDB import VideoPlaybackDBHandler
            self._playback_stats = VideoPlaybackDBHandler.get_instance()
        else:
            # hack: we should not import this since it is not part of
            # the core nor should we import here, but otherwise we
            # will get import errors
            from BaseLib.Player.Reporter import VideoPlaybackReporter
            self._playback_stats = VideoPlaybackReporter.get_instance()

        # add an event to indicate that the user wants playback to
        # start
        def set_nat(nat):
            self._playback_stats.add_event(self._playback_key, "nat:%s" % nat)
        self._playback_key = base64.b64encode(os.urandom(20))
        self._playback_stats.add_event(self._playback_key, "play-init")
        self._playback_stats.add_event(self._playback_key, "piece-size:%d" % videostatus.piecelen)
        self._playback_stats.add_event(self._playback_key, "num-pieces:%d" % videostatus.movie_numpieces)
        self._playback_stats.add_event(self._playback_key, "bitrate:%d" % videostatus.bitrate)
        self._playback_stats.add_event(self._playback_key, "nat:%s" % session.get_nat_type(callback=set_nat))

        self._complete = False
        self.videoinfo = videoinfo
        self.bt1download = bt1download
        self.piecepicker = bt1download.picker
        self.rawserver = bt1download.rawserver
        self.storagewrapper = bt1download.storagewrapper
        self.fileselector = bt1download.fileselector

        self.vodeventfunc = vodeventfunc
        self.videostatus = vs = videostatus

        # Add quotes around path, as that's what os.popen() wants on win32
        if sys.platform == "win32" and videoanalyserpath is not None and videoanalyserpath.find(' ') != -1:
            self.video_analyser_path = '"' + videoanalyserpath + '"'
        else:
            self.video_analyser_path = videoanalyserpath

        # counter for the sustainable() call. Every X calls the
        # buffer-percentage is updated.
        self.sustainable_counter = sys.maxint

        # boudewijn: because we now update the downloadrate for each
        # received chunk instead of each piece we do not need to
        # average the measurement over a 'long' period of time. Also,
        # we only update the downloadrate for pieces that are in the
        # high priority range giving us a better estimation on how
        # likely the pieces will be available on time.
self.overall_rate = Measure(10) self.high_range_rate = Measure(2) # buffer: a link to the piecepicker buffer self.has = self.piecepicker.has # number of pieces in buffer self.pieces_in_buffer = 0 self.data_ready = Condition() # Arno: Call FFMPEG only if the torrent did not provide the # bitrate and video dimensions. This is becasue FFMPEG # sometimes hangs e.g. Ivaylo's Xvid Finland AVI, for unknown # reasons # Arno: 2007-01-06: Since we use VideoLan player, videodimensions not important assert vs.bitrate_set self.doing_ffmpeg_analysis = False self.doing_bitrate_est = False self.videodim = None #self.movieselector.videodim self.player_opened_with_width_height = False self.ffmpeg_est_bitrate = None prebufsecs = self.PREBUF_SEC_VOD # assumes first piece is whole (first_piecelen == piecelen) piecesneeded = vs.time_to_pieces( prebufsecs ) bytesneeded = piecesneeded * vs.piecelen self.max_prebuf_packets = min(vs.movie_numpieces, piecesneeded) if self.doing_ffmpeg_analysis and DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: Want",self.max_prebuf_packets,"pieces for FFMPEG analysis, piecesize",vs.piecelen if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: Want",self.max_prebuf_packets,"pieces for prebuffering" self.nreceived = 0 if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: Setting MIME type to",self.videoinfo['mimetype'] self.set_mimetype(self.videoinfo['mimetype']) # some statistics self.stat_playedpieces = 0 # number of pieces played successfully self.stat_latepieces = 0 # number of pieces that arrived too late self.stat_droppedpieces = 0 # number of pieces dropped self.stat_stalltime = 0.0 # total amount of time the video was stalled self.stat_prebuffertime = 0.0 # amount of prebuffer time used self.stat_pieces = PieceStats() # information about each piece # start periodic tasks self.curpiece = "" self.curpiece_pos = 0 # The outbuf keeps only the pieces from the base layer.. We play if we # have at least a piece from the base layer! self.outbuf = [] #self.last_pop = None # time of last pop self.reset_bitrate_prediction() self.lasttime=0 # For DownloadState self.prebufprogress = 0.0 self.prebufstart = time.time() self.playable = False self.usernotified = False self.outbuflen = None # LIVESOURCEAUTH self.authenticator = None self.refill_rawserv_tasker() self.tick_second() # link to others (last thing to do) self.piecepicker.set_transporter( self ) #self.start() if FAKEPLAYBACK: import threading class FakeReader(threading.Thread): def __init__(self,movie): threading.Thread.__init__(self) self.movie = movie def run(self): self.movie.start() while not self.movie.done(): self.movie.read() t = FakeReader(self) t.start() #self.rawserver.add_task( fakereader, 0.0 ) def parse_video(self): """ Feeds the first max_prebuf_packets to ffmpeg to determine video bitrate. """ vs = self.videostatus width = None height = None # Start ffmpeg, let it write to a temporary file to prevent # blocking problems on Win32 when FFMPEG outputs lots of # (error) messages. # [loghandle,logfilename] = mkstemp() os.close(loghandle) if sys.platform == "win32": # Not "Nul:" but "nul" is /dev/null on Win32 sink = 'nul' else: sink = '/dev/null' # DON'T FORGET 'b' OTHERWISE WE'RE WRITING BINARY DATA IN TEXT MODE! 
(child_out,child_in) = os.popen2( "%s -y -i - -vcodec copy -acodec copy -f avi %s > %s 2>&1" % (self.video_analyser_path, sink, logfilename), 'b' ) """ # If the path is "C:\Program Files\bla\bla" (escaping left out) and that file does not exist # the output will say something cryptic like "vod: trans: FFMPEG said C:\Program" suggesting an # error with the double quotes around the command, but that's not it. Be warned! cmd = self.video_analyser_path+' -y -i - -vcodec copy -acodec copy -f avi '+sink+' > '+logfilename+' 2>&1' print >>sys.stderr,time.asctime(),'-', "vod: trans: Video analyser command is",cmd (child_out,child_in) = os.popen2(cmd,'b') # DON'T FORGET 'b' OTHERWISE THINGS GO WRONG! """ # feed all the pieces download_range = vs.download_range() # We get the bitrate from the base layer and determine the rest based on this first, last = download_range[0] for i in xrange(first,last): piece = self.get_piece( i ) if piece is None: break try: child_out.write( piece ) except IOError: print_exc(file=sys.stderr) break child_out.close() child_in.close() logfile = open(logfilename, 'r') # find the bitrate in the output bitrate = None r = re.compile( "bitrate= *([0-9.]+)kbits/s" ) r2 = re.compile( "Video:.* ([0-9]+x[0-9]+)," ) # video dimensions WIDTHxHEIGHT founddim = False for x in logfile.readlines(): if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: FFMPEG said:",x occ = r.findall( x ) if occ: # use the latest mentioning of bitrate bitrate = float( occ[-1] ) * 1024 / 8 if DEBUG: if bitrate is not None: print >>sys.stderr,time.asctime(),'-', "vod: trans: Bitrate according to FFMPEG: %.2f KByte/s" % (bitrate/1024) else: print >>sys.stderr,time.asctime(),'-', "vod: trans: Bitrate could not be determined by FFMPEG" occ = r2.findall( x ) if occ and not founddim: # use first occurence dim = occ[0] idx = dim.find('x') width = int(dim[:idx]) height = int(dim[idx+1:]) founddim = True if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: width",width,"heigth",height logfile.close() try: os.remove(logfilename) except: pass return [bitrate,width,height] def update_prebuffering(self,received_piece=None): """ Update prebuffering process. 'received_piece' is a hint that we just received this piece; keep at 'None' for an update in general. """ if DEBUG: print >>sys.stderr, time.asctime(),'-', "vod: Updating prebuffer. Received piece: ", received_piece vs = self.videostatus if not vs.prebuffering: return if received_piece: self.nreceived += 1 # for the prebuffer we keep track only of the base layer high_range = vs.generate_base_high_range() high_range_length = vs.get_base_high_range_length() # Arno, 2010-01-13: This code is only used when *pre*buffering, not # for in-playback buffering. See refill_buffer() for that. # Restored original code here that looks at max_prebuf_packets # and not highrange. The highrange solution didn't allow the prebuf # time to be varied independently of highrange width. 
# wantprebuflen = min(self.max_prebuf_packets,high_range_length) high_range_list = list(high_range) wantprebuflist = high_range_list[:wantprebuflen] missing_pieces = filter(lambda i: not self.have_piece(i), wantprebuflist) gotall = not missing_pieces if high_range_length: self.prebufprogress = min(1, float(wantprebuflen - len(missing_pieces)) / max(1, wantprebuflen)) else: self.prebufprogress = 1.0 if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: Already got",(self.prebufprogress*100.0),"% of prebuffer" if not gotall and DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: Still need pieces",missing_pieces,"for prebuffering/FFMPEG analysis" if vs.dropping: if not self.doing_ffmpeg_analysis and not gotall and not (0 in missing_pieces) and self.nreceived > self.max_prebuf_packets: perc = float(self.max_prebuf_packets)/10.0 if float(len(missing_pieces)) < perc or self.nreceived > (2*len(missing_pieces)): # If less then 10% of packets missing, or we got 2 times the packets we need already, # force start of playback gotall = True if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: Forcing stop of prebuffering, less than",perc,"missing, or got 2N packets already" if gotall and self.doing_ffmpeg_analysis: [bitrate,width,height] = self.parse_video() self.doing_ffmpeg_analysis = False if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: after parse",bitrate,self.doing_bitrate_est if bitrate is None or round(bitrate)== 0: if self.doing_bitrate_est: # Errr... there was no playtime info in the torrent # and FFMPEG can't tell us... #bitrate = (1*1024*1024/8) # 1mbps # Ric: in svc every piece should be 2,56 sec. bitrate = vs.piecelen / 2.56 if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: No bitrate info avail, wild guess: %.2f KByte/s" % (bitrate/1024) vs.set_bitrate(bitrate) self._playback_stats.add_event(self._playback_key, "bitrate-guess:%d" % bitrate) else: if self.doing_bitrate_est: # There was no playtime info in torrent, use what FFMPEG tells us self.ffmpeg_est_bitrate = bitrate bitrate *= 1.1 # Make FFMPEG estimation 10% higher if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: Estimated bitrate: %.2f KByte/s" % (bitrate/1024) vs.set_bitrate(bitrate) self._playback_stats.add_event(self._playback_key, "bitrate-ffmpeg:%d" % bitrate) if width is not None and height is not None: diff = False if self.videodim is None: self.videodim = (width,height) self.height = height elif self.videodim[0] != width or self.videodim[1] != height: diff = True if not self.player_opened_with_width_height or diff: #self.user_setsize(self.videodim) pass # # 10/03/09 boudewijn: For VOD we will wait for the entire # # buffer to fill (gotall) before we start playback. For live # # this is unlikely to happen and we will therefore only wait # # until we estimate that we have enough_buffer. 
# if (gotall or vs.live_streaming) and self.enough_buffer(): if gotall and self.enough_buffer(): # enough buffer and could estimated bitrate - start streaming if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: Prebuffering done",currentThread().getName() self.data_ready.acquire() vs.prebuffering = False self.stat_prebuffertime = time.time() - self.prebufstart self.notify_playable() self.data_ready.notify() self.data_ready.release() elif DEBUG: if self.doing_ffmpeg_analysis: print >>sys.stderr,time.asctime(),'-', "vod: trans: Prebuffering: waiting to obtain the first %d packets" % (self.max_prebuf_packets) else: print >>sys.stderr,time.asctime(),'-', "vod: trans: Prebuffering: %.2f seconds left" % (self.expected_buffering_time()) def got_have(self,piece): vs = self.videostatus # update stats self.stat_pieces.set( piece, "known", time.time() ) """ if vs.playing and vs.wraparound: # check whether we've slipped back too far d = vs.wraparound_delta n = max(1,self.piecepicker.num_nonempty_neighbours()/2) if self.piecepicker.numhaves[piece] > n and d/2 < (piece - vs.playback_pos) % vs.movie_numpieces < d: # have is confirmed by more than half of the neighours and is in second half of future window print >>sys.stderr,time.asctime(),'-', "vod: trans: Forcing restart. Am at playback position %d but saw %d at %d>%d peers." % (vs.playback_pos,piece,self.piecepicker.numhaves[piece],n) self.start(force=True) """ def got_piece(self, piece_id, begin, length): """ Called when a chunk has been downloaded. This information can be used to estimate download speed. """ if self.videostatus.in_high_range(piece_id): self.high_range_rate.update_rate(length) if DEBUG: print >>sys.stderr, time.asctime(),'-', "vod: high priority rate:", self.high_range_rate.get_rate() def complete(self,piece,downloaded=True): """ Called when a movie piece has been downloaded or was available from the start (disk). """ vs = self.videostatus if vs.in_high_range(piece): self._playback_stats.add_event(self._playback_key, "hipiece:%d" % piece) else: self._playback_stats.add_event(self._playback_key, "piece:%d" % piece) if not self._complete and self.piecepicker.am_I_complete(): self._complete = True self._playback_stats.add_event(self._playback_key, "complete") self._playback_stats.flush() self.stat_pieces.set( piece, "complete", time.time() ) if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: Completed",piece if downloaded: self.overall_rate.update_rate( vs.piecelen ) if vs.in_download_range( piece ): self.pieces_in_buffer += 1 else: if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: piece %d too late [pos=%d]" % (piece,vs.playback_pos) self.stat_latepieces += 1 if vs.playing and vs.playback_pos == piece: # we were delaying for this piece self.refill_buffer() self.update_prebuffering( piece ) def set_pos(self,pos): """ Update the playback position. Called when playback is started (depending on requested offset). """ vs = self.videostatus oldpos = vs.playback_pos vs.playback_pos = pos # fast forward for i in xrange(oldpos,pos+1): if self.has[i]: self.pieces_in_buffer -= 1 # fast rewind for i in xrange(pos,oldpos+1): if self.has[i]: self.pieces_in_buffer += 1 def inc_pos(self): vs = self.videostatus if self.has[vs.playback_pos]: self.pieces_in_buffer -= 1 vs.inc_playback_pos() def expected_download_time(self): """ Expected download time left. """ vs = self.videostatus if vs.wraparound: return float(2 ** 31) # Ric: TODO for the moment keep track only of the base layer. 
Afterwards we will send # different signals depending on the buffer layer pieces_left = vs.last_piece - vs.playback_pos - self.pieces_in_buffer if pieces_left <= 0: return 0.0 # list all pieces from the high priority set that have not # been completed uncompleted_pieces = filter(self.storagewrapper.do_I_have, vs.generate_high_range()) # when all pieces in the high-range have been downloaded, # we have an expected download time of zero if not uncompleted_pieces: return 0.0 # the download time estimator is very inacurate when we only # have a few chunks left. therefore, we will put more emphesis # on the overall_rate as the number of uncompleted_pieces does # down. total_length = vs.get_high_range_length() uncompleted_length = len(uncompleted_pieces) expected_download_speed = self.high_range_rate.get_rate() * (1 - float(uncompleted_length) / total_length) + \ self.overall_rate.get_rate() * uncompleted_length / total_length if expected_download_speed < 0.1: return float(2 ** 31) return pieces_left * vs.piecelen / expected_download_speed def expected_playback_time(self): """ Expected playback time left. """ vs = self.videostatus pieces_to_play = vs.last_piece - vs.playback_pos + 1 if pieces_to_play <= 0: return 0.0 if not vs.bitrate: return float(2 ** 31) return pieces_to_play * vs.piecelen / vs.bitrate def expected_buffering_time(self): """ Expected time required for buffering. """ download_time = self.expected_download_time() playback_time = self.expected_playback_time() #print >>sys.stderr,time.asctime(),'-', "EXPECT",self.expected_download_time(),self.expected_playback_time() # Infinite minus infinite is still infinite if download_time > float(2 ** 30) and playback_time > float(2 ** 30): return float(2 ** 31) return abs(download_time - playback_time) def enough_buffer(self): """ Returns True if we can safely start playback without expecting to run out of buffer. """ return max(0.0, self.expected_download_time() - self.expected_playback_time()) == 0.0 def tick_second(self): self.rawserver.add_task( self.tick_second, 1.0 ) vs = self.videostatus # Adjust estimate every second, but don't display every second display = False # (int(time.time()) % 5) == 0 if DEBUG: # display print >>sys.stderr,time.asctime(),'-', "vod: Estimated download time: %5.1fs [priority: %7.2f Kbyte/s] [overall: %7.2f Kbyte/s]" % (self.expected_download_time(), self.high_range_rate.get_rate()/1024, self.overall_rate.get_rate()/1024) if vs.playing and round(self.playbackrate.rate) > self.MINPLAYBACKRATE and not vs.prebuffering: if self.doing_bitrate_est: if display: print >>sys.stderr,time.asctime(),'-', "vod: Estimated playback time: %5.0fs [%7.2f Kbyte/s], doing estimate=%d" % (self.expected_playback_time(),self.playbackrate.rate/1024, self.ffmpeg_est_bitrate is None) if self.ffmpeg_est_bitrate is None: vs.set_bitrate( self.playbackrate.rate ) if display: sys.stderr.flush() # # MovieTransport interface # # WARNING: these methods will be called by other threads than NetworkThread! # def size( self ): # Ric: returning the size of the base layer return self.videostatus.selected_movie[0]["size"] def read(self,numbytes=None): """ Read a set of pieces. The return data will be a byte for the pieces presence and a set of pieces depending on the available quality. Return None in case of an error or end-of-stream. """ vs = self.videostatus # keep track in the base layer if not self.curpiece: # curpiece_pos could be set to something other than 0! # for instance, a seek request sets curpiece_pos but does not # set curpiece. 
base_layer_piece = self.pop() if base_layer_piece is None: return None piecenr,self.curpiece = base_layer_piece relatives = vs.get_respective_piece(piecenr) if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: popped piece %d to transport to player," % piecenr, "relative pieces are", relatives curpos = self.curpiece_pos left = len(self.curpiece) - curpos if numbytes is None: # default on one piece per read numbytes = left # TODO ask, we could leave it like this if left > numbytes: # piece contains enough -- return what was requested data = self.curpiece[curpos:curpos+numbytes] self.curpiece_pos += numbytes else: # TODO add get_bitrate method in SVC status to see how many # pieces we need from the different layers! header = str(vs.piecelen) data = header # return remainder of the piece, could be less than numbytes data += self.curpiece[curpos:] for i in relatives: if self.has[i]: if DEBUG: print>>sys.stderr, time.asctime(),'-', "vod: trans: filling stream with piece %d from an enhancement layer" % i data += self.get_piece(i) #print>>sys.stderr, time.asctime(),'-', "vod: trans: filling stream with piece %d from an enhancement layer" % i, len(data) self.curpiece = "" self.curpiece_pos = 0 return data def start( self, bytepos = 0, force = False ): """ Initialise to start playing at position `bytepos'. """ self._playback_stats.add_event(self._playback_key, "play") # ARNOTODO: we don't use start(bytepos != 0) at the moment. See if we # should. Also see if we need the read numbytes here, or that it # is better handled at a higher layer. For live it is currently # done at a higher level, see VariableReadAuthStreamWrapper because # we have to strip the signature. Hence the self.curpiece buffer here # is superfluous. Get rid off it or check if # # curpiece[0:piecelen] # # returns curpiece if piecelen has length piecelen == optimize for # piecesized case. # # For VOD seeking we may use the numbytes facility to seek to byte offsets # not just piece offsets. # vs = self.videostatus if vs.playing and not force: return # lock before changing startpos or any other playing variable self.data_ready.acquire() try: # Determine piece number and offset if bytepos < vs.piecelen: piece = vs.first_piece offset = bytepos else: newbytepos = bytepos - vs.first_piecelen piece = vs.first_piece + newbytepos / vs.piecelen + 1 offset = newbytepos % vs.piecelen if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: === START, START, START, START, START, START, START, START, START, START, START, START, START,START" print >>sys.stderr,time.asctime(),'-', "vod: trans: === START at offset %d (piece %d) (forced: %s) ===" % (bytepos,piece,force) # Initialise all playing variables self.curpiece = "" # piece currently being popped self.curpiece_pos = offset # TODO self.set_pos( piece ) self.outbuf = [] #self.last_pop = time.time() self.reset_bitrate_prediction() vs.playing = True self.playbackrate = Measure( 60 ) finally: self.data_ready.release() # ARNOTODO: start is called by non-NetworkThreads, these following methods # are usually called by NetworkThread. # # We now know that this won't be called until notify_playable() so # perhaps this can be removed? # # CAREFUL: if we use start() for seeking... that's OK. User won't be # able to seek before he got his hands on the stream, so after # notify_playable() # See what we can do right now self.update_prebuffering() self.refill_buffer() def stop( self ): """ Playback is stopped. 
""" self._playback_stats.add_event(self._playback_key, "stop") vs = self.videostatus if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: === STOP = player closed conn === " if not vs.playing: return vs.playing = False # clear buffer and notify possible readers self.data_ready.acquire() self.outbuf = [] #self.last_pop = None vs.prebuffering = False self.data_ready.notify() self.data_ready.release() def pause( self, autoresume = False ): """ Pause playback. If `autoresume' is set, playback is expected to be resumed automatically once enough data has arrived. """ self._playback_stats.add_event(self._playback_key, "pause") vs = self.videostatus if not vs.playing or not vs.pausable: return if vs.paused: vs.autoresume = autoresume return if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: paused (autoresume: %s)" % (autoresume,) vs.paused = True vs.autoresume = autoresume self.paused_at = time.time() #self.reset_bitrate_prediction() self.videoinfo["usercallback"](VODEVENT_PAUSE,{ "autoresume": autoresume }) def resume( self ): """ Resume paused playback. """ self._playback_stats.add_event(self._playback_key, "resume") vs = self.videostatus if not vs.playing or not vs.paused or not vs.pausable: return if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: resumed" vs.paused = False vs.autoresume = False self.stat_stalltime += time.time() - self.paused_at self.addtime_bitrate_prediction( time.time() - self.paused_at ) self.videoinfo["usercallback"](VODEVENT_RESUME,{}) self.update_prebuffering() self.refill_buffer() def autoresume( self, testfunc = lambda: True ): """ Resumes if testfunc returns True. If not, will test every second. """ vs = self.videostatus if not vs.playing or not vs.paused or not vs.autoresume: return if not testfunc(): self.rawserver.add_task( lambda: self.autoresume( testfunc ), 1.0 ) return if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: Resuming, since we can maintain this playback position" self.resume() def done( self ): vs = self.videostatus if not vs.playing: return True if vs.wraparound: return False return vs.playback_pos == vs.last_piece+1 and self.curpiece_pos >= len(self.curpiece) def seek(self,pos,whence=os.SEEK_SET): """ Seek to the given position, a number in bytes relative to both the "whence" reference point and the file being played. We currently actually seek at byte level, via the start() method. We support all forms of seeking, including seeking past the current playback pos. Note this may imply needing to prebuffer again or being paused. vs.playback_pos in NetworkThread domain. Does data_ready lock cover that? Nope. However, this doesn't appear to be respected in any of the MovieTransport methods, check all. Check * When seeking reset other buffering, e.g. read()'s self.curpiece and higher layers. """ vs = self.videostatus length = self.size() # lock before changing startpos or any other playing variable self.data_ready.acquire() try: if whence == os.SEEK_SET: abspos = pos elif whence == os.SEEK_END: if pos > 0: raise ValueError("seeking beyond end of stream") else: abspos = size+pos else: # SEEK_CUR raise ValueError("seeking does not currently support SEEK_CUR") self.stop() self.start(pos) finally: self.data_ready.release() def get_mimetype(self): return self.mimetype def set_mimetype(self,mimetype): self.mimetype = mimetype # # End of MovieTransport interface # def have_piece(self,piece): return self.piecepicker.has[piece] def get_piece(self,piece): """ Returns the data of a certain piece, or None. 
""" vs = self.videostatus if not self.have_piece( piece ): return None begin = 0 length = vs.piecelen data = self.storagewrapper.do_get_piece(piece, 0, length) if data is None: return None return data.tostring() def reset_bitrate_prediction(self): self.start_playback = None self.last_playback = None self.history_playback = collections.deque() def addtime_bitrate_prediction(self,seconds): if self.start_playback is not None: self.start_playback["local_ts"] += seconds def valid_piece_data(self,i,piece): if not piece: return False if not self.start_playback or self.authenticator is None: # no check possible return True s = self.start_playback seqnum = self.authenticator.get_seqnum( piece ) source_ts = self.authenticator.get_rtstamp( piece ) if seqnum < s["absnr"] or source_ts < s["source_ts"]: # old packet??? print >>sys.stderr,time.asctime(),'-', "vod: trans: **** INVALID PIECE #%s **** seqnum=%d but we started at seqnum=%d" % (i,seqnum,s["absnr"]) return False return True def update_bitrate_prediction(self,i,piece): """ Update the rate prediction given that piece i has just been pushed to the buffer. """ if self.authenticator is not None: seqnum = self.authenticator.get_seqnum( piece ) source_ts = self.authenticator.get_rtstamp( piece ) else: seqnum = i source_ts = 0 d = { "nr": i, "absnr": seqnum, "local_ts": time.time(), "source_ts": source_ts, } # record if self.start_playback is None: self.start_playback = d if self.last_playback and self.last_playback["absnr"] > d["absnr"]: # called out of order return self.last_playback = d # keep a recent history MAX_HIST_LEN = 10*60 # seconds self.history_playback.append( d ) # of at most 10 entries (or minutes if we keep receiving pieces) while source_ts - self.history_playback[0]["source_ts"] > MAX_HIST_LEN: self.history_playback.popleft() if DEBUG: vs = self.videostatus first, last = self.history_playback[0], self.history_playback[-1] if first["source_ts"] and first != last: divd = (last["source_ts"] - first["source_ts"]) if divd == 0: divd = 0.000001 bitrate = "%.2f kbps" % (8.0 / 1024 * (vs.piecelen - vs.sigsize) * (last["absnr"] - first["absnr"]) / divd,) else: bitrate = "%.2f kbps (external info)" % (8.0 / 1024 * vs.bitrate) print >>sys.stderr,time.asctime(),'-', "vod: trans: %i: pushed at t=%.2f, age is t=%.2f, bitrate = %s" % (i,d["local_ts"]-self.start_playback["local_ts"],d["source_ts"]-self.start_playback["source_ts"],bitrate) def piece_due(self,i): """ Return the time when we expect to have to send a certain piece to the player. For wraparound, future pieces are assumed. """ if self.start_playback is None: return float(2 ** 31) # end of time s = self.start_playback l = self.last_playback vs = self.videostatus if not vs.wraparound and i < l["nr"]: # should already have arrived! return time.time() # assume at most one wrap-around between l and i piecedist = (i - l["nr"]) % vs.movie_numpieces if s["source_ts"]: # ----- we have timing information from the source first, last = self.history_playback[0], self.history_playback[-1] if first != last: # we have at least two recent pieces, so can calculate average bitrate. 
use the recent history # do *not* adjust for sigsize since we don't want the actual video speed but the piece rate bitrate = 1.0 * vs.piecelen * (last["absnr"] - first["absnr"]) / (last["source_ts"] - first["source_ts"]) else: # fall-back to bitrate predicted from torrent / ffmpeg bitrate = vs.bitrate # extrapolate with the average bitrate so far return s["local_ts"] + l["source_ts"] - s["source_ts"] + piecedist * vs.piecelen / bitrate - self.PIECE_DUE_SKEW else: # ----- no timing information from pieces, so do old-fashioned methods if vs.live_streaming: # Arno, 2008-11-20: old-fashioned method is well bad, # ignore. return time.time() + 60.0 else: i = piecedist + (l["absnr"] - s["absnr"]) if s["nr"] == vs.first_piece: bytepos = vs.first_piecelen + (i-1) * vs.piecelen else: bytepos = i * vs.piecelen return s["local_ts"] + bytepos / vs.bitrate - self.PIECE_DUE_SKEW def max_buffer_size( self ): vs = self.videostatus return max(256*1024, vs.piecelen * 4, self.BUFFER_TIME * vs.bitrate) def refill_buffer( self ): """ Push pieces (from the base layer) into the player FIFO when needed and able. This counts as playing the pieces as far as playback_pos is concerned.""" self.data_ready.acquire() vs = self.videostatus if vs.prebuffering or not vs.playing: self.data_ready.release() return if vs.paused: self.data_ready.release() return mx = self.max_buffer_size() self.outbuflen = sum( [len(d) for (p,d) in self.outbuf] ) now = time.time() def buffer_underrun(): return self.outbuflen == 0 and self.start_playback and now - self.start_playback["local_ts"] > 1.0 if buffer_underrun(): # TODO def sustainable(): self.sustainable_counter += 1 if self.sustainable_counter > 10: self.sustainable_counter = 0 base_high_range_length = vs.get_base_high_range_length() have_length = len(filter(lambda n:self.has[n], vs.generate_base_high_range())) # progress self.prebufprogress = min(1.0, float(have_length) / max(1, base_high_range_length)) return have_length >= base_high_range_length else: num_immediate_packets = 0 base_high_range_length = vs.get_base_high_range_length() for piece in vs.generate_base_high_range(): if self.has[piece]: num_immediate_packets += 1 if num_immediate_packets >= base_high_range_length: break else: break else: # progress self.prebufprogress = 1.0 # completed loop without breaking, so we have everything we need return True return num_immediate_packets >= base_high_range_length sus = sustainable() if vs.pausable and not sus: if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: BUFFER UNDERRUN -- PAUSING" self.pause( autoresume = True ) self.autoresume( sustainable ) # boudewijn: increase the minimum buffer size vs.increase_high_range() self.data_ready.release() return elif sus: if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: BUFFER UNDERRUN -- IGNORING, rate is sustainable" else: if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: BUFFER UNDERRUN -- STALLING, cannot pause player to fall back some, so just wait for more pieces" self.data_ready.release() return def push( i, data ): # push packet into queue if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: %d: pushed l=%d" % (vs.playback_pos,piece) # update predictions based on this piece self.update_bitrate_prediction( i, data ) self.stat_playedpieces += 1 self.stat_pieces.set( i, "tobuffer", time.time() ) self.outbuf.append( (vs.playback_pos,data) ) self.outbuflen += len(data) self.data_ready.notify() self.inc_pos() def drop( i ): # drop packet if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: 
trans: %d: dropped pos=%d; deadline expired %.2f sec ago !!!!!!!!!!!!!!!!!!!!!!" % (piece,vs.playback_pos,time.time()-self.piece_due(i)) self.stat_droppedpieces += 1 self.stat_pieces.complete( i ) self.inc_pos() # We push in queue only pieces from the base layer download_range = vs.download_range() base_range = download_range[0] for piece in vs.generate_range( [base_range] ): ihavepiece = self.has[piece] forcedrop = False # check whether we have room to store it if self.outbuflen > mx: # buffer full break # final check for piece validity if ihavepiece: data = self.get_piece( piece ) if not self.valid_piece_data( piece, data ): # I should have the piece, but I don't: WAAAAHH! forcedrop = True ihavepiece = False if ihavepiece: # have piece - push it into buffer if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: BUFFER STATUS (max %.0f): %.0f kbyte" % (mx/1024.0,self.outbuflen/1024.0) # piece found -- add it to the queue push( piece, data ) else: # don't have piece, or forced to drop if not vs.dropping and forcedrop: print >>sys.stderr,time.asctime(),'-', "vod: trans: DROPPING INVALID PIECE #%s, even though we shouldn't drop anything." % piece if vs.dropping or forcedrop: if time.time() >= self.piece_due( piece ) or buffer_underrun() or forcedrop: # piece is too late or we have an empty buffer (and future data to play, otherwise we would have paused) -- drop packet drop( piece ) else: # we have time to wait for the piece and still have data in our buffer -- wait for packet if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: %d: due in %.2fs pos=%d" % (piece,self.piece_due(piece)-time.time(),vs.playback_pos) break else: # not dropping if self.outbuflen == 0: print >>sys.stderr,time.asctime(),'-', "vod: trans: SHOULD NOT HAPPEN: missing piece but not dropping. should have paused. pausable=",vs.pausable,"player reading too fast looking for I-Frame?" else: if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: prebuffering done, but could not fill buffer." break self.data_ready.release() def refill_rawserv_tasker( self ): self.refill_buffer() self.rawserver.add_task( self.refill_rawserv_tasker, self.REFILL_INTERVAL ) def pop( self ): self.data_ready.acquire() vs = self.videostatus while vs.prebuffering and not self.done(): # wait until done prebuffering self.data_ready.wait() while not self.outbuf and not self.done(): # wait until a piece is available #if DEBUG: # print >>sys.stderr,time.asctime(),'-', "vod: trans: Player waiting for data" self.data_ready.wait() if not self.outbuf: piece = None else: piece = self.outbuf.pop( 0 ) # nr,data pair self.playbackrate.update_rate( len(piece[1]) ) #self.last_pop = time.time() self.data_ready.release() if piece: self.stat_pieces.set( piece[0], "toplayer", time.time() ) self.stat_pieces.complete( piece[0] ) return piece def notify_playable(self): """ Tell user he can play the media, cf. 
BaseLib.Core.DownloadConfig.set_vod_event_callback() """ #if self.bufferinfo: # self.bufferinfo.set_playable() #self.progressinf.bufferinfo_updated_callback() # triblerAPI if self.usernotified: return self.usernotified = True self.prebufprogress = 1.0 self.playable = True #print >>sys.stderr,time.asctime(),'-', "vod: trans: notify_playable: Calling usercallback to tell it we're ready to play",self.videoinfo['usercallback'] # MIME type determined normally in LaunchManyCore.network_vod_event_callback # However, allow for recognition by videoanalyser mimetype = self.get_mimetype() complete = self.piecepicker.am_I_complete() if complete: stream = None filename = self.videoinfo["outpath"] else: endstream = MovieTransportStreamWrapper(self) filename = None print >>sys.stderr,time.asctime(),'-', "3.3", self.size(), endstream, self.vodeventfunc, complete, self.size() # Call user callback #print >>sys.stderr,time.asctime(),'-', "vod: trans: notify_playable: calling:",self.vodeventfunc self.vodeventfunc( self.videoinfo, VODEVENT_START, { "complete": complete, "filename": filename, "mimetype": mimetype, "stream": endstream, "length": self.size(), } ) # # Methods for DownloadState to extract status info of VOD mode. # def get_stats(self): """ Returns accumulated statistics. The piece data is cleared after this call to save memory. """ """ Called by network thread """ s = { "played": self.stat_playedpieces, "late": self.stat_latepieces, "dropped": self.stat_droppedpieces, "stall": self.stat_stalltime, "pos": self.videostatus.playback_pos, "prebuf": self.stat_prebuffertime, "pp": self.piecepicker.stats, "pieces": self.stat_pieces.pop_completed(), } return s def get_prebuffering_progress(self): """ Called by network thread """ return self.prebufprogress def is_playable(self): """ Called by network thread """ if not self.playable or self.videostatus.prebuffering: self.playable = (self.prebufprogress == 1.0 and self.enough_buffer()) return self.playable def get_playable_after(self): """ Called by network thread """ return self.expected_buffering_time() def get_duration(self): return 1.0 * self.videostatus.selected_movie[0]["size"] / self.videostatus.bitrate
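# The class above exposes the MovieTransport interface (start/read/stop/done) that
# MovieTransportStreamWrapper hands to the player once notify_playable() fires.
# A minimal consumer loop, assuming `trans` is an already prebuffered transporter
# instance and `sink` is any writable object (both names are illustrative):
def play_stream_sketch(trans, sink):
    trans.start(bytepos=0)
    while not trans.done():
        data = trans.read()        # one base-layer piece (plus enhancement data) per call
        if data is None:           # error or end of stream
            break
        sink.write(data)
    trans.stop()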
def __init__(self, bt1download, videostatus, videoinfo, videoanalyserpath, vodeventfunc):
    # dirty hack to get the Tribler Session
    from BaseLib.Core.Session import Session
    session = Session.get_instance()

    if session.get_overlay():
        # see comment in else section on importing...
        from BaseLib.Core.CacheDB.SqliteVideoPlaybackStatsCacheDB import VideoPlaybackDBHandler
        self._playback_stats = VideoPlaybackDBHandler.get_instance()
    else:
        # hack: we should not import this since it is not part of
        # the core nor should we import here, but otherwise we
        # will get import errors
        from BaseLib.Player.Reporter import VideoPlaybackReporter
        self._playback_stats = VideoPlaybackReporter.get_instance()

    # add an event to indicate that the user wants playback to
    # start
    def set_nat(nat):
        self._playback_stats.add_event(self._playback_key, "nat:%s" % nat)
    self._playback_key = base64.b64encode(os.urandom(20))
    self._playback_stats.add_event(self._playback_key, "play-init")
    self._playback_stats.add_event(self._playback_key, "piece-size:%d" % videostatus.piecelen)
    self._playback_stats.add_event(self._playback_key, "num-pieces:%d" % videostatus.movie_numpieces)
    self._playback_stats.add_event(self._playback_key, "bitrate:%d" % videostatus.bitrate)
    self._playback_stats.add_event(self._playback_key, "nat:%s" % session.get_nat_type(callback=set_nat))

    self._complete = False
    self.videoinfo = videoinfo
    self.bt1download = bt1download
    self.piecepicker = bt1download.picker
    self.rawserver = bt1download.rawserver
    self.storagewrapper = bt1download.storagewrapper
    self.fileselector = bt1download.fileselector

    self.vodeventfunc = vodeventfunc
    self.videostatus = vs = videostatus

    # Add quotes around path, as that's what os.popen() wants on win32
    if sys.platform == "win32" and videoanalyserpath is not None and videoanalyserpath.find(' ') != -1:
        self.video_analyser_path = '"' + videoanalyserpath + '"'
    else:
        self.video_analyser_path = videoanalyserpath

    # counter for the sustainable() call. Every X calls the
    # buffer-percentage is updated.
    self.sustainable_counter = sys.maxint

    # boudewijn: because we now update the downloadrate for each
    # received chunk instead of each piece we do not need to
    # average the measurement over a 'long' period of time. Also,
    # we only update the downloadrate for pieces that are in the
    # high priority range giving us a better estimation on how
    # likely the pieces will be available on time.
    self.overall_rate = Measure(10)
    self.high_range_rate = Measure(2)

    # boudewijn: increase the initial minimum buffer size
    vs.increase_high_range()

    # buffer: a link to the piecepicker buffer
    self.has = self.piecepicker.has

    # number of pieces in buffer
    self.pieces_in_buffer = 0

    self.data_ready = Condition()

    # Arno: Call FFMPEG only if the torrent did not provide the
    # bitrate and video dimensions. This is because FFMPEG
    # sometimes hangs e.g. Ivaylo's Xvid Finland AVI, for unknown
    # reasons
    # Arno: 2007-01-06: Since we use VideoLan player, videodimensions not important
    if vs.bitrate_set:
        self.doing_ffmpeg_analysis = False
        self.doing_bitrate_est = False
        self.videodim = None  #self.movieselector.videodim
    else:
        self.doing_ffmpeg_analysis = True
        self.doing_bitrate_est = True
        self.videodim = None

    self.player_opened_with_width_height = False
    self.ffmpeg_est_bitrate = None

    # number of packets required to preparse the video
    # I say we need 128 KB to sniff size and bitrate
    # Arno: 2007-01-04: Changed to 1MB. It appears ffplay works better with some
    # decent prebuffering. We should replace this with a timing based thing,
    if not self.doing_bitrate_est:
        prebufsecs = self.PREBUF_SEC_VOD
        # assumes first piece is whole (first_piecelen == piecelen)
        piecesneeded = vs.time_to_pieces(prebufsecs)
        bytesneeded = piecesneeded * vs.piecelen
    else:
        # Arno, 2007-01-08: for very high bitrate files e.g.
        # 850 kilobyte/s (500 MB for 10 min 20 secs) this is too small
        # and we'll have packet loss because we start too soon.
        bytesneeded = 1024 * 1024
        piecesneeded = 1 + int(ceil((bytesneeded - vs.piecelen) / float(vs.piecelen)))
    self.max_prebuf_packets = min(vs.movie_numpieces, piecesneeded)

    if self.doing_ffmpeg_analysis and DEBUG:
        print >>sys.stderr, "vod: trans: Want", self.max_prebuf_packets, "pieces for FFMPEG analysis, piecesize", vs.piecelen
    if DEBUG:
        print >>sys.stderr, "vod: trans: Want", self.max_prebuf_packets, "pieces for prebuffering"

    self.nreceived = 0

    if DEBUG:
        print >>sys.stderr, "vod: trans: Setting MIME type to", self.videoinfo['mimetype']

    self.set_mimetype(self.videoinfo['mimetype'])

    # some statistics
    self.stat_playedpieces = 0      # number of pieces played successfully
    self.stat_latepieces = 0        # number of pieces that arrived too late
    self.stat_droppedpieces = 0     # number of pieces dropped
    self.stat_stalltime = 0.0       # total amount of time the video was stalled
    self.stat_prebuffertime = 0.0   # amount of prebuffer time used
    self.stat_pieces = PieceStats() # information about each piece

    # start periodic tasks
    self.curpiece = ""
    self.curpiece_pos = 0
    # The outbuf keeps only the pieces from the base layer.. We play if we
    # have at least a piece from the base layer!
    self.outbuf = []
    #self.last_pop = None # time of last pop
    self.reset_bitrate_prediction()
    self.lasttime = 0

    # For DownloadState
    self.prebufprogress = 0.0
    self.prebufstart = time.time()
    self.playable = False
    self.usernotified = False

    self.outbuflen = None

    # LIVESOURCEAUTH
    self.authenticator = None

    self.refill_rawserv_tasker()
    self.tick_second()

    # link to others (last thing to do)
    self.piecepicker.set_transporter(self)
    #self.start()

    if FAKEPLAYBACK:
        import threading

        class FakeReader(threading.Thread):
            def __init__(self, movie):
                threading.Thread.__init__(self)
                self.movie = movie

            def run(self):
                self.movie.start()
                while not self.movie.done():
                    self.movie.read()

        t = FakeReader(self)
        t.start()
def start(self, bytepos=0, force=False):
    """ Initialise to start playing at position `bytepos'. """
    self._playback_stats.add_event(self._playback_key, "play")

    # ARNOTODO: we don't use start(bytepos != 0) at the moment. See if we
    # should. Also see if we need the read numbytes here, or that it
    # is better handled at a higher layer. For live it is currently
    # done at a higher level, see VariableReadAuthStreamWrapper because
    # we have to strip the signature. Hence the self.curpiece buffer here
    # is superfluous. Get rid of it or check if
    #
    #   curpiece[0:piecelen]
    #
    # returns curpiece if piecelen has length piecelen == optimize for
    # piecesized case.
    #
    # For VOD seeking we may use the numbytes facility to seek to byte offsets
    # not just piece offsets.
    #
    vs = self.videostatus

    if vs.playing and not force:
        return

    # lock before changing startpos or any other playing variable
    self.data_ready.acquire()
    try:
        # Determine piece number and offset
        if bytepos < vs.piecelen:
            piece = vs.first_piece
            offset = bytepos
        else:
            newbytepos = bytepos - vs.first_piecelen
            piece = vs.first_piece + newbytepos / vs.piecelen + 1
            offset = newbytepos % vs.piecelen

        if DEBUG:
            print >>sys.stderr, "vod: trans: === START, START, START, START, START, START, START, START, START, START, START, START, START,START"
            print >>sys.stderr, "vod: trans: === START at offset %d (piece %d) (forced: %s) ===" % (bytepos, piece, force)

        # Initialise all playing variables
        self.curpiece = ""           # piece currently being popped
        self.curpiece_pos = offset
        # TODO
        self.set_pos(piece)
        self.outbuf = []
        #self.last_pop = time.time()
        self.reset_bitrate_prediction()
        vs.playing = True
        self.playbackrate = Measure(60)

        # boudewijn: decrease the initial minimum buffer size
        vs.decrease_high_range()
    finally:
        self.data_ready.release()

    # ARNOTODO: start is called by non-NetworkThreads, these following methods
    # are usually called by NetworkThread.
    #
    # We now know that this won't be called until notify_playable() so
    # perhaps this can be removed?
    #
    # CAREFUL: if we use start() for seeking... that's OK. User won't be
    # able to seek before he got his hands on the stream, so after
    # notify_playable()

    # See what we can do right now
    self.update_prebuffering()
    self.refill_buffer()
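# The piece/offset arithmetic in start() above maps a byte position onto a piece
# index plus an in-piece offset, treating the first piece separately because it may
# have its own length. A small numeric check with illustrative values
# (first_piece=0, first_piecelen=piecelen=262144):
def bytepos_to_piece_sketch(bytepos, first_piece, first_piecelen, piecelen):
    if bytepos < piecelen:
        return first_piece, bytepos
    newbytepos = bytepos - first_piecelen
    return first_piece + newbytepos // piecelen + 1, newbytepos % piecelen

# bytepos_to_piece_sketch(100000, 0, 262144, 262144)  -> (0, 100000)
# bytepos_to_piece_sketch(600000, 0, 262144, 262144)  -> (2, 75712)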
Received piece: ", received_piece vs = self.videostatus if not vs.prebuffering: return if received_piece: self.nreceived += 1 # for the prebuffer we keep track only of the base layer high_range = vs.generate_base_high_range() high_range_length = vs.get_base_high_range_length() missing_pieces = filter(lambda i: not self.have_piece(i), high_range) gotall = not missing_pieces if high_range_length: self.prebufprogress = min(1, float(high_range_length - len(missing_pieces)) / max(1, high_range_length)) else: self.prebufprogress = 1.0 if DEBUG: print >>sys.stderr,"vod: trans: Already got",(self.prebufprogress*100.0),"% of prebuffer" if not gotall and DEBUG: print >>sys.stderr,"vod: trans: Still need pieces",missing_pieces,"for prebuffering/FFMPEG analysis" if vs.dropping: if not self.doing_ffmpeg_analysis and not gotall and not (0 in missing_pieces) and self.nreceived > self.max_prebuf_packets: perc = float(self.max_prebuf_packets)/10.0 if float(len(missing_pieces)) < perc or self.nreceived > (2*len(missing_pieces)): # If less then 10% of packets missing, or we got 2 times the packets we need already, # force start of playback gotall = True if DEBUG: print >>sys.stderr,"vod: trans: Forcing stop of prebuffering, less than",perc,"missing, or got 2N packets already" if gotall and self.doing_ffmpeg_analysis: [bitrate,width,height] = self.parse_video() self.doing_ffmpeg_analysis = False if DEBUG: print >>sys.stderr,"vod: trans: after parse",bitrate,self.doing_bitrate_est if bitrate is None or round(bitrate)== 0: if self.doing_bitrate_est: # Errr... there was no playtime info in the torrent # and FFMPEG can't tell us... #bitrate = (1*1024*1024/8) # 1mbps # Ric: in svc every piece should be 2,56 sec. bitrate = vs.piecelen / 2.56 if DEBUG: print >>sys.stderr,"vod: trans: No bitrate info avail, wild guess: %.2f KByte/s" % (bitrate/1024) vs.set_bitrate(bitrate) self._playback_stats.add_event(self._playback_key, "bitrate-guess:%d" % bitrate) else: if self.doing_bitrate_est: # There was no playtime info in torrent, use what FFMPEG tells us self.ffmpeg_est_bitrate = bitrate bitrate *= 1.1 # Make FFMPEG estimation 10% higher if DEBUG: print >>sys.stderr,"vod: trans: Estimated bitrate: %.2f KByte/s" % (bitrate/1024) vs.set_bitrate(bitrate) self._playback_stats.add_event(self._playback_key, "bitrate-ffmpeg:%d" % bitrate) if width is not None and height is not None: diff = False if self.videodim is None: self.videodim = (width,height) self.height = height elif self.videodim[0] != width or self.videodim[1] != height: diff = True if not self.player_opened_with_width_height or diff: #self.user_setsize(self.videodim) pass # # 10/03/09 boudewijn: For VOD we will wait for the entire # # buffer to fill (gotall) before we start playback. For live # # this is unlikely to happen and we will therefore only wait # # until we estimate that we have enough_buffer. 
# if (gotall or vs.live_streaming) and self.enough_buffer(): if gotall and self.enough_buffer(): # enough buffer and could estimated bitrate - start streaming if DEBUG: print >>sys.stderr,"vod: trans: Prebuffering done",currentThread().getName() self.data_ready.acquire() vs.prebuffering = False self.stat_prebuffertime = time.time() - self.prebufstart self.notify_playable() self.data_ready.notify() self.data_ready.release() elif DEBUG: if self.doing_ffmpeg_analysis: print >>sys.stderr,"vod: trans: Prebuffering: waiting to obtain the first %d packets" % (self.max_prebuf_packets) else: print >>sys.stderr,"vod: trans: Prebuffering: %.2f seconds left" % (self.expected_buffering_time()) def got_have(self,piece): vs = self.videostatus # update stats self.stat_pieces.set( piece, "known", time.time() ) """ if vs.playing and vs.wraparound: # check whether we've slipped back too far d = vs.wraparound_delta n = max(1,self.piecepicker.num_nonempty_neighbours()/2) if self.piecepicker.numhaves[piece] > n and d/2 < (piece - vs.playback_pos) % vs.movie_numpieces < d: # have is confirmed by more than half of the neighours and is in second half of future window print >>sys.stderr,"vod: trans: Forcing restart. Am at playback position %d but saw %d at %d>%d peers." % (vs.playback_pos,piece,self.piecepicker.numhaves[piece],n) self.start(force=True) """ def got_piece(self, piece_id, begin, length): """ Called when a chunk has been downloaded. This information can be used to estimate download speed. """ if self.videostatus.in_high_range(piece_id): self.high_range_rate.update_rate(length) if DEBUG: print >>sys.stderr, "vod: high priority rate:", self.high_range_rate.get_rate() def complete(self,piece,downloaded=True): """ Called when a movie piece has been downloaded or was available from the start (disk). """ vs = self.videostatus if vs.in_high_range(piece): self._playback_stats.add_event(self._playback_key, "hipiece:%d" % piece) else: self._playback_stats.add_event(self._playback_key, "piece:%d" % piece) if not self._complete and self.piecepicker.am_I_complete(): self._complete = True self._playback_stats.add_event(self._playback_key, "complete") self._playback_stats.flush() self.stat_pieces.set( piece, "complete", time.time() ) if DEBUG: print >>sys.stderr,"vod: trans: Completed",piece if downloaded: self.overall_rate.update_rate( vs.piecelen ) if vs.in_download_range( piece ): self.pieces_in_buffer += 1 else: if DEBUG: print >>sys.stderr,"vod: piece %d too late [pos=%d]" % (piece,vs.playback_pos) self.stat_latepieces += 1 if vs.playing and vs.playback_pos == piece: # we were delaying for this piece self.refill_buffer() self.update_prebuffering( piece ) def set_pos(self,pos): """ Update the playback position. Called when playback is started (depending on requested offset). """ vs = self.videostatus oldpos = vs.playback_pos vs.playback_pos = pos # fast forward for i in xrange(oldpos,pos+1): if self.has[i]: self.pieces_in_buffer -= 1 # fast rewind for i in xrange(pos,oldpos+1): if self.has[i]: self.pieces_in_buffer += 1 def inc_pos(self): vs = self.videostatus if self.has[vs.playback_pos]: self.pieces_in_buffer -= 1 vs.inc_playback_pos() def expected_download_time(self): """ Expected download time left. """ vs = self.videostatus if vs.wraparound: return float(2 ** 31) # Ric: TODO for the moment keep track only of the base layer. 
Afterwards we will send # different signals depending on the buffer layer pieces_left = vs.last_piece - vs.playback_pos - self.pieces_in_buffer if pieces_left <= 0: return 0.0 # list all pieces from the high priority set that have not # been completed uncompleted_pieces = filter(self.storagewrapper.do_I_have, vs.generate_high_range()) # when all pieces in the high-range have been downloaded, # we have an expected download time of zero if not uncompleted_pieces: return 0.0 # the download time estimator is very inacurate when we only # have a few chunks left. therefore, we will put more emphesis # on the overall_rate as the number of uncompleted_pieces does # down. total_length = vs.get_high_range_length() uncompleted_length = len(uncompleted_pieces) expected_download_speed = self.high_range_rate.get_rate() * (1 - float(uncompleted_length) / total_length) + \ self.overall_rate.get_rate() * uncompleted_length / total_length if expected_download_speed < 0.1: return float(2 ** 31) return pieces_left * vs.piecelen / expected_download_speed def expected_playback_time(self): """ Expected playback time left. """ vs = self.videostatus pieces_to_play = vs.last_piece - vs.playback_pos + 1 if pieces_to_play <= 0: return 0.0 if not vs.bitrate: return float(2 ** 31) return pieces_to_play * vs.piecelen / vs.bitrate def expected_buffering_time(self): """ Expected time required for buffering. """ download_time = self.expected_download_time() playback_time = self.expected_playback_time() #print >>sys.stderr,"EXPECT",self.expected_download_time(),self.expected_playback_time() # Infinite minus infinite is still infinite if download_time > float(2 ** 30) and playback_time > float(2 ** 30): return float(2 ** 31) return abs(download_time - playback_time) def enough_buffer(self): """ Returns True if we can safely start playback without expecting to run out of buffer. """ return max(0.0, self.expected_download_time() - self.expected_playback_time()) == 0.0 def tick_second(self): self.rawserver.add_task( self.tick_second, 1.0 ) vs = self.videostatus # Adjust estimate every second, but don't display every second display = False # (int(time.time()) % 5) == 0 if DEBUG: # display print >>sys.stderr,"vod: Estimated download time: %5.1fs [priority: %7.2f Kbyte/s] [overall: %7.2f Kbyte/s]" % (self.expected_download_time(), self.high_range_rate.get_rate()/1024, self.overall_rate.get_rate()/1024) if vs.playing and round(self.playbackrate.rate) > self.MINPLAYBACKRATE and not vs.prebuffering: if self.doing_bitrate_est: if display: print >>sys.stderr,"vod: Estimated playback time: %5.0fs [%7.2f Kbyte/s], doing estimate=%d" % (self.expected_playback_time(),self.playbackrate.rate/1024, self.ffmpeg_est_bitrate is None) if self.ffmpeg_est_bitrate is None: vs.set_bitrate( self.playbackrate.rate ) if display: sys.stderr.flush() # # MovieTransport interface # # WARNING: these methods will be called by other threads than NetworkThread! # def size( self ): # Ric: returning the size of the base layer return self.videostatus.selected_movie[0]["size"] def read(self,numbytes=None): """ Read a set of pieces. The return data will be a byte for the pieces presence and a set of pieces depending on the available quality. Return None in case of an error or end-of-stream. """ vs = self.videostatus # keep track in the base layer if not self.curpiece: # curpiece_pos could be set to something other than 0! # for instance, a seek request sets curpiece_pos but does not # set curpiece. 
base_layer_piece = self.pop() if base_layer_piece is None: return None piecenr,self.curpiece = base_layer_piece relatives = vs.get_respective_piece(piecenr) if DEBUG: print >>sys.stderr,"vod: trans: popped piece %d to transport to player," % piecenr, "relative pieces are", relatives curpos = self.curpiece_pos left = len(self.curpiece) - curpos if numbytes is None: # default on one piece per read numbytes = left # TODO ask, we could leave it like this if left > numbytes: # piece contains enough -- return what was requested data = self.curpiece[curpos:curpos+numbytes] self.curpiece_pos += numbytes else: # TODO add get_bitrate method in SVC status to see how many # pieces we need from the different layers! header = str(vs.piecelen) data = header # return remainder of the piece, could be less than numbytes data += self.curpiece[curpos:] for i in relatives: if self.has[i]: if DEBUG: print>>sys.stderr, "vod: trans: filling stream with piece %d from an enhancement layer" % i data += self.get_piece(i) #print>>sys.stderr, "vod: trans: filling stream with piece %d from an enhancement layer" % i, len(data) self.curpiece = "" self.curpiece_pos = 0 return data def start( self, bytepos = 0, force = False ): """ Initialise to start playing at position `bytepos'. """ self._playback_stats.add_event(self._playback_key, "play") # ARNOTODO: we don't use start(bytepos != 0) at the moment. See if we # should. Also see if we need the read numbytes here, or that it # is better handled at a higher layer. For live it is currently # done at a higher level, see VariableReadAuthStreamWrapper because # we have to strip the signature. Hence the self.curpiece buffer here # is superfluous. Get rid off it or check if # # curpiece[0:piecelen] # # returns curpiece if piecelen has length piecelen == optimize for # piecesized case. # # For VOD seeking we may use the numbytes facility to seek to byte offsets # not just piece offsets. # vs = self.videostatus if vs.playing and not force: return # lock before changing startpos or any other playing variable self.data_ready.acquire() try: # Determine piece number and offset if bytepos < vs.piecelen: piece = vs.first_piece offset = bytepos else: newbytepos = bytepos - vs.first_piecelen piece = vs.first_piece + newbytepos / vs.piecelen + 1 offset = newbytepos % vs.piecelen if DEBUG: print >>sys.stderr,"vod: trans: === START, START, START, START, START, START, START, START, START, START, START, START, START,START" print >>sys.stderr,"vod: trans: === START at offset %d (piece %d) (forced: %s) ===" % (bytepos,piece,force) # Initialise all playing variables self.curpiece = "" # piece currently being popped self.curpiece_pos = offset # TODO self.set_pos( piece ) self.outbuf = [] #self.last_pop = time.time() self.reset_bitrate_prediction() vs.playing = True self.playbackrate = Measure( 60 ) # boudewijn: decrease the initial minimum buffer size vs.decrease_high_range() finally: self.data_ready.release() # ARNOTODO: start is called by non-NetworkThreads, these following methods # are usually called by NetworkThread. # # We now know that this won't be called until notify_playable() so # perhaps this can be removed? # # CAREFUL: if we use start() for seeking... that's OK. User won't be # able to seek before he got his hands on the stream, so after # notify_playable() # See what we can do right now self.update_prebuffering() self.refill_buffer() def stop( self ): """ Playback is stopped. 
""" self._playback_stats.add_event(self._playback_key, "stop") vs = self.videostatus if DEBUG: print >>sys.stderr,"vod: trans: === STOP = player closed conn === " if not vs.playing: return vs.playing = False # clear buffer and notify possible readers self.data_ready.acquire() self.outbuf = [] #self.last_pop = None vs.prebuffering = False self.data_ready.notify() self.data_ready.release() def pause( self, autoresume = False ): """ Pause playback. If `autoresume' is set, playback is expected to be resumed automatically once enough data has arrived. """ self._playback_stats.add_event(self._playback_key, "pause") vs = self.videostatus if not vs.playing or not vs.pausable: return if vs.paused: vs.autoresume = autoresume return if DEBUG: print >>sys.stderr,"vod: trans: paused (autoresume: %s)" % (autoresume,) vs.paused = True vs.autoresume = autoresume self.paused_at = time.time() #self.reset_bitrate_prediction() self.videoinfo["usercallback"](VODEVENT_PAUSE,{ "autoresume": autoresume }) def resume( self ): """ Resume paused playback. """ self._playback_stats.add_event(self._playback_key, "resume") vs = self.videostatus if not vs.playing or not vs.paused or not vs.pausable: return if DEBUG: print >>sys.stderr,"vod: trans: resumed" vs.paused = False vs.autoresume = False self.stat_stalltime += time.time() - self.paused_at self.addtime_bitrate_prediction( time.time() - self.paused_at ) self.videoinfo["usercallback"](VODEVENT_RESUME,{}) self.update_prebuffering() self.refill_buffer() def autoresume( self, testfunc = lambda: True ): """ Resumes if testfunc returns True. If not, will test every second. """ vs = self.videostatus if not vs.playing or not vs.paused or not vs.autoresume: return if not testfunc(): self.rawserver.add_task( lambda: self.autoresume( testfunc ), 1.0 ) return if DEBUG: print >>sys.stderr,"vod: trans: Resuming, since we can maintain this playback position" self.resume() def done( self ): vs = self.videostatus if not vs.playing: return True if vs.wraparound: return False return vs.playback_pos == vs.last_piece+1 and self.curpiece_pos >= len(self.curpiece) def seek(self,pos,whence=os.SEEK_SET): """ Seek to the given position, a number in bytes relative to both the "whence" reference point and the file being played. We currently actually seek at byte level, via the start() method. We support all forms of seeking, including seeking past the current playback pos. Note this may imply needing to prebuffer again or being paused. vs.playback_pos in NetworkThread domain. Does data_ready lock cover that? Nope. However, this doesn't appear to be respected in any of the MovieTransport methods, check all. Check * When seeking reset other buffering, e.g. read()'s self.curpiece and higher layers. """ vs = self.videostatus length = self.size() # lock before changing startpos or any other playing variable self.data_ready.acquire() try: if whence == os.SEEK_SET: abspos = pos elif whence == os.SEEK_END: if pos > 0: raise ValueError("seeking beyond end of stream") else: abspos = size+pos else: # SEEK_CUR raise ValueError("seeking does not currently support SEEK_CUR") self.stop() self.start(pos) finally: self.data_ready.release() def get_mimetype(self): return self.mimetype def set_mimetype(self,mimetype): self.mimetype = mimetype # # End of MovieTransport interface # def have_piece(self,piece): return self.piecepicker.has[piece] def get_piece(self,piece): """ Returns the data of a certain piece, or None. 
""" vs = self.videostatus if not self.have_piece( piece ): return None begin = 0 length = vs.piecelen data = self.storagewrapper.do_get_piece(piece, 0, length) if data is None: return None return data.tostring() def reset_bitrate_prediction(self): self.start_playback = None self.last_playback = None self.history_playback = collections.deque() def addtime_bitrate_prediction(self,seconds): if self.start_playback is not None: self.start_playback["local_ts"] += seconds def valid_piece_data(self,i,piece): if not piece: return False if not self.start_playback or self.authenticator is None: # no check possible return True s = self.start_playback seqnum = self.authenticator.get_seqnum( piece ) source_ts = self.authenticator.get_rtstamp( piece ) if seqnum < s["absnr"] or source_ts < s["source_ts"]: # old packet??? print >>sys.stderr,"vod: trans: **** INVALID PIECE #%s **** seqnum=%d but we started at seqnum=%d" % (i,seqnum,s["absnr"]) return False return True def update_bitrate_prediction(self,i,piece): """ Update the rate prediction given that piece i has just been pushed to the buffer. """ if self.authenticator is not None: seqnum = self.authenticator.get_seqnum( piece ) source_ts = self.authenticator.get_rtstamp( piece ) else: seqnum = i source_ts = 0 d = { "nr": i, "absnr": seqnum, "local_ts": time.time(), "source_ts": source_ts, } # record if self.start_playback is None: self.start_playback = d if self.last_playback and self.last_playback["absnr"] > d["absnr"]: # called out of order return self.last_playback = d # keep a recent history MAX_HIST_LEN = 10*60 # seconds self.history_playback.append( d ) # of at most 10 entries (or minutes if we keep receiving pieces) while source_ts - self.history_playback[0]["source_ts"] > MAX_HIST_LEN: self.history_playback.popleft() if DEBUG: vs = self.videostatus first, last = self.history_playback[0], self.history_playback[-1] if first["source_ts"] and first != last: divd = (last["source_ts"] - first["source_ts"]) if divd == 0: divd = 0.000001 bitrate = "%.2f kbps" % (8.0 / 1024 * (vs.piecelen - vs.sigsize) * (last["absnr"] - first["absnr"]) / divd,) else: bitrate = "%.2f kbps (external info)" % (8.0 / 1024 * vs.bitrate) print >>sys.stderr,"vod: trans: %i: pushed at t=%.2f, age is t=%.2f, bitrate = %s" % (i,d["local_ts"]-self.start_playback["local_ts"],d["source_ts"]-self.start_playback["source_ts"],bitrate) def piece_due(self,i): """ Return the time when we expect to have to send a certain piece to the player. For wraparound, future pieces are assumed. """ if self.start_playback is None: return float(2 ** 31) # end of time s = self.start_playback l = self.last_playback vs = self.videostatus if not vs.wraparound and i < l["nr"]: # should already have arrived! return time.time() # assume at most one wrap-around between l and i piecedist = (i - l["nr"]) % vs.movie_numpieces if s["source_ts"]: # ----- we have timing information from the source first, last = self.history_playback[0], self.history_playback[-1] if first != last: # we have at least two recent pieces, so can calculate average bitrate. 
use the recent history # do *not* adjust for sigsize since we don't want the actual video speed but the piece rate bitrate = 1.0 * vs.piecelen * (last["absnr"] - first["absnr"]) / (last["source_ts"] - first["source_ts"]) else: # fall-back to bitrate predicted from torrent / ffmpeg bitrate = vs.bitrate # extrapolate with the average bitrate so far return s["local_ts"] + l["source_ts"] - s["source_ts"] + piecedist * vs.piecelen / bitrate - self.PIECE_DUE_SKEW else: # ----- no timing information from pieces, so do old-fashioned methods if vs.live_streaming: # Arno, 2008-11-20: old-fashioned method is well bad, # ignore. return time.time() + 60.0 else: i = piecedist + (l["absnr"] - s["absnr"]) if s["nr"] == vs.first_piece: bytepos = vs.first_piecelen + (i-1) * vs.piecelen else: bytepos = i * vs.piecelen return s["local_ts"] + bytepos / vs.bitrate - self.PIECE_DUE_SKEW def max_buffer_size( self ): vs = self.videostatus return max(256*1024, vs.piecelen * 4, self.BUFFER_TIME * vs.bitrate) def refill_buffer( self ): """ Push pieces (from the base layer) into the player FIFO when needed and able. This counts as playing the pieces as far as playback_pos is concerned.""" self.data_ready.acquire() vs = self.videostatus if vs.prebuffering or not vs.playing: self.data_ready.release() return if vs.paused: self.data_ready.release() return mx = self.max_buffer_size() self.outbuflen = sum( [len(d) for (p,d) in self.outbuf] ) now = time.time() def buffer_underrun(): return self.outbuflen == 0 and self.start_playback and now - self.start_playback["local_ts"] > 1.0 if buffer_underrun(): # TODO def sustainable(): self.sustainable_counter += 1 if self.sustainable_counter > 10: self.sustainable_counter = 0 base_high_range_length = vs.get_base_high_range_length() have_length = len(filter(lambda n:self.has[n], vs.generate_base_high_range())) # progress self.prebufprogress = min(1.0, float(have_length) / max(1, base_high_range_length)) return have_length >= base_high_range_length else: num_immediate_packets = 0 base_high_range_length = vs.get_base_high_range_length() for piece in vs.generate_base_high_range(): if self.has[piece]: num_immediate_packets += 1 if num_immediate_packets >= base_high_range_length: break else: break else: # progress self.prebufprogress = 1.0 # completed loop without breaking, so we have everything we need return True return num_immediate_packets >= base_high_range_length sus = sustainable() if vs.pausable and not sus: if DEBUG: print >>sys.stderr,"vod: trans: BUFFER UNDERRUN -- PAUSING" self.pause( autoresume = True ) self.autoresume( sustainable ) # boudewijn: increase the minimum buffer size vs.increase_high_range() self.data_ready.release() return elif sus: if DEBUG: print >>sys.stderr,"vod: trans: BUFFER UNDERRUN -- IGNORING, rate is sustainable" else: if DEBUG: print >>sys.stderr,"vod: trans: BUFFER UNDERRUN -- STALLING, cannot pause player to fall back some, so just wait for more pieces" self.data_ready.release() return def push( i, data ): # push packet into queue if DEBUG: print >>sys.stderr,"vod: trans: %d: pushed l=%d" % (vs.playback_pos,piece) # update predictions based on this piece self.update_bitrate_prediction( i, data ) self.stat_playedpieces += 1 self.stat_pieces.set( i, "tobuffer", time.time() ) self.outbuf.append( (vs.playback_pos,data) ) self.outbuflen += len(data) self.data_ready.notify() self.inc_pos() def drop( i ): # drop packet if DEBUG: print >>sys.stderr,"vod: trans: %d: dropped pos=%d; deadline expired %.2f sec ago !!!!!!!!!!!!!!!!!!!!!!" 
% (piece,vs.playback_pos,time.time()-self.piece_due(i)) self.stat_droppedpieces += 1 self.stat_pieces.complete( i ) self.inc_pos() # We push in queue only pieces from the base layer download_range = vs.download_range() base_range = download_range[0] for piece in vs.generate_range( [base_range] ): ihavepiece = self.has[piece] forcedrop = False # check whether we have room to store it if self.outbuflen > mx: # buffer full break # final check for piece validity if ihavepiece: data = self.get_piece( piece ) if not self.valid_piece_data( piece, data ): # I should have the piece, but I don't: WAAAAHH! forcedrop = True ihavepiece = False if ihavepiece: # have piece - push it into buffer if DEBUG: print >>sys.stderr,"vod: trans: BUFFER STATUS (max %.0f): %.0f kbyte" % (mx/1024.0,self.outbuflen/1024.0) # piece found -- add it to the queue push( piece, data ) else: # don't have piece, or forced to drop if not vs.dropping and forcedrop: print >>sys.stderr,"vod: trans: DROPPING INVALID PIECE #%s, even though we shouldn't drop anything." % piece if vs.dropping or forcedrop: if time.time() >= self.piece_due( piece ) or buffer_underrun() or forcedrop: # piece is too late or we have an empty buffer (and future data to play, otherwise we would have paused) -- drop packet drop( piece ) else: # we have time to wait for the piece and still have data in our buffer -- wait for packet if DEBUG: print >>sys.stderr,"vod: trans: %d: due in %.2fs pos=%d" % (piece,self.piece_due(piece)-time.time(),vs.playback_pos) break else: # not dropping if self.outbuflen == 0: print >>sys.stderr,"vod: trans: SHOULD NOT HAPPEN: missing piece but not dropping. should have paused. pausable=",vs.pausable,"player reading too fast looking for I-Frame?" else: if DEBUG: print >>sys.stderr,"vod: trans: prebuffering done, but could not fill buffer." break self.data_ready.release() def refill_rawserv_tasker( self ): self.refill_buffer() self.rawserver.add_task( self.refill_rawserv_tasker, self.REFILL_INTERVAL ) def pop( self ): self.data_ready.acquire() vs = self.videostatus while vs.prebuffering and not self.done(): # wait until done prebuffering self.data_ready.wait() while not self.outbuf and not self.done(): # wait until a piece is available #if DEBUG: # print >>sys.stderr,"vod: trans: Player waiting for data" self.data_ready.wait() if not self.outbuf: piece = None else: piece = self.outbuf.pop( 0 ) # nr,data pair self.playbackrate.update_rate( len(piece[1]) ) #self.last_pop = time.time() self.data_ready.release() if piece: self.stat_pieces.set( piece[0], "toplayer", time.time() ) self.stat_pieces.complete( piece[0] ) return piece def notify_playable(self): """ Tell user he can play the media, cf. 
Tribler.Core.DownloadConfig.set_vod_event_callback() """ #if self.bufferinfo: # self.bufferinfo.set_playable() #self.progressinf.bufferinfo_updated_callback() # triblerAPI if self.usernotified: return self.usernotified = True self.prebufprogress = 1.0 self.playable = True #print >>sys.stderr,"vod: trans: notify_playable: Calling usercallback to tell it we're ready to play",self.videoinfo['usercallback'] # MIME type determined normally in LaunchManyCore.network_vod_event_callback # However, allow for recognition by videoanalyser mimetype = self.get_mimetype() complete = self.piecepicker.am_I_complete() if complete: endstream = None filename = self.videoinfo["outpath"] else: endstream = MovieTransportStreamWrapper(self) filename = None print >>sys.stderr,"3.3", self.size(), endstream, self.vodeventfunc, complete, self.size() # Call user callback #print >>sys.stderr,"vod: trans: notify_playable: calling:",self.vodeventfunc self.vodeventfunc( self.videoinfo, VODEVENT_START, { "complete": complete, "filename": filename, "mimetype": mimetype, "stream": endstream, "length": self.size(), } ) # # Methods for DownloadState to extract status info of VOD mode. # def get_stats(self): """ Returns accumulated statistics. The piece data is cleared after this call to save memory. """ """ Called by network thread """ s = { "played": self.stat_playedpieces, "late": self.stat_latepieces, "dropped": self.stat_droppedpieces, "stall": self.stat_stalltime, "pos": self.videostatus.playback_pos, "prebuf": self.stat_prebuffertime, "pp": self.piecepicker.stats, "pieces": self.stat_pieces.pop_completed(), } return s def get_prebuffering_progress(self): """ Called by network thread """ return self.prebufprogress def is_playable(self): """ Called by network thread """ if not self.playable or self.videostatus.prebuffering: self.playable = (self.prebufprogress == 1.0 and self.enough_buffer()) return self.playable def get_playable_after(self): """ Called by network thread """ return self.expected_buffering_time() def get_duration(self): return 1.0 * self.videostatus.selected_movie[0]["size"] / self.videostatus.bitrate
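# Illustrative sketch (not part of the original module): the minimal consumer
# loop for the MovieTransport interface implemented above, modelled on the
# FAKEPLAYBACK FakeReader thread created in __init__().  read() blocks (via
# pop()) until data is available and returns None on error or end-of-stream,
# so the loop below is essentially the whole contract a player-side reader has
# to honour.  The class name and the file-like `outfile` sink are assumptions
# for illustration; the real client wraps the transporter in
# MovieTransportStreamWrapper instead.
import threading

class SketchPlayerReader(threading.Thread):
    def __init__(self, transporter, outfile):
        threading.Thread.__init__(self)
        self.setDaemon(True)
        self.transporter = transporter   # e.g. an SVCTransporter instance
        self.outfile = outfile           # stand-in for the player's input

    def run(self):
        self.transporter.start(bytepos=0)        # begin at the start of the stream
        while not self.transporter.done():
            data = self.transporter.read()       # blocks until a piece is ready
            if data is None:
                break                            # error or end-of-stream
            self.outfile.write(data)
        self.transporter.stop()                  # also wakes any reader blocked in pop()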
class SingleDownload(SingleDownloadHelperInterface): # _2fastbt def __init__(self, downloader, connection): # 2fastbt_ SingleDownloadHelperInterface.__init__(self) # _2fastbt self.downloader = downloader self.connection = connection self.choked = True self.interested = False self.active_requests = [] self.measure = Measure(downloader.max_rate_period) self.peermeasure = Measure(downloader.max_rate_period) self.have = Bitfield(downloader.numpieces) self.last = -1000 self.last2 = -1000 self.example_interest = None self.backlog = 2 self.ip = connection.get_ip() self.guard = BadDataGuard(self) # 2fastbt_ self.helper = downloader.picker.helper self.proxy_have = Bitfield(downloader.numpieces) # _2fastbt # boudewijn: VOD needs a download measurement that is not # averaged over a 'long' period. downloader.max_rate_period is # (by default) 20 seconds because this matches the unchoke # policy. self.short_term_measure = Measure(5) # boudewijn: each download maintains a counter for the number # of high priority piece requests that did not get any # responce within x seconds. self.bad_performance_counter = 0 def _backlog(self, just_unchoked): self.backlog = int(min( 2+int(4*self.measure.get_rate()/self.downloader.chunksize), (2*just_unchoked)+self.downloader.queue_limit() )) if self.backlog > 50: self.backlog = int(max(50, self.backlog * 0.075)) return self.backlog def disconnected(self): self.downloader.lost_peer(self) """ JD: obsoleted -- moved to picker.lost_peer if self.have.complete(): self.downloader.picker.lost_seed() else: for i in xrange(len(self.have)): if self.have[i]: self.downloader.picker.lost_have(i) """ if self.have.complete() and self.downloader.storage.is_endgame(): self.downloader.add_disconnected_seed(self.connection.get_readable_id()) self._letgo() self.guard.download = None def _letgo(self): if self.downloader.queued_out.has_key(self): del self.downloader.queued_out[self] if not self.active_requests: return if self.downloader.endgamemode: self.active_requests = [] return lost = {} for index, begin, length in self.active_requests: self.downloader.storage.request_lost(index, begin, length) lost[index] = 1 lost = lost.keys() self.active_requests = [] if self.downloader.paused: return ds = [d for d in self.downloader.downloads if not d.choked] shuffle(ds) for d in ds: d._request_more() for d in self.downloader.downloads: if d.choked and not d.interested: for l in lost: if d.have[l] and self.downloader.storage.do_I_have_requests(l): d.send_interested() break def got_choke(self): if not self.choked: self.choked = True self._letgo() def got_unchoke(self): if self.choked: self.choked = False if self.interested: self._request_more(new_unchoke = True) self.last2 = clock() def is_choked(self): return self.choked def is_interested(self): return self.interested def send_interested(self): if not self.interested: self.interested = True self.connection.send_interested() def send_not_interested(self): if self.interested: self.interested = False self.connection.send_not_interested() def got_piece(self, index, begin, hashlist, piece): """ Returns True if the piece is complete. Note that in this case a -piece- means a chunk! 
""" if self.bad_performance_counter: self.bad_performance_counter -= 1 if DEBUG: print >>sys.stderr, "decreased bad_performance_counter to", self.bad_performance_counter length = len(piece) #if DEBUG: # print >> sys.stderr, 'Downloader: got piece of length %d' % length try: self.active_requests.remove((index, begin, length)) except ValueError: self.downloader.discarded += length return False if self.downloader.endgamemode: self.downloader.all_requests.remove((index, begin, length)) if DEBUG: print >>sys.stderr, "Downloader: got_piece: removed one request from all_requests", len(self.downloader.all_requests), "remaining" self.last = clock() self.last2 = clock() self.measure.update_rate(length) # Update statistic gatherer status = get_status_holder("LivingLab") s_download = status.get_or_create_status_element("downloaded",0) s_download.inc(length) self.short_term_measure.update_rate(length) self.downloader.measurefunc(length) if not self.downloader.storage.piece_came_in(index, begin, hashlist, piece, self.guard): self.downloader.piece_flunked(index) return False # boudewijn: we need more accurate (if possibly invalid) # measurements on current download speed self.downloader.picker.got_piece(index, begin, length) # print "Got piece=", index, "begin=", begin, "len=", length if self.downloader.storage.do_I_have(index): self.downloader.picker.complete(index) if self.downloader.endgamemode: for d in self.downloader.downloads: if d is not self: if d.interested: if d.choked: assert not d.active_requests d.fix_download_endgame() else: try: d.active_requests.remove((index, begin, length)) except ValueError: continue d.connection.send_cancel(index, begin, length) d.fix_download_endgame() else: assert not d.active_requests self._request_more() self.downloader.check_complete(index) # BarterCast counter self.connection.total_downloaded += length return self.downloader.storage.do_I_have(index) # 2fastbt_ def helper_forces_unchoke(self): self.choked = False # _2fastbt def _request_more(self, new_unchoke = False, slowpieces = []): # 2fastbt_ if DEBUG: print >>sys.stderr,"Downloader: _request_more()" if self.helper is not None and self.is_frozen_by_helper(): if DEBUG: print >>sys.stderr,"Downloader: _request_more: blocked, returning" return # _2fastbt if self.choked: if DEBUG: print >>sys.stderr,"Downloader: _request_more: choked, returning" return # 2fastbt_ # do not download from coordinator if self.connection.connection.is_coordinator_con(): if DEBUG: print >>sys.stderr,"Downloader: _request_more: coordinator conn" return # _2fastbt if self.downloader.endgamemode: self.fix_download_endgame(new_unchoke) if DEBUG: print >>sys.stderr,"Downloader: _request_more: endgame mode, returning" return if self.downloader.paused: if DEBUG: print >>sys.stderr,"Downloader: _request_more: paused, returning" return if len(self.active_requests) >= self._backlog(new_unchoke): if DEBUG: print >>sys.stderr,"Downloader: more req than unchoke (active req: %d >= backlog: %d)" % (len(self.active_requests), self._backlog(new_unchoke)) # Jelle: Schedule _request more to be called in some time. Otherwise requesting and receiving packages # may stop, if they arrive to quickly if self.downloader.download_rate: wait_period = self.downloader.chunksize / self.downloader.download_rate / 2.0 # Boudewijn: when wait_period is 0.0 this will cause # the the _request_more method to be scheduled # multiple times (recursively), causing severe cpu # problems. 
# # Therefore, only schedule _request_more to be called # if the call will be made in the future. The minimal # wait_period should be tweaked. if wait_period > 1.0: if DEBUG: print >>sys.stderr,"Downloader: waiting for %f s to call _request_more again" % wait_period self.downloader.scheduler(self._request_more, wait_period) if not (self.active_requests or self.backlog): self.downloader.queued_out[self] = 1 return #if DEBUG: # print >>sys.stderr,"Downloader: _request_more: len act",len(self.active_requests),"back",self.backlog lost_interests = [] while len(self.active_requests) < self.backlog: #if DEBUG: # print >>sys.stderr,"Downloader: Looking for interesting piece" #st = time.time() #print "DOWNLOADER self.have=", self.have.toboollist() # This is the PiecePicker call is the current client is a Coordinator interest = self.downloader.picker.next(self.have, self.downloader.storage.do_I_have_requests, self, self.downloader.too_many_partials(), self.connection.connection.is_helper_con(), slowpieces = slowpieces, connection = self.connection, proxyhave = self.proxy_have) #et = time.time() #diff = et-st diff=-1 if DEBUG: print >>sys.stderr,"Downloader: _request_more: next() returned",interest,"took %.5f" % (diff) if interest is None: break if self.helper and self.downloader.storage.inactive_requests[interest] is None: # The current node is a helper and received a request from a coordinator for a piece it has already downloaded # Should send a Have message to the coordinator self.connection.send_have(interest) break if self.helper and self.downloader.storage.inactive_requests[interest] == []: # The current node is a helper and received a request from a coordinator for a piece that is downloading # (all blocks are requested to the swarm, and have not arrived yet) break self.example_interest = interest self.send_interested() loop = True while len(self.active_requests) < self.backlog and loop: begin, length = self.downloader.storage.new_request(interest) if DEBUG: print >>sys.stderr,"Downloader: new_request",interest,begin,length,"to",self.connection.connection.get_ip(),self.connection.connection.get_port() self.downloader.picker.requested(interest, begin, length) self.active_requests.append((interest, begin, length)) self.connection.send_request(interest, begin, length) self.downloader.chunk_requested(length) if not self.downloader.storage.do_I_have_requests(interest): loop = False lost_interests.append(interest) if not self.active_requests: self.send_not_interested() if lost_interests: for d in self.downloader.downloads: if d.active_requests or not d.interested: continue if d.example_interest is not None and self.downloader.storage.do_I_have_requests(d.example_interest): continue for lost in lost_interests: if d.have[lost]: break else: continue # 2fastbt_ #st = time.time() interest = self.downloader.picker.next(d.have, self.downloader.storage.do_I_have_requests, self, # Arno, 2008-05-22; self -> d? 
Original Pawel code self.downloader.too_many_partials(), self.connection.connection.is_helper_con(), willrequest=False,connection=self.connection, proxyhave = self.proxy_have) #et = time.time() #diff = et-st diff=-1 if DEBUG: print >>sys.stderr,"Downloader: _request_more: next()2 returned",interest,"took %.5f" % (diff) if interest is not None: # The helper has at least one piece that the coordinator requested if self.helper and self.downloader.storage.inactive_requests[interest] is None: # The current node is a helper and received a request from a coordinator for a piece it has already downloaded # Should send a Have message to the coordinator self.connection.send_have(interest) break if self.helper and self.downloader.storage.inactive_requests[interest] == []: # The current node is a helper and received a request from a coordinator for a piece that is downloading # (all blocks are requested to the swarm, and have not arrived yet) break # _2fastbt if interest is None: d.send_not_interested() else: d.example_interest = interest # Arno: LIVEWRAP: no endgame if not self.downloader.endgamemode and \ self.downloader.storage.is_endgame() and \ not (self.downloader.picker.videostatus and self.downloader.picker.videostatus.live_streaming): self.downloader.start_endgame() def fix_download_endgame(self, new_unchoke = False): # 2fastbt_ # do not download from coordinator if self.downloader.paused or self.connection.connection.is_coordinator_con(): if DEBUG: print >>sys.stderr, "Downloader: fix_download_endgame: paused", self.downloader.paused, "or is_coordinator_con", self.connection.connection.is_coordinator_con() return # _2fastbt if len(self.active_requests) >= self._backlog(new_unchoke): if not (self.active_requests or self.backlog) and not self.choked: self.downloader.queued_out[self] = 1 if DEBUG: print >>sys.stderr, "Downloader: fix_download_endgame: returned" return # 2fastbt_ want = [a for a in self.downloader.all_requests if self.have[a[0]] and a not in self.active_requests and (self.helper is None or self.connection.connection.is_helper_con() or not self.helper.is_ignored(a[0]))] # _2fastbt if not (self.active_requests or want): self.send_not_interested() if DEBUG: print >>sys.stderr, "Downloader: fix_download_endgame: not interested" return if want: self.send_interested() if self.choked: if DEBUG: print >>sys.stderr, "Downloader: fix_download_endgame: choked" return shuffle(want) del want[self.backlog - len(self.active_requests):] self.active_requests.extend(want) for piece, begin, length in want: # 2fastbt_ if self.helper is None or self.connection.connection.is_helper_con() or self.helper.reserve_piece(piece,self): self.connection.send_request(piece, begin, length) self.downloader.chunk_requested(length) # _2fastbt def got_have(self, index): # print >>sys.stderr,"Downloader: got_have",index if DEBUG: print >>sys.stderr,"Downloader: got_have",index if index == self.downloader.numpieces-1: self.downloader.totalmeasure.update_rate(self.downloader.storage.total_length-(self.downloader.numpieces-1)*self.downloader.storage.piece_length) self.peermeasure.update_rate(self.downloader.storage.total_length-(self.downloader.numpieces-1)*self.downloader.storage.piece_length) else: self.downloader.totalmeasure.update_rate(self.downloader.storage.piece_length) self.peermeasure.update_rate(self.downloader.storage.piece_length) # Arno: LIVEWRAP if not self.downloader.picker.is_valid_piece(index): if DEBUG: print >>sys.stderr,"Downloader: got_have",index,"is invalid piece" return # TODO: should we 
request_more()? if self.have[index]: return self.have[index] = True self.downloader.picker.got_have(index,self.connection) # ProxyService_ # # Aggregate the haves bitfields and send them to the coordinator # If I am a coordinator, i will exit shortly self.downloader.aggregate_and_send_haves() # # _ProxyService if self.have.complete(): self.downloader.picker.became_seed() if self.downloader.picker.am_I_complete(): self.downloader.add_disconnected_seed(self.connection.get_readable_id()) self.connection.close() return if self.downloader.endgamemode: self.fix_download_endgame() elif ( not self.downloader.paused and not self.downloader.picker.is_blocked(index) and self.downloader.storage.do_I_have_requests(index) ): if not self.choked: self._request_more() else: self.send_interested() def _check_interests(self): if self.interested or self.downloader.paused: return for i in xrange(len(self.have)): if ( self.have[i] and not self.downloader.picker.is_blocked(i) and ( self.downloader.endgamemode or self.downloader.storage.do_I_have_requests(i) ) ): self.send_interested() return def got_have_bitfield(self, have): if self.downloader.picker.am_I_complete() and have.complete(): # Arno: If we're both seeds if self.downloader.super_seeding: self.connection.send_bitfield(have.tostring()) # be nice, show you're a seed too self.connection.close() self.downloader.add_disconnected_seed(self.connection.get_readable_id()) return if DEBUGBF: st = time.time() if have.complete(): # Arno: He is seed self.downloader.picker.got_seed() else: # Arno: pass on HAVE knowledge to PiecePicker and if LIVEWRAP: # filter out valid pieces # STBSPEED: if we haven't hooked in yet, don't iterate over whole range # just over the active ranges in the received Bitfield activerangeiterators = [] if self.downloader.picker.videostatus and self.downloader.picker.videostatus.live_streaming and self.downloader.picker.videostatus.get_live_startpos() is None: # Not hooked in activeranges = have.get_active_ranges() if len(activeranges) == 0: # Bug, fallback to whole range activerangeiterators = [self.downloader.picker.get_valid_range_iterator()] else: # Create iterators for the active ranges for (s,e) in activeranges: activerangeiterators.append(xrange(s,e+1)) else: # Hooked in, use own valid range as active range # Arno, 2010-04-20: Not correct for VOD with seeking, then we # should store the HAVE info for things before playback too. activerangeiterators = [self.downloader.picker.get_valid_range_iterator()] if DEBUGBF: print >>sys.stderr,"Downloader: got_have_field: live: Filtering bitfield",activerangeiterators if not self.downloader.picker.videostatus or self.downloader.picker.videostatus.live_streaming: if DEBUGBF: print >>sys.stderr,"Downloader: got_have_field: live or normal filter" # Transfer HAVE knowledge to PiecePicker and filter pieces if live validhave = Bitfield(self.downloader.numpieces) for iterator in activerangeiterators: for i in iterator: if have[i]: validhave[i] = True self.downloader.picker.got_have(i,self.connection) else: # VOD if DEBUGBF: print >>sys.stderr,"Downloader: got_have_field: VOD filter" validhave = Bitfield(self.downloader.numpieces) (first,last) = self.downloader.picker.videostatus.download_range() for i in xrange(first,last): if have[i]: validhave[i] = True self.downloader.picker.got_have(i,self.connection) # ProxyService_ # # Aggregate the haves bitfields and send them to the coordinator # ARNOPS: Shouldn't this be done after have = validhave? 
self.downloader.aggregate_and_send_haves() # # _ProxyService """ # SANITY CHECK checkhave = Bitfield(self.downloader.numpieces) for i in self.downloader.picker.get_valid_range_iterator(): if have[i]: checkhave[i] = True assert validhave.tostring() == checkhave.tostring() """ # Store filtered bitfield instead of received one have = validhave if DEBUGBF: et = time.time() diff = et - st print >>sys.stderr,"Download: got_have_field: took",diff self.have = have #print >>sys.stderr,"Downloader: got_have_bitfield: valid",`have.toboollist()` if self.downloader.endgamemode and not self.downloader.paused: for piece, begin, length in self.downloader.all_requests: if self.have[piece]: self.send_interested() break return self._check_interests() def get_rate(self): return self.measure.get_rate() def get_short_term_rate(self): return self.short_term_measure.get_rate() def is_snubbed(self): # 2fastbt_ if not self.choked and clock() - self.last2 > self.downloader.snub_time and \ not self.connection.connection.is_helper_con() and \ not self.connection.connection.is_coordinator_con(): # _2fastbt for index, begin, length in self.active_requests: self.connection.send_cancel(index, begin, length) self.got_choke() # treat it just like a choke return clock() - self.last > self.downloader.snub_time def peer_is_complete(self): return self.have.complete()
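# Illustrative sketch (not part of the original module): the request-backlog
# rule from SingleDownload._backlog() above, written as a pure function so its
# behaviour is easy to inspect.  The parameter names mirror the values used
# above (measured download rate, chunk size, queue limit); the function itself
# is an assumption for illustration only.
def _sketch_backlog(rate, chunksize, just_unchoked, queue_limit):
    # enough outstanding requests to keep roughly four seconds of data in
    # flight at the measured rate, with a floor of 2, bounded by the
    # downloader-wide queue limit
    backlog = int(min(2 + int(4 * rate / chunksize),
                      (2 * just_unchoked) + queue_limit))
    if backlog > 50:
        # same clamp as above: backlog * 0.075 only exceeds 50 when the
        # pre-clamp value is above ~667, so this normally caps the backlog
        # at 50 outstanding requests
        backlog = int(max(50, backlog * 0.075))
    return backlog

# Example: a 1 MiB/s peer with 16 KiB chunks and a queue limit of 100 gives
# min(2 + 256, 100) == 100, which the clamp then reduces to 50:
#   _sketch_backlog(2 ** 20, 2 ** 14, 0, 100)  ->  50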
class Upload: def __init__(self, connection, ratelimiter, totalup, choker, storage, picker, config, is_supporter_seed=False): #SmoothIT: supporter seed self.connection = connection self.ratelimiter = ratelimiter self.totalup = totalup self.choker = choker self.storage = storage self.picker = picker self.config = config self.max_slice_length = config['max_slice_length'] self.choked = True self.cleared = True self.interested = False self.super_seeding = False self.buffer = [] self.measure = Measure(config['max_rate_period'], config['upload_rate_fudge']) self.was_ever_interested = False if storage.get_amount_left() == 0: if choker.super_seed: self.super_seeding = True # flag, and don't send bitfield self.seed_have_list = [] # set from piecepicker self.skipped_count = 0 else: if config['breakup_seed_bitfield']: bitfield, msgs = storage.get_have_list_cloaked() connection.send_bitfield(bitfield) for have in msgs: connection.send_have(have) else: connection.send_bitfield(storage.get_have_list()) else: if storage.do_I_have_anything(): connection.send_bitfield(storage.get_have_list()) self.piecedl = None self.piecebuf = None # Merkle self.hashlist = [] # SmoothIT_ self.is_supporter_seed = is_supporter_seed print >> sys.stderr, "Uploader: am_supporter_seed=%s" % self.is_supporter_seed # _SmoothIT def send_haves(self, connection): """ Send all pieces I have a series of HAVEs - this is done by closed swarms after successfully connecting (will send blank bitfields until remote node is authorized) """ have_list = self.storage.get_have_list() print >> sys.stderr, "Have list:", have_list def send_bitfield(self, connection): """ Send the bitfield (again) """ if self.storage.get_amount_left() == 0: if not self.super_seeding: if self.config['breakup_seed_bitfield']: bitfield, msgs = self.storage.get_have_list_cloaked() connection.send_bitfield(bitfield) for have in msgs: connection.send_have(have) else: connection.send_bitfield(self.storage.get_have_list()) else: if storage.do_I_have_anything(): connection.send_bitfield(self.storage.get_have_list()) def got_not_interested(self): if self.interested: self.interested = False del self.buffer[:] self.piecedl = None if self.piecebuf: self.piecebuf.release() self.piecebuf = None self.choker.not_interested(self.connection) def got_interested(self): if not self.interested: self.interested = True self.was_ever_interested = True self.choker.interested(self.connection) def get_upload_chunk(self): if self.choked or not self.buffer: return None index, begin, length = self.buffer.pop(0) if self.config['buffer_reads']: if index != self.piecedl: if self.piecebuf: self.piecebuf.release() self.piecedl = index # Merkle [self.piecebuf, self.hashlist] = self.storage.get_piece(index, 0, -1) try: piece = self.piecebuf[begin:begin + length] assert len(piece) == length except: # fails if storage.get_piece returns None or if out of range self.connection.close() return None if begin == 0: hashlist = self.hashlist else: hashlist = [] else: if self.piecebuf: self.piecebuf.release() self.piecedl = None [piece, hashlist] = self.storage.get_piece(index, begin, length) if piece is None: self.connection.close() return None self.measure.update_rate(len(piece)) self.totalup.update_rate(len(piece)) status = Status.get_status_holder("LivingLab") s_upload = status.get_or_create_status_element("uploaded", 0) s_upload.inc(len(piece)) # BarterCast counter self.connection.total_uploaded += length return (index, begin, hashlist, piece) #SmoothIT def am_supporter_seed(self): return 
self.is_supporter_seed #SmoothIT_ def got_request(self, index, begin, length): if ((self.super_seeding and not index in self.seed_have_list) or (not self.connection.connection.is_coordinator_con() and not self.interested and not self.am_supporter_seed()) or length > self.max_slice_length): #SmoothIT: supporter seed option print >> sys.stderr, "Uploader: got request and close" self.connection.close() return if not self.cleared: self.buffer.append((index, begin, length)) if not self.choked and self.connection.next_upload is None: self.ratelimiter.queue(self.connection) def got_cancel(self, index, begin, length): try: self.buffer.remove((index, begin, length)) except ValueError: pass def choke(self): if not self.choked: self.choked = True self.connection.send_choke() self.piecedl = None if self.piecebuf: self.piecebuf.release() self.piecebuf = None def choke_sent(self): del self.buffer[:] self.cleared = True def unchoke(self): if self.choked: try: self.connection.send_unchoke() self.choked = False self.cleared = False except: pass def disconnected(self): if self.piecebuf: self.piecebuf.release() self.piecebuf = None def is_choked(self): return self.choked def is_interested(self): return self.interested def has_queries(self): return not self.choked and self.buffer def get_rate(self): return self.measure.get_rate() # smoothit_ def get_ip(self): return self.connection.get_ip()
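# Illustrative sketch (not part of the original module): the admission rule
# applied by got_request() in the SmoothIT-enabled Upload above, expressed as
# a standalone predicate that returns True when the request should cause the
# connection to be closed.  The function and its parameter names are
# assumptions for illustration; the original reads the same values from self.
def _sketch_reject_request(index, length, super_seeding, seed_have_list,
                           is_coordinator_con, peer_interested,
                           am_supporter_seed, max_slice_length):
    if super_seeding and index not in seed_have_list:
        return True   # super-seeding: only explicitly advertised pieces may be requested
    if not is_coordinator_con and not peer_interested and not am_supporter_seed:
        return True   # ordinary peers must declare interest before requesting
    if length > max_slice_length:
        return True   # oversized slice requests are refused outright
    return False

# Example (the max_slice_length value is made up): a 16 KiB request from an
# uninterested ordinary peer is rejected, but the same request is served when
# this seed acts as a supporter seed:
#   _sketch_reject_request(3, 16384, False, [], False, False, False, 2 ** 17) -> True
#   _sketch_reject_request(3, 16384, False, [], False, False, True,  2 ** 17) -> False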
class Upload: def __init__(self, connection, ratelimiter, totalup, choker, storage, picker, config): self.connection = connection self.ratelimiter = ratelimiter self.totalup = totalup self.choker = choker self.storage = storage self.picker = picker self.config = config self.max_slice_length = config['max_slice_length'] self.choked = True self.cleared = True self.interested = False self.super_seeding = False self.buffer = [] self.measure = Measure(config['max_rate_period'], config['upload_rate_fudge']) self.was_ever_interested = False if storage.get_amount_left() == 0: if choker.super_seed: self.super_seeding = True # flag, and don't send bitfield self.seed_have_list = [] # set from piecepicker self.skipped_count = 0 else: if config['breakup_seed_bitfield']: bitfield, msgs = storage.get_have_list_cloaked() connection.send_bitfield(bitfield) for have in msgs: connection.send_have(have) else: connection.send_bitfield(storage.get_have_list()) else: if storage.do_I_have_anything(): connection.send_bitfield(storage.get_have_list()) self.piecedl = None self.piecebuf = None # Merkle self.hashlist = [] def send_haves(self, connection): """ Send all pieces I have a series of HAVEs - this is done by closed swarms after successfully connecting (will send blank bitfields until remote node is authorized) """ have_list = self.storage.get_have_list() print >>sys.stderr, "Have list:",have_list def send_bitfield(self, connection): """ Send the bitfield (again) """ if self.storage.get_amount_left() == 0: if not self.super_seeding: if self.config['breakup_seed_bitfield']: bitfield, msgs = self.storage.get_have_list_cloaked() connection.send_bitfield(bitfield) for have in msgs: connection.send_have(have) else: connection.send_bitfield(self.storage.get_have_list()) else: if self.storage.do_I_have_anything(): connection.send_bitfield(self.storage.get_have_list()) def got_not_interested(self): if self.interested: self.interested = False del self.buffer[:] self.piecedl = None if self.piecebuf: self.piecebuf.release() self.piecebuf = None self.choker.not_interested(self.connection) def got_interested(self): if not self.interested: self.interested = True self.was_ever_interested = True self.choker.interested(self.connection) def get_upload_chunk(self): if self.choked or not self.buffer: return None index, begin, length = self.buffer.pop(0) if self.config['buffer_reads']: if index != self.piecedl: if self.piecebuf: self.piecebuf.release() self.piecedl = index # Merkle [ self.piecebuf, self.hashlist ] = self.storage.get_piece(index, 0, -1) try: piece = self.piecebuf[begin:begin+length] assert len(piece) == length except: # fails if storage.get_piece returns None or if out of range self.connection.close() return None if begin == 0: hashlist = self.hashlist else: hashlist = [] else: if self.piecebuf: self.piecebuf.release() self.piecedl = None [piece, hashlist] = self.storage.get_piece(index, begin, length) if piece is None: self.connection.close() return None self.measure.update_rate(len(piece)) self.totalup.update_rate(len(piece)) status = get_status_holder("LivingLab") s_upload = status.get_or_create_status_element("uploaded",0) s_upload.inc(len(piece)) # BarterCast counter self.connection.total_uploaded += length return (index, begin, hashlist, piece) def got_request(self, index, begin, length): if ((self.super_seeding and not index in self.seed_have_list) or (not self.connection.connection.is_coordinator_con() and not self.interested) or length > self.max_slice_length): self.connection.close() return if not 
self.cleared: self.buffer.append((index, begin, length)) if not self.choked and self.connection.next_upload is None: self.ratelimiter.queue(self.connection) def got_cancel(self, index, begin, length): try: self.buffer.remove((index, begin, length)) except ValueError: pass def choke(self): if not self.choked: self.choked = True self.connection.send_choke() self.piecedl = None if self.piecebuf: self.piecebuf.release() self.piecebuf = None def choke_sent(self): del self.buffer[:] self.cleared = True def unchoke(self): if self.choked: try: if self.connection.send_unchoke(): self.choked = False self.cleared = False except: pass def disconnected(self): if self.piecebuf: self.piecebuf.release() self.piecebuf = None def is_choked(self): return self.choked def is_interested(self): return self.interested def has_queries(self): return not self.choked and self.buffer def get_rate(self): return self.measure.get_rate()
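# When config['breakup_seed_bitfield'] is set, a finished download does not reveal itself
# as a seed in a single BITFIELD: get_have_list_cloaked() hands back a bitfield with some
# pieces masked out plus the masked indices, which are then announced separately as HAVE
# messages (see the __init__ and send_bitfield paths above). A rough illustration of that
# split, assuming the have list is a plain list of booleans; cloak_have_list and the 20%
# masking fraction are illustrative, not the actual storage implementation.
import random

def cloak_have_list(have, fraction=0.2):
    """Return (cloaked_bitfield, have_msgs): `have` with ~fraction of the True entries
    cleared, plus the cleared indices that should follow as HAVE messages."""
    owned = [i for i, h in enumerate(have) if h]
    hidden = set(random.sample(owned, int(len(owned) * fraction)))
    cloaked = [h and (i not in hidden) for i, h in enumerate(have)]
    return cloaked, sorted(hidden)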
class SingleDownload(SingleDownloadHelperInterface): # _2fastbt def __init__(self, downloader, connection): # 2fastbt_ SingleDownloadHelperInterface.__init__(self) # _2fastbt # SmoothIT_ self.logger = logging.getLogger("Tribler.SingleDownload") self.support_required = True # _SmoothIT self.downloader = downloader self.connection = connection self.choked = True self.interested = False self.active_requests = [] self.measure = Measure(downloader.max_rate_period) self.peermeasure = Measure(downloader.max_rate_period) self.have = Bitfield(downloader.numpieces) self.last = -1000 self.last2 = -1000 self.example_interest = None self.backlog = 2 self.ip = connection.get_ip() self.guard = BadDataGuard(self) # 2fastbt_ self.helper = downloader.picker.helper # _2fastbt # boudewijn: VOD needs a download measurement that is not # averaged over a 'long' period. downloader.max_rate_period is # (by default) 20 seconds because this matches the unchoke # policy. self.short_term_measure = Measure(5) # boudewijn: each download maintains a counter for the number # of high priority piece requests that did not get any # responce within x seconds. self.bad_performance_counter = 0 # SmoothIT_ : collect block stats self.block_stats = ( [] ) # hold statistics of received blocks, format: piece_index, block_offset, block_size, sender_ip, sender_port, sender_id # _SmoothIT def _backlog(self, just_unchoked): self.backlog = int( min( 2 + int(4 * self.measure.get_rate() / self.downloader.chunksize), (2 * just_unchoked) + self.downloader.queue_limit(), ) ) if self.backlog > 50: self.backlog = int(max(50, self.backlog * 0.075)) return self.backlog def disconnected(self): self.downloader.lost_peer(self) """ JD: obsoleted -- moved to picker.lost_peer if self.have.complete(): self.downloader.picker.lost_seed() else: for i in xrange(len(self.have)): if self.have[i]: self.downloader.picker.lost_have(i) """ if self.have.complete() and self.downloader.storage.is_endgame(): self.downloader.add_disconnected_seed(self.connection.get_readable_id()) self._letgo() self.guard.download = None def _letgo(self): if self.downloader.queued_out.has_key(self): del self.downloader.queued_out[self] if not self.active_requests: return if self.downloader.endgamemode: self.active_requests = [] return lost = {} for index, begin, length in self.active_requests: self.downloader.storage.request_lost(index, begin, length) lost[index] = 1 lost = lost.keys() self.active_requests = [] if self.downloader.paused: return ds = [d for d in self.downloader.downloads if not d.choked] shuffle(ds) for d in ds: d._request_more() for d in self.downloader.downloads: if d.choked and not d.interested: for l in lost: if d.have[l] and self.downloader.storage.do_I_have_requests(l): d.send_interested() break def got_choke(self): if not self.choked: self.choked = True self._letgo() def got_unchoke(self): if self.choked: self.choked = False if self.interested: self._request_more(new_unchoke=True) self.last2 = clock() def is_choked(self): return self.choked def is_interested(self): return self.interested def toInitialSeed(self): # Whether our counterpart is an initial seed supporter_ips = self.downloader.supporter_ips its_ip = self.connection.get_ip() toSeed = its_ip in supporter_ips print >>sys.stderr, "Downloader:SD: get_ip=%s, supporter_ips=%s, toInitSeed=%s" % ( its_ip, supporter_ips, toSeed, ) return toSeed def send_interested(self): # print >>sys.stderr, "Send interested: before_interested=%s, support_required=%s" % (self.interested, self.support_required) if not 
self.interested: # print >>sys.stderr, "send_interested with toInitSeed=%s supp_required=%s:" % (self.toInitialSeed, self.support_required) if self.toInitialSeed() and not self.support_required: print >>sys.stderr, "DO NOT Send interested:" return print >>sys.stderr, "Send interested" self.interested = True self.connection.send_interested() def send_not_interested(self): if self.interested: # print >>sys.stderr, "Send NOT interested:" self.interested = False self.connection.send_not_interested() def got_piece(self, index, begin, hashlist, piece): """ Returns True if the piece is complete. Note that in this case a -piece- means a chunk! """ # SmoothIT_ # print >>sys.stderr, "Downloader got piece (%i, %i, %i) from (%s:%s=%s) at %d" % (index, begin, len(piece), self.connection.get_ip(), self.connection.get_port(), self.connection.get_id(), time.time()) try: entry = (index, begin, len(piece), time.time()) self.block_stats.append(entry) # print >>sys.stderr, "Block stats after last piece: %s" % self.block_stats except: print >>sys.stderr, "Unexpected error:", sys.exc_info() # _SmoothIT if self.bad_performance_counter: self.bad_performance_counter -= 1 if DEBUG: print >>sys.stderr, "decreased bad_performance_counter to", self.bad_performance_counter length = len(piece) # if DEBUG: # print >> sys.stderr, 'Downloader: got piece of length %d' % length try: self.active_requests.remove((index, begin, length)) except ValueError: self.downloader.discarded += length return False if self.downloader.endgamemode: self.downloader.all_requests.remove((index, begin, length)) if DEBUG: print >>sys.stderr, "Downloader: got_piece: removed one request from all_requests", len( self.downloader.all_requests ), "remaining" self.last = clock() self.last2 = clock() self.measure.update_rate(length) # Update statistic gatherer status = Status.get_status_holder("LivingLab") s_download = status.get_or_create_status_element("downloaded", 0) s_download.inc(length) self.short_term_measure.update_rate(length) self.downloader.measurefunc(length) if not self.downloader.storage.piece_came_in(index, begin, hashlist, piece, self.guard): self.downloader.piece_flunked(index) return False # boudewijn: we need more accurate (if possibly invalid) # measurements on current download speed self.downloader.picker.got_piece(index, begin, length) if self.downloader.storage.do_I_have(index): self.downloader.picker.complete(index) if self.downloader.endgamemode: for d in self.downloader.downloads: if d is not self: if d.interested: if d.choked: assert not d.active_requests d.fix_download_endgame() else: try: d.active_requests.remove((index, begin, length)) except ValueError: continue d.connection.send_cancel(index, begin, length) d.fix_download_endgame() else: assert not d.active_requests self._request_more() self.downloader.check_complete(index) # BarterCast counter self.connection.total_downloaded += length return self.downloader.storage.do_I_have(index) # 2fastbt_ def helper_forces_unchoke(self): self.choked = False # _2fastbt def _request_more(self, new_unchoke=False, slowpieces=[]): # 2fastbt_ if DEBUG: print >>sys.stderr, "Downloader: _request_more()" if self.helper is not None and self.is_frozen_by_helper(): if DEBUG: print >>sys.stderr, "Downloader: _request_more: blocked, returning" return # _2fastbt if self.choked: if DEBUG: print >>sys.stderr, "Downloader: _request_more: choked, returning" return # 2fastbt_ # do not download from coordinator if self.connection.connection.is_coordinator_con(): if DEBUG: print >>sys.stderr, 
"Downloader: _request_more: coordinator conn" return # _2fastbt if self.downloader.endgamemode: self.fix_download_endgame(new_unchoke) if DEBUG: print >>sys.stderr, "Downloader: _request_more: endgame mode, returning" return if self.downloader.paused: if DEBUG: print >>sys.stderr, "Downloader: _request_more: paused, returning" return if len(self.active_requests) >= self._backlog(new_unchoke): if DEBUG: print >>sys.stderr, "Downloader: more req than unchoke (active req: %d >= backlog: %d)" % ( len(self.active_requests), self._backlog(new_unchoke), ) # Jelle: Schedule _request more to be called in some time. Otherwise requesting and receiving packages # may stop, if they arrive to quickly if self.downloader.download_rate: wait_period = self.downloader.chunksize / self.downloader.download_rate / 2.0 # Boudewijn: when wait_period is 0.0 this will cause # the the _request_more method to be scheduled # multiple times (recursively), causing severe cpu # problems. # # Therefore, only schedule _request_more to be called # if the call will be made in the future. The minimal # wait_period should be tweaked. if wait_period > 1.0: if DEBUG: print >>sys.stderr, "Downloader: waiting for %f s to call _request_more again" % wait_period self.downloader.scheduler(self._request_more, wait_period) if not (self.active_requests or self.backlog): self.downloader.queued_out[self] = 1 return # if DEBUG: # print >>sys.stderr,"Downloader: _request_more: len act",len(self.active_requests),"back",self.backlog lost_interests = [] while len(self.active_requests) < self.backlog: # if DEBUG: # print >>sys.stderr,"Downloader: Looking for interesting piece" # st = time.time() interest = self.downloader.picker.next( self.have, self.downloader.storage.do_I_have_requests, self, self.downloader.too_many_partials(), self.connection.connection.is_helper_con(), slowpieces=slowpieces, connection=self.connection, ) # et = time.time() # diff = et-st diff = -1 if DEBUG: print >>sys.stderr, "Downloader: _request_more: next() returned", interest, "took %.5f" % (diff) if interest is None: break self.example_interest = interest self.send_interested() loop = True while len(self.active_requests) < self.backlog and loop: begin, length = self.downloader.storage.new_request(interest) if DEBUG: print >>sys.stderr, "Downloader: new_request", interest, begin, length, "to", self.connection.connection.get_ip(), self.connection.connection.get_port() self.downloader.picker.requested(interest, begin, length) self.active_requests.append((interest, begin, length)) self.connection.send_request(interest, begin, length) self.downloader.chunk_requested(length) if not self.downloader.storage.do_I_have_requests(interest): loop = False lost_interests.append(interest) if not self.active_requests: self.send_not_interested() if lost_interests: for d in self.downloader.downloads: if d.active_requests or not d.interested: continue if d.example_interest is not None and self.downloader.storage.do_I_have_requests(d.example_interest): continue for lost in lost_interests: if d.have[lost]: break else: continue # 2fastbt_ # st = time.time() interest = self.downloader.picker.next( d.have, self.downloader.storage.do_I_have_requests, self, # Arno, 2008-05-22; self -> d? 
Original Pawel code self.downloader.too_many_partials(), self.connection.connection.is_helper_con(), willrequest=False, connection=self.connection, ) # et = time.time() # diff = et-st diff = -1 if DEBUG: print >>sys.stderr, "Downloader: _request_more: next()2 returned", interest, "took %.5f" % (diff) # _2fastbt if interest is None: d.send_not_interested() else: d.example_interest = interest # Arno: LIVEWRAP: no endgame if ( not self.downloader.endgamemode and self.downloader.storage.is_endgame() and not (self.downloader.picker.videostatus and self.downloader.picker.videostatus.live_streaming) ): self.downloader.start_endgame() def fix_download_endgame(self, new_unchoke=False): # 2fastbt_ # do not download from coordinator if self.downloader.paused or self.connection.connection.is_coordinator_con(): if DEBUG: print >>sys.stderr, "Downloader: fix_download_endgame: paused", self.downloader.paused, "or is_coordinator_con", self.connection.connection.is_coordinator_con() return # _2fastbt if len(self.active_requests) >= self._backlog(new_unchoke): if not (self.active_requests or self.backlog) and not self.choked: self.downloader.queued_out[self] = 1 if DEBUG: print >>sys.stderr, "Downloader: fix_download_endgame: returned" return # 2fastbt_ want = [ a for a in self.downloader.all_requests if self.have[a[0]] and a not in self.active_requests and (self.helper is None or self.connection.connection.is_helper_con() or not self.helper.is_ignored(a[0])) ] # _2fastbt if not (self.active_requests or want): self.send_not_interested() if DEBUG: print >>sys.stderr, "Downloader: fix_download_endgame: not interested" return if want: self.send_interested() if self.choked: if DEBUG: print >>sys.stderr, "Downloader: fix_download_endgame: choked" return shuffle(want) del want[self.backlog - len(self.active_requests) :] self.active_requests.extend(want) for piece, begin, length in want: # 2fastbt_ if ( self.helper is None or self.connection.connection.is_helper_con() or self.helper.reserve_piece(piece, self) ): self.connection.send_request(piece, begin, length) self.downloader.chunk_requested(length) # _2fastbt def got_have(self, index): if DEBUG: print >>sys.stderr, "Downloader: got_have", index if index == self.downloader.numpieces - 1: self.downloader.totalmeasure.update_rate( self.downloader.storage.total_length - (self.downloader.numpieces - 1) * self.downloader.storage.piece_length ) self.peermeasure.update_rate( self.downloader.storage.total_length - (self.downloader.numpieces - 1) * self.downloader.storage.piece_length ) else: self.downloader.totalmeasure.update_rate(self.downloader.storage.piece_length) self.peermeasure.update_rate(self.downloader.storage.piece_length) # Arno: LIVEWRAP if not self.downloader.picker.is_valid_piece(index): if DEBUG: print >>sys.stderr, "Downloader: got_have", index, "is invalid piece" return # TODO: should we request_more()? 
if self.have[index]: return self.have[index] = True self.downloader.picker.got_have(index, self.connection) if self.have.complete(): self.downloader.picker.became_seed() if self.downloader.picker.am_I_complete(): self.downloader.add_disconnected_seed(self.connection.get_readable_id()) self.connection.close() return if self.downloader.endgamemode: self.fix_download_endgame() elif ( not self.downloader.paused and not self.downloader.picker.is_blocked(index) and self.downloader.storage.do_I_have_requests(index) ): if not self.choked: self._request_more() else: self.send_interested() def _check_interests(self): if self.interested or self.downloader.paused: return for i in xrange(len(self.have)): if ( self.have[i] and not self.downloader.picker.is_blocked(i) and (self.downloader.endgamemode or self.downloader.storage.do_I_have_requests(i)) ): self.send_interested() return def got_have_bitfield(self, have): if self.downloader.picker.am_I_complete() and have.complete(): # Arno: If we're both seeds if self.downloader.super_seeding: self.connection.send_bitfield(have.tostring()) # be nice, show you're a seed too self.connection.close() self.downloader.add_disconnected_seed(self.connection.get_readable_id()) return # print >>sys.stderr,"Downloader: got_have_bitfield: VVV#############################################################################################VVVVVVVVVVVVVVVVVVVVVVVVV valid",self.downloader.picker.get_valid_range_iterator(),"len",self.downloader.numpieces # print >>sys.stderr,"Downloader: got_have_bitfield: input",`have.toboollist()` if have.complete(): # Arno: He is seed self.downloader.picker.got_seed() else: # Arno: LIVEWRAP: filter out valid pieces # TODO: may be slow with 32K pieces. validhave = Bitfield(self.downloader.numpieces) for i in self.downloader.picker.get_valid_range_iterator(): if have[i]: validhave[i] = True self.downloader.picker.got_have(i, self.connection) have = validhave # Store filtered bitfield self.have = have # print >>sys.stderr,"Downloader: got_have_bitfield: valid",`have.toboollist()` if self.downloader.endgamemode and not self.downloader.paused: for piece, begin, length in self.downloader.all_requests: if self.have[piece]: self.send_interested() break return self._check_interests() def get_rate(self): return self.measure.get_rate() def get_short_term_rate(self): return self.short_term_measure.get_rate() def is_snubbed(self): # 2fastbt_ if ( not self.choked and clock() - self.last2 > self.downloader.snub_time and not self.connection.connection.is_helper_con() and not self.connection.connection.is_coordinator_con() ): # _2fastbt for index, begin, length in self.active_requests: self.connection.send_cancel(index, begin, length) self.got_choke() # treat it just like a choke return clock() - self.last > self.downloader.snub_time def peer_is_complete(self): return self.have.complete()
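# _backlog() above sizes the request pipeline from the measured download rate: roughly
# four chunks' worth of the current rate, at least 2, capped by the downloader-wide queue
# limit, and damped once it exceeds 50. The same arithmetic as a self-contained helper
# (chunksize and queue_limit are passed in explicitly instead of read from the downloader):
def backlog_size(rate, chunksize, just_unchoked, queue_limit):
    backlog = min(2 + int(4 * rate / chunksize),
                  (2 * just_unchoked) + queue_limit)
    if backlog > 50:
        backlog = int(max(50, backlog * 0.075))
    return backlog

# e.g. at 400 KiB/s with 16 KiB chunks, queue_limit 100 and just_unchoked 0:
# min(2 + 100, 100) = 100, which exceeds 50 and is damped to int(max(50, 7.5)) = 50.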
def __init__(self,bt1download,videostatus,videoinfo,videoanalyserpath,vodeventfunc): # dirty hack to get the Tribler Session from BaseLib.Core.Session import Session session = Session.get_instance() if session.get_overlay(): # see comment in else section on importing... from BaseLib.Core.CacheDB.SqliteVideoPlaybackStatsCacheDB import VideoPlaybackDBHandler self._playback_stats = VideoPlaybackDBHandler.get_instance() else: # hack: we should not import this since it is not part of # the core nor should we import here, but otherwise we # will get import errors from BaseLib.Player.Reporter import VideoPlaybackReporter self._playback_stats = VideoPlaybackReporter.get_instance() # add an event to indicate that the user wants playback to # start def set_nat(nat): self._playback_stats.add_event(self._playback_key, "nat:%s" % nat) self._playback_key = base64.b64encode(os.urandom(20)) self._playback_stats.add_event(self._playback_key, "play-init") self._playback_stats.add_event(self._playback_key, "piece-size:%d" % videostatus.piecelen) self._playback_stats.add_event(self._playback_key, "num-pieces:%d" % videostatus.movie_numpieces) self._playback_stats.add_event(self._playback_key, "bitrate:%d" % videostatus.bitrate) self._playback_stats.add_event(self._playback_key, "nat:%s" % session.get_nat_type(callback=set_nat)) self._complete = False self.videoinfo = videoinfo self.bt1download = bt1download self.piecepicker = bt1download.picker self.rawserver = bt1download.rawserver self.storagewrapper = bt1download.storagewrapper self.fileselector = bt1download.fileselector self.vodeventfunc = vodeventfunc self.videostatus = vs = videostatus # Add quotes around path, as that's what os.popen() wants on win32 if sys.platform == "win32" and videoanalyserpath is not None and videoanalyserpath.find(' ') != -1: self.video_analyser_path='"'+videoanalyserpath+'"' else: self.video_analyser_path=videoanalyserpath # counter for the sustainable() call. Every X calls the # buffer-percentage is updated. self.sustainable_counter = sys.maxint # boudewijn: because we now update the downloadrate for each # received chunk instead of each piece we do not need to # average the measurement over a 'long' period of time. Also, # we only update the downloadrate for pieces that are in the # high priority range giving us a better estimation on how # likely the pieces will be available on time. self.overall_rate = Measure(10) self.high_range_rate = Measure(2) # buffer: a link to the piecepicker buffer self.has = self.piecepicker.has # number of pieces in buffer self.pieces_in_buffer = 0 self.data_ready = Condition() # Arno: Call FFMPEG only if the torrent did not provide the # bitrate and video dimensions. This is becasue FFMPEG # sometimes hangs e.g. 
Ivaylo's Xvid Finland AVI, for unknown # reasons # Arno: 2007-01-06: Since we use VideoLan player, videodimensions not important assert vs.bitrate_set self.doing_ffmpeg_analysis = False self.doing_bitrate_est = False self.videodim = None #self.movieselector.videodim self.player_opened_with_width_height = False self.ffmpeg_est_bitrate = None prebufsecs = self.PREBUF_SEC_VOD # assumes first piece is whole (first_piecelen == piecelen) piecesneeded = vs.time_to_pieces( prebufsecs ) bytesneeded = piecesneeded * vs.piecelen self.max_prebuf_packets = min(vs.movie_numpieces, piecesneeded) if self.doing_ffmpeg_analysis and DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: Want",self.max_prebuf_packets,"pieces for FFMPEG analysis, piecesize",vs.piecelen if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: Want",self.max_prebuf_packets,"pieces for prebuffering" self.nreceived = 0 if DEBUG: print >>sys.stderr,time.asctime(),'-', "vod: trans: Setting MIME type to",self.videoinfo['mimetype'] self.set_mimetype(self.videoinfo['mimetype']) # some statistics self.stat_playedpieces = 0 # number of pieces played successfully self.stat_latepieces = 0 # number of pieces that arrived too late self.stat_droppedpieces = 0 # number of pieces dropped self.stat_stalltime = 0.0 # total amount of time the video was stalled self.stat_prebuffertime = 0.0 # amount of prebuffer time used self.stat_pieces = PieceStats() # information about each piece # start periodic tasks self.curpiece = "" self.curpiece_pos = 0 # The outbuf keeps only the pieces from the base layer.. We play if we # have at least a piece from the base layer! self.outbuf = [] #self.last_pop = None # time of last pop self.reset_bitrate_prediction() self.lasttime=0 # For DownloadState self.prebufprogress = 0.0 self.prebufstart = time.time() self.playable = False self.usernotified = False self.outbuflen = None # LIVESOURCEAUTH self.authenticator = None self.refill_rawserv_tasker() self.tick_second() # link to others (last thing to do) self.piecepicker.set_transporter( self ) #self.start() if FAKEPLAYBACK: import threading class FakeReader(threading.Thread): def __init__(self,movie): threading.Thread.__init__(self) self.movie = movie def run(self): self.movie.start() while not self.movie.done(): self.movie.read() t = FakeReader(self) t.start()
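# The prebuffering size above is derived from playback time: PREBUF_SEC_VOD seconds are
# converted to pieces via videostatus.time_to_pieces() and capped by the number of movie
# pieces. A hedged, self-contained version of that calculation, assuming playback time
# maps to pieces through the byte bitrate; prebuf_pieces is an illustrative name, not the
# actual VideoStatus API.
def prebuf_pieces(prebuf_secs, bitrate, piecelen, movie_numpieces):
    """Pieces to fetch before playback may start (bitrate in bytes/second)."""
    bytes_needed = prebuf_secs * bitrate
    pieces_needed = int(-(-bytes_needed // piecelen))    # ceiling division
    return min(movie_numpieces, pieces_needed)

# e.g. 10 s of a 256 KiB/s stream with 512 KiB pieces -> ceil(2560/512) = 5 pieces.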
def start(self, bytepos = 0, force = False):
    """ Initialise to start playing at position `bytepos'. """
    self._playback_stats.add_event(self._playback_key, "play")

    # ARNOTODO: we don't use start(bytepos != 0) at the moment. See if we
    # should. Also see if we need the read numbytes here, or that it
    # is better handled at a higher layer. For live it is currently
    # done at a higher level, see VariableReadAuthStreamWrapper because
    # we have to strip the signature. Hence the self.curpiece buffer here
    # is superfluous. Get rid of it or check if
    #
    #   curpiece[0:piecelen]
    #
    # returns curpiece if piecelen has length piecelen == optimize for
    # piecesized case.
    #
    # For VOD seeking we may use the numbytes facility to seek to byte offsets
    # not just piece offsets.
    #
    vs = self.videostatus

    if vs.playing and not force:
        return

    # lock before changing startpos or any other playing variable
    self.data_ready.acquire()
    try:
        # Determine piece number and offset
        if bytepos < vs.piecelen:
            piece = vs.first_piece
            offset = bytepos
        else:
            newbytepos = bytepos - vs.first_piecelen
            piece = vs.first_piece + newbytepos / vs.piecelen + 1
            offset = newbytepos % vs.piecelen

        if DEBUG:
            print >>sys.stderr, time.asctime(), '-', "vod: trans: === START, START, START, START, START, START, START, START, START, START, START, START, START,START"
            print >>sys.stderr, time.asctime(), '-', "vod: trans: === START at offset %d (piece %d) (forced: %s) ===" % (bytepos, piece, force)

        # Initialise all playing variables
        self.curpiece = ""          # piece currently being popped
        self.curpiece_pos = offset
        # TODO
        self.set_pos(piece)
        self.outbuf = []
        #self.last_pop = time.time()
        self.reset_bitrate_prediction()
        vs.playing = True
        self.playbackrate = Measure(60)
    finally:
        self.data_ready.release()

    # ARNOTODO: start is called by non-NetworkThreads, these following methods
    # are usually called by NetworkThread.
    #
    # We now know that this won't be called until notify_playable() so
    # perhaps this can be removed?
    #
    # CAREFUL: if we use start() for seeking... that's OK. User won't be
    # able to seek before he got his hands on the stream, so after
    # notify_playable()

    # See what we can do right now
    self.update_prebuffering()
    self.refill_buffer()
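# start() above maps a byte position in the stream to a (piece, offset) pair, taking into
# account that the first piece of the movie may have a different length than the rest.
# The same mapping as a standalone helper (the explicit floor division is what the
# Python 2 '/' in the method performs on integers); bytepos_to_piece is an illustrative
# name, not part of the transporter API.
def bytepos_to_piece(bytepos, first_piece, first_piecelen, piecelen):
    if bytepos < piecelen:
        return first_piece, bytepos
    newbytepos = bytepos - first_piecelen
    return first_piece + newbytepos // piecelen + 1, newbytepos % piecelen

# e.g. with first_piecelen == piecelen == 1024: byte 3000 lands in piece first_piece + 2
# at offset 952, because 3000 - 1024 = 1976 = 1 * 1024 + 952.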
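# is_snubbed() in the SingleDownload class above treats a peer as snubbing us when it has
# left us unchoked yet delivered nothing for longer than snub_time; in that case the method
# cancels all active requests and handles the peer as if it had choked us, with helper and
# coordinator connections exempt. The timing rule in isolation, as a hedged sketch with
# illustrative parameter names (last_received ~ self.last, last_active ~ self.last2):
def check_snub(now, last_received, last_active, snub_time, choked,
               is_helper_con=False, is_coordinator_con=False):
    """Return (snubbed, treat_as_choke) for a single peer connection."""
    treat_as_choke = (not choked and now - last_active > snub_time
                      and not is_helper_con and not is_coordinator_con)
    # when treat_as_choke is True, the real method cancels every active request
    # and calls got_choke() before evaluating the snub condition below
    snubbed = now - last_received > snub_time
    return snubbed, treat_as_choke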