Example #1
    def __init__(self, infohash, storage, picker, backlog, max_rate_period,
                 numpieces, chunksize, measurefunc, snub_time,
                 kickbans_ok, kickfunc, banfunc, scheduler = None):
        self.infohash = infohash
        self.b64_infohash = b64encode(infohash)
        self.storage = storage
        self.picker = picker
        self.backlog = backlog
        self.max_rate_period = max_rate_period
        self.measurefunc = measurefunc
        self.totalmeasure = Measure(max_rate_period*storage.piece_length/storage.request_size)
        self.numpieces = numpieces
        self.chunksize = chunksize
        self.snub_time = snub_time
        self.kickfunc = kickfunc
        self.banfunc = banfunc
        self.disconnectedseeds = {}
        self.downloads = []
        self.perip = {}
        self.gotbaddata = {}
        self.kicked = {}
        self.banned = {}
        self.kickbans_ok = kickbans_ok
        self.kickbans_halted = False
        self.super_seeding = False
        self.endgamemode = False
        self.endgame_queued_pieces = []
        self.all_requests = []
        self.discarded = 0L
        self.download_rate = 0
#        self.download_rate = 25000  # 25K/s test rate
        self.bytes_requested = 0
        self.last_time = clock()
        self.queued_out = {}
        self.requeueing = False
        self.paused = False
        self.scheduler = scheduler

        # hack: we should not import this since it is not part of the
        # core, nor should we import at this point, but otherwise we will
        # get import errors
        #
        # _event_reporter stores events that are logged somewhere...
        # from BaseLib.Core.Statistics.StatusReporter import get_reporter_instance
        # self._event_reporter = get_reporter_instance()
        self._event_reporter = get_status_holder("LivingLab")

        # check periodically
        self.scheduler(self.dlr_periodic_check, 1)
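
The last line above registers dlr_periodic_check to run one second later. In this codebase a scheduled callback typically re-registers itself to keep running; the sketch below shows that self-rescheduling pattern, using threading.Timer as a stand-in for the real RawServer-based scheduler (the stand-in and the bounded run count are assumptions for the demo).

# Toy stand-in for the scheduler(callback, delay) interface used above.
import threading

def schedule(callback, delay):
    threading.Timer(delay, callback).start()

_runs_left = [3]  # bounded for the demo; the real check runs for the download's lifetime

def dlr_periodic_check():
    # ... periodic bookkeeping would go here ...
    _runs_left[0] -= 1
    if _runs_left[0] > 0:
        schedule(dlr_periodic_check, 1)  # re-register, keeping the loop alive

schedule(dlr_periodic_check, 1)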
Example #2
    def get_upload_chunk(self):
        if self.choked or not self.buffer:
            return None
        index, begin, length = self.buffer.pop(0)
        if self.config['buffer_reads']:
            if index != self.piecedl:
                if self.piecebuf:
                    self.piecebuf.release()
                self.piecedl = index
                # Merkle
                [ self.piecebuf, self.hashlist ] = self.storage.get_piece(index, 0, -1)
            try:
                piece = self.piecebuf[begin:begin+length]
                assert len(piece) == length
            except:     # fails if storage.get_piece returns None or if out of range
                self.connection.close()
                return None
            if begin == 0:
                hashlist = self.hashlist
            else:
                hashlist = []
        else:
            if self.piecebuf:
                self.piecebuf.release()
                self.piecedl = None
            [piece, hashlist] = self.storage.get_piece(index, begin, length)
            if piece is None:
                self.connection.close()
                return None
        self.measure.update_rate(len(piece))
        self.totalup.update_rate(len(piece))

        status = get_status_holder("LivingLab")
        s_upload = status.get_or_create_status_element("uploaded",0)
        s_upload.inc(len(piece))

        # BarterCast counter
        self.connection.total_uploaded += length
        
        return (index, begin, hashlist, piece)
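
Both this snippet and Example #1 funnel statistics through get_status_holder. The real implementation lives in Tribler's status module; the stand-in below only sketches the counter pattern the snippets rely on (fetch a named holder, get or create a named element, increment it) and is not the actual API implementation.

# Minimal stand-in for the status-holder counter pattern used above.
class _Element(object):
    def __init__(self, value):
        self._value = value
    def inc(self, amount=1):
        self._value += amount
    def get_value(self):
        return self._value

class _Holder(object):
    def __init__(self):
        self._elements = {}
    def get_or_create_status_element(self, name, initial_value):
        if name not in self._elements:
            self._elements[name] = _Element(initial_value)
        return self._elements[name]

_holders = {}

def get_status_holder(name):
    if name not in _holders:
        _holders[name] = _Holder()
    return _holders[name]

# Usage mirroring get_upload_chunk:
status = get_status_holder("LivingLab")
s_upload = status.get_or_create_status_element("uploaded", 0)
s_upload.inc(1024)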
Example #3
    def __init__(self, connecter, raw_server, my_id, max_len,
            schedulefunc, keepalive_delay, download_id, 
            measurefunc, config):
        self.raw_server = raw_server
        self.connecter = connecter
        self.my_id = my_id
        self.max_len = max_len
        self.schedulefunc = schedulefunc
        self.keepalive_delay = keepalive_delay
        self.download_id = download_id
        self.measurefunc = measurefunc
        self.config = config
        self.connections = {}
        self.banned = {}
        self.to_connect = Set()
        self.trackertime = 0
        self.paused = False
        if self.config['max_connections'] == 0:
            self.max_connections = 2 ** 30
        else:
            self.max_connections = self.config['max_connections']
        """
        In r529 there was a problem when a single Windows client 
        would connect to our text-based seeder (i.e. btlaunchmany) 
        with no other clients present. Apparently both the seeder 
        and client would connect to each other simultaneously, but
        not end up with a good connection, halting the client.

        Arno, 2006-03-10: Reappears in ~r890, fixed in r892. It 
        appears to be a problem of writing to a nonblocking socket 
        before it has signalled that it is ready for writing, although the
        evidence is inconclusive. 

        Arno: 2006-12-15: Reappears in r2319. There is some weird
        socket problem here. Using Python 2.4.4 doesn't solve it.
        The problem I see here is that as soon as we register
        at the tracker, the single seeder tries to connect to
        us. He succeeds, but after a short while the connection
        appears to be closed by him. We then wind up with no
        connection at all and have to wait until we recontact
        the tracker.

        My workaround is to refuse these initial connections from
        the seeder and wait until I've started connecting to peers
        based on the info I got from the tracker before accepting
        remote connections.
        
        Arno: 2007-02-16: I think I finally found it. The Tribler 
        tracker (BitTornado/BT1/track.py) will do a NAT check
        (BitTornado/BT1/NATCheck) by default, which consists of
        initiating a connection and then closing it after a good 
        BT handshake was received.
        
        The solution now is to make sure we check IP and port to
        identify existing connections. I already added that on 2006-12-15,
        so I just removed the restriction on initial connections,
        which is now superfluous.
        """
        self.rerequest = None
# 2fastbt_
        self.toofast_banned = {}
        self.coordinator_ip = None
# _2fastbt        

        # hack: we should not import this since it is not part of the
        # core, nor should we import at this point, but otherwise we will
        # get import errors
        #
        # _event_reporter stores events that are logged somewhere...
        # from BaseLib.Core.Statistics.StatusReporter import get_reporter_instance
        self._event_reporter = get_status_holder("LivingLab")

        # the addresses that have already been reported
        self._known_addresses = {}

        schedulefunc(self.send_keepalives, keepalive_delay)
        
        # RePEX: added repexer field.
        # Note: perhaps call it observer in the future and make the 
        # download engine more observable?
        self.repexer = None
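
The long comment in this example ends with the actual fix: identify existing connections by IP and port before accepting a new one. The sketch below strips that check down to its core; accept_connection and _Conn are invented names, and the real Encoder tracks much richer Connection objects.

# Hypothetical sketch of deduplicating peer connections by (ip, port).
connections = {}  # (ip, port) -> connection object

def accept_connection(ip, port, conn):
    key = (ip, port)
    if key in connections:
        conn.close()  # already talking to this peer: refuse the duplicate
        return False
    connections[key] = conn
    return True

class _Conn(object):
    def close(self):
        pass

accept_connection("10.0.0.1", 6881, _Conn())  # True: first connection accepted
accept_connection("10.0.0.1", 6881, _Conn())  # False: duplicate refused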
Example #4
    def __init__(self, infohash, metainfo, kvconfig, multihandler,
                 get_extip_func, listenport, videoanalyserpath, vodfileindex,
                 set_error_func, pstate, lmvodeventcallback,
                 lmhashcheckcompletecallback):
        self.dow = None
        self.set_error_func = set_error_func
        self.videoinfo = None
        self.videostatus = None
        self.lmvodeventcallback = lmvodeventcallback
        self.lmhashcheckcompletecallback = lmhashcheckcompletecallback
        self.logmsgs = []
        self._hashcheckfunc = None
        self._getstatsfunc = None
        self.infohash = infohash
        self.b64_infohash = b64encode(infohash)
        self.repexer = None
        try:
            self.dldoneflag = Event()
            self.dlrawserver = multihandler.newRawServer(infohash,self.dldoneflag)
    
            if pstate is not None:
                self.hashcheckfrac = pstate['dlstate']['progress']
            else:
                self.hashcheckfrac = 0.0
    
            self.peerid = createPeerID()
            
            # LOGGING
            event_reporter = get_status_holder("LivingLab")
            event_reporter.create_and_add_event("peerid", [self.b64_infohash, b64encode(self.peerid)])
            
            #print >>sys.stderr,time.asctime(),'-', "SingleDownload: __init__: My peer ID is",`peerid`
    
            self.dow = BT1Download(self.hashcheckprogressfunc,
                            self.finishedfunc,
                            self.fatalerrorfunc, 
                            self.nonfatalerrorfunc,
                            self.logerrorfunc,
                            self.dldoneflag,
                            kvconfig,
                            metainfo, 
                            infohash,
                            self.peerid,
                            self.dlrawserver,
                            get_extip_func,
                            listenport,
                            videoanalyserpath
                            )
        
            file = self.dow.saveAs(self.save_as)
            #if DEBUG:
            #    print >>sys.stderr,time.asctime(),'-', "SingleDownload: dow.saveAs returned",file
            
            # Set local filename in vodfileindex
            if vodfileindex is not None:
                # Ric: for SVC the index is a list of indexes
                index = vodfileindex['index']
                if type(index) == ListType:
                    svc = len(index) > 1
                else:
                    svc = False
                
                if svc:
                    outpathindex = self.dow.get_dest(index[0])
                else:
                    if index == -1:
                        index = 0
                    outpathindex = self.dow.get_dest(index)

                vodfileindex['outpath'] = outpathindex
                self.videoinfo = vodfileindex
                if 'live' in metainfo['info']:
                    authparams = metainfo['info']['live']
                else:
                    authparams = None
                if svc:
                    self.videostatus = SVCVideoStatus(metainfo['info']['piece length'],self.dow.files,vodfileindex,authparams)
                else:
                    self.videostatus = VideoStatus(metainfo['info']['piece length'],self.dow.files,vodfileindex,authparams)
                self.videoinfo['status'] = self.videostatus
                self.dow.set_videoinfo(vodfileindex,self.videostatus)

            #if DEBUG:
            #    print >>sys.stderr,time.asctime(),'-', "SingleDownload: setting vodfileindex",vodfileindex
            
            # RePEX: Start in RePEX mode
            if kvconfig['initialdlstatus'] == DLSTATUS_REPEXING:
                if pstate is not None and pstate.has_key('dlstate'):
                    swarmcache = pstate['dlstate'].get('swarmcache',{})
                else:
                    swarmcache = {}
                self.repexer = RePEXer(self.infohash, swarmcache)
            else:
                self.repexer = None
            
            if pstate is None:
                resumedata = None
            else:
                # Restarting download
                resumedata=pstate['engineresumedata']
            self._hashcheckfunc = self.dow.initFiles(resumedata=resumedata)

            
        except Exception, e:
            self.fatalerrorfunc(e)
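
The restart path above pulls three things out of the persisted pstate dict: the hash-check progress, the RePEX swarm cache, and the opaque engine resume data handed back to initFiles(). The sketch below shows that shape and the defaulting logic; the keys come from the code above, the values are invented.

# Sketch of the pstate dict the restart path expects (values invented).
pstate = {
    "dlstate": {"progress": 0.42, "swarmcache": {}},
    "engineresumedata": None,  # opaque blob passed back to initFiles()
}

if pstate is not None:
    hashcheckfrac = pstate["dlstate"]["progress"]
    swarmcache = pstate["dlstate"].get("swarmcache", {})
    resumedata = pstate["engineresumedata"]
else:
    hashcheckfrac = 0.0
    swarmcache = {}
    resumedata = None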
Example #5
    def _reporting_thread(self):
        """
        Send the report on a separate thread

        We choose not to use a lock object to protect access to
        self._enable_reporting, self._retry_delay, and
        self._report_deadline because only a single thread will write
        and the other threads will only read these variables. Python
        doesn't cause problems in this case.
        """
        # minimum retry delay. this value will grow exponentially with
        # every failure
        retry_delay = 15

        # the amount of time to sleep before the next report (or until
        # the _thread_flush event is set)
        timeout = retry_delay

        # a list containing all urlencoded reports that have not yet been
        # sent (most of the time this list will be empty, except when
        # reports could not be delivered)
        reports = []

        # local copy of the self._event when it is being reported
        event = None

        if USE_LIVING_LAB_REPORTING:
            # the m18 trial statistics are gathered at the 'living lab'
            session = Session.get_instance()
            living_lab_reporter = LivingLabOnChangeReporter(
                "vod-stats-reporter")
            living_lab_reporter.set_permid(session.get_permid())
            status_holder = get_status_holder("vod-stats")
            status_holder.add_reporter(living_lab_reporter)
            status_element = status_holder.create_status_element(
                "action-list",
                "A list containing timestamped VOD playback events",
                initial_value=[])

        else:
            # there are several urls available where reports can be
            # sent. one should be picked randomly each time.
            #
            # when a report is successful it will stay with the same
            # reporter. when a report is unsuccessful (could not
            # connect) it will cycle through reporters.
            report_urls = [
                [0, 0, "http://reporter1.tribler.org/swarmplayer.py"],
                [0, 0, "http://reporter2.tribler.org/swarmplayer.py"],
                [0, 0, "http://reporter3.tribler.org/swarmplayer.py"],
                [0, 0, "http://reporter4.tribler.org/swarmplayer.py"],
                [0, 0, "http://reporter5.tribler.org/swarmplayer.py"],
                [0, 0, "http://reporter6.tribler.org/swarmplayer.py"],
                [0, 0, "http://reporter7.tribler.org/swarmplayer.py"],
                [0, 0, "http://reporter8.tribler.org/swarmplayer.py"],
                [0, 0, "http://reporter9.tribler.org/swarmplayer.py"]
            ]
            shuffle(report_urls)

        while True:
            # sleep in between reports. will send a report immediately
            # when the flush event is set
            self._thread_flush.wait(timeout)
            self._thread_flush.clear()

            # create report
            self._thread_lock.acquire()
            try:
                if self._event:
                    # copy between threads while locked
                    event = self._event

                    self._event = []
                else:
                    # we have nothing to report... sleep
                    timeout = retry_delay
                    event = None

            finally:
                self._thread_lock.release()

            if event:
                # prepend the session-key
                event.insert(
                    0, {
                        "key": "session-key",
                        "timestamp": time(),
                        "event": self._session_key
                    })
                event.insert(
                    0, {
                        "key": "sequence-number",
                        "timestamp": time(),
                        "event": self._sequence_number
                    })
                self._sequence_number += 1

            if USE_LIVING_LAB_REPORTING:
                if event:
                    try:
                        if status_element.set_value(event):
                            # The Living Lab doesn't support dynamic
                            # reporting, so we use 60 seconds by default
                            timeout = 60
                        else:
                            # something went wrong...
                            retry_delay *= 2
                            timeout = retry_delay
                    except:
                        # error contacting server
                        print_exc(file=sys.stderr)
                        retry_delay *= 2
                        timeout = retry_delay

            else:
                # add new report
                if event:
                    if len(event) < 10:
                        # uncompressed
                        report = {
                            "version": "3",
                            "created": time(),
                            "event": event
                        }
                    else:
                        # compress
                        report = {
                            "version": "4",
                            "created": time(),
                            "event":
                            urllib.quote(zlib.compress(repr(event), 9))
                        }

                    reports.append(urllib.urlencode(report))

                if not reports:
                    timeout = retry_delay
                    continue

                reporter = report_urls[0]

                if DEBUG:
                    print >> sys.stderr, "EventStatusReporter: attempting to report,", len(
                        reports[0]), "bytes to", reporter[2]
                try:
                    sock = urllib.urlopen(reporter[2], reports[0])
                    result = sock.read()
                    sock.close()

                    # all ok? then remove the report
                    del reports[0]

                    # increase the 'good-report' counter, no need to re-order
                    reporter[1] += 1
                except:
                    # error contacting server
                    print_exc(file=sys.stderr)
                    retry_delay *= 2

                    # increase the 'bad-report' counter and order by failures
                    reporter[0] += 1
                    report_urls.sort(lambda x, y: cmp(x[0], y[0]))
                    continue

                if result.isdigit():
                    result = int(result)
                    if result == 0:
                        # remote server is not recording, so don't bother
                        # sending events
                        if DEBUG:
                            print >> sys.stderr, "EventStatusReporter: received -zero- from the HTTP server. Reporting disabled"
                        self._thread_lock.acquire()
                        self._enable_reporting = False
                        self._thread_lock.release()

                        # close thread
                        return

                    else:
                        # I choose not to reset the retry_delay because
                        # swarmplayer sessions tend to be short. And if
                        # there are connection failures I want as few
                        # retries as possible
                        if DEBUG:
                            print >> sys.stderr, "EventStatusReporter: report successfull. Next report in", result, "seconds"
                        timeout = result
                else:
                    self._thread_lock.acquire()
                    self._enable_reporting = False
                    self._thread_lock.release()

                    # close thread
                    return
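
Both reporting branches apply the same failure policy: retry_delay starts at 15 seconds and doubles on every failed delivery. The sketch below isolates that schedule; the cap is an assumption, as the code above doubles without an upper bound.

# Exponential backoff as used by _reporting_thread.
def backoff_delays(initial=15, cap=3600):
    delay = initial
    while True:
        yield delay
        delay = min(delay * 2, cap)  # the cap is an addition, see above

delays = backoff_delays()
print(next(delays))  # 15
print(next(delays))  # 30
print(next(delays))  # 60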
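
The non-Living-Lab branch also picks a wire format by size: short event lists are sent as version "3" plain dicts, longer ones as version "4" with the repr of the list zlib-compressed and percent-quoted. The sketch below shows how a collector could reverse that encoding; the collector side is an assumption, and a real one would want a safer parser than eval.

# Round-tripping the version-4 payload built above (Python 2 urllib).
import urllib
import zlib

event = [{"key": "example", "timestamp": 0, "event": None}]
payload = urllib.quote(zlib.compress(repr(event), 9))     # sender side, as above
decoded = eval(zlib.decompress(urllib.unquote(payload)))  # hypothetical collector side
assert decoded == event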
Example #7
    def got_piece(self, index, begin, hashlist, piece):
        """
        Returns True if the piece is complete.
        Note that in this case a -piece- means a chunk!
        """

        if self.bad_performance_counter:
            self.bad_performance_counter -= 1
            if DEBUG: print >>sys.stderr, "decreased bad_performance_counter to", self.bad_performance_counter

        length = len(piece)
        #if DEBUG:
        #    print >> sys.stderr, 'Downloader: got piece of length %d' % length
        try:
            self.active_requests.remove((index, begin, length))
        except ValueError:
            self.downloader.discarded += length
            return False
        if self.downloader.endgamemode:
            self.downloader.all_requests.remove((index, begin, length))
            if DEBUG: print >>sys.stderr, "Downloader: got_piece: removed one request from all_requests", len(self.downloader.all_requests), "remaining"

        self.last = clock()
        self.last2 = clock()
        self.measure.update_rate(length)
        # Update statistic gatherer
        status = get_status_holder("LivingLab")
        s_download = status.get_or_create_status_element("downloaded",0)
        s_download.inc(length)
        
        self.short_term_measure.update_rate(length)
        self.downloader.measurefunc(length)
        if not self.downloader.storage.piece_came_in(index, begin, hashlist, piece, self.guard):
            self.downloader.piece_flunked(index)
            return False

        # boudewijn: we need more accurate (if possibly invalid)
        # measurements on current download speed
        self.downloader.picker.got_piece(index, begin, length)

#        print "Got piece=", index, "begin=", begin, "len=", length
        if self.downloader.storage.do_I_have(index):
            self.downloader.picker.complete(index)

        if self.downloader.endgamemode:
            for d in self.downloader.downloads:
                if d is not self:
                    if d.interested:
                        if d.choked:
                            assert not d.active_requests
                            d.fix_download_endgame()
                        else:
                            try:
                                d.active_requests.remove((index, begin, length))
                            except ValueError:
                                continue
                            d.connection.send_cancel(index, begin, length)
                            d.fix_download_endgame()
                    else:
                        assert not d.active_requests
        self._request_more()
        self.downloader.check_complete(index)
        
        # BarterCast counter
        self.connection.total_downloaded += length
    
        return self.downloader.storage.do_I_have(index)
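
In endgame mode every outstanding chunk is requested from several peers at once, so when one copy arrives the loop above removes the duplicate request from every other connection and sends a cancel for it. The sketch below strips that pass to its core; Download, Connection, and cancel_elsewhere are stand-ins for the real classes.

# Stripped-down sketch of the endgame cancel pass.
class Connection(object):
    def send_cancel(self, index, begin, length):
        print("cancel piece %d begin %d len %d" % (index, begin, length))

class Download(object):
    def __init__(self):
        self.active_requests = []
        self.connection = Connection()

def cancel_elsewhere(downloads, winner, request):
    index, begin, length = request
    for d in downloads:
        if d is not winner and request in d.active_requests:
            d.active_requests.remove(request)
            d.connection.send_cancel(index, begin, length)

a, b = Download(), Download()
b.active_requests.append((3, 0, 16384))
cancel_elsewhere([a, b], a, (3, 0, 16384))  # b's duplicate request is cancelled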