def _check_version(self):
        """Actually check for an auto-update:
        1.  Check the version number from the file on the version site.
        2.  Check the stable version number from the file on the version site.
        3.  Notify the user and stop if they are on an OS with no installer.
        4.  Get the torrent file from the version site.
        5.  Get the signature from the version site.
        6.  Check the signature against the torrent file using the public key.
        7a. Start the torrent if it's not in the client.
        7b. Restart the torrent if it's in the client but not running.
        8.  Put the infohash of the torrent into state so the butler knows
            to butle it.
        9.  AutoUpdateButler.started() ensures that only the most recent
            auto-update torrent is running.
        10. AutoUpdateButler.finished() indicates the new version is available,
            the UI polls for that value later.

        Whether an auto-update was found and started or not, requeue
        the call to check_version() to run a day later.  This means
        that the version check runs at startup, and once a day.
        """
        debug_prefix = '_check_version() run#%d: ' % self.runs
        self.debug(debug_prefix + 'starting')

        url = self.version_site + self.current_version.name()

        df = ThreadedDeferred(_wrap_task(self.rawserver.external_add_task),
                              self._get_available, url, daemon=True)
        yield df
        try:
            available_version = df.getResult()
        except BTFailure, e:
            self.debug(debug_prefix + 'failed to load %s' % url)
            self._restart()
            return

        if available_version.is_beta():
            if available_version[1] != self.current_version[1]:
                available_version = self.current_version
        if self.current_version.is_beta():
            stable_url = self.version_site + 'stable'
            df = ThreadedDeferred(_wrap_task(self.rawserver.external_add_task),
                                  self._get_available, stable_url)
            yield df
            try:
                available_stable_version = df.getResult()
            except BTFailure, e:
                self.debug(debug_prefix + 'failed to load %s' % stable_url)
                self._restart()
                return

            if available_stable_version > available_version:
                available_version = available_stable_version
        self.debug(debug_prefix + 'got %s' % str(available_version))

        if available_version <= self.current_version:
            self.debug(debug_prefix + 'not updating old version %s' %
                       str(available_version))

Example #2
    def parse_allowed(self):
        if self.parse_pending:
            return
        self.parse_pending = True

        df = ThreadedDeferred(wrap_task(self.rawserver.external_add_task),
                              self._parse_allowed, daemon=True)
        def eb(etup):
            self.parse_pending = False
            self._print_exc("parse_dir: ", etup)
        df.addCallbacks(self._parse_allowed_finished, eb)
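    # parse_pending is a reentrancy guard: while one parse runs in the worker
    # thread, further parse_allowed() calls return immediately; the flag is
    # cleared in the error path above (and presumably on success in
    # _parse_allowed_finished, which is not shown here)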
Example #3
    def save_dfile(self):
        if self.save_pending:
            return
        self.save_pending = True

        # if this is taking all the time, threading it won't help anyway because
        # of the GIL
        #state = bencode(self.state)
        state = cPickle.dumps(self.state) # pickle handles Unicode.
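        # the snapshot is taken synchronously so the worker thread gets a
        # consistent copy of self.state; only the disk write in _save_dfile
        # happens off-thread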

        df = ThreadedDeferred(wrap_task(self.rawserver.external_add_task),
                              self._save_dfile, state)
        def cb(r):
            self.save_pending = False
            if NOISY:
                self._print_event( "save_dfile: Completed" )
        def eb(etup):
            self.save_pending = False
            self._print_exc( "save_dfile: ", etup )
        df.addCallbacks(cb, eb)
Example #4
def smart_gettext_and_install(domain, localedir, languages,
                              fallback=False, unicode=False):
    try:
        t = gettext.translation(domain, localedir, languages=languages,
                                fallback=fallback)
    except Exception, e:
        # if we failed to find the language, fetch it from the web async-style
        running_count = 0
        running_deferred = {}

        # Get some reasonable defaults for arguments that were not supplied
        if languages is None:
            languages = []
            for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'):
                val = os.environ.get(envar)
                if val:
                    languages = val.split(':')
                    break
            if 'C' not in languages:
                languages.append('C')

        # now normalize and expand the languages
        nelangs = []
        for lang in languages:
            for nelang in gettext._expand_lang(lang):
                if nelang not in nelangs:
                    nelangs.append(nelang)
        languages = nelangs
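        # gettext._expand_lang normalizes a locale string and fans it out,
        # e.g. 'pt_BR.UTF-8' -> ['pt_BR.UTF-8', 'pt_BR', 'pt.UTF-8', 'pt'],
        # so each preference is tried from most to least specific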

        for lang in languages:
            d = ThreadedDeferred(None, get_language, lang)
            def translate_and_install(r, td=d):
                running_deferred.pop(td)
                # only let the last one try to install
                if len(running_deferred) == 0:
                    t = gettext.translation(domain, localedir,
                                            languages=languages,
                                            fallback=True)
                    t.install(unicode)
            def failed(e, tlang=lang, td=d):
                if td in running_deferred:
                    running_deferred.pop(td)
                # don't raise an error, just continue untranslated
                sys.stderr.write('Could not find translation for language "%s"\n' %
                                 tlang)
                #traceback.print_exc(e)
            d.addCallback(translate_and_install)
            d.addErrback(failed)
            # accumulate all the deferreds first
            running_deferred[d] = 1

        # start them all, the last one finished will install the language
        for d in running_deferred:
            d.start()

        return

    # translation found locally on the first try; install it
    t.install(unicode)
Example #5
    def _start_announce(*a):
        self.running_df = ThreadedDeferred(_wrap_task(self.externalsched),
                                           self._rerequest, s, self.peerid,
                                           daemon=True)
        def _rerequest_finish(x):
            self.running_df = None
        def _rerequest_error(e):
            self.errorfunc(logging.ERROR, _("Rerequest failed!"),
                           exception=True, exc_info=e)
        self.running_df.addCallbacks(_rerequest_finish, _rerequest_error)
        if event == 'stopped':
            # if self._rerequest needs any state, pass it through args
            self.cleanup()
Example #6
    def initialize(self, save_path, files):
        # a list of bytes ranges and filenames for window-based IO
        self.ranges = []
        # a dict of filename-to-ranges for piece priorities and filename lookup
        self.range_by_name = {}
        # a sparse set for smart allocation detection
        self.allocated_regions = SparseSet()

        # dict of filename-to-length on disk (for % complete in the file view)
        self.undownloaded = {}
        self.save_path = save_path

        # Rather implement this as an ugly hack here than change all the
        # individual calls. Affects all torrent instances using this module.
        if self.config['bad_libc_workaround']:
            bad_libc_workaround()

        self.initialized = False
        self.startup_df = ThreadedDeferred(_wrap_task(self.external_add_task),
                                           self._build_file_structs,
                                           self.filepool, files)
        return self.startup_df
Example #7
class Rerequester(object):

    STATES = ['started', 'completed', 'stopped']

    def __init__(self, url, announce_list, config, sched, externalsched, rawserver,
                 howmany, connect,
                 amount_left, up, down, port, myid, infohash, errorfunc, doneflag,
                 upratefunc, downratefunc, ever_got_incoming, diefunc, sfunc):
        """
         @param url:       tracker's announce URL.
         @param announce_list: ?
         @param config:    preferences obj storing BitTorrent-wide
                           configuration.
         @param sched:     used to schedule events from inside rawserver's
                           thread.  (Oh boy.  externalsched and sched.
                           We expect Rerequester to
                           recognize the difference between rawserver's
                           thread and yet we go through the trouble of
                           abstracting away rawserver using a callback...
                           So what was the point?  --Dave)
         @param externalsched: see sched.  This one is called from outside
                           rawserver's thread.
         @param howmany:   callback to get the number of complete connections.
         @param connect:   callback to establish a connection to a peer
                           obtained from the tracker.
         @param amount_left: callback to obtain the number of bytes left to
                           download for this torrent.
         @param up:        callback to obtain the total number of bytes sent
                           for this torrent.
         @param down:      callback to obtain the total number of bytes
                           received for this torrent.
         @param port:      port to report to the tracker.  If the local peer
                           is behind a NAT then this is the local peer's port
                           on the NAT facing the outside world.
         @param myid:      local peer's unique (self-generated) id.
         @param infohash:  hash of the info section of the metainfo file.
         @param errorfunc: callback to report errors.
         @param doneflag:  when set all threads cleanup and then terminate.
         @param upratefunc: callback to obtain moving average rate on the
                           uplink for this torrent.
         @param downratefunc: callback to obtain moving average rate on the
                           downlink for this torrent.
         @param ever_got_incoming: callback to determine if this torrent
                           has ever received any messages from other peers.
         @param diefunc:   callback that is called when announce fails to find
                           any peers.
         @param sfunc:     success function?  With regard to what?  --Dave

        """
        assert isinstance(url, str)
        assert isinstance(config, Preferences)
        assert type(port) in (int,long) and port > 0 and port < 65536, "Port: %s" % repr(port)
        assert callable(connect)
        assert callable(externalsched)
        assert callable(amount_left)
        assert callable(errorfunc)
        assert isinstance(doneflag, threading._Event)
        assert callable(upratefunc)
        assert callable(downratefunc)
        assert callable(ever_got_incoming)
        assert callable(diefunc)
        assert callable(sfunc)

        self.rawserver = rawserver
        self.dead = False
        self.baseurl = url
        self.announce_list = None
        if announce_list:
            # shuffle a new copy of the whole set only once
            shuffled_announce_list = []
            for tier in announce_list:
                if not tier:
                    # strip blank lists
                    continue
                shuffled_tier = list(tier)
                random.shuffle(shuffled_tier)
                shuffled_announce_list.append(shuffled_tier)
            if shuffled_announce_list:
                self.announce_list = shuffled_announce_list
                self.tier = 0
                self.announce_i = 0
                self.baseurl = self.announce_list_next()
        self.announce_infohash = infohash
        self.peerid = None
        self.wanted_peerid = myid
        self.port = port
        self.url = None
        self.config = config
        self.last = None
        self.trackerid = None
        self.announce_interval = 30 * 60
        self.sched = sched
        self.howmany = howmany
        self.connect = connect
        self.externalsched = externalsched
        self.amount_left = amount_left
        self.up = up
        self.down = down
        self.errorfunc = errorfunc
        self.doneflag = doneflag
        self.upratefunc = upratefunc
        self.downratefunc = downratefunc
        self.ever_got_incoming = ever_got_incoming
        self.diefunc = diefunc
        self.successfunc = sfunc
        self.finish = False
        self.running_df = None
        self.current_started = None
        self.fail_wait = None
        self.last_time = bttime()
        self.previous_down = 0
        self.previous_up = 0
        self.tracker_num_peers = None
        self.tracker_num_seeds = None

    def _makeurl(self, peerid, port):
        return ('%s?info_hash=%s&peer_id=%s&port=%s&key=%s' %
                (self.baseurl, quote(self.announce_infohash), quote(peerid), str(port),
                 b2a_hex(''.join([chr(random.randrange(256)) for i in xrange(4)]))))
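    # produces e.g. http://tracker.example.com/announce?info_hash=...&peer_id=...
    # &port=6881&key=c0ffee12 (host hypothetical); 'key' is a random 4-byte
    # hex token that lets a tracker recognize this peer across IP changes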

    def change_port(self, peerid, port):
        assert thread.get_ident() == self.rawserver.ident

        self.wanted_peerid = peerid
        self.port = port
        self.last = None
        self.trackerid = None
        self._check()

    def begin(self):
        if self.sched:
            self.sched(10, self.begin)
            self._check()

    def announce_list_success(self):
        tmp = self.announce_list[self.tier].pop(self.announce_i)
        self.announce_list[self.tier].insert(0, tmp)
        self.tier = 0
        self.announce_i = 0

    def announce_list_fail(self):
        """returns True if the announce-list was restarted"""
        self.announce_i += 1
        if self.announce_i == len(self.announce_list[self.tier]):
            self.announce_i = 0
            self.tier += 1
            if self.tier == len(self.announce_list):
                self.tier = 0
                return True
        return False

    def announce_list_next(self):
        return self.announce_list[self.tier][self.announce_i]
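    # Multitracker (BEP 12) traversal: with announce_list = [[A, B], [C]],
    # failures walk A, B, then C; announce_list_fail() returns True once all
    # tiers are exhausted, and announce_list_success() promotes the tracker
    # that responded to the front of its tier so it is tried first next time.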

    def announce_finish(self):
        if self.dead:
            return
        self.finish = True
        self._check()

    def announce_stop(self):
        if self.dead:
            return
        self._announce('stopped')

    def _check(self):
        assert thread.get_ident() == self.rawserver.ident
        assert not self.dead
        #self.errorfunc(logging.INFO, 'check: ' + str(self.current_started))
        if self.current_started is not None:
            if self.current_started <= bttime() - 58:
                self.errorfunc(logging.WARNING,
                               _("Tracker announce still not complete "
                                 "%d seconds after starting it") %
                               int(bttime() - self.current_started))
            return
        if self.peerid is None:
            self.peerid = self.wanted_peerid
            self.url = self._makeurl(self.peerid, self.port)
            self._announce('started')
            return
        if self.peerid != self.wanted_peerid:
            # _announce will clean up these
            up = self.up
            down = self.down
            self._announce('stopped')
            self.peerid = None
            self.previous_up = up()
            self.previous_down = down()
            return
        if self.finish:
            self.finish = False
            self._announce('completed')
            return
        if self.fail_wait is not None:
            if self.last_time + self.fail_wait <= bttime():
                self._announce()
            return
        if self.last_time > bttime() - self.config['rerequest_interval']:
            return
        if self.ever_got_incoming():
            getmore = self.howmany() <= self.config['min_peers'] / 3
        else:
            getmore = self.howmany() < self.config['min_peers']
        if getmore or bttime() - self.last_time > self.announce_interval:
            self._announce()

    def get_next_announce_time_est(self):
        # I'm sure this is wrong, but _check is confusing
        return bttime() - (self.last_time + self.announce_interval)

    def _announce(self, event=None):
        assert not self.dead
        assert thread.get_ident() == self.rawserver.ident
        self.current_started = bttime()
        self.errorfunc(logging.INFO, 'announce: ' + str(self.current_started))
        s = ('%s&uploaded=%s&downloaded=%s&left=%s' %
             (self.url, str(self.up()*self.config.get('lie',1) - self.previous_up),
              str(self.down() - self.previous_down), str(self.amount_left())))
        if self.last is not None:
            s += '&last=' + quote(str(self.last))
        if self.trackerid is not None:
            s += '&trackerid=' + quote(str(self.trackerid))
        if self.howmany() >= self.config['max_initiate']:
            s += '&numwant=0'
        else:
            s += '&compact=1'
        if event is not None:
            s += '&event=' + event

        def _start_announce(*a):
            self.running_df = ThreadedDeferred(_wrap_task(self.externalsched),
                                               self._rerequest, s, self.peerid,
                                               daemon=True)
            def _rerequest_finish(x):
                self.running_df = None
            def _rerequest_error(e):
                self.errorfunc(logging.ERROR, _("Rerequest failed!"),
                               exception=True, exc_info=e)
            self.running_df.addCallbacks(_rerequest_finish, _rerequest_error)
            if event == 'stopped':
                # if self._rerequest needs any state, pass it through args
                self.cleanup()

        if not event:
            assert self.running_df is None, "Previous rerequest event is still running!"
        if self.running_df:
            self.running_df.addCallback(_start_announce)
        else:
            _start_announce()
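        # if a previous announce is still in flight, chain the new request to
        # run after it finishes; otherwise fire it immediately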

    # Must destroy all references that could cause reference circles
    def cleanup(self):
        assert thread.get_ident() == self.rawserver.ident
        self.dead = True
        self.sched = None
        self.howmany = None
        self.connect = None
        self.externalsched = lambda *args: None
        self.amount_left = None
        self.up = None
        self.down = None
        # don't zero this one, we need it on shutdown w/ error
        #self.errorfunc = None
        self.upratefunc = None
        self.downratefunc = None
        self.ever_got_incoming = None
        self.diefunc = None
        self.successfunc = None

    def _rerequest(self, url, peerid):
        if self.config['ip']:
            try:
                url += '&ip=' + socket.gethostbyname(self.config['ip'])
            except:
                self.errorfunc(logging.WARNING,
                               _("Problem resolving config ip (%s), gethostbyname failed") % self.config['ip'],
                               exc_info=sys.exc_info())
        request = Request(url)
        request.add_header('User-Agent', 'BitTorrent/' + version)
        if self.config['tracker_proxy']:
            request.set_proxy(self.config['tracker_proxy'], 'http')
        try:
            h = urlopen(request)
            data = h.read()
            h.close()
        # urllib2 can raise various crap that doesn't have a common base
        # exception class especially when proxies are used, at least
        # ValueError and stuff from httplib
        except Exception, e:
            try:
                s = unicode(e.args[0])
            except:
                s = unicode(e)
            r = _("Problem connecting to tracker - %s: %s") % (e.__class__, s)
            def f():
                self._postrequest(errormsg=r, exc=e, peerid=peerid)
        else:
            def f():
                self._postrequest(data, peerid=peerid)
        # (the snippet is truncated here; the original presumably hands f
        # back to rawserver's thread, e.g. via self.externalsched(0, f))
Example #8
class Storage(object):

    def __init__(self, config, filepool, save_path, files, add_task,
                 external_add_task, doneflag):
        self.filepool = filepool
        self.config = config
        self.doneflag = doneflag
        self.add_task = add_task
        self.external_add_task = external_add_task
        self.initialize(save_path, files)

    def initialize(self, save_path, files):
        # a list of bytes ranges and filenames for window-based IO
        self.ranges = []
        # a dict of filename-to-ranges for piece priorities and filename lookup
        self.range_by_name = {}
        # a sparse set for smart allocation detection
        self.allocated_regions = SparseSet()

        # dict of filename-to-length on disk (for % complete in the file view)
        self.undownloaded = {}
        self.save_path = save_path

        # Rather implement this as an ugly hack here than change all the
        # individual calls. Affects all torrent instances using this module.
        if self.config['bad_libc_workaround']:
            bad_libc_workaround()

        self.initialized = False
        self.startup_df = ThreadedDeferred(_wrap_task(self.external_add_task),
                                           self._build_file_structs,
                                           self.filepool, files)
        return self.startup_df

    def _build_file_structs(self, filepool, files):
        total = 0
        for filename, length in files:
            # we're shutting down, abort.
            if self.doneflag.isSet():
                return False

            self.undownloaded[filename] = length
            if length > 0:
                self.ranges.append((total, total + length, filename))

            self.range_by_name[filename] = (total, total + length)

            if os.path.exists(filename):
                if not os.path.isfile(filename):
                    raise BTFailure(_("File %s already exists, but is not a "
                                      "regular file") % filename)
                l = os.path.getsize(filename)
                if l > length:
                    # This is the truncation Bram was talking about that no one
                    # else thinks is a good idea.
                    #h = file(filename, 'rb+')
                    #make_file_sparse(filename, h, length)
                    #h.truncate(length)
                    #h.close()
                    l = length

                a = get_allocated_regions(filename, begin=0, length=l)
                if a is not None:
                    a.offset(total)
                else:
                    a = SparseSet()
                    if l > 0:
                        a.add(total, total + l)
                self.allocated_regions += a
            total += length
        self.total_length = total
        self.initialized = True
        return True

    def get_byte_range_for_filename(self, filename):
        if filename not in self.range_by_name:
            filename = os.path.normpath(filename)
            filename = os.path.join(self.save_path, filename)
        return self.range_by_name[filename]

    def was_preallocated(self, pos, length):
        return self.allocated_regions.is_range_in(pos, pos+length)

    def get_total_length(self):
        return self.total_length

    def _intervals(self, pos, amount):
        r = []
        stop = pos + amount
        p = max(bisect_right(self.ranges, (pos, 2 ** 500)) - 1, 0)
        for begin, end, filename in self.ranges[p:]:
            if begin >= stop:
                break
            r.append((filename, max(pos, begin) - begin, min(end, stop) - begin))
        return r
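    # worked example: with ranges [(0, 100, 'a'), (100, 250, 'b')],
    # _intervals(50, 100) -> [('a', 50, 100), ('b', 0, 50)], i.e. the
    # (filename, begin-within-file, end-within-file) slices covering
    # bytes 50..150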

    def _file_op(self, filename, pos, param, write):
        begin, end = self.get_byte_range_for_filename(filename)
        length = end - begin
        final = Deferred()
        hdf = self.filepool.acquire_handle(filename, for_write=write, length=length)
        def handle_error(f=None):
            final.callback(0)
        # error acquiring handle
        if hdf is None:
            handle_error()
            return final
        def op(h):
            h.seek(pos)
            if write:
                odf = h.write(param)
            else:
                odf = h.read(param)
            def complete(r):
                self.filepool.release_handle(filename, h)
                final.callback(r)
            odf.addCallback(complete)
            odf.addErrback(final.errback)
        hdf.addCallback(op)
        hdf.addErrback(handle_error)
        return final

    def _batch_read(self, pos, amount):
        dfs = []
        r = []

        # queue all the reads
        for filename, pos, end in self._intervals(pos, amount):
            df = self._file_op(filename, pos, end - pos, write=False)
            dfs.append(df)

        # yield on all the reads in order - they complete in any order
        for df in dfs:
            yield df
            r.append(df.getResult())

        r = ''.join(r)

        if len(r) != amount:
            raise BTFailure(_("Short read (%d of %d) - something truncated files?") %
                            (len(r), amount))

        yield r

    def read(self, pos, amount):
        df = launch_coroutine(_wrap_task(self.add_task),
                              self._batch_read, pos, amount)
        return df
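    # usage sketch: read() returns a Deferred that fires with the bytes once
    # all per-file reads complete, e.g. (check_hash is hypothetical):
    #   df = storage.read(piece_index * piece_length, piece_length)
    #   df.addCallback(check_hash)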

    def _batch_write(self, pos, s):
        dfs = []

        total = 0
        amount = len(s)

        # queue all the writes
        for filename, begin, end in self._intervals(pos, amount):
            length = end - begin
            assert length > 0, '%s %s' % (pos, amount)
            d = buffer(s, total, length)
            total += length
            df = self._file_op(filename, begin, d, write=True)
            dfs.append(df)
        assert total == amount, '%s and %s' % (total, amount)

        written = 0            
        # yield on all the writes - they complete in any order
        for df in dfs:
            yield df
            written += df.getResult()            
        assert total == written, '%s and %s' % (total, written)
        
        yield total

    def write(self, pos, s):
        df = launch_coroutine(_wrap_task(self.add_task),
                              self._batch_write, pos, s)
        return df

    def close(self):
        if not self.initialized:
            end = Deferred()
            def post_init(r):
                df = self.filepool.close_files(self.range_by_name)
                df.addCallback(end.callback)
            self.startup_df.addCallback(post_init)
            return end
        df = self.filepool.close_files(self.range_by_name)
        return df

    def downloaded(self, pos, length):
        for filename, begin, end in self._intervals(pos, length):
            self.undownloaded[filename] -= end - begin