Example #1
import gettext
import os
import sys

# ThreadedDeferred and get_language come from BitTorrent's internal modules.

def smart_gettext_and_install(domain, localedir, languages,
                              fallback=False, unicode=False):
    try:
        t = gettext.translation(domain, localedir, languages=languages,
                                fallback=fallback)
    except Exception:
        # we failed to find the language locally; fetch it from the web
        # asynchronously
        running_count = 0
        running_deferred = {}

        # Get some reasonable defaults for arguments that were not supplied
        if languages is None:
            languages = []
            for envar in ('LANGUAGE', 'LC_ALL', 'LC_MESSAGES', 'LANG'):
                val = os.environ.get(envar)
                if val:
                    languages = val.split(':')
                    break
            if 'C' not in languages:
                languages.append('C')

        # now normalize and expand the languages
        nelangs = []
        for lang in languages:
            for nelang in gettext._expand_lang(lang):
                if nelang not in nelangs:
                    nelangs.append(nelang)
        languages = nelangs

        for lang in languages:
            d = ThreadedDeferred(None, get_language, lang)
            # bind the current deferred via a default argument so each
            # closure sees its own value of d
            def translate_and_install(r, td=d):
                running_deferred.pop(td)
                # only let the last one try to install
                if len(running_deferred) == 0:
                    t = gettext.translation(domain, localedir,
                                            languages=languages,
                                            fallback=True)
                    t.install(unicode)
            def failed(e, tlang=lang, td=d):
                if td in running_deferred:
                    running_deferred.pop(td)
                # don't raise an error, just continue untranslated
                sys.stderr.write('Could not find translation for language "%s"\n' %
                                 tlang)
                #traceback.print_exc(e)
            d.addCallback(translate_and_install)
            d.addErrback(failed)
            # accumulate all the deferreds first
            running_deferred[d] = 1

        # start them all, the last one finished will install the language
        for d in running_deferred:
            d.start()

        return

    # the translation was found locally on the first try; install it
    t.install(unicode)
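
For reference, the fallback behavior this function builds on can be exercised by itself. A minimal sketch, assuming a placeholder "messages" domain and "locale/" directory (neither is from the original):

import gettext

# Ask gettext for a translation; with fallback=True a missing .mo file
# yields a NullTranslations object whose _() is the identity function.
t = gettext.translation('messages', 'locale/',
                        languages=['de', 'en'], fallback=True)
t.install()          # binds _() into builtins
print(_('hello'))    # translated string if found, 'hello' otherwise
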
Example #2
class Rerequester(object):

    STATES = ['started', 'completed', 'stopped']

    def __init__(self, url, announce_list, config, sched, externalsched, rawserver,
                 howmany, connect,
                 amount_left, up, down, port, myid, infohash, errorfunc, doneflag,
                 upratefunc, downratefunc, ever_got_incoming, diefunc, sfunc):
        """
         @param url:       tracker's announce URL.
         @param announce_list: ?
         @param config:    preferences obj storing BitTorrent-wide
                           configuration.
         @param sched:     used to schedule events from inside rawserver's
                           thread.  (Oh boy.  externalsched and sched.
                           We expect Rerequester to
                           recognize the difference between rawserver's
                           thread and yet we go through the trouble of
                           abstracting away rawserver using a callback...
                           So what was the point?  --Dave)
         @param externalsched: see sched.  This one is called from outside
                           rawserver's thread.
         @param howmany:   callback to get the number of complete connections.
         @param connect:   callback to establish a connection to a peer
                           obtained from the tracker.
         @param amount_left: callback to obtain the number of bytes left to
                           download for this torrent.
         @param up:        callback to obtain the total number of bytes sent
                           for this torrent.
         @param down:      callback to obtain the total number of bytes
                           received for this torrent.
         @param port:      port to report to the tracker.  If the local peer
                           is behind a NAT then this is the local peer's port
                           on the NAT facing the outside world.
         @param myid:      local peer's unique (self-generated) id.
         @param infohash:  hash of the info section of the metainfo file.
         @param errorfunc: callback to report errors.
         @param doneflag:  when set all threads cleanup and then terminate.
         @param upratefunc: callback to obtain moving average rate on the
                           uplink for this torrent.
         @param downratefunc: callback to obtain moving average rate on the
                           downlink for this torrent.
         @param ever_got_incoming: callback to determine if this torrent
                           has ever received any messages from other peers.
         @param diefunc:   callback that is called when announce fails to find
                           any peers.
         @param sfunc:     success function?  With regard to what?  --Dave

        """
        assert isinstance(url, str)
        assert isinstance(config, Preferences)
        assert type(port) in (int, long) and 0 < port < 65536, "Port: %s" % repr(port)
        assert callable(connect)
        assert callable(externalsched)
        assert callable(amount_left)
        assert callable(errorfunc)
        assert isinstance(doneflag, threading._Event)
        assert callable(upratefunc)
        assert callable(downratefunc)
        assert callable(ever_got_incoming)
        assert callable(diefunc)
        assert callable(sfunc)

        self.rawserver = rawserver
        self.dead = False
        self.baseurl = url
        self.announce_list = None
        if announce_list:
            # shuffle a new copy of the whole set only once
            shuffled_announce_list = []
            for tier in announce_list:
                if not tier:
                    # strip blank lists
                    continue
                shuffled_tier = list(tier)
                random.shuffle(shuffled_tier)
                shuffled_announce_list.append(shuffled_tier)
            if shuffled_announce_list:
                self.announce_list = shuffled_announce_list
                self.tier = 0
                self.announce_i = 0
                self.baseurl = self.announce_list_next()
        self.announce_infohash = infohash
        self.peerid = None
        self.wanted_peerid = myid
        self.port = port
        self.url = None
        self.config = config
        self.last = None
        self.trackerid = None
        self.announce_interval = 30 * 60
        self.sched = sched
        self.howmany = howmany
        self.connect = connect
        self.externalsched = externalsched
        self.amount_left = amount_left
        self.up = up
        self.down = down
        self.errorfunc = errorfunc
        self.doneflag = doneflag
        self.upratefunc = upratefunc
        self.downratefunc = downratefunc
        self.ever_got_incoming = ever_got_incoming
        self.diefunc = diefunc
        self.successfunc = sfunc
        self.finish = False
        self.running_df = None
        self.current_started = None
        self.fail_wait = None
        self.last_time = bttime()
        self.previous_down = 0
        self.previous_up = 0
        self.tracker_num_peers = None
        self.tracker_num_seeds = None

    def _makeurl(self, peerid, port):
        return ('%s?info_hash=%s&peer_id=%s&port=%s&key=%s' %
                (self.baseurl, quote(self.announce_infohash), quote(peerid), str(port),
                 b2a_hex(''.join([chr(random.randrange(256)) for i in xrange(4)]))))

    def change_port(self, peerid, port):
        assert thread.get_ident() == self.rawserver.ident

        self.wanted_peerid = peerid
        self.port = port
        self.last = None
        self.trackerid = None
        self._check()

    def begin(self):
        if self.sched:
            self.sched(10, self.begin)
            self._check()

    def announce_list_success(self):
        tmp = self.announce_list[self.tier].pop(self.announce_i)
        self.announce_list[self.tier].insert(0, tmp)
        self.tier = 0
        self.announce_i = 0

    def announce_list_fail(self):
        """returns True if the announce-list was restarted"""
        self.announce_i += 1
        if self.announce_i == len(self.announce_list[self.tier]):
            self.announce_i = 0
            self.tier += 1
            if self.tier == len(self.announce_list):
                self.tier = 0
                return True
        return False

    def announce_list_next(self):
        return self.announce_list[self.tier][self.announce_i]

    def announce_finish(self):
        if self.dead:
            return
        self.finish = True
        self._check()

    def announce_stop(self):
        if self.dead:
            return
        self._announce('stopped')

    def _check(self):
        assert thread.get_ident() == self.rawserver.ident
        assert not self.dead
        #self.errorfunc(logging.INFO, 'check: ' + str(self.current_started))
        if self.current_started is not None:
            if self.current_started <= bttime() - 58:
                self.errorfunc(logging.WARNING,
                               _("Tracker announce still not complete "
                                 "%d seconds after starting it") %
                               int(bttime() - self.current_started))
            return
        if self.peerid is None:
            self.peerid = self.wanted_peerid
            self.url = self._makeurl(self.peerid, self.port)
            self._announce('started')
            return
        if self.peerid != self.wanted_peerid:
            # _announce will clean up these
            up = self.up
            down = self.down
            self._announce('stopped')
            self.peerid = None
            self.previous_up = up()
            self.previous_down = down()
            return
        if self.finish:
            self.finish = False
            self._announce('completed')
            return
        if self.fail_wait is not None:
            if self.last_time + self.fail_wait <= bttime():
                self._announce()
            return
        if self.last_time > bttime() - self.config['rerequest_interval']:
            return
        if self.ever_got_incoming():
            getmore = self.howmany() <= self.config['min_peers'] / 3
        else:
            getmore = self.howmany() < self.config['min_peers']
        if getmore or bttime() - self.last_time > self.announce_interval:
            self._announce()

    def get_next_announce_time_est(self):
        # I'm sure this is wrong, but _check is confusing
        return bttime() - (self.last_time + self.announce_interval)

    def _announce(self, event=None):
        assert not self.dead
        assert thread.get_ident() == self.rawserver.ident
        self.current_started = bttime()
        self.errorfunc(logging.INFO, 'announce: ' + str(self.current_started))
        s = ('%s&uploaded=%s&downloaded=%s&left=%s' %
             (self.url, str(self.up() * self.config.get('lie', 1) - self.previous_up),
              str(self.down() - self.previous_down), str(self.amount_left())))
        if self.last is not None:
            s += '&last=' + quote(str(self.last))
        if self.trackerid is not None:
            s += '&trackerid=' + quote(str(self.trackerid))
        if self.howmany() >= self.config['max_initiate']:
            s += '&numwant=0'
        else:
            s += '&compact=1'
        if event is not None:
            s += '&event=' + event

        def _start_announce(*a):
            self.running_df = ThreadedDeferred(_wrap_task(self.externalsched),
                                               self._rerequest, s, self.peerid,
                                               daemon=True)
            def _rerequest_finish(x):
                self.running_df = None
            def _rerequest_error(e):
                self.errorfunc(logging.ERROR, _("Rerequest failed!"),
                               exception=True, exc_info=e)
            self.running_df.addCallbacks(_rerequest_finish, _rerequest_error)
            if event == 'stopped':
                # if self._rerequest needs any state, pass it through args
                self.cleanup()

        if not event:
            assert self.running_df is None, "Previous rerequest event is still running!"
        if self.running_df:
            self.running_df.addCallback(_start_announce)
        else:
            _start_announce()

    # Must destroy all references that could create reference cycles
    def cleanup(self):
        assert thread.get_ident() == self.rawserver.ident
        self.dead = True
        self.sched = None
        self.howmany = None
        self.connect = None
        self.externalsched = lambda *args: None
        self.amount_left = None
        self.up = None
        self.down = None
        # don't zero this one, we need it on shutdown w/ error
        #self.errorfunc = None
        self.upratefunc = None
        self.downratefunc = None
        self.ever_got_incoming = None
        self.diefunc = None
        self.successfunc = None

    def _rerequest(self, url, peerid):
        if self.config['ip']:
            try:
                url += '&ip=' + socket.gethostbyname(self.config['ip'])
            except:
                self.errorfunc(logging.WARNING,
                               _("Problem resolving config ip (%s), gethostbyname failed") % self.config['ip'],
                               exc_info=sys.exc_info())
        request = Request(url)
        request.add_header('User-Agent', 'BitTorrent/' + version)
        if self.config['tracker_proxy']:
            request.set_proxy(self.config['tracker_proxy'], 'http')
        try:
            h = urlopen(request)
            data = h.read()
            h.close()
        # urllib2 can raise various exceptions that don't share a common
        # base class, especially when proxies are used (at least ValueError
        # and assorted errors from httplib)
        except Exception, e:
            try:
                s = unicode(e.args[0])
            except:
                s = unicode(e)
            r = _("Problem connecting to tracker - %s: %s") % (e.__class__, s)
            def f():
                self._postrequest(errormsg=r, exc=e, peerid=peerid)
        else:
            # success path: hand the tracker response to _postrequest
            # (the original listing is truncated here; this body is inferred
            # from the error path above)
            def f():
                self._postrequest(data=data, peerid=peerid)
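
The announce_list_success/announce_list_fail/announce_list_next trio above implements the multitracker rotation from BEP 12: walk each tier in order, promote a responding tracker to the front of its tier, and report when the whole list has been exhausted. A standalone sketch of the same logic, with illustrative names not taken from the original:

import random

class TierWalker(object):
    """Illustrative sketch of Rerequester's announce_list_* logic (BEP 12)."""

    def __init__(self, announce_list):
        # shuffle a copy of each non-empty tier once, as __init__ above does
        self.tiers = [random.sample(tier, len(tier))
                      for tier in announce_list if tier]
        self.tier = 0
        self.i = 0

    def current(self):
        return self.tiers[self.tier][self.i]

    def success(self):
        # promote the responding tracker to the front of its tier and
        # restart from the first tier on the next announce
        self.tiers[self.tier].insert(0, self.tiers[self.tier].pop(self.i))
        self.tier = 0
        self.i = 0

    def fail(self):
        """Advance to the next tracker; returns True if the list wrapped."""
        self.i += 1
        if self.i == len(self.tiers[self.tier]):
            self.i = 0
            self.tier += 1
            if self.tier == len(self.tiers):
                self.tier = 0
                return True
        return False

walker = TierWalker([['http://a/announce', 'http://b/announce'],
                     ['http://c/announce']])
print(walker.current())   # some tracker from the first (shuffled) tier
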
Example #3
class Storage(object):

    def __init__(self, config, filepool, save_path, files, add_task,
                 external_add_task, doneflag):
        self.filepool = filepool
        self.config = config
        self.doneflag = doneflag
        self.add_task = add_task
        self.external_add_task = external_add_task
        self.initialize(save_path, files)

    def initialize(self, save_path, files):
        # a list of bytes ranges and filenames for window-based IO
        self.ranges = []
        # a dict of filename-to-ranges for piece priorities and filename lookup
        self.range_by_name = {}
        # a sparse set for smart allocation detection
        self.allocated_regions = SparseSet()

        # dict of filename-to-length on disk (for % complete in the file view)
        self.undownloaded = {}
        self.save_path = save_path

        # Rather than change all the individual calls, implement this as an
        # ugly hack here. Affects all torrent instances using this module.
        if self.config['bad_libc_workaround']:
            bad_libc_workaround()

        self.initialized = False
        self.startup_df = ThreadedDeferred(_wrap_task(self.external_add_task),
                                           self._build_file_structs,
                                           self.filepool, files)
        return self.startup_df

    def _build_file_structs(self, filepool, files):
        total = 0
        for filename, length in files:
            # we're shutting down, abort.
            if self.doneflag.isSet():
                return False

            self.undownloaded[filename] = length
            if length > 0:
                self.ranges.append((total, total + length, filename))

            self.range_by_name[filename] = (total, total + length)

            if os.path.exists(filename):
                if not os.path.isfile(filename):
                    raise BTFailure(_("File %s already exists, but is not a "
                                      "regular file") % filename)
                l = os.path.getsize(filename)
                if l > length:
                    # This is the truncation Bram was talking about that no one
                    # else thinks is a good idea.
                    #h = file(filename, 'rb+')
                    #make_file_sparse(filename, h, length)
                    #h.truncate(length)
                    #h.close()
                    l = length

                a = get_allocated_regions(filename, begin=0, length=l)
                if a is not None:
                    a.offset(total)
                else:
                    a = SparseSet()
                    if l > 0:
                        a.add(total, total + l)
                self.allocated_regions += a
            total += length
        self.total_length = total
        self.initialized = True
        return True

    def get_byte_range_for_filename(self, filename):
        if filename not in self.range_by_name:
            filename = os.path.normpath(filename)
            filename = os.path.join(self.save_path, filename)
        return self.range_by_name[filename]

    def was_preallocated(self, pos, length):
        return self.allocated_regions.is_range_in(pos, pos+length)

    def get_total_length(self):
        return self.total_length

    def _intervals(self, pos, amount):
        r = []
        stop = pos + amount
        p = max(bisect_right(self.ranges, (pos, 2 ** 500)) - 1, 0)
        for begin, end, filename in self.ranges[p:]:
            if begin >= stop:
                break
            r.append((filename, max(pos, begin) - begin, min(end, stop) - begin))
        return r

    def _file_op(self, filename, pos, param, write):
        begin, end = self.get_byte_range_for_filename(filename)
        length = end - begin
        final = Deferred()
        hdf = self.filepool.acquire_handle(filename, for_write=write, length=length)
        def handle_error(f=None):
            final.callback(0)
        # error acquiring handle
        if hdf is None:
            handle_error()
            return final
        def op(h):
            h.seek(pos)
            if write:
                odf = h.write(param)
            else:
                odf = h.read(param)
            def complete(r):
                self.filepool.release_handle(filename, h)
                final.callback(r)
            odf.addCallback(complete)
            odf.addErrback(final.errback)
        hdf.addCallback(op)
        hdf.addErrback(handle_error)
        return final

    def _batch_read(self, pos, amount):
        dfs = []
        r = []

        # queue all the reads
        for filename, begin, end in self._intervals(pos, amount):
            df = self._file_op(filename, begin, end - begin, write=False)
            dfs.append(df)

        # yield on all the reads in order - they complete in any order
        for df in dfs:
            yield df
            r.append(df.getResult())

        r = ''.join(r)

        if len(r) != amount:
            raise BTFailure(_("Short read (%d of %d) - something truncated files?") %
                            (len(r), amount))

        yield r

    def read(self, pos, amount):
        df = launch_coroutine(_wrap_task(self.add_task),
                              self._batch_read, pos, amount)
        return df

    def _batch_write(self, pos, s):
        dfs = []

        total = 0
        amount = len(s)

        # queue all the writes
        for filename, begin, end in self._intervals(pos, amount):
            length = end - begin
            assert length > 0, '%s %s' % (pos, amount)
            d = buffer(s, total, length)
            total += length
            df = self._file_op(filename, begin, d, write=True)
            dfs.append(df)
        assert total == amount, '%s and %s' % (total, amount)

        written = 0            
        # yield on all the writes - they complete in any order
        for df in dfs:
            yield df
            written += df.getResult()            
        assert total == written, '%s and %s' % (total, written)
        
        yield total

    def write(self, pos, s):
        df = launch_coroutine(_wrap_task(self.add_task),
                              self._batch_write, pos, s)
        return df

    def close(self):
        if not self.initialized:
            end = Deferred()
            def post_init(r):
                df = self.filepool.close_files(self.range_by_name)
                df.addCallback(end.callback)
            self.startup_df.addCallback(post_init)
            return end
        df = self.filepool.close_files(self.range_by_name)
        return df

    def downloaded(self, pos, length):
        for filename, begin, end in self._intervals(pos, length):
            self.undownloaded[filename] -= end - begin
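
Storage splits every read and write across file boundaries with _intervals, which maps a byte range of the whole torrent onto per-file slices. The same computation can be demonstrated standalone; the file layout below is invented for illustration:

from bisect import bisect_right

# (start, end, filename) tuples sorted by start, the same shape as
# self.ranges above.  The layout here is made up for illustration.
ranges = [(0, 100, 'a.bin'), (100, 250, 'b.bin'), (250, 400, 'c.bin')]

def intervals(ranges, pos, amount):
    """Map the torrent-wide byte range [pos, pos + amount) onto
    (filename, begin_in_file, end_in_file) slices."""
    r = []
    stop = pos + amount
    # bisect to the first file whose range could contain pos
    p = max(bisect_right(ranges, (pos, 2 ** 500)) - 1, 0)
    for begin, end, filename in ranges[p:]:
        if begin >= stop:
            break
        r.append((filename, max(pos, begin) - begin, min(end, stop) - begin))
    return r

print(intervals(ranges, 90, 30))
# -> [('a.bin', 90, 100), ('b.bin', 0, 20)]
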