Exemplo n.º 1
0
    def subfunc(match):
        """Compile one embedded CoffeeScript <script> block to JavaScript.

        Called as a re.sub() callback; ``match`` groups are (attrs, script).
        Returns the replacement <script> tag (or bare JS when not is_html).
        Raises CSCompileError when the coffee compiler reports an error,
        with the error translated to an absolute line in the source file.
        """
        # TODO: readd indentation
        attrs, script = match.groups()
        if not script.strip():
            # Empty script body: nothing to compile, emit the tag as-is.
            return "<script type='text/javascript'%s></script>" % attrs

        proc = Popen(csargs, stdin=PIPE, stdout=PIPE, stderr=PIPE)
        stdout, stderr = proc.communicate(kaa.py3_b(script))
        if stderr:
            # Compile failed. Figure out what line caused the error.
            # Drop the compiler's "on line N" suffix from the first stderr
            # line; we report an absolute source line ourselves below.
            error = re.sub(r'\s+on line \d+', '', stderr.lstrip().splitlines()[0])
            linematch = re.search(r'on line (\d+)', stderr)
            if linematch:
                # This is the line relative to the coffescript portion
                linenum = int(linematch.group(1))
                # Grab the bad line.
                badline = script.splitlines()[linenum-1]
                # Now translate the number it into an absolute line from the
                # source file.  Count the number of newlines in the source up
                # to the match start.
                linenum += data.count('\n', 0, match.start())
                dump = 'File "%s", line %d\n    %s\n\n%s' % (src, linenum, badline.lstrip(), error)
            else:
                dump = 'Unable to determine line number.  Full coffee output follows:\n\n' + stderr
            raise CSCompileError('CoffeeScript ' + error, dump)

        if is_html:
            return "<script type='text/javascript'%s>\n%s</script>" % (attrs, kaa.py3_str(stdout))
        else:
            return kaa.py3_str(stdout)
Exemplo n.º 2
0
    def _get_episode(self, ep, search_result):
        """
        Initiate the retriever plugin for the given search result.

        On failure, False is returned, and it's up to the caller to retry with
        a different search result.
        """
        if not os.path.isdir(ep.season.path):
            # TODO: handle failure
            os.makedirs(ep.season.path)

        # Determine name of target file based on naming preferences.
        if config.naming.rename:
            ext = os.path.splitext(search_result.filename)[-1]
            target = ep.preferred_path + kaa.py3_b(ext.lower())
        else:
            target = os.path.join(ep.season.path, kaa.py3_b(search_result.filename))

        ep.search_result = search_result
        ep.filename = os.path.basename(target)

        msg = 'Starting retrieval of %s %s (%s)' % (ep.series.name, ep.code, search_result.searcher)
        log.info(msg)
        msg += '<br/><br/>Check progress of <a href="{{root}}/schedule/aired">active downloads</a>.'
        web.notify('Episode Download', msg)

        try:
            # retrieve() ensures that only RetrieverError is raised
            ip = retrieve(search_result, target, ep)
            #log.debug('not actually retrieving %s %s', ep.series.name, ep.code)
            #ip = fake_download(search_result, target, ep)
            self._retrieve_inprogress[ep] = ip
            yield ip
        except RetrieverError, e:
            ep.filename = ep.search_result = None
            if os.path.exists(target):
                # TODO: handle permission problem
                log.debug('deleting failed attempt %s', target)
                os.unlink(target)
            log.error(e.args[0])
            yield False
Exemplo n.º 3
0
def cscompile_with_cache(src, cachedir, is_html=None):
    compiled = src + '.compiled'
    cached = os.path.join(cachedir, hashlib.md5(kaa.py3_b(src, fs=True)).hexdigest())

    if os.path.isfile(compiled) and os.path.getmtime(src) <= os.path.getmtime(compiled):
        #log.debug2('Using system compiled %s', compiled)
        return True, open(compiled, 'r').read()
    elif os.path.isfile(cached) and os.path.getmtime(src) <= os.path.getmtime(cached):
        #log.debug2('Using local cached %s', cached)
        return True, open(cached, 'r').read()
    else:
        try:
            data = cscompile(src, cached, is_html)
        except CSCompileError, (err, line):
            raise bottle.HTTPError(500, err, traceback=line)
        else:
Exemplo n.º 4
0
    def get(self, url, target, resume=True):
        """Start transferring url into target.

        :param url: the URL to fetch.
        :param target: either a filename to write to, or a file-like object
            with a write() method.
        :param resume: when true and target is the name of an existing file,
            resume the transfer from the file's current size (append mode).
        :returns: the InProgress tracking this transfer.
        :raises ValueError: if a transfer is already active, or target is
            neither a filename nor a file-like object.
        """
        if self._state == Curl.STATE_ACTIVE:
            raise ValueError("Curl is active")
        else:
            self._state = Curl.STATE_ACTIVE

        if not self._multi:
            self._reinit_curl()

        if isinstance(target, basestring):
            mode = "w"
            # Idiom fix: test truthiness instead of comparing == True, so any
            # truthy resume value behaves as documented.
            if resume and os.path.exists(target):
                self.resume_from = os.path.getsize(target)
                mode = "a"
            # open() is preferred over the deprecated file() constructor; it
            # returns the same py2 file type, so the isinstance check below
            # still takes the fast path.
            self._target = open(target, mode)
            self._target_needs_close = True
        elif hasattr(target, "write"):
            self._target = target
            self._target_needs_close = False
        else:
            raise ValueError("Invalid target: must be filename or file object")

        if isinstance(self._target, file):
            # Real file objects can be handed straight to pycurl.
            self._curl.setopt(pycurl.WRITEDATA, self._target)
        else:
            self._curl.setopt(pycurl.WRITEFUNCTION, self._target.write)

        if self._inprogress.finished:
            self._inprogress = self._make_inprogress()

        # Reset per-transfer progress bookkeeping.
        self._last_progress_check = -1, -1
        del self._speed_down_samples[:]
        del self._speed_up_samples[:]
        self._progress_check_timer.stop()
        self._curl.setopt(pycurl.URL, kaa.py3_b(url))
        self._perform()
        # state may become inactive here indirectly via _perform(), e.g.  if
        # DNS resolution fails, it can complete immediately.  So we need to
        # test it even though we just set it to ACTIVE above before starting the
        # progress timer.
        if self._state == Curl.STATE_ACTIVE and len(self.signals["progress"]):
            self._progress_check_timer.start(self._progress_interval)
            self._speed_sample_timer.start(1)
        return self._inprogress
Exemplo n.º 5
0
    def _get_api_zipfile(self, url):
        """Download a zip file from the provider API and yield a ZipFile.

        The payload is cached in a temp file named after the url's md5
        digest; with the STAY_LOCAL environment variable set, an existing
        cache file is used without touching the network.
        """
        # NOTE(review): getenv returns a string when the variable is set, so
        # any non-empty value (even '0') enables local mode — confirm intended.
        stay_local = os.getenv('STAY_LOCAL', 0)
        zippath = kaa.tempfile(hashlib.md5(kaa.py3_b(url, fs=True)).hexdigest() + '.zip')
        url = self._apiurl + url

        if stay_local and os.path.exists(zippath):
            status = 200
        else:
            # Try 3 times before giving up, unless it's a permanent error
            log.debug('fetching zip file %s', url)
            status, curl = yield download(url, zippath, retry=3, resume=False)

        if status != 200:
            # Failed download: don't leave a partial cache file behind.
            if os.path.exists(zippath):
                os.unlink(zippath)
            raise ProviderError('thetvdb gave status %d for %s' % (status, url))

        try:
            archive = zipfile.ZipFile(zippath)
        except zipfile.BadZipfile:
            os.unlink(zippath)
            raise ProviderError('invalid zip file from thetvdb at %s' % url)

        yield archive