Example #1
    # disabled block: pull in entities related to the ones already loaded
    '''
    if ntts:
        rel_ids = list(set(eid for ntt in ntts.itervalues() if "r" in ntt for eids in ntt["r"].itervalues() for eid in eids))
        ntts.update({int(ntt["_id"]): ntt for ntt in entitiesdb.get_entities(rel_ids, None, (False, [u"episode"]))})
    '''

    result = {"time": max(stats["t"].itervalues()) if stats["t"] else 0, "total_found": stats["cs"]}

    # remove the download id from the results list
    if download and "file_data" in download and download["file_data"]:
        download_id = mid2hex(download["file_data"]["file"]["_id"])
        ids = list(aid for aid in ids if aid[0]!=download_id)
    else:
        download_id = None

    profiler.checkpoint(profiler_data, opening=["mongo"])
    files_dict = {str(f["_id"]): secure_fill_data(f, text=query, ntts=ntts) for f in get_files(ids, s)}
    profiler.checkpoint(profiler_data, closing=["mongo"])

    # add the download to the results
    if download_id:
        files_dict[download_id] = download["file_data"]
        ids.insert(0,(download_id, -1, -1, -1))

    # sort results and attach search information
    files = []
    for search_result in ids:
        fid = search_result[0]
        if fid in files_dict and files_dict[fid]:
            afile = files_dict[fid]
            afile["search"] = search_result
            files.append(afile)
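After this loop, each element of files is the filled file document with the raw search tuple attached under the "search" key. A tiny sketch of the shapes involved, with invented values (the tuple layout beyond the leading file id is an assumption):

# Invented example of the structures the loop above works with.
ids = [("a1b2c3", 2, 0.87, 0)]                      # leading element is the file id (hex string)
files_dict = {"a1b2c3": {"fn": "example.torrent"}}  # filled by secure_fill_data above
# after the loop: files == [{"fn": "example.torrent", "search": ("a1b2c3", 2, 0.87, 0)}]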
Example #2
File: files.py Project: Weej1/www
def prepare_data(f, text=None, ntts=[], details=False, current_category=None):
    try:
        return torrents_data(secure_fill_data(f,text,ntts), details, current_category)
    except BaseException as e:
        logging.error("Error retrieving torrent data.")
        return None
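A hedged usage sketch for prepare_data as defined above: callers would typically map it over raw file documents and drop the None results produced when secure_fill_data or torrents_data raises. The raw_files list, query text and category below are invented placeholders, not values from the project.

# Hypothetical call site for prepare_data; raw_files and the query text are placeholders.
raw_files = [{"_id": "a1", "fn": "example.torrent"}, {"_id": "b2", "fn": "broken.torrent"}]
prepared = [prepare_data(f, text="example", details=False, current_category=None) for f in raw_files]
prepared = [p for p in prepared if p]  # prepare_data returns None on any error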
Example #3
File: filters.py Project: prodigeni/www
def download(file_id, file_name=""):
    if request.referrer:
        try:
            posibles_queries = referrer_parser.match(request.referrer)
            if posibles_queries:
                query = posibles_queries.group(1) or posibles_queries.group(2) or ""
                if query:
                    get_query_info(u(urllib.unquote_plus(query).decode("utf-8")))
        except:
            pass

    error = None
    file_data = None
    if file_id is not None:  # if an id was given, check that it is valid
        try:  # try to convert the id from the url into an internal one
            file_id = url2mid(file_id)
        except TypeError as e:
            try:  # check whether it is a legacy ID
                possible_file_id = filesdb.get_newid(file_id)
                if possible_file_id is None:
                    logging.warn("Unresolved legacy numeric identifiers: %s." % e, extra={"fileid": file_id})
                    error = 404
                else:
                    logging.warn("Legacy numeric identifiers found: %s." % e, extra={"fileid": file_id})
                    return {
                        "html": redirect(
                            url_for(".download", file_id=mid2url(possible_file_id), file_name=file_name), 301
                        ),
                        "error": 301,
                    }

            except BaseException as e:
                logging.exception(e)
                error = 503

            file_id = None

        if file_id:
            try:
                # ~ file_data=get_file_metadata(file_id, file_name.replace("-"," "))
                file_data = secure_fill_data(filesdb.get_file(file_id, bl=1), file_name.replace("-", " "))
            except DatabaseError:
                error = 503
            except FileNotExist:
                error = 404
            except (FileRemoved, FileFoofindRemoved, FileNoSources):
                error = 410
            except FileUnknownBlock:
                error = 404

            # no errors but no data either: the file exists but could not be retrieved
            if error is None and not file_data:
                error = 503

    if error:
        abort(error)

    # fill in torrent data
    file_data = torrents_data(file_data, True)
    if not file_data:
        abort(404)

    # do not allow access to files that should be blocked
    prepared_phrase = blacklists.prepare_phrase(file_data["view"]["nfn"])
    if prepared_phrase in blacklists["forbidden"] or (
        prepared_phrase in blacklists["misconduct"] and prepared_phrase in blacklists["underage"]
    ):
        g.blacklisted_content = True
        if not g.show_blacklisted_content:
            abort(410)

    query = download_search(file_data, file_name, "torrent")
    # ~ related = single_search(query, category=None, title=("Related torrents",3,None), zone="File / Related", last_items=[], limit=30, max_limit=15, ignore_ids=[mid2hex(file_id)], show_order=None)
    related = None

    # choose the page title
    title = file_data["view"]["fn"]

    # trim the title at the nearest separator
    if len(title) > 101:
        for pos in xrange(101, 30, -1):
            if title[pos] in SEPPER:
                title = title[:pos].strip()
                break
        else:
            title = title[:101]

    g.title = title

    page_description = ""
    if "description" in file_data["view"]["md"]:
        page_description = file_data["view"]["md"]["description"].replace("\n", " ")

    if not page_description:
        page_description = "Download %s torrents from %s" % (
            file_data["view"]["file_type"].capitalize() if file_data["view"]["file_type"] != "unknown" else "All",
            g.domain_capitalized,
        )

    if len(page_description) < 50:
        if page_description:
            page_description += ". "
        # ~ page_description += " ".join(text.capitalize()+"." for text in related[1]["files_text"])

    if len(page_description) > 180:
        last_stop = page_description[:180].rindex(".") if "." in page_description[:180] else 0
        if last_stop < 100:
            last_stop = page_description[:180].rindex(" ") if " " in page_description[:180] else 0
        if last_stop < 100:
            last_stop = 180
        page_description = page_description[:last_stop] + "."

    g.page_description = page_description

    is_canonical_filename = file_data["view"]["seo-fn"] == file_name

    # record the visit to the file
    if g.search_bot:
        searchd.log_bot_event(g.search_bot, True)
    else:
        save_visited([file_data])

    # ~ if related[0]:
    # ~ g.must_cache = 3600

    return render_template(
        "file.html",
        related_query=query,
        file_data=file_data,
        related_files=related,
        is_canonical_filename=is_canonical_filename,
        featured=None,
    )
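The title-shortening loop in this example walks backwards from position 101 looking for a separator character, and only falls back to a hard cut when none is found down to position 31. The same idea as a standalone helper; SEPPER is not defined in the snippet, so the separator set below is only an assumption:

# Standalone sketch of the title-trimming logic above.
# SEPPER is assumed to be a set of separator characters; this particular set is a guess.
SEPPER = set(u" .,-_[]()")

def shorten_title(title, limit=101, min_pos=30):
    # Cut title back to the last separator found between min_pos and limit.
    if len(title) <= limit:
        return title
    for pos in range(limit, min_pos, -1):
        if title[pos] in SEPPER:
            return title[:pos].strip()
    return title[:limit]

# shorten_title(u"x" * 200) has no separators, so it falls back to a hard cut at 101 characters.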